diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index a4b3a0a120..54e24dbc23 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -1301,7 +1301,7 @@ private int alterMaterializedView(Hive db, AlterMaterializedViewDesc alterMVDesc
       throw new AssertionError("Unsupported alter materialized view type! : " + alterMVDesc.getOp());
     }

-    db.alterTable(mv,environmentContext);
+    db.alterTable(mv, environmentContext, false);

     return 0;
   }
@@ -1451,7 +1451,7 @@ private int alterTableAlterPart(Hive db, AlterTableAlterPartDesc alterPartitionD

     tbl.getTTable().setPartitionKeys(newPartitionKeys);

-    db.alterTable(tbl, null);
+    db.alterTable(tbl, null, false);

     work.getInputs().add(new ReadEntity(tbl));
     // We've already locked the table as the input, don't relock it as the output.
@@ -1477,7 +1477,7 @@ private int touch(Hive db, AlterTableSimpleDesc touchDesc)
     environmentContext.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE);

     if (touchDesc.getPartSpec() == null) {
-      db.alterTable(tbl, environmentContext);
+      db.alterTable(tbl, environmentContext, false);
       work.getInputs().add(new ReadEntity(tbl));
       addIfAbsentByName(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK));
     } else {
@@ -3747,7 +3747,9 @@ private int describeTable(Hive db, DescTableDesc descTbl) throws HiveException,
            List<String> partitions = new ArrayList<String>();
            partitions.add(part.getName());
            cols = Hive.getFieldsFromDeserializer(colPath, deserializer);
-            colStats = db.getPartitionColumnStatistics(dbTab[0].toLowerCase(), dbTab[1].toLowerCase(), partitions, colNames).get(part.getName());
+            colStats = db.getPartitionColumnStatistics(
+                dbTab[0].toLowerCase(), dbTab[1].toLowerCase(), partitions, colNames).get(
+                part.getName());
          }
        } else {
          cols = Hive.getFieldsFromDeserializer(colPath, deserializer);
@@ -3966,7 +3968,8 @@ private int alterTable(Hive db, AlterTableDesc alterTbl) throws HiveException {
      }
      environmentContext.putToProperties(HiveMetaHook.ALTER_TABLE_OPERATION_TYPE, alterTbl.getOp().name());
      if (allPartitions == null) {
-        db.alterTable(alterTbl.getOldName(), tbl, alterTbl.getIsCascade(), environmentContext);
+        String[] names = Utilities.getDbTableName(alterTbl.getOldName());
+        db.alterTable(names[0], names[1], tbl, alterTbl.getIsCascade(), environmentContext, false);
      } else {
        db.alterPartitions(Warehouse.getQualifiedName(tbl.getTTable()), allPartitions, environmentContext);
      }
@@ -4943,7 +4946,7 @@ private int createTable(Hive db, CreateTableDesc crtTbl) throws HiveException {
     // create the table
     if (crtTbl.getReplaceMode()) {
       // replace-mode creates are really alters using CreateTableDesc.
-      db.alterTable(tbl, null);
+      db.alterTable(tbl, null, false);
     } else {
       if ((foreignKeys != null && foreignKeys.size() > 0) ||
           (primaryKeys != null && primaryKeys.size() > 0) ||
@@ -5173,7 +5176,7 @@ private int createView(Hive db, CreateViewDesc crtView) throws HiveException {
         oldview.setOutputFormatClass(crtView.getOutputFormat());
       }
       oldview.checkValidity(null);
-      db.alterTable(crtView.getViewName(), oldview, null);
+      db.alterTable(crtView.getViewName(), oldview, null, false);
       addIfAbsentByName(new WriteEntity(oldview, WriteEntity.WriteType.DDL_NO_LOCK));
     } else {
       // We create new view
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
index 19097f5e70..af1030c28b 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
@@ -793,7 +793,7 @@ private void updatePartitionBucketSortColumns(Hive db, Table table, Partition pa
     }

     if (updateBucketCols || updateSortCols) {
-      db.alterPartition(table.getDbName(), table.getTableName(), partn, null);
+      db.alterPartition(table.getDbName(), table.getTableName(), partn, null, false);
     }
   }

diff --git ql/src/java/org/apache/hadoop/hive/ql/hooks/UpdateInputAccessTimeHook.java ql/src/java/org/apache/hadoop/hive/ql/hooks/UpdateInputAccessTimeHook.java
index fc56a8be3b..0ed93d05b4 100644
--- ql/src/java/org/apache/hadoop/hive/ql/hooks/UpdateInputAccessTimeHook.java
+++ ql/src/java/org/apache/hadoop/hive/ql/hooks/UpdateInputAccessTimeHook.java
@@ -63,7 +63,7 @@ public void run(HookContext hookContext) throws Exception {
         String tblName = re.getTable().getTableName();
         Table t = db.getTable(dbName, tblName);
         t.setLastAccessTime(lastAccessTime);
-        db.alterTable(dbName + "." + tblName, t, null);
+        db.alterTable(dbName + "." + tblName, t, null, false);
         break;
       }
       case PARTITION: {
@@ -73,9 +73,9 @@ public void run(HookContext hookContext) throws Exception {
         Table t = db.getTable(dbName, tblName);
         p = db.getPartition(t, p.getSpec(), false);
         p.setLastAccessTime(lastAccessTime);
-        db.alterPartition(dbName, tblName, p, null);
+        db.alterPartition(dbName, tblName, p, null, false);
         t.setLastAccessTime(lastAccessTime);
-        db.alterTable(dbName + "." + tblName, t, null);
+        db.alterTable(dbName + "." + tblName, t, null, false);
         break;
       }
       default:
diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index c0a9be0dee..cec4570fe1 100644
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -581,15 +581,9 @@ public void createTable(String tableName, List<String> columns, List<String> par
     createTable(tbl);
   }

-  public void alterTable(Table newTbl, EnvironmentContext environmentContext)
+  public void alterTable(Table newTbl, EnvironmentContext environmentContext, boolean isTxnScope)
       throws HiveException {
-    alterTable(newTbl.getDbName(), newTbl.getTableName(), newTbl, false, environmentContext);
-  }
-
-
-  public void alterTable(String fullyQlfdTblName, Table newTbl, EnvironmentContext environmentContext)
-      throws HiveException {
-    alterTable(fullyQlfdTblName, newTbl, false, environmentContext);
+    alterTable(newTbl.getDbName(), newTbl.getTableName(), newTbl, false, environmentContext, isTxnScope);
   }

   /**
@@ -605,25 +599,14 @@ public void alterTable(String fullyQlfdTblName, Table newTbl, EnvironmentContext
    *          if the changes in metadata is not acceptable
    * @throws TException
    */
-  public void alterTable(String fullyQlfdTblName, Table newTbl, EnvironmentContext environmentContext,
-      boolean transactional)
-      throws HiveException {
+  public void alterTable(String fullyQlfdTblName, Table newTbl,
+      EnvironmentContext environmentContext, boolean isInTxnScope) throws HiveException {
     String[] names = Utilities.getDbTableName(fullyQlfdTblName);
-    alterTable(names[0], names[1], newTbl, false, environmentContext, transactional);
+    alterTable(names[0], names[1], newTbl, false, environmentContext, isInTxnScope);
   }

-  public void alterTable(String fullyQlfdTblName, Table newTbl, boolean cascade, EnvironmentContext environmentContext)
-      throws HiveException {
-    String[] names = Utilities.getDbTableName(fullyQlfdTblName);
-    alterTable(names[0], names[1], newTbl, cascade, environmentContext);
-  }
-
-  public void alterTable(String dbName, String tblName, Table newTbl, boolean cascade,
-      EnvironmentContext environmentContext)
-      throws HiveException {
-    alterTable(dbName, tblName, newTbl, cascade, environmentContext, true);
-  }
   public void alterTable(String dbName, String tblName, Table newTbl, boolean cascade,
-      EnvironmentContext environmentContext, boolean transactional)
+      EnvironmentContext environmentContext, boolean isInTxnScope)
       throws HiveException {

     try {
@@ -640,9 +623,8 @@ public void alterTable(String dbName, String tblName, Table newTbl, boolean casc
      }

      // Take a table snapshot and set it to newTbl.
-      if (transactional) {
-        setTableSnapshotForTransactionalTable(conf, newTbl);
-      }
+      setTableSnapshotForTransactionalTable(conf, newTbl, isInTxnScope);
+
      getMSC().alter_table_with_environmentContext(dbName, tblName, newTbl.getTTable(),
          environmentContext);
    } catch (MetaException e) {
@@ -675,25 +657,8 @@ public void updateCreationMetadata(String dbName, String tableName, CreationMeta
   public void alterPartition(String tblName, Partition newPart, EnvironmentContext environmentContext)
       throws InvalidOperationException, HiveException {
     String[] names = Utilities.getDbTableName(tblName);
-    alterPartition(names[0], names[1], newPart, environmentContext);
-  }
-
-  /**
-   * Updates the existing partition metadata with the new metadata.
-   *
-   * @param dbName
-   *          name of the exiting table's database
-   * @param tblName
-   *          name of the existing table
-   * @param newPart
-   *          new partition
-   * @throws InvalidOperationException
-   *          if the changes in metadata is not acceptable
-   * @throws TException
-   */
-  public void alterPartition(String dbName, String tblName, Partition newPart, EnvironmentContext environmentContext)
-      throws InvalidOperationException, HiveException {
-    alterPartition(dbName, tblName, newPart, environmentContext, true);
+    // Only called from DDLTask for now.
+    alterPartition(names[0], names[1], newPart, environmentContext, false);
   }

   /**
@@ -714,7 +679,7 @@ public void alterPartition(String dbName, String tblName, Partition newPart, Env
    * @throws TException
    */
   public void alterPartition(String dbName, String tblName, Partition newPart,
-      EnvironmentContext environmentContext, boolean transactional)
+      EnvironmentContext environmentContext, boolean isInTxnScope)
       throws InvalidOperationException, HiveException {
     try {
       validatePartition(newPart);
@@ -723,9 +688,7 @@ public void alterPartition(String dbName, String tblName, Partition newPart,
         location = Utilities.getQualifiedPath(conf, new Path(location));
         newPart.setLocation(location);
       }
-      if (transactional) {
-        setTableSnapshotForTransactionalPartition(conf, newPart);
-      }
+      setTableSnapshotForTransactionalPartition(conf, newPart, isInTxnScope);
       getSynchronizedMSC().alter_partition(dbName, tblName, newPart.getTPartition(), environmentContext);
     } catch (MetaException e) {
@@ -917,7 +880,8 @@ public void createTable(Table tbl, boolean ifNotExists,
       }
     }
     // Set table snapshot to api.Table to make it persistent.
-    setTableSnapshotForTransactionalTable(conf, tbl);
+    // For new table, trying to fetch write ID from metastore will fail, so avoid it.
+    setTableSnapshotForTransactionalTable(conf, tbl, true);

     if (primaryKeys == null && foreignKeys == null
         && uniqueConstraints == null && notNullConstraints == null
         && defaultConstraints == null && checkConstraints == null) {
@@ -1813,7 +1777,7 @@ public Partition loadPartition(Path loadPath, Table tbl, Map<String, String> par
     Partition newTPart = oldPart != null ? oldPart : new Partition(tbl, partSpec, newPartPath);
     alterPartitionSpecInMemory(tbl, partSpec, newTPart.getTPartition(), inheritTableSpecs, newPartPath.toString());
     validatePartition(newTPart);
-    setTableSnapshotForTransactionalPartition(conf, newTPart);
+    setTableSnapshotForTransactionalPartition(conf, newTPart, true);

     // If config is set, table is not temporary and partition being inserted exists, capture
     // the list of files added. For not yet existing partitions (insert overwrite to new partition
@@ -2427,7 +2391,7 @@ public void loadTable(Path loadPath, String tableName, LoadFileType loadFileType
       environmentContext.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE);
     }

-    alterTable(tbl, environmentContext);
+    alterTable(tbl.getDbName(), tbl.getTableName(), tbl, false, environmentContext, true);

     if (conf.getBoolVar(ConfVars.FIRE_EVENTS_FOR_DML) && !tbl.isTemporary()) {
       fireInsertEvent(tbl, null, (loadFileType == LoadFileType.REPLACE_ALL), newFiles);
@@ -2471,7 +2435,7 @@ public Partition createPartition(Table tbl, Map<String, String> partSpec) throws
     List<org.apache.hadoop.hive.metastore.api.Partition> in =
         new ArrayList<org.apache.hadoop.hive.metastore.api.Partition>(size);
     AcidUtils.TableSnapshot tableSnapshot =
-        AcidUtils.getTableSnapshot(conf, tbl);
+        AcidUtils.getTableSnapshot(conf, tbl, false);
     for (int i = 0; i < size; ++i) {
       org.apache.hadoop.hive.metastore.api.Partition tmpPart =
           convertAddSpecToMetaPartition(tbl, addPartitionDesc.getPartition(i), conf);
@@ -4449,7 +4413,7 @@ public boolean setPartitionColumnStatistics(
       ColumnStatisticsDesc statsDesc = colStat.getStatsDesc();
       Table tbl = getTable(statsDesc.getDbName(), statsDesc.getTableName());

-      AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, tbl);
+      AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, tbl, true);
       request.setTxnId(tableSnapshot != null ? tableSnapshot.getTxnId() : 0);
       request.setValidWriteIdList(tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null);
       return getMSC().setPartitionColumnStatistics(request);
@@ -4472,7 +4436,7 @@ public boolean setPartitionColumnStatistics(
     try {
       if (checkTransactional) {
         Table tbl = getTable(dbName, tableName);
-        AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, tbl);
+        AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, tbl, true);
         if (tableSnapshot.getTxnId() > 0) {
           retv = getMSC().getTableColumnStatistics(dbName, tableName, colNames,
               tableSnapshot != null ? tableSnapshot.getTxnId() : -1,
@@ -4502,7 +4466,7 @@ public boolean setPartitionColumnStatistics(
     try {
       if (checkTransactional) {
         Table tbl = getTable(dbName, tableName);
-        AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, tbl);
+        AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, tbl, true);
         txnId = tableSnapshot != null ? tableSnapshot.getTxnId() : -1;
         writeIdList = tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null;
       }
@@ -4527,7 +4491,7 @@ public AggrStats getAggrColStatsFor(String dbName, String tblName,
     try {
       if (checkTransactional) {
         Table tbl = getTable(dbName, tblName);
-        AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, tbl);
+        AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, tbl, true);
         txnId = tableSnapshot != null ? tableSnapshot.getTxnId() : -1;
         writeIdList = tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null;
       }
@@ -5332,22 +5296,22 @@ public StorageHandlerInfo getStorageHandlerInfo(Table table)
   }

   private void setTableSnapshotForTransactionalTable(
-      HiveConf conf, Table newTbl)
-      throws LockException {
-
+      HiveConf conf, Table newTbl, boolean isInTxnScope) throws LockException {
     org.apache.hadoop.hive.metastore.api.Table newTTbl = newTbl.getTTable();
-    AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, newTbl);
+    AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(
+        conf, newTbl, isInTxnScope);

     newTTbl.setTxnId(tableSnapshot != null ?
         tableSnapshot.getTxnId() : -1);
     newTTbl.setValidWriteIdList(
         tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null);
   }

-  private void setTableSnapshotForTransactionalPartition(HiveConf conf, Partition partition)
-      throws LockException {
-
-    AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, partition.getTable());
+  private void setTableSnapshotForTransactionalPartition(
+      HiveConf conf, Partition partition, boolean isInTxnScope) throws LockException {
+    AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(
+        conf, partition.getTable(), isInTxnScope);
     org.apache.hadoop.hive.metastore.api.Partition tpartition = partition.getTPartition();
+    tpartition.setTxnId(tableSnapshot != null ? tableSnapshot.getTxnId() : -1);
     tpartition.setValidWriteIdList(
         tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null);
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
index 18a27c4172..f44280b7be 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
@@ -383,7 +383,7 @@ else if (getGbyKeyType(cgbyOp) == GbyKeyType.CONSTANT && rowCnt == 0) {
         List<Object> oneRow = new ArrayList<Object>();

         AcidUtils.TableSnapshot tableSnapshot =
-            AcidUtils.getTableSnapshot(pctx.getConf(), tbl);
+            AcidUtils.getTableSnapshot(pctx.getConf(), tbl, true);

         for (AggregationDesc aggr : pgbyOp.getConf().getAggregators()) {
           if (aggr.getDistinct()) {
@@ -919,7 +919,7 @@ private ColumnStatisticsData validateSingleColStat(List<ColumnStatisticsObj> sta
        partNames.add(part.getName());
      }
      AcidUtils.TableSnapshot tableSnapshot =
-          AcidUtils.getTableSnapshot(hive.getConf(), tbl);
+          AcidUtils.getTableSnapshot(hive.getConf(), tbl, true);
      Map<String, List<ColumnStatisticsObj>> result = hive.getMSC().getPartitionColumnStatistics(
          tbl.getDbName(), tbl.getTableName(), partNames, Lists.newArrayList(colName),
diff --git ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
index 930282d73e..63a4a9581f 100755
--- ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
+++ ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
@@ -600,7 +600,7 @@ public void testAutoPurgeTablesAndPartitions() throws Throwable {
     Table table = createPartitionedTable(dbName, tableName);
     table.getParameters().put("auto.purge", "true");
-    hm.alterTable(tableName, table, null);
+    hm.alterTable(tableName, table, null, false);

     Map<String, String> partitionSpec = new ImmutableMap.Builder<String, String>()
        .put("ds", "20141216")
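
For reference, a minimal sketch (not part of the patch) of how the reworked Hive.alterTable overloads are expected to be called, based only on the signatures introduced above: DDL-style callers pass false, while the DML-style loadTable path passes true so the table snapshot (txnId / validWriteIdList) is captured. The class, method names and the db/tbl variables below are hypothetical.

    // Illustrative caller of the alterTable overloads changed by this patch.
    import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
    import org.apache.hadoop.hive.ql.metadata.Hive;
    import org.apache.hadoop.hive.ql.metadata.HiveException;
    import org.apache.hadoop.hive.ql.metadata.Table;

    public class AlterTableCallerSketch {
      // DDL-style path (as in the DDLTask hunks): not in transaction scope,
      // so no table snapshot / write-id lookup is attempted.
      void alterOutsideTxnScope(Hive db, Table tbl) throws HiveException {
        db.alterTable(tbl, new EnvironmentContext(), false);
      }

      // DML-style path (as in the loadTable hunk): pass true so the txnId and
      // validWriteIdList snapshot is recorded with the altered metadata.
      void alterInsideTxnScope(Hive db, Table tbl) throws HiveException {
        db.alterTable(tbl.getDbName(), tbl.getTableName(), tbl, false,
            new EnvironmentContext(), true);
      }
    }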