diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java
index 4f5cbbb019..8df5fc81d3 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java
@@ -1387,27 +1387,28 @@ public void testBootstrapReplLoadRetryAfterFailureForPartitions() throws Throwab
     // Inject a behavior where REPL LOAD failed when try to load table "t2" and partition "uk".
     // So, table "t2" will exist and partition "india" will exist, rest failed as operation failed.
-    BehaviourInjection<Partition, Partition> getPartitionStub
-        = new BehaviourInjection<Partition, Partition>() {
-      @Nullable
+    BehaviourInjection<List<Partition>, Boolean> alterPartitionStub
+        = new BehaviourInjection<List<Partition>, Boolean>() {
       @Override
-      public Partition apply(@Nullable Partition ptn) {
-        if (ptn.getValues().get(0).equals("india")) {
-          injectionPathCalled = true;
-          LOG.warn("####getPartition Stub called");
-          return null;
+      public Boolean apply(List<Partition> ptns) {
+        for (Partition ptn : ptns) {
+          if (ptn.getValues().get(0).equals("india")) {
+            injectionPathCalled = true;
+            LOG.warn("####getPartition Stub called");
+            return false;
+          }
         }
-        return ptn;
+        return true;
       }
     };
-    InjectableBehaviourObjectStore.setGetPartitionBehaviour(getPartitionStub);
+    InjectableBehaviourObjectStore.setAlterPartitionsBehaviour(alterPartitionStub);
 
     // Make sure that there's some order in which the objects are loaded.
     List<String> withConfigs = Arrays.asList("'hive.repl.approx.max.load.tasks'='1'",
         "'hive.in.repl.test.files.sorted'='true'");
     replica.loadFailure(replicatedDbName, tuple.dumpLocation, withConfigs);
-    InjectableBehaviourObjectStore.resetGetPartitionBehaviour(); // reset the behaviour
-    getPartitionStub.assertInjectionsPerformed(true, false);
+    InjectableBehaviourObjectStore.setAlterPartitionsBehaviour(null); // reset the behaviour
+    alterPartitionStub.assertInjectionsPerformed(true, false);
 
     replica.run("use " + replicatedDbName)
         .run("repl status " + replicatedDbName)
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
index f2ea30bf36..7b28405c5c 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
@@ -185,7 +185,7 @@ public void tearDown() {
    */
   @Test
   public void schemaEvolutionAddColDynamicPartitioningInsert() throws Exception {
-    String tblName = "dpct";
+    String tblName = "sedpct";
     executeStatementOnDriver("drop table if exists " + tblName, driver);
     executeStatementOnDriver("CREATE TABLE " + tblName + "(a INT, b STRING) " +
       " PARTITIONED BY(ds string)" +
@@ -253,7 +253,7 @@ public void schemaEvolutionAddColDynamicPartitioningInsert() throws Exception {
 
   @Test
   public void schemaEvolutionAddColDynamicPartitioningUpdate() throws Exception {
-    String tblName = "udpct";
+    String tblName = "acdpct";
     executeStatementOnDriver("drop table if exists " + tblName, driver);
     executeStatementOnDriver("CREATE TABLE " + tblName + "(a INT, b STRING) " +
       " PARTITIONED BY(ds string)" +
@@ -527,7 +527,7 @@ private void testStatsAfterCompactionPartTbl(boolean newStreamingAPI) throws Exc
 
   @Test
   public void dynamicPartitioningInsert() throws Exception {
-    String tblName = "dpct";
+    String tblName = "idpct";
     executeStatementOnDriver("drop table if exists " + tblName, driver);
     executeStatementOnDriver("CREATE TABLE " + tblName + "(a INT, b STRING) " +
       " PARTITIONED BY(ds string)" +
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
index 213dd9eb5e..475f126723 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
@@ -61,6 +61,7 @@
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.Iterator;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -732,6 +733,27 @@ public Partition alterPartition(RawStore msdb, Warehouse wh, String catName, Str
         environmentContext, null, -1, null);
   }
 
+  private List<Partition> getExistingPartitions(final RawStore msdb,
+      final List<Partition> new_parts, final Table tbl, final String catName,
+      final String dbname, final String name)
+      throws MetaException, NoSuchObjectException, InvalidOperationException {
+
+    // Get list of partition values
+    List<String> partValues = new LinkedList<>();
+    for (Partition tmpPart : new_parts) {
+      partValues.add(Warehouse.makePartName(tbl.getPartitionKeys(), tmpPart.getValues()));
+    }
+
+    // Get existing partitions from store
+    List<Partition> oldParts = msdb.getPartitionsByNames(catName, dbname, name, partValues);
+    if (new_parts.size() != oldParts.size()) {
+      throw new InvalidOperationException("Alter partition operation failed: "
+          + "new parts size " + new_parts.size()
+          + " not matching with old parts size " + oldParts.size());
+    }
+    return oldParts;
+  }
+
   @Override
   public List<Partition> alterPartitions(final RawStore msdb, Warehouse wh, final String catName,
       final String dbname, final String name,
@@ -740,7 +762,8 @@ public Partition alterPartition(RawStore msdb, Warehouse wh, String catName, Str
       String writeIdList, long writeId, IHMSHandler handler)
       throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException {
-    List<Partition> oldParts = new ArrayList<>();
+    LOG.info("Starting alterPartitions");
+    List<Partition> oldParts = null;
     List<List<String>> partValsList = new ArrayList<>();
     List<TransactionalMetaStoreEventListener> transactionalListeners = null;
     if (handler != null) {
@@ -761,6 +784,15 @@ public Partition alterPartition(RawStore msdb, Warehouse wh, String catName, Str
       blockPartitionLocationChangesOnReplSource(msdb.getDatabase(catName, dbname), tbl,
           environmentContext);
 
+      oldParts = getExistingPartitions(msdb, new_parts, tbl,
+          catName, dbname, name);
+      LOG.info("Got existing partitions");
+
+      Map<String, Partition> oldPartMap = new HashMap<>();
+      for (Partition oldPart : oldParts) {
+        oldPartMap.put(oldPart.getValues().toString(), oldPart);
+      }
+
       for (Partition tmpPart: new_parts) {
         // Set DDL time to now if not specified
         if (tmpPart.getParameters() == null ||
@@ -770,8 +802,8 @@ public Partition alterPartition(RawStore msdb, Warehouse wh, String catName, Str
               .currentTimeMillis() / 1000));
         }
 
-        Partition oldTmpPart = msdb.getPartition(catName, dbname, name, tmpPart.getValues());
-        oldParts.add(oldTmpPart);
+        //Partition oldTmpPart1 = msdb.getPartition(catName, dbname, name, tmpPart.getValues());
+        Partition oldTmpPart = oldPartMap.get(tmpPart.getValues().toString());
         partValsList.add(tmpPart.getValues());
 
         if (MetaStoreServerUtils.requireCalStats(oldTmpPart, tmpPart, tbl, environmentContext)) {
@@ -791,13 +823,11 @@ public Partition alterPartition(RawStore msdb, Warehouse wh, String catName, Str
         }
       }
 
+      LOG.info("update partitions");
       msdb.alterPartitions(catName, dbname, name, partValsList, new_parts, writeId, writeIdList);
-      Iterator<Partition> oldPartsIt = oldParts.iterator();
       for (Partition newPart : new_parts) {
-        Partition oldPart;
-        if (oldPartsIt.hasNext()) {
-          oldPart = oldPartsIt.next();
-        } else {
+        Partition oldPart = oldPartMap.get(newPart.getValues().toString());
+        if (oldPart == null) {
           throw new InvalidOperationException("Missing old partition corresponding to new partition " +
               "when invoking MetaStoreEventListener for alterPartitions event.");
         }
@@ -818,6 +848,7 @@ public Partition alterPartition(RawStore msdb, Warehouse wh, String catName, Str
       }
     }
 
+    LOG.info("Finished alterPartitions");
     return oldParts;
   }
 
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index fb9d87006c..da4be6b31f 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -4286,12 +4286,21 @@ public void updateCreationMetadata(String catName, String dbname, String tablena
    * @param newPart Partition object containing new information
    */
   private Partition alterPartitionNoTxn(String catName, String dbname, String name,
-      List<String> part_vals, Partition newPart, String validWriteIds, Ref<MColumnDescriptor> oldCd)
+      List<String> part_vals, Partition newPart, String validWriteIds, Ref<MColumnDescriptor> oldCd)
+      throws InvalidObjectException, MetaException {
+    MTable table = this.getMTable(newPart.getCatName(), newPart.getDbName(), newPart.getTableName());
+    return alterPartitionNoTxn(catName, dbname, name, part_vals, newPart,
+        validWriteIds, oldCd, table);
+  }
+
+  private Partition alterPartitionNoTxn(String catName, String dbname,
+      String name, List<String> part_vals, Partition newPart,
+      String validWriteIds,
+      Ref<MColumnDescriptor> oldCd, MTable table)
       throws InvalidObjectException, MetaException {
     catName = normalizeIdentifier(catName);
     name = normalizeIdentifier(name);
     dbname = normalizeIdentifier(dbname);
-    MTable table = this.getMTable(newPart.getCatName(), newPart.getDbName(), newPart.getTableName());
     MPartition oldp = getMPartition(catName, dbname, name, part_vals);
     MPartition newp = convertToMPart(newPart, table, false);
     MColumnDescriptor oldCD = null;
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java
index 6c7fe116cc..8673186389 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java
@@ -91,6 +91,8 @@ public CallerArguments(String dbName) {
   private static com.google.common.base.Function<CurrentNotificationEventId, CurrentNotificationEventId>
       getCurrNotiEventIdModifier = null;
 
+  private static com.google.common.base.Function<List<Partition>, Boolean> alterPartitionsModifier = null;
+
   // Methods to set/reset getTable modifier
   public static void setGetTableBehaviour(com.google.common.base.Function<Table, Table> modifier){
     getTableModifier = (modifier == null) ? com.google.common.base.Functions.identity() : modifier;
@@ -152,6 +154,11 @@ public static void resetAlterTableModifier() {
     setAlterTableModifier(null);
   }
 
+  public static void setAlterPartitionsBehaviour(com.google.common.base.Function<List<Partition>, Boolean> modifier){
+    alterPartitionsModifier = modifier;
+  }
+
+
   // ObjectStore methods to be overridden with injected behavior
   @Override
   public Table getTable(String catName, String dbName, String tableName) throws MetaException {
@@ -295,4 +302,19 @@ public CurrentNotificationEventId getCurrentNotificationEventId() {
     }
     return id;
   }
+
+  @Override
+  public List<Partition> alterPartitions(String catName, String dbname, String name,
+      List<List<String>> part_vals, List<Partition> newParts,
+      long writeId, String queryWriteIdList)
+      throws InvalidObjectException, MetaException {
+    if (alterPartitionsModifier != null) {
+      Boolean success = alterPartitionsModifier.apply(newParts);
+      if ((success != null) && !success) {
+        throw new MetaException("InjectableBehaviourObjectStore: Invalid alterPartitions operation on Catalog : "
+            + catName + " DB: " + dbname + " table: " + name);
+      }
+    }
+    return super.alterPartitions(catName, dbname, name, part_vals, newParts, writeId, queryWriteIdList);
+  }
 }
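
Note (not part of the patch): the core change above replaces the per-partition msdb.getPartition() lookups in HiveAlterHandler.alterPartitions() with a single batched msdb.getPartitionsByNames() call, then resolves each new partition's old counterpart through a map keyed on the partition's value list. A minimal, self-contained Java sketch of that lookup pattern follows; the Part record and fetchAllByName() helper are hypothetical stand-ins for Hive's Partition objects and RawStore.getPartitionsByNames(), not code from the patch.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class BatchLookupSketch {
  // Hypothetical stand-in for org.apache.hadoop.hive.metastore.api.Partition.
  record Part(List<String> values, String location) { }

  // Hypothetical stand-in for msdb.getPartitionsByNames(catName, dbname, name, partValues):
  // one batch fetch instead of one getPartition() call per partition.
  static List<Part> fetchAllByName(List<Part> store, List<List<String>> wanted) {
    List<Part> out = new ArrayList<>();
    for (Part p : store) {
      if (wanted.contains(p.values())) {
        out.add(p);
      }
    }
    return out;
  }

  public static void main(String[] args) {
    List<Part> store = List.of(
        new Part(List.of("india"), "/warehouse/t2/ds=india"),
        new Part(List.of("uk"), "/warehouse/t2/ds=uk"));
    List<List<String>> newPartValues = List.of(List.of("india"), List.of("uk"));

    // Single batched read of the existing partitions.
    List<Part> oldParts = fetchAllByName(store, newPartValues);

    // Index old partitions by their value list, mirroring oldPartMap keyed on
    // oldPart.getValues().toString() in the patched alterPartitions().
    Map<String, Part> oldPartMap = new HashMap<>();
    for (Part old : oldParts) {
      oldPartMap.put(old.values().toString(), old);
    }

    // Each new partition finds its old counterpart with a constant-time map lookup.
    for (List<String> vals : newPartValues) {
      Part old = oldPartMap.get(vals.toString());
      System.out.println(vals + " -> " + (old == null ? "missing" : old.location()));
    }
  }
}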