commit 5da610cd7e7760eab34f3fa866c0e169d25bfcc0 Author: Vihang Karajgaonkar Date: Thu Apr 27 11:15:27 2017 -0700 HIVE-16143 : Improve msck repair batching diff --git a/common/src/java/org/apache/hive/common/util/RetryUtilities.java b/common/src/java/org/apache/hive/common/util/RetryUtilities.java new file mode 100644 index 0000000000000000000000000000000000000000..df9658733770b9dd7b01cc693c0f3b4f96a4ab1d --- /dev/null +++ b/common/src/java/org/apache/hive/common/util/RetryUtilities.java @@ -0,0 +1,82 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hive.common.util; + +import java.util.concurrent.Callable; + +import jline.internal.Log; + +public class RetryUtilities { + public static class RetryException extends Exception { + private static final long serialVersionUID = 1L; + + public RetryException(Exception ex) { + super(ex); + } + + public RetryException(String msg) { + super(msg); + } + } + + public static interface ExponentialBackOffRetry { + public T execute(int batchSize) throws Exception; + } + + public static abstract class ExponentiallyDecayingBatchWork + implements ExponentialBackOffRetry, Callable { + private int batchSize; + private int decayingFactor; + + public ExponentiallyDecayingBatchWork(int batchSize, int reducingFactor) { + if (batchSize <= 0) { + throw new IllegalArgumentException(String.format( + "Invalid batch size %d provided. Batch size must be greater than 0", batchSize)); + } + this.batchSize = batchSize; + if (reducingFactor <= 1) { + throw new IllegalArgumentException(String.format( + "Invalid decaying factor %d provided. 
Decaying factor must be greater than 1", + batchSize)); + } + this.decayingFactor = reducingFactor; + } + + @Override + public T call() throws Exception { + while (true) { + int size = getNextBatchSize(); + if (size == 0) { + throw new RetryException("Maximum number of retry attempts exhausted"); + } + try { + return execute(size); + } catch (Exception ex) { + Log.warn(String.format("Exception thrown while processing using a batch size %d", size), + ex); + } + } + } + + private int getNextBatchSize() { + int ret = batchSize; + batchSize /= decayingFactor; + return ret; + } + } +} diff --git a/common/src/test/org/apache/hive/common/util/TestRetryUtilities.java b/common/src/test/org/apache/hive/common/util/TestRetryUtilities.java new file mode 100644 index 0000000000000000000000000000000000000000..38479d3507a37a0e5a10ab5218c63d4bd1db237a --- /dev/null +++ b/common/src/test/org/apache/hive/common/util/TestRetryUtilities.java @@ -0,0 +1,124 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hive.common.util; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.hive.common.util.RetryUtilities.ExponentiallyDecayingBatchWork; +import org.apache.hive.common.util.RetryUtilities.RetryException; +import org.junit.Assert; +import org.junit.Test; + +public class TestRetryUtilities { + + private class DummyExponentiallyDecayingBatchWork extends ExponentiallyDecayingBatchWork { + public DummyExponentiallyDecayingBatchWork(int batchSize, int reducingFactor, + int throwException) { + super(batchSize, reducingFactor); + this.exceptionCount = throwException; + } + + final List batchSizes = new ArrayList<>(); + int exceptionCount = 0; + + @Override + public Void execute(int size) throws Exception { + batchSizes.add(size); + if (exceptionCount > 0) { + exceptionCount--; + throw new Exception("Dummy exception"); + } + return null; + } + + public int getCount() { + return batchSizes.size(); + } + + public int[] getBatchSizes() { + int[] ret = new int[batchSizes.size()]; + int i = 0; + for (int b : batchSizes) { + ret[i++] = b; + } + return ret; + } + } + + @Test(expected = IllegalArgumentException.class) + public void testZeroBatchSize() { + new DummyExponentiallyDecayingBatchWork(0, 2, 0); + } + + @Test(expected = IllegalArgumentException.class) + public void testNegativeBatchSize() { + new DummyExponentiallyDecayingBatchWork(-1, 2, 0); + } + + @Test(expected = IllegalArgumentException.class) + public void testZeroDecayingFactor() { + new DummyExponentiallyDecayingBatchWork(5, 0, 0); + } + + @Test(expected = IllegalArgumentException.class) + public void testOneDecayingFactor() { + new DummyExponentiallyDecayingBatchWork(10, 1, 0); + } + + @Test + public void testNumberOfAttempts() throws Exception { + // test perfectly divisible batchsize and decaying factor + DummyExponentiallyDecayingBatchWork dummy = new DummyExponentiallyDecayingBatchWork(10, 2, 0); + dummy.call(); + Assert.assertEquals("Unexpected number of 
executions of execute method", 1, dummy.getCount()); + // there were no exception. Batchsize doesn't change until there is an exception + Assert.assertArrayEquals(new int[] { 10 }, dummy.getBatchSizes()); + // test batchsize is not divisible by decaying factor + dummy = new DummyExponentiallyDecayingBatchWork(11, 2, 0); + dummy.call(); + Assert.assertEquals("Unexpected number of executions of execute method", 1, dummy.getCount()); + // there were no exception. Batchsize doesn't change until there is an exception + Assert.assertArrayEquals(new int[] { 11 }, dummy.getBatchSizes()); + + dummy = new DummyExponentiallyDecayingBatchWork(11, 3, 1); + // batches will be sized 11,3 + dummy.call(); + Assert.assertEquals("Unexpected number of executions of execute method", 2, dummy.getCount()); + Assert.assertArrayEquals(new int[] { 11, 3 }, dummy.getBatchSizes()); + + dummy = new DummyExponentiallyDecayingBatchWork(11, 3, 2); + // batches will be sized 11,3,1 + dummy.call(); + Assert.assertEquals("Unexpected number of executions of execute method", 3, dummy.getCount()); + Assert.assertArrayEquals(new int[] { 11, 3, 1 }, dummy.getBatchSizes()); + + dummy = new DummyExponentiallyDecayingBatchWork(12, 3, 2); + // batches will be sized 12,4,1 + dummy.call(); + Assert.assertEquals("Unexpected number of executions of execute method", 3, dummy.getCount()); + Assert.assertArrayEquals(new int[] { 12, 4, 1 }, dummy.getBatchSizes()); + } + + @Test(expected = RetryException.class) + public void testRetriesExhausted() throws Exception { + // attempts at execute will be made using batchsizes 11, 3, 1, throws retry exception + DummyExponentiallyDecayingBatchWork dummy = new DummyExponentiallyDecayingBatchWork(11, 3, 3); + dummy.call(); + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java index 917e565f28b2c9aaea18033ea3b6b20fa41fcd0a..8dc94ceea9b73c4f5b9eb8c2ef3fde613138980c 100644 --- 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java @@ -248,10 +248,12 @@ import org.apache.hive.common.util.AnnotationUtils; import org.apache.hive.common.util.HiveStringUtils; import org.apache.hive.common.util.ReflectionUtil; +import org.apache.hive.common.util.RetryUtilities; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.stringtemplate.v4.ST; +import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Iterables; /** @@ -1987,34 +1989,21 @@ private int msck(Hive db, MsckDesc msckDesc) { } } Table table = db.getTable(msckDesc.getTableName()); - AddPartitionDesc apd = new AddPartitionDesc( - table.getDbName(), table.getTableName(), false); + int batchSize = conf.getIntVar(ConfVars.HIVE_MSCK_REPAIR_BATCH_SIZE); + int decayingFactor = 2; + if (batchSize == 0) { + //batching is not enabled. Try to add all the partitions in one call with no + //retries. + batchSize = partsNotInMs.size(); + //if the decayingFactor is greater than batchSize only one attempt will be + //made by ExponentiallyDecayingBatchWork below + decayingFactor = batchSize + 1; + } try { - int batch_size = conf.getIntVar(ConfVars.HIVE_MSCK_REPAIR_BATCH_SIZE); - if (batch_size > 0 && partsNotInMs.size() > batch_size) { - int counter = 0; - for (CheckResult.PartitionResult part : partsNotInMs) { - counter++; - apd.addPartition(Warehouse.makeSpecFromName(part.getPartitionName()), null); - repairOutput.add("Repair: Added partition to metastore " + msckDesc.getTableName() - + ':' + part.getPartitionName()); - if (counter % batch_size == 0 || counter == partsNotInMs.size()) { - db.createPartitions(apd); - apd = new AddPartitionDesc(table.getDbName(), table.getTableName(), false); - } - } - } else { - for (CheckResult.PartitionResult part : partsNotInMs) { - apd.addPartition(Warehouse.makeSpecFromName(part.getPartitionName()), null); - repairOutput.add("Repair: Added partition to metastore 
" + msckDesc.getTableName() - + ':' + part.getPartitionName()); - } - db.createPartitions(apd); - } + createPartitionsInBatches(db, repairOutput, partsNotInMs, table, batchSize, + decayingFactor); } catch (Exception e) { - LOG.info("Could not bulk-add partitions to metastore; trying one by one", e); - repairOutput.clear(); - msckAddPartitionsOneByOne(db, table, partsNotInMs, repairOutput); + throw new HiveException(e); } } } catch (HiveException e) { @@ -2066,6 +2055,44 @@ private int msck(Hive db, MsckDesc msckDesc) { return 0; } + @VisibleForTesting + void createPartitionsInBatches(Hive db, List repairOutput, + Set partsNotInMs, Table table, int batchSize, int decayingFactor) + throws Exception { + String addMsgFormat = "Repair: Added partition to metastore " + + table.getTableName() + ":%s"; + Set batchWork = new HashSet<>(partsNotInMs); + new RetryUtilities.ExponentiallyDecayingBatchWork(batchSize, decayingFactor) { + @Override + public Void execute(int size) throws Exception { + while (!batchWork.isEmpty()) { + //get the current batch size + int currentBatchSize = size; + AddPartitionDesc apd = + new AddPartitionDesc(table.getDbName(), table.getTableName(), false); + //store the partitions temporarily until processed + List lastBatch = new ArrayList<>(currentBatchSize); + List addMsgs = new ArrayList<>(currentBatchSize); + //add the number of partitions given by the current batchsize + for (CheckResult.PartitionResult part : batchWork) { + if (currentBatchSize == 0) { + break; + } + apd.addPartition(Warehouse.makeSpecFromName(part.getPartitionName()), null); + lastBatch.add(part); + addMsgs.add(String.format(addMsgFormat, part.getPartitionName())); + currentBatchSize--; + } + db.createPartitions(apd); + // if last batch is successful remove it from partsNotInMs + batchWork.removeAll(lastBatch); + repairOutput.addAll(addMsgs); + } + return null; + } + }.call(); + } + /** * Write the result of msck to a writer. 
* diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestMsckCreatePartitionsInBatches.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestMsckCreatePartitionsInBatches.java new file mode 100644 index 0000000000000000000000000000000000000000..d9b3b2012d59bf107c650400dde78221a94edcf0 --- /dev/null +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestMsckCreatePartitionsInBatches.java @@ -0,0 +1,279 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.ql.exec; + +import static org.junit.Assert.fail; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.conf.HiveConf.ConfVars; +import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat; +import org.apache.hadoop.hive.ql.metadata.CheckResult.PartitionResult; +import org.apache.hadoop.hive.ql.metadata.Hive; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.plan.AddPartitionDesc; +import org.apache.hadoop.hive.ql.session.SessionState; +import org.apache.hadoop.mapred.TextInputFormat; +import org.apache.hadoop.util.StringUtils; +import org.apache.hive.common.util.RetryUtilities.RetryException; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.mockito.ArgumentCaptor; +import org.mockito.Mockito; + +public class TestMsckCreatePartitionsInBatches { + private static HiveConf hiveConf; + private static DDLTask ddlTask; + private final String tableName = "test_msck_batch"; + private static Hive db; + private List repairOutput; + private Table table; + + @BeforeClass + public static void setupClass() throws HiveException { + hiveConf = new HiveConf(TestMsckCreatePartitionsInBatches.class); + hiveConf.setIntVar(ConfVars.HIVE_MSCK_REPAIR_BATCH_SIZE, 5); + hiveConf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, + "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); + SessionState.start(hiveConf); + db = Hive.get(hiveConf); + ddlTask = new DDLTask(); + } + + @Before + public void before() throws Exception { + createPartitionedTable("default", tableName); + table = db.getTable(tableName); + repairOutput = new ArrayList(); + } + + @After + public void 
after() throws Exception { + cleanUpTableQuietly("default", tableName); + } + + private Table createPartitionedTable(String dbName, String tableName) throws Exception { + try { + db.dropTable(dbName, tableName); + db.createTable(tableName, Arrays.asList("key", "value"), // Data columns. + Arrays.asList("city"), // Partition columns. + TextInputFormat.class, HiveIgnoreKeyTextOutputFormat.class); + return db.getTable(dbName, tableName); + } catch (Exception exception) { + fail("Unable to drop and create table " + dbName + "." + tableName + " because " + + StringUtils.stringifyException(exception)); + throw exception; + } + } + + private void cleanUpTableQuietly(String dbName, String tableName) { + try { + db.dropTable(dbName, tableName, true, true, true); + } catch (Exception exception) { + fail("Unexpected exception: " + StringUtils.stringifyException(exception)); + } + } + + private Set createPartsNotInMs(int numOfParts) { + Set partsNotInMs = new HashSet<>(); + for (int i = 0; i < numOfParts; i++) { + PartitionResult result = new PartitionResult(); + result.setPartitionName("city=dummyCity_" + String.valueOf(i)); + partsNotInMs.add(result); + } + return partsNotInMs; + } + + /** + * Tests the number of times Hive.createPartitions calls are executed with total number of + * partitions to be added are equally divisible by batch size + * + * @throws Exception + */ + @Test + public void testNumberOfCreatePartitionCalls() throws Exception { + // create 10 dummy partitions + Set partsNotInMs = createPartsNotInMs(10); + Hive spyDb = Mockito.spy(db); + // batch size of 5 and decaying factor of 2 + ddlTask.createPartitionsInBatches(spyDb, repairOutput, partsNotInMs, table, 5, 2); + // there should be 2 calls to create partitions with each batch size of 5 + ArgumentCaptor argument = ArgumentCaptor.forClass(AddPartitionDesc.class); + Mockito.verify(spyDb, Mockito.times(2)).createPartitions(argument.capture()); + // confirm the batch sizes were 5, 5 in the two calls to create 
partitions + List apds = argument.getAllValues(); + int retryAttempt = 1; + Assert.assertEquals(String.format("Unexpected batch size in retry attempt %d ", retryAttempt++), + 5, apds.get(0).getPartitionCount()); + Assert.assertEquals(String.format("Unexpected batch size in retry attempt %d ", retryAttempt++), + 5, apds.get(1).getPartitionCount()); + } + + /** + * Tests the number of times Hive.createPartitions calls are executed with total number of + * partitions to be added are not exactly divisible by batch size + * + * @throws Exception + */ + @Test + public void testUnevenNumberOfCreatePartitionCalls() throws Exception { + // create 9 dummy partitions + Set partsNotInMs = createPartsNotInMs(9); + Hive spyDb = Mockito.spy(db); + // batch size of 5 and decaying factor of 2 + ddlTask.createPartitionsInBatches(spyDb, repairOutput, partsNotInMs, table, 5, 2); + // there should be 2 calls to create partitions with batch sizes of 5, 4 + ArgumentCaptor argument = ArgumentCaptor.forClass(AddPartitionDesc.class); + Mockito.verify(spyDb, Mockito.times(2)).createPartitions(argument.capture()); + // confirm the batch sizes were 5, 4 in the two calls to create partitions + List apds = argument.getAllValues(); + int retryAttempt = 1; + Assert.assertEquals(String.format("Unexpected batch size in retry attempt %d ", retryAttempt++), + 5, apds.get(0).getPartitionCount()); + Assert.assertEquals(String.format("Unexpected batch size in retry attempt %d ", retryAttempt++), + 4, apds.get(1).getPartitionCount()); + } + + /** + * Tests the number of times Hive.createPartitions calls are executed with total number of + * partitions exactly equal to batch size + * + * @throws Exception + */ + @Test + public void testEqualNumberOfPartitions() throws Exception { + // create 13 dummy partitions + Set partsNotInMs = createPartsNotInMs(13); + Hive spyDb = Mockito.spy(db); + // batch size of 13 and decaying factor of 2 + ddlTask.createPartitionsInBatches(spyDb, repairOutput, partsNotInMs, 
table, 13, 2); + // there should be 1 call to create partitions with batch sizes of 13 + ArgumentCaptor argument = ArgumentCaptor.forClass(AddPartitionDesc.class); + Mockito.verify(spyDb, Mockito.times(1)).createPartitions(argument.capture()); + Assert.assertEquals("Unexpected number of batch size", 13, + argument.getValue().getPartitionCount()); + } + + /** + * Tests the number of times Hive.createPartitions calls are executed with total number of + * partitions to is less than batch size + * + * @throws Exception + */ + @Test + public void testSmallNumberOfPartitions() throws Exception { + // create 10 dummy partitions + Set partsNotInMs = createPartsNotInMs(10); + Hive spyDb = Mockito.spy(db); + // batch size of 20 and decaying factor of 2 + ddlTask.createPartitionsInBatches(spyDb, repairOutput, partsNotInMs, table, 20, 2); + // there should be 1 call to create partitions with batch sizes of 10 + Mockito.verify(spyDb, Mockito.times(1)).createPartitions(Mockito.anyObject()); + ArgumentCaptor argument = ArgumentCaptor.forClass(AddPartitionDesc.class); + Mockito.verify(spyDb).createPartitions(argument.capture()); + Assert.assertEquals("Unexpected number of batch size", 10, + argument.getValue().getPartitionCount()); + } + + /** + * Tests the number of calls to createPartitions and the respective batch sizes when first call to + * createPartitions throws HiveException. 
The batch size should be reduced by the decayingFactor + * + * @throws Exception + */ + @Test + public void testBatchingWhenException() throws Exception { + // create 23 dummy partitions + Set partsNotInMs = createPartsNotInMs(23); + Hive spyDb = Mockito.spy(db); + // first call to createPartitions should throw exception + Mockito.doThrow(HiveException.class).doCallRealMethod().doCallRealMethod().when(spyDb) + .createPartitions(Mockito.any(AddPartitionDesc.class)); + + // test with a batch size of 30 and decaying factor of 2 + ddlTask.createPartitionsInBatches(spyDb, repairOutput, partsNotInMs, table, 30, 2); + // confirm the batch sizes were 23, 15, 8 in the three calls to create partitions + ArgumentCaptor argument = ArgumentCaptor.forClass(AddPartitionDesc.class); + // there should be 3 calls to create partitions with batch sizes of 23, 15, 8 + Mockito.verify(spyDb, Mockito.times(3)).createPartitions(argument.capture()); + List apds = argument.getAllValues(); + int retryAttempt = 1; + Assert.assertEquals( + String.format("Unexpected batch size in retry attempt %d ", retryAttempt++), 23, + apds.get(0).getPartitionCount()); + Assert.assertEquals( + String.format("Unexpected batch size in retry attempt %d ", retryAttempt++), 15, + apds.get(1).getPartitionCount()); + Assert.assertEquals( + String.format("Unexpected batch size in retry attempt %d ", retryAttempt++), 8, + apds.get(2).getPartitionCount()); + } + + /** + * Tests the retries exhausted case when Hive.createPartitions method call always keeps throwing + * HiveException. 
The batch sizes should exponentially decreased based on the decaying factor and + * ultimately give up when it reaches 0 + * + * @throws Exception + */ + @Test + public void testRetriesExhausted() throws Exception { + Set partsNotInMs = createPartsNotInMs(17); + Hive spyDb = Mockito.spy(db); + Mockito.doThrow(HiveException.class).when(spyDb) + .createPartitions(Mockito.any(AddPartitionDesc.class)); + // batch size of 5 and decaying factor of 2 + Exception ex = null; + try { + ddlTask.createPartitionsInBatches(spyDb, repairOutput, partsNotInMs, table, 30, 2); + } catch (Exception retryEx) { + ex = retryEx; + } + Assert.assertFalse("Exception was expected but was not thrown", ex == null); + Assert.assertTrue("Unexpected class of exception thrown", ex instanceof RetryException); + // there should be 5 calls to create partitions with batch sizes of 17, 15, 7, 3, 1 + ArgumentCaptor argument = ArgumentCaptor.forClass(AddPartitionDesc.class); + Mockito.verify(spyDb, Mockito.times(5)).createPartitions(argument.capture()); + List apds = argument.getAllValues(); + int retryAttempt = 1; + Assert.assertEquals( + String.format("Unexpected batch size in retry attempt %d ", retryAttempt++), 17, + apds.get(0).getPartitionCount()); + Assert.assertEquals( + String.format("Unexpected batch size in retry attempt %d ", retryAttempt++), 15, + apds.get(1).getPartitionCount()); + Assert.assertEquals( + String.format("Unexpected batch size in retry attempt %d ", retryAttempt++), 7, + apds.get(2).getPartitionCount()); + Assert.assertEquals( + String.format("Unexpected batch size in retry attempt %d ", retryAttempt++), 3, + apds.get(3).getPartitionCount()); + Assert.assertEquals( + String.format("Unexpected batch size in retry attempt %d ", retryAttempt++), 1, + apds.get(4).getPartitionCount()); + } +} diff --git a/ql/src/test/results/clientpositive/msck_repair_0.q.out b/ql/src/test/results/clientpositive/msck_repair_0.q.out index 
2e0d9dc423071ebbd9a55606f196cf7752e27b1a..d60a07cf293249f5a6b14bf459ec9eeac3499f63 100644 --- a/ql/src/test/results/clientpositive/msck_repair_0.q.out +++ b/ql/src/test/results/clientpositive/msck_repair_0.q.out @@ -30,7 +30,7 @@ POSTHOOK: query: MSCK REPAIR TABLE default.repairtable POSTHOOK: type: MSCK POSTHOOK: Output: default@repairtable Partitions not in metastore: repairtable:p1=c/p2=a -Repair: Added partition to metastore default.repairtable:p1=c/p2=a +Repair: Added partition to metastore repairtable:p1=c/p2=a PREHOOK: query: MSCK TABLE repairtable PREHOOK: type: MSCK PREHOOK: Output: default@repairtable @@ -44,7 +44,7 @@ POSTHOOK: query: MSCK REPAIR TABLE default.repairtable POSTHOOK: type: MSCK POSTHOOK: Output: default@repairtable Partitions not in metastore: repairtable:p1=e/p2=f -Repair: Added partition to metastore default.repairtable:p1=e/p2=f +Repair: Added partition to metastore repairtable:p1=e/p2=f PREHOOK: query: DROP TABLE default.repairtable PREHOOK: type: DROPTABLE PREHOOK: Input: default@repairtable diff --git a/ql/src/test/results/clientpositive/msck_repair_1.q.out b/ql/src/test/results/clientpositive/msck_repair_1.q.out index 3f2fe75b194f1248bd5c073dd7db6b71b2ffc2ba..448d6de8a0484f3001ee65bf9b14dadec66a55a2 100644 --- a/ql/src/test/results/clientpositive/msck_repair_1.q.out +++ b/ql/src/test/results/clientpositive/msck_repair_1.q.out @@ -30,7 +30,7 @@ POSTHOOK: query: MSCK REPAIR TABLE default.repairtable POSTHOOK: type: MSCK POSTHOOK: Output: default@repairtable Partitions not in metastore: repairtable:p1=c/p2=a -Repair: Added partition to metastore default.repairtable:p1=c/p2=a +Repair: Added partition to metastore repairtable:p1=c/p2=a PREHOOK: query: MSCK TABLE repairtable PREHOOK: type: MSCK PREHOOK: Output: default@repairtable diff --git a/ql/src/test/results/clientpositive/msck_repair_2.q.out b/ql/src/test/results/clientpositive/msck_repair_2.q.out index 
3f2fe75b194f1248bd5c073dd7db6b71b2ffc2ba..448d6de8a0484f3001ee65bf9b14dadec66a55a2 100644 --- a/ql/src/test/results/clientpositive/msck_repair_2.q.out +++ b/ql/src/test/results/clientpositive/msck_repair_2.q.out @@ -30,7 +30,7 @@ POSTHOOK: query: MSCK REPAIR TABLE default.repairtable POSTHOOK: type: MSCK POSTHOOK: Output: default@repairtable Partitions not in metastore: repairtable:p1=c/p2=a -Repair: Added partition to metastore default.repairtable:p1=c/p2=a +Repair: Added partition to metastore repairtable:p1=c/p2=a PREHOOK: query: MSCK TABLE repairtable PREHOOK: type: MSCK PREHOOK: Output: default@repairtable diff --git a/ql/src/test/results/clientpositive/msck_repair_3.q.out b/ql/src/test/results/clientpositive/msck_repair_3.q.out index 3f2fe75b194f1248bd5c073dd7db6b71b2ffc2ba..448d6de8a0484f3001ee65bf9b14dadec66a55a2 100644 --- a/ql/src/test/results/clientpositive/msck_repair_3.q.out +++ b/ql/src/test/results/clientpositive/msck_repair_3.q.out @@ -30,7 +30,7 @@ POSTHOOK: query: MSCK REPAIR TABLE default.repairtable POSTHOOK: type: MSCK POSTHOOK: Output: default@repairtable Partitions not in metastore: repairtable:p1=c/p2=a -Repair: Added partition to metastore default.repairtable:p1=c/p2=a +Repair: Added partition to metastore repairtable:p1=c/p2=a PREHOOK: query: MSCK TABLE repairtable PREHOOK: type: MSCK PREHOOK: Output: default@repairtable diff --git a/ql/src/test/results/clientpositive/msck_repair_batchsize.q.out b/ql/src/test/results/clientpositive/msck_repair_batchsize.q.out index ba99024163a1f2c59d59e9ed7ea276c154c99d24..cae6505e931c40da90fcaa6d16f1d5d5f1f6f12a 100644 --- a/ql/src/test/results/clientpositive/msck_repair_batchsize.q.out +++ b/ql/src/test/results/clientpositive/msck_repair_batchsize.q.out @@ -30,9 +30,9 @@ POSTHOOK: query: MSCK REPAIR TABLE default.repairtable POSTHOOK: type: MSCK POSTHOOK: Output: default@repairtable Partitions not in metastore: repairtable:p1=a/p2=a repairtable:p1=b/p2=a repairtable:p1=c/p2=a -Repair: Added partition to 
metastore default.repairtable:p1=a/p2=a -Repair: Added partition to metastore default.repairtable:p1=b/p2=a -Repair: Added partition to metastore default.repairtable:p1=c/p2=a +Repair: Added partition to metastore repairtable:p1=c/p2=a +Repair: Added partition to metastore repairtable:p1=b/p2=a +Repair: Added partition to metastore repairtable:p1=a/p2=a PREHOOK: query: MSCK TABLE repairtable PREHOOK: type: MSCK PREHOOK: Output: default@repairtable diff --git a/ql/src/test/results/clientpositive/repair.q.out b/ql/src/test/results/clientpositive/repair.q.out index c1834640a35500c521a904a115a718c94546df10..6e072a2ec8cc7a80b184f29c8cc91941e537731d 100644 --- a/ql/src/test/results/clientpositive/repair.q.out +++ b/ql/src/test/results/clientpositive/repair.q.out @@ -30,8 +30,8 @@ POSTHOOK: query: MSCK REPAIR TABLE default.repairtable POSTHOOK: type: MSCK POSTHOOK: Output: default@repairtable Partitions not in metastore: repairtable:p1=a/p2=a repairtable:p1=b/p2=a -Repair: Added partition to metastore default.repairtable:p1=a/p2=a -Repair: Added partition to metastore default.repairtable:p1=b/p2=a +Repair: Added partition to metastore repairtable:p1=a/p2=a +Repair: Added partition to metastore repairtable:p1=b/p2=a PREHOOK: query: MSCK TABLE repairtable PREHOOK: type: MSCK PREHOOK: Output: default@repairtable