diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java index dc7b2877bf..0c123e9796 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java @@ -142,12 +142,14 @@ public void setup() throws Exception { hiveConf.setVar(HiveConf.ConfVars.HIVEINPUTFORMAT, HiveInputFormat.class.getName()); hiveConf.setVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict"); + TxnDbUtil.setConfValues(hiveConf); TxnDbUtil.cleanDb(hiveConf); TxnDbUtil.prepDb(hiveConf); conf = hiveConf; HiveConf.setBoolVar(conf, ConfVars.HIVE_MM_ALLOW_ORIGINALS, true); + HiveConf.setIntVar(conf, ConfVars.HIVE_COMPACTOR_ABORTEDTXN_THRESHOLD, 0); msClient = new HiveMetaStoreClient(conf); driver = DriverFactory.newDriver(hiveConf); SessionState.start(new CliSessionState(hiveConf)); @@ -793,6 +795,271 @@ private void majorCompactAfterAbort(boolean newStreamingAPI) throws Exception { checkExpectedTxnsPresent(stat[0].getPath(), null, columnNamesProperty, columnTypesProperty, 0, 1L, 4L, 1); } + @Test + public void testCleanAbortCompactAfterAbortTwoPartitions() throws Exception { + String dbName = "default"; + String tblName = "cws"; + + HiveStreamingConnection connection1 = prepareTableTwoPartitionsAndConnection(dbName, tblName, 1); + HiveStreamingConnection connection2 = prepareTableTwoPartitionsAndConnection(dbName, tblName, 1); + + connection1.beginTransaction(); + connection1.write("1,1".getBytes()); + connection1.write("2,2".getBytes()); + connection1.abortTransaction(); + + connection2.beginTransaction(); + connection2.write("1,3".getBytes()); + connection2.write("2,3".getBytes()); + connection2.write("3,3".getBytes()); + connection2.abortTransaction(); + + assertAndCompactCleanAbort(dbName, tblName); + + connection1.close(); + connection2.close(); + } + + @Test + public void testCleanAbortCompactAfterAbort() throws Exception { + String dbName = "default"; + String tblName = "cws"; + + // Create three folders with two different transactions + HiveStreamingConnection connection1 = prepareTableAndConnection(dbName, tblName, 1); + HiveStreamingConnection connection2 = prepareTableAndConnection(dbName, tblName, 1); + + connection1.beginTransaction(); + connection1.write("1,1".getBytes()); + connection1.write("2,2".getBytes()); + connection1.abortTransaction(); + + connection2.beginTransaction(); + connection2.write("1,3".getBytes()); + connection2.write("2,3".getBytes()); + connection2.write("3,3".getBytes()); + connection2.abortTransaction(); + + assertAndCompactCleanAbort(dbName, tblName); + + connection1.close(); + connection2.close(); + } + + private void assertAndCompactCleanAbort(String dbName, String tblName) throws Exception { + IMetaStoreClient msClient = new HiveMetaStoreClient(conf); + TxnStore txnHandler = TxnUtils.getTxnStore(conf); + Table table = msClient.getTable(dbName, tblName); + FileSystem fs = FileSystem.get(conf); + FileStatus[] stat = + fs.listStatus(new Path(table.getSd().getLocation())); + if (3 != stat.length) { + Assert.fail("Expecting three directories corresponding to three partitions, FileStatus[] stat " + Arrays.toString(stat)); + } + + int count = TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where TC_OPERATION_TYPE='p'"); + // We should have two rows corresponding to the two aborted transactions + 
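// A hypothetical helper (editor's sketch, not part of the patch) that factors out the
// begin/write/abort sequence these streaming tests repeat; it only uses the HiveStreamingConnection
// calls already exercised above and would live alongside prepareTableAndConnection.
private void writeAndAbort(HiveStreamingConnection connection, String... records) throws Exception {
  connection.beginTransaction();
  for (String record : records) {
    // Records are comma-delimited, matching the StrictDelimitedInputWriter configured for the connection.
    connection.write(record.getBytes());
  }
  // Abort instead of commit, leaving delta directories and TXN_COMPONENTS entries behind for the cleaner.
  connection.abortTransaction();
}
// e.g. writeAndAbort(connection1, "1,1", "2,2"); writeAndAbort(connection2, "1,3", "2,3", "3,3");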
Assert.assertEquals(TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), 2, count); + + runInitiator(conf); + count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPACTION_QUEUE where CQ_TYPE='p'"); + // Only one job is added to the queue per table. This job corresponds to all the entries for a particular table + // with rows in TXN_COMPONENTS + Assert.assertEquals(TxnDbUtil.queryToString(conf, "select * from COMPACTION_QUEUE"), 1, count); + + ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest()); + Assert.assertEquals(1, rsp.getCompacts().size()); + Assert.assertEquals(TxnStore.CLEANING_RESPONSE, rsp.getCompacts().get(0).getState()); + Assert.assertEquals("cws", rsp.getCompacts().get(0).getTablename()); + Assert.assertEquals(CompactionType.CLEAN_ABORTED, + rsp.getCompacts().get(0).getType()); + + runCleaner(conf); + + // After the cleaner runs TXN_COMPONENTS and COMPACTION_QUEUE should have zero rows, also the folders should have been deleted. + count = TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS"); + Assert.assertEquals(TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), 0, count); + + count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPACTION_QUEUE"); + Assert.assertEquals(TxnDbUtil.queryToString(conf, "select * from COMPACTION_QUEUE"), 0, count); + + stat = + fs.listStatus(new Path(table.getSd().getLocation())); + if (0 != stat.length) { + Assert.fail("Expecting compaction to have cleaned the directories, FileStatus[] stat " + Arrays.toString(stat)); + } + + rsp = txnHandler.showCompact(new ShowCompactRequest()); + Assert.assertEquals(1, rsp.getCompacts().size()); + Assert.assertEquals(TxnStore.SUCCEEDED_RESPONSE, rsp.getCompacts().get(0).getState()); + Assert.assertEquals("cws", rsp.getCompacts().get(0).getTablename()); + Assert.assertEquals(CompactionType.CLEAN_ABORTED, + rsp.getCompacts().get(0).getType()); + } + + @Test + public void testCleanAbortCompactSeveralTables() throws Exception { + String dbName = "default"; + String tblName1 = "cws1"; + String tblName2 = "cws2"; + + HiveStreamingConnection connection1 = prepareTableAndConnection(dbName, tblName1, 1); + HiveStreamingConnection connection2 = prepareTableAndConnection(dbName, tblName2, 1); + + connection1.beginTransaction(); + connection1.write("1,1".getBytes()); + connection1.write("2,2".getBytes()); + connection1.abortTransaction(); + + connection2.beginTransaction(); + connection2.write("1,1".getBytes()); + connection2.write("2,2".getBytes()); + connection2.abortTransaction(); + + IMetaStoreClient msClient = new HiveMetaStoreClient(conf); + FileSystem fs = FileSystem.get(conf); + Table table1 = msClient.getTable(dbName, tblName1); + FileStatus[] stat = + fs.listStatus(new Path(table1.getSd().getLocation())); + if (2 != stat.length) { + Assert.fail("Expecting two directories corresponding to two partitions, FileStatus[] stat " + Arrays.toString(stat)); + } + Table table2 = msClient.getTable(dbName, tblName2); + stat = fs.listStatus(new Path(table2.getSd().getLocation())); + if (2 != stat.length) { + Assert.fail("Expecting two directories corresponding to two partitions, FileStatus[] stat " + Arrays.toString(stat)); + } + + int count = TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where TC_OPERATION_TYPE='p'"); + // We should have two rows corresponding to the two aborted transactions + Assert.assertEquals(TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), 2, count); + + runInitiator(conf); + count 
= TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPACTION_QUEUE where CQ_TYPE='p'"); + // Only one job is added to the queue per table. This job corresponds to all the entries for a particular table + // with rows in TXN_COMPONENTS + Assert.assertEquals(TxnDbUtil.queryToString(conf, "select * from COMPACTION_QUEUE"), 2, count); + + runCleaner(conf); + + // After the cleaner runs, TXN_COMPONENTS and COMPACTION_QUEUE should have zero rows and the folders should have been deleted. + count = TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS"); + Assert.assertEquals(TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), 0, count); + + count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPACTION_QUEUE"); + Assert.assertEquals(TxnDbUtil.queryToString(conf, "select * from COMPACTION_QUEUE"), 0, count); + + stat = + fs.listStatus(new Path(table1.getSd().getLocation())); + if (0 != stat.length) { + Assert.fail("Expecting compaction to have cleaned the directories, FileStatus[] stat " + Arrays.toString(stat)); + } + + connection1.close(); + connection2.close(); + } + + @Test + public void testCleanAbortCorrectlyCleaned() throws Exception { + // Test that the TXN_COMPONENTS entries are cleaned up properly when the transaction commits + String dbName = "default"; + String tblName = "cws"; + HiveStreamingConnection connection = prepareTableAndConnection(dbName, tblName, 1); + connection.beginTransaction(); + connection.write("1,1".getBytes()); + connection.write("2,2".getBytes()); + + int count = TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where TC_OPERATION_TYPE='p'"); + // We should have one row corresponding to the open transaction's dynamic partition write + Assert.assertEquals(TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), 1, count); + + connection.commitTransaction(); + + // After commit the row should have been deleted + count = TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where TC_OPERATION_TYPE='p'"); + Assert.assertEquals(TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), 0, count); + } + + @Test + public void testCleanAbortAndMinorCompact() throws Exception { + String dbName = "default"; + String tblName = "cws"; + + HiveStreamingConnection connection = prepareTableAndConnection(dbName, tblName, 1); + + connection.beginTransaction(); + connection.write("1,1".getBytes()); + connection.write("2,2".getBytes()); + connection.abortTransaction(); + + executeStatementOnDriver("insert into " + tblName + " partition (a) values (1, '1')", driver); + + conf.setIntVar(HiveConf.ConfVars.HIVE_COMPACTOR_DELTA_NUM_THRESHOLD, 0); + runInitiator(conf); + runWorker(conf); + + int count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPACTION_QUEUE"); + Assert.assertEquals(TxnDbUtil.queryToString(conf, "select * from COMPACTION_QUEUE"), 2, count); + // Cleaning for the minor compaction and the clean-abort entry should run concurrently in separate threads. 
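// A hypothetical helper (editor's sketch, not part of the patch), meant to be called after
// runCleaner(conf) below, showing how the final compaction states could also be verified through
// the TxnStore API already used in assertAndCompactCleanAbort.
private void assertAllCompactionsSucceeded(TxnStore txnHandler) throws Exception {
  ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
  for (ShowCompactResponseElement e : rsp.getCompacts()) {
    // Both the minor compaction and the clean-abort entry should end up SUCCEEDED once cleaning is done.
    Assert.assertEquals(TxnStore.SUCCEEDED_RESPONSE, e.getState());
  }
}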
+ runCleaner(conf); + + count = TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS"); + Assert.assertEquals(TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), 0, count); + + count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPACTION_QUEUE"); + Assert.assertEquals(TxnDbUtil.queryToString(conf, "select * from COMPACTION_QUEUE"), 0, count); + } + + private HiveStreamingConnection prepareTableAndConnection(String dbName, String tblName, int batchSize) throws Exception { + String agentInfo = "UT_" + Thread.currentThread().getName(); + + executeStatementOnDriver("drop table if exists " + tblName, driver); + executeStatementOnDriver("CREATE TABLE " + tblName + "(b STRING) " + + " PARTITIONED BY (a INT)" + //currently ACID requires table to be bucketed + " STORED AS ORC TBLPROPERTIES ('transactional'='true')", driver); + + StrictDelimitedInputWriter writer = StrictDelimitedInputWriter.newBuilder() + .withFieldDelimiter(',') + .build(); + + // Create three folders with two different transactions + return HiveStreamingConnection.newBuilder() + .withDatabase(dbName) + .withTable(tblName) + .withAgentInfo(agentInfo) + .withHiveConf(conf) + .withRecordWriter(writer) + .withStreamingOptimizations(true) + // Transaction size has to be one or exception should happen. + .withTransactionBatchSize(batchSize) + .connect(); + } + + private HiveStreamingConnection prepareTableTwoPartitionsAndConnection(String dbName, String tblName, int batchSize) throws Exception { + String agentInfo = "UT_" + Thread.currentThread().getName(); + + executeStatementOnDriver("drop table if exists " + tblName, driver); + executeStatementOnDriver("CREATE TABLE " + tblName + "(c STRING) " + + " PARTITIONED BY (a INT, b INT)" + //currently ACID requires table to be bucketed + " STORED AS ORC TBLPROPERTIES ('transactional'='true')", driver); + + StrictDelimitedInputWriter writer = StrictDelimitedInputWriter.newBuilder() + .withFieldDelimiter(',') + .build(); + + // Create three folders with two different transactions + return HiveStreamingConnection.newBuilder() + .withDatabase(dbName) + .withTable(tblName) + .withAgentInfo(agentInfo) + .withHiveConf(conf) + .withRecordWriter(writer) + .withStreamingOptimizations(true) + // Transaction size has to be one or exception should happen. 
+ .withTransactionBatchSize(batchSize) + .connect(); + } @Test public void mmTable() throws Exception { diff --git ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java index 5dbf634825..5c20c52f67 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java @@ -27,6 +27,8 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Properties; @@ -42,6 +44,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; +import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.hive.common.*; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; @@ -2532,4 +2535,68 @@ public static void validateAcidPartitionLocation(String location, Configuration throw new SemanticException(ErrorMsg.INVALID_PATH.getMsg(ex.getMessage()), ex); } } + + public static List deleteDeltaDirectories(Path rootPartition, + Configuration conf, Set writeIds) throws IOException { + FileSystem fs = rootPartition.getFileSystem(conf); + PathFilter filter = (p) -> { + String name = p.getName(); + for (Long wId: writeIds) { + if (name.startsWith(deltaSubdir(wId, wId)) && !name.contains("=")) { + return true; + } + } + return false; + }; + List deleted = new ArrayList<>(); + deleteDeltaDirectoriesAux(rootPartition, fs, filter, deleted); + return deleted; + } + + private static void deleteDeltaDirectoriesAux(Path root, FileSystem fs, PathFilter filter, List deleted) throws IOException { + RemoteIterator it = listIterator(fs, root, null); + while (it.hasNext()) { + FileStatus fStatus = it.next(); + if (fStatus.isDirectory()) { + if (filter.accept(fStatus.getPath())) { + fs.delete(fStatus.getPath(), true); + deleted.add(fStatus); + } else { + deleteDeltaDirectoriesAux(fStatus.getPath(), fs, filter, deleted); + if (isDirectoryEmpty(fs, fStatus.getPath())) { + fs.delete(fStatus.getPath(), false); + deleted.add(fStatus); + } + } + } + } + } + + private static boolean isDirectoryEmpty(FileSystem fs, Path path) throws IOException { + RemoteIterator it = listIterator(fs, path, null); + return !it.hasNext(); + } + + private static RemoteIterator listIterator(FileSystem fs, Path path, PathFilter filter) + throws IOException { + try { + return new ToFileStatusIterator(SHIMS.listLocatedHdfsStatusIterator(fs, path, filter)); + } catch (Throwable t) { + return HdfsUtils.listLocatedStatusIterator(fs, path, filter); + } + } + + static class ToFileStatusIterator implements RemoteIterator { + private final RemoteIterator it; + ToFileStatusIterator(RemoteIterator it) { + this.it = it; + } + @Override public boolean hasNext() throws IOException { + return it.hasNext(); + } + + @Override public FileStatus next() throws IOException { + return it.next().getFileStatus(); + } + } } diff --git ql/src/java/org/apache/hadoop/hive/ql/io/HdfsUtils.java ql/src/java/org/apache/hadoop/hive/ql/io/HdfsUtils.java index 3482cfce36..e6f97b187f 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/HdfsUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/HdfsUtils.java @@ -94,6 +94,54 @@ public static long createTestFileId( return result; } + public static RemoteIterator listLocatedStatusIterator(final FileSystem fs, + final Path path, + final PathFilter filter + ) throws IOException { + return 
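// An illustrative sketch (not part of the patch) of driving the AcidUtils.deleteDeltaDirectories
// helper added above. The write ids and the table location are made up for the example; the filter
// inside the helper only matches directory names starting with the single-write-id delta prefix
// built by deltaSubdir(wId, wId) (e.g. delta_0000018_0000018) and skips names containing '=', so
// partition directories such as a=1 are recursed into rather than deleted.
Set<Long> abortedWriteIds = new HashSet<>(Arrays.asList(18L, 19L));   // hypothetical aborted write ids
Path tableRoot = new Path("/warehouse/cws");                          // hypothetical table location
List<FileStatus> removed = AcidUtils.deleteDeltaDirectories(tableRoot, conf, abortedWriteIds);
for (FileStatus dir : removed) {
  LOG.debug("Removed aborted delta directory " + dir.getPath());
}
// Note that the returned list also includes partition directories that became empty along the way,
// since the helper deletes those as well once their delta children are gone.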
new FilterRemoteIterator(fs.listLocatedStatus(path), filter); + } + + static class FilterRemoteIterator implements RemoteIterator { + private final RemoteIterator it; + private final PathFilter filter; + private boolean nextUsed = true; + private FileStatus next = null; + + FilterRemoteIterator(RemoteIterator it, PathFilter filter) { + this.it = it; + this.filter = filter; + } + + @Override public boolean hasNext() throws IOException { + if (!nextUsed) { + return next != null; + } + next = getNext(); + nextUsed = false; + return next != null; + } + + @Override public FileStatus next() throws IOException { + if (!nextUsed) { + nextUsed = true; + return next; + } + return getNext(); + } + + private FileStatus getNext() throws IOException { + while (it.hasNext()) { + FileStatus fStatus = it.next(); + if (filter == null) { + return fStatus; + } else if (filter.accept(fStatus.getPath())) { + return fStatus; + } + } + return null; + } + } + // TODO: this relies on HDFS not changing the format; we assume if we could get inode ID, this // is still going to work. Otherwise, file IDs can be turned off. Later, we should use // as public utility method in HDFS to obtain the inode-based path. diff --git ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java index 06b0209aa0..7e440de8eb 100644 --- ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java +++ ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java @@ -25,6 +25,7 @@ import org.apache.hadoop.hive.metastore.api.GetValidWriteIdsRequest; import org.apache.hadoop.hive.metastore.api.GetValidWriteIdsResponse; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.txn.TxnCommonUtils; import org.apache.hadoop.hive.metastore.txn.TxnStore; import org.apache.hadoop.hive.metastore.txn.TxnUtils; @@ -49,10 +50,21 @@ import java.io.IOException; import java.security.PrivilegedExceptionAction; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; +import java.util.Comparator; import java.util.List; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.FutureTask; +import java.util.concurrent.PriorityBlockingQueue; +import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog; @@ -65,11 +77,32 @@ private long cleanerCheckInterval = 0; private ReplChangeManager replChangeManager; + private ExecutorService executorService; + + private final int KEEPALIVE_SECONDS = 60; + private Lock rsLock = new ReentrantLock(); @Override public void init(AtomicBoolean stop, AtomicBoolean looped) throws Exception { super.init(stop, looped); replChangeManager = ReplChangeManager.getInstance(conf); + int coreThreads = MetastoreConf.getIntVar(conf, + MetastoreConf.ConfVars.COMPACTOR_CORE_CLEANER_THREADS); + int maxThreads = MetastoreConf.getIntVar(conf, + MetastoreConf.ConfVars.COMPACTOR_MAX_CLEANER_THREADS); + assert coreThreads > 0; + assert maxThreads > 0; + assert maxThreads >= coreThreads; + BlockingQueue cleanTaks = new 
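// An illustrative sketch (not part of the patch) of consuming the lazily filtering iterator
// returned by HdfsUtils.listLocatedStatusIterator above; AcidUtils.listIterator falls back to it
// when the shim-based HDFS listing is unavailable. The path and filter are made up for the example.
Path partitionDir = new Path("/warehouse/cws/a=1");                   // hypothetical partition directory
FileSystem fs = partitionDir.getFileSystem(conf);
PathFilter deltaOnly = p -> p.getName().startsWith("delta_");         // hypothetical filter
RemoteIterator<FileStatus> entries = HdfsUtils.listLocatedStatusIterator(fs, partitionDir, deltaOnly);
while (entries.hasNext()) {
  // hasNext()/next() pull entries one at a time through FilterRemoteIterator, so only a single
  // element is ever buffered.
  LOG.debug("Matched " + entries.next().getPath());
}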
PriorityBlockingQueue(coreThreads, + Comparator.comparing((cw) -> { + try { + return ((CleanWork)((FutureTask)cw).get()).getPriority(); + } catch (InterruptedException | ExecutionException e) { + LOG.info("Exception in priority queue: " + e.getMessage()); + } + return 0; + })); + executorService = new ThreadPoolExecutor(coreThreads, maxThreads, KEEPALIVE_SECONDS, TimeUnit.SECONDS, cleanTaks); } @Override @@ -92,9 +125,20 @@ public void run() { handle = txnHandler.getMutexAPI().acquireLock(TxnStore.MUTEX_KEY.Cleaner.name()); startedAt = System.currentTimeMillis(); long minOpenTxnId = txnHandler.findMinOpenTxnId(); - for(CompactionInfo compactionInfo : txnHandler.findReadyToClean()) { - clean(compactionInfo, minOpenTxnId); + Collection> calls = new ArrayList<>(); + List cis = txnHandler.findReadyToClean(); + LOG.info("Found " + cis.size() + " potential compactions to clean"); + for(CompactionInfo compactionInfo : cis) { + calls.add(new CleanWork(compactionInfo, minOpenTxnId, rsLock)); } + // We have to wait for all the Callables to finish before proceeding, because otherwise the + // next iteration could pick up the same cleaning work again. Once a cleaning task finishes, + // markCleaned is called and the corresponding rows are removed from TXN_COMPONENTS, which + // prevents this. + // TODO: optimize this so we don't have this constraint. + // Maybe add a new state besides READY_FOR_CLEANING, e.g. CLEANING_RUNNING + executorService.invokeAll(calls); + } catch (Throwable t) { LOG.error("Caught an exception in the main loop of compactor cleaner, " + StringUtils.stringifyException(t)); @@ -121,150 +165,301 @@ public void run() { } while (!stop.get()); } - private void clean(CompactionInfo ci, long minOpenTxnGLB) throws MetaException { - LOG.info("Starting cleaning for " + ci); - try { - Table t = resolveTable(ci); - if (t == null) { - // The table was dropped before we got around to cleaning it. - LOG.info("Unable to find table " + ci.getFullTableName() + ", assuming it was dropped." + - idWatermark(ci)); - txnHandler.markCleaned(ci); + private static String idWatermark(CompactionInfo ci) { + return " id=" + ci.id; + } + + /** + * A unit of cleaning work that handles one of two cases. In the first case we scan all the + * partition directories and delete the files of transactions that were aborted before + * addDynamicPartitions was called. The second case does the "regular" clean and removes files + * left behind by compaction and by other kinds of aborted transactions. + */ + private class CleanWork implements Callable { + final CompactionInfo ci; + final long minOpenTxnGLB; + final Lock rsLock; + /** + * Constructor for a clean task (used for both kinds of clean work). + * @param ci compaction info. + */ + CleanWork(CompactionInfo ci, long minOpenTxnGLB, Lock rsLock) { + this.ci = ci; + this.minOpenTxnGLB = minOpenTxnGLB; + this.rsLock = rsLock; + } + + @Override + public Void call() { + try { + clean(); + } catch (Throwable t) { + LOG.error("Caught an exception in the main loop of compactor cleaner, " + + StringUtils.stringifyException(t)); + } + return null; + } + + void clean() throws MetaException { + if (ci.isCleanAbortedCompaction()) { + cleanAborted(); + } else { + cleanRegular(); + } + } + + private void cleanAborted() throws MetaException { + if (ci.writeIds == null || ci.writeIds.size() == 0) { + LOG.warn("Attempted to clean aborted transactions with an empty writeId list"); return; } - Partition p = null; - if (ci.partName != null) { - p = resolvePartition(ci); - if (p == null) { - // The partition was dropped before we got around to cleaning it. 
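// The comparator wired into the PriorityBlockingQueue in init() above obtains the priority by
// calling get() on the submitted FutureTask. Purely as an editor's sketch, and assuming the goal
// is simply to order queued tasks by CleanWork.getPriority(), an alternative is to wrap each
// Callable in a task that already carries its priority, so the queue can compare entries directly
// (FutureTask and Callable are already imported in this class):
class PrioritizedFutureTask<T> extends FutureTask<T> implements Comparable<PrioritizedFutureTask<T>> {
  private final int priority;
  PrioritizedFutureTask(Callable<T> callable, int priority) {
    super(callable);
    this.priority = priority;
  }
  @Override
  public int compareTo(PrioritizedFutureTask<T> other) {
    // Lower value means higher priority, matching CleanWork.getPriority() further below.
    return Integer.compare(priority, other.priority);
  }
}
// A ThreadPoolExecutor subclass could then override newTaskFor so every CleanWork gets wrapped:
//   @Override protected <T> RunnableFuture<T> newTaskFor(Callable<T> c) {
//     int p = (c instanceof CleanWork) ? ((CleanWork) c).getPriority() : Integer.MAX_VALUE;
//     return new PrioritizedFutureTask<>(c, p);
//   }
// with a PriorityBlockingQueue<Runnable> relying on the tasks' natural ordering. For completeness,
// the pool size itself comes from the two MetastoreConf variables read in init(); a test could
// shrink it via the standard setter, e.g.
//   MetastoreConf.setLongVar(conf, MetastoreConf.ConfVars.COMPACTOR_CORE_CLEANER_THREADS, 1);
// (setter name assumed from the usual MetastoreConf API).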
- LOG.info("Unable to find partition " + ci.getFullPartitionName() + - ", assuming it was dropped." + idWatermark(ci)); + LOG.info("Starting abort cleaning for table " + ci.getFullTableName() + + ". This will scan all the partition directories."); + try { + Table t = syncResolveTable(); + if (t == null) { + // The table was dropped before we got around to cleaning it. + LOG.info("Unable to find table " + ci.getFullTableName() + ", assuming it was dropped." + + idWatermark(ci)); txnHandler.markCleaned(ci); return; } - } - StorageDescriptor sd = resolveStorageDescriptor(t, p); - final String location = sd.getLocation(); - ValidTxnList validTxnList = - TxnUtils.createValidTxnListForCleaner(txnHandler.getOpenTxns(), minOpenTxnGLB); - //save it so that getAcidState() sees it - conf.set(ValidTxnList.VALID_TXNS_KEY, validTxnList.writeToString()); - /** - * {@code validTxnList} is capped by minOpenTxnGLB so if - * {@link AcidUtils#getAcidState(Path, Configuration, ValidWriteIdList)} sees a base/delta - * produced by a compactor, that means every reader that could be active right now see it - * as well. That means if this base/delta shadows some earlier base/delta, the it will be - * used in favor of any files that it shadows. Thus the shadowed files are safe to delete. - * - * - * The metadata about aborted writeIds (and consequently aborted txn IDs) cannot be deleted - * above COMPACTION_QUEUE.CQ_HIGHEST_WRITE_ID. - * See {@link TxnStore#markCleaned(CompactionInfo)} for details. - * For example given partition P1, txnid:150 starts and sees txnid:149 as open. - * Say compactor runs in txnid:160, but 149 is still open and P1 has the largest resolved - * writeId:17. Compactor will produce base_17_c160. - * Suppose txnid:149 writes delta_18_18 - * to P1 and aborts. Compactor can only remove TXN_COMPONENTS entries - * up to (inclusive) writeId:17 since delta_18_18 may be on disk (and perhaps corrupted) but - * not visible based on 'validTxnList' capped at minOpenTxn so it will not not be cleaned by - * {@link #removeFiles(String, ValidWriteIdList, CompactionInfo)} and so we must keep the - * metadata that says that 18 is aborted. - * In a slightly different case, whatever txn created delta_18 (and all other txn) may have - * committed by the time cleaner runs and so cleaner will indeed see delta_18_18 and remove - * it (since it has nothing but aborted data). But we can't tell which actually happened - * in markCleaned() so make sure it doesn't delete meta above CG_CQ_HIGHEST_WRITE_ID. - * - * We could perhaps make cleaning of aborted and obsolete and remove all aborted files up - * to the current Min Open Write Id, this way aborted TXN_COMPONENTS meta can be removed - * as well up to that point which may be higher than CQ_HIGHEST_WRITE_ID. This could be - * useful if there is all of a sudden a flood of aborted txns. (For another day). 
- */ - List tblNames = Collections.singletonList( - TableName.getDbTable(t.getDbName(), t.getTableName())); - GetValidWriteIdsRequest rqst = new GetValidWriteIdsRequest(tblNames); - rqst.setValidTxnList(validTxnList.writeToString()); - GetValidWriteIdsResponse rsp = txnHandler.getValidWriteIds(rqst); - //we could have no write IDs for a table if it was never written to but - // since we are in the Cleaner phase of compactions, there must have - // been some delta/base dirs - assert rsp != null && rsp.getTblValidWriteIdsSize() == 1; - //Creating 'reader' list since we are interested in the set of 'obsolete' files - ValidReaderWriteIdList validWriteIdList = - TxnCommonUtils.createValidReaderWriteIdList(rsp.getTblValidWriteIds().get(0)); - if (runJobAsSelf(ci.runAs)) { - removeFiles(location, validWriteIdList, ci); - } else { - LOG.info("Cleaning as user " + ci.runAs + " for " + ci.getFullPartitionName()); - UserGroupInformation ugi = UserGroupInformation.createProxyUser(ci.runAs, - UserGroupInformation.getLoginUser()); - ugi.doAs(new PrivilegedExceptionAction() { - @Override - public Object run() throws Exception { - removeFiles(location, validWriteIdList, ci); - return null; + StorageDescriptor sd = resolveStorageDescriptor(t, null); + + if (runJobAsSelf(ci.runAs)) { + rmFilesClean(sd.getLocation(), ci); + } else { + LOG.info("Cleaning as user " + ci.runAs + " for " + ci.getFullPartitionName()); + UserGroupInformation ugi = UserGroupInformation.createProxyUser(ci.runAs, + UserGroupInformation.getLoginUser()); + ugi.doAs(new PrivilegedExceptionAction() { + @Override + public Object run() throws Exception { + rmFilesClean(sd.getLocation(), ci); + return null; + } + }); + try { + FileSystem.closeAllForUGI(ugi); + } catch (IOException exception) { + LOG.error("Could not clean up file-system handles for UGI: " + ugi + " for " + + ci.getFullPartitionName() + idWatermark(ci), exception); } - }); - try { - FileSystem.closeAllForUGI(ugi); - } catch (IOException exception) { - LOG.error("Could not clean up file-system handles for UGI: " + ugi + " for " + - ci.getFullPartitionName() + idWatermark(ci), exception); } + txnHandler.markCleaned(ci); + } catch (Exception e) { + LOG.error("Caught exception when cleaning, unable to complete cleaning of " + ci + " " + + StringUtils.stringifyException(e)); + txnHandler.markFailed(ci); } - txnHandler.markCleaned(ci); - } catch (Exception e) { - LOG.error("Caught exception when cleaning, unable to complete cleaning of " + ci + " " + - StringUtils.stringifyException(e)); - txnHandler.markFailed(ci); } - } - private static String idWatermark(CompactionInfo ci) { - return " id=" + ci.id; - } - private void removeFiles(String location, ValidWriteIdList writeIdList, CompactionInfo ci) - throws IOException, NoSuchObjectException { - Path locPath = new Path(location); - AcidUtils.Directory dir = AcidUtils.getAcidState(locPath, conf, writeIdList); - List obsoleteDirs = dir.getObsolete(); - /** - * add anything in 'dir' that only has data from aborted transactions - no one should be - * trying to read anything in that dir (except getAcidState() that only reads the name of - * this dir itself) - * So this may run ahead of {@link CompactionInfo#highestWriteId} but it's ok (suppose there - * are no active txns when cleaner runs). The key is to not delete metadata about aborted - * txns with write IDs > {@link CompactionInfo#highestWriteId}. 
- * See {@link TxnStore#markCleaned(CompactionInfo)} - */ - obsoleteDirs.addAll(dir.getAbortedDirectories()); - List filesToDelete = new ArrayList<>(obsoleteDirs.size()); - StringBuilder extraDebugInfo = new StringBuilder("["); - for (FileStatus stat : obsoleteDirs) { - filesToDelete.add(stat.getPath()); - extraDebugInfo.append(stat.getPath().getName()).append(","); - if(!FileUtils.isPathWithinSubtree(stat.getPath(), locPath)) { - LOG.info(idWatermark(ci) + " found unexpected file: " + stat.getPath()); + + private Table syncResolveTable() throws MetaException { + try { + rsLock.lock(); + return resolveTable(ci); + } finally { + rsLock.unlock(); + } + } + + private Database syncGetDatabase() throws NoSuchObjectException { + try { + rsLock.lock(); + return rs.getDatabase(getDefaultCatalog(conf), ci.dbname); + } finally { + rsLock.unlock(); } } - extraDebugInfo.setCharAt(extraDebugInfo.length() - 1, ']'); - LOG.info(idWatermark(ci) + " About to remove " + filesToDelete.size() + - " obsolete directories from " + location + ". " + extraDebugInfo.toString()); - if (filesToDelete.size() < 1) { - LOG.warn("Hmm, nothing to delete in the cleaner for directory " + location + - ", that hardly seems right."); - return; + + private Partition syncResolvePartition() throws Exception { + try { + rsLock.lock(); + return resolvePartition(ci); + } finally { + rsLock.unlock(); + } } - FileSystem fs = filesToDelete.get(0).getFileSystem(conf); - Database db = rs.getDatabase(getDefaultCatalog(conf), ci.dbname); - Boolean isSourceOfRepl = ReplChangeManager.isSourceOfReplication(db); + private void cleanRegular() throws MetaException { + LOG.info("Starting cleaning for " + ci); + try { + Table t = syncResolveTable(); + if (t == null) { + // The table was dropped before we got around to cleaning it. + LOG.info("Unable to find table " + ci.getFullTableName() + ", assuming it was dropped." + + idWatermark(ci)); + txnHandler.markCleaned(ci); + return; + } + Partition p = null; + if (ci.partName != null) { + p = syncResolvePartition(); + if (p == null) { + // The partition was dropped before we got around to cleaning it. + LOG.info("Unable to find partition " + ci.getFullPartitionName() + + ", assuming it was dropped." + idWatermark(ci)); + txnHandler.markCleaned(ci); + return; + } + } + StorageDescriptor sd = resolveStorageDescriptor(t, p); + final String location = sd.getLocation(); + ValidTxnList validTxnList = + TxnUtils.createValidTxnListForCleaner(txnHandler.getOpenTxns(), minOpenTxnGLB); + //save it so that getAcidState() sees it + conf.set(ValidTxnList.VALID_TXNS_KEY, validTxnList.writeToString()); + /** + * {@code validTxnList} is capped by minOpenTxnGLB so if + * {@link AcidUtils#getAcidState(Path, Configuration, ValidWriteIdList)} sees a base/delta + * produced by a compactor, that means every reader that could be active right now see it + * as well. That means if this base/delta shadows some earlier base/delta, the it will be + * used in favor of any files that it shadows. Thus the shadowed files are safe to delete. + * + * + * The metadata about aborted writeIds (and consequently aborted txn IDs) cannot be deleted + * above COMPACTION_QUEUE.CQ_HIGHEST_WRITE_ID. + * See {@link TxnStore#markCleaned(CompactionInfo)} for details. + * For example given partition P1, txnid:150 starts and sees txnid:149 as open. + * Say compactor runs in txnid:160, but 149 is still open and P1 has the largest resolved + * writeId:17. Compactor will produce base_17_c160. 
+ * Suppose txnid:149 writes delta_18_18 + * to P1 and aborts. Compactor can only remove TXN_COMPONENTS entries + * up to (inclusive) writeId:17 since delta_18_18 may be on disk (and perhaps corrupted) but + * not visible based on 'validTxnList' capped at minOpenTxn so it will not not be cleaned by + * {@link #removeFiles(String, ValidWriteIdList, CompactionInfo)} and so we must keep the + * metadata that says that 18 is aborted. + * In a slightly different case, whatever txn created delta_18 (and all other txn) may have + * committed by the time cleaner runs and so cleaner will indeed see delta_18_18 and remove + * it (since it has nothing but aborted data). But we can't tell which actually happened + * in markCleaned() so make sure it doesn't delete meta above CG_CQ_HIGHEST_WRITE_ID. + * + * We could perhaps make cleaning of aborted and obsolete and remove all aborted files up + * to the current Min Open Write Id, this way aborted TXN_COMPONENTS meta can be removed + * as well up to that point which may be higher than CQ_HIGHEST_WRITE_ID. This could be + * useful if there is all of a sudden a flood of aborted txns. (For another day). + */ + List tblNames = Collections.singletonList( + TableName.getDbTable(t.getDbName(), t.getTableName())); + GetValidWriteIdsRequest rqst = new GetValidWriteIdsRequest(tblNames); + rqst.setValidTxnList(validTxnList.writeToString()); + GetValidWriteIdsResponse rsp = txnHandler.getValidWriteIds(rqst); + //we could have no write IDs for a table if it was never written to but + // since we are in the Cleaner phase of compactions, there must have + // been some delta/base dirs + assert rsp != null && rsp.getTblValidWriteIdsSize() == 1; + //Creating 'reader' list since we are interested in the set of 'obsolete' files + ValidReaderWriteIdList validWriteIdList = + TxnCommonUtils.createValidReaderWriteIdList(rsp.getTblValidWriteIds().get(0)); + + if (runJobAsSelf(ci.runAs)) { + rmFilesRegular(location, validWriteIdList, ci); + } else { + LOG.info("Cleaning as user " + ci.runAs + " for " + ci.getFullPartitionName()); + UserGroupInformation ugi = UserGroupInformation.createProxyUser(ci.runAs, + UserGroupInformation.getLoginUser()); + ugi.doAs(new PrivilegedExceptionAction() { + @Override + public Object run() throws Exception { + rmFilesRegular(location, validWriteIdList, ci); + return null; + } + }); + try { + FileSystem.closeAllForUGI(ugi); + } catch (IOException exception) { + LOG.error("Could not clean up file-system handles for UGI: " + ugi + " for " + + ci.getFullPartitionName() + idWatermark(ci), exception); + } + } + txnHandler.markCleaned(ci); + } catch (Exception e) { + LOG.error("Caught exception when cleaning, unable to complete cleaning of " + ci + " " + + StringUtils.stringifyException(e)); + txnHandler.markFailed(ci); + } + } + + private void rmFilesClean(String rootLocation, CompactionInfo ci) throws IOException, NoSuchObjectException { + List deleted = AcidUtils.deleteDeltaDirectories(new Path(rootLocation), conf, ci.writeIds); + + if (deleted.size() == 0) { + LOG.info("No files were deleted in the clean abort compaction: " + idWatermark(ci)); + return; + } - for (Path dead : filesToDelete) { - LOG.debug("Going to delete path " + dead.toString()); - if (isSourceOfRepl) { - replChangeManager.recycle(dead, ReplChangeManager.RecycleType.MOVE, true); + FileSystem fs = deleted.get(0).getPath().getFileSystem(conf); + Database db = syncGetDatabase(); + Boolean isSourceOfRepl = ReplChangeManager.isSourceOfReplication(db); + + for (FileStatus dead : deleted) { 
+ Path deadPath = dead.getPath(); + LOG.debug("Deleted path " + deadPath.toString()); + if (isSourceOfRepl) { + replChangeManager.recycle(deadPath, ReplChangeManager.RecycleType.MOVE, true); + } + fs.delete(deadPath, true); + } + } + + private void rmFilesRegular(String location, ValidWriteIdList writeIdList, CompactionInfo ci) + throws IOException, NoSuchObjectException { + Path locPath = new Path(location); + AcidUtils.Directory dir = AcidUtils.getAcidState(locPath, conf, writeIdList); + List obsoleteDirs = dir.getObsolete(); + /** + * add anything in 'dir' that only has data from aborted transactions - no one should be + * trying to read anything in that dir (except getAcidState() that only reads the name of + * this dir itself) + * So this may run ahead of {@link CompactionInfo#highestWriteId} but it's ok (suppose there + * are no active txns when cleaner runs). The key is to not delete metadata about aborted + * txns with write IDs > {@link CompactionInfo#highestWriteId}. + * See {@link TxnStore#markCleaned(CompactionInfo)} + */ + obsoleteDirs.addAll(dir.getAbortedDirectories()); + List filesToDelete = new ArrayList<>(obsoleteDirs.size()); + StringBuilder extraDebugInfo = new StringBuilder("["); + for (FileStatus stat : obsoleteDirs) { + filesToDelete.add(stat.getPath()); + extraDebugInfo.append(stat.getPath().getName()).append(","); + if(!FileUtils.isPathWithinSubtree(stat.getPath(), locPath)) { + LOG.info(idWatermark(ci) + " found unexpected file: " + stat.getPath()); + } + } + extraDebugInfo.setCharAt(extraDebugInfo.length() - 1, ']'); + LOG.info(idWatermark(ci) + " About to remove " + filesToDelete.size() + + " obsolete directories from " + location + ". " + extraDebugInfo.toString()); + if (filesToDelete.size() < 1) { + LOG.warn("Hmm, nothing to delete in the cleaner for directory " + location + + ", that hardly seems right."); + return; + } + + FileSystem fs = filesToDelete.get(0).getFileSystem(conf); + Database db = syncGetDatabase(); + Boolean isSourceOfRepl = ReplChangeManager.isSourceOfReplication(db); + + for (Path dead : filesToDelete) { + LOG.debug("Deleted path " + dead.toString()); + if (isSourceOfRepl) { + replChangeManager.recycle(dead, ReplChangeManager.RecycleType.MOVE, true); + } + fs.delete(dead, true); + } + } + + /** + * Gives higher priority to regular clean tasks. Lower value + * means more priority + * @return priority. + */ + int getPriority() { + if (ci.isCleanAbortedCompaction()) { + return 1; + } else { + return 2; } - fs.delete(dead, true); } } } diff --git ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java index a0df82cb20..3c1368d50f 100644 --- ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java +++ ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java @@ -96,7 +96,7 @@ public void run() { LOG.debug("Found " + potentials.size() + " potential compactions, " + "checking to see if we should compact any of them"); for (CompactionInfo ci : potentials) { - LOG.info("Checking to see if we should compact " + ci.getFullPartitionName()); + LOG.info("Checking to see if we should compact " + ci.getFullPartitionName() + " with type " + ci.type); try { Table t = resolveTable(ci); if (t == null) { @@ -114,7 +114,7 @@ public void run() { // Check to see if this is a table level request on a partitioned table. If so, // then it's a dynamic partitioning case and we shouldn't check the table itself. 
- if (t.getPartitionKeys() != null && t.getPartitionKeys().size() > 0 && + if (!ci.isCleanAbortedCompaction() && t.getPartitionKeys() != null && t.getPartitionKeys().size() > 0 && ci.partName == null) { LOG.debug("Skipping entry for " + ci.getFullTableName() + " as it is from dynamic" + " partitioning"); @@ -140,7 +140,7 @@ public void run() { // Figure out who we should run the file operations as Partition p = resolvePartition(ci); - if (p == null && ci.partName != null) { + if (!ci.isCleanAbortedCompaction() && p == null && ci.partName != null) { LOG.info("Can't find partition " + ci.getFullPartitionName() + ", assuming it has been dropped and moving on."); continue; @@ -241,7 +241,11 @@ private CompactionType checkForCompaction(final CompactionInfo ci, if (ci.tooManyAborts) { LOG.debug("Found too many aborted transactions for " + ci.getFullPartitionName() + ", " + "initiating major compaction"); - return CompactionType.MAJOR; + if (ci.isCleanAbortedCompaction()) { + return CompactionType.CLEAN_ABORTED; + } else { + return CompactionType.MAJOR; + } } if (runJobAsSelf(runAs)) { @@ -270,6 +274,10 @@ private CompactionType determineCompactionType(CompactionInfo ci, ValidWriteIdLi StorageDescriptor sd, Map tblproperties) throws IOException, InterruptedException { + if (ci.isCleanAbortedCompaction()) { + return CompactionType.CLEAN_ABORTED; + } + boolean noBase = false; Path location = new Path(sd.getLocation()); FileSystem fs = location.getFileSystem(conf); diff --git ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java index 5e085f84af..cacdd32d45 100644 --- ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java +++ ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java @@ -1750,7 +1750,7 @@ private void testMerge3Way(boolean cc) throws Exception { Assert.assertEquals( "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnId1) + "): " + TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), - 0, + 1, TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnId1)); //complete 1st txn long writeId = txnMgr.getTableWriteId("default", "target"); @@ -1816,7 +1816,7 @@ private void testMerge3Way(boolean cc) throws Exception { Assert.assertEquals( "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnId2) + "): " + TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), - 0, + 1, TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnId2)); //complete 2nd txn writeId = txnMgr2.getTableWriteId("default", "target"); @@ -2043,7 +2043,8 @@ public void testDynamicPartitionInsert() throws Exception { Assert.assertEquals( "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnid1) + "): " + TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), - 0, + // We have one before addDynamicPartitions in case the txn fails before. 
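// The flipped expectations in this test file reflect that a dynamic-partition write now leaves a
// placeholder TXN_COMPONENTS row (TC_OPERATION_TYPE='p') before addDynamicPartitions runs, so a
// transaction that aborts in between still has something for the clean-abort path to find. A small
// debugging aid (editor's sketch, not part of the patch) using the same TxnDbUtil helpers as the
// assertions:
String dpRows = TxnDbUtil.queryToString(conf,
    "select * from TXN_COMPONENTS where tc_operation_type = 'p' and tc_txnid = " + txnId1);
System.out.println(dpRows);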
+ 1, TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnid1)); //now actually write to table to generate some partitions checkCmdOnDriver(driver.run("insert into target partition(p=1,q) values (1,2,2), (3,4,2), (5,6,3), (7,8,2)")); @@ -2138,7 +2139,7 @@ private void testMergePartitioned(boolean causeConflict) throws Exception { Assert.assertEquals( "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnId1) + "): " + TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), - 0,//because it's using a DP write + 1,//because it's using a DP write TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnId1)); //complete T1 transaction (simulate writing to 2 partitions) long writeId = txnMgr.getTableWriteId("default", "target"); @@ -2174,7 +2175,7 @@ private void testMergePartitioned(boolean causeConflict) throws Exception { Assert.assertEquals( "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnid2) + "): " + TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), - 0,//because it's using a DP write + 1,//because it's using a DP write TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnid2)); //complete T2 txn //simulate Insert into 2 partitions diff --git shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java index b6f70ebe63..4905a4d837 100644 --- shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java +++ shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java @@ -794,6 +794,95 @@ public Long getFileId() { return result; } + @Override + public RemoteIterator listLocatedHdfsStatusIterator( + FileSystem fs, Path path, PathFilter filter) throws IOException { + return new FileIterator(fs, path, filter); + } + + class FileIterator implements RemoteIterator { + private final DFSClient dfsc; + private final URI fsUri; + private final Path p; + private final String src; + + private DirectoryListing current; + private PathFilter filter; + private int i = 0; + private HdfsFileStatusWithId next = null; + private boolean nextUsed = true; + + org.apache.hadoop.hdfs.protocol.HdfsFileStatus[] hfss = null; + + FileIterator(FileSystem fs, Path p, PathFilter filter) throws IOException { + DistributedFileSystem dfs = ensureDfs(fs); + dfsc = dfs.getClient(); + src = p.toUri().getPath(); + current = dfsc.listPaths(src, + org.apache.hadoop.hdfs.protocol.HdfsFileStatus.EMPTY_NAME, true); + if (current == null) { // the directory does not exist + throw new FileNotFoundException("File " + p + " does not exist."); + } + fsUri = fs.getUri(); + this.filter = filter; + this.p = p; + } + + @Override + public boolean hasNext() throws IOException { + if (!nextUsed) { + return next != null; + } + next = getNext(); + nextUsed = false; + return next != null; + } + + @Override + public HdfsFileStatusWithId next() throws IOException { + if (!nextUsed) { + nextUsed = true; + return next; + } + return getNext(); + } + + private HdfsFileStatusWithId getNext() throws IOException { + if (!nextUsed) { + return next; + } + while (current != null) { + // First time we call getNext + if (hfss == null) { + hfss = current.getPartialListing(); + i = 0; + } else if (hfss.length == i) { + current = current.hasMore() ? 
dfsc + .listPaths(src, current.getLastName(), true) : null; + if (current == null) { + return null; + } + hfss = current.getPartialListing(); + i = 0; + } + + while (i < hfss.length) { + HdfsLocatedFileStatus next = (HdfsLocatedFileStatus) (hfss[i]); + i++; + if (filter != null) { + Path filterPath = next.getFullPath(p).makeQualified(fsUri, null); + if (!filter.accept(filterPath)) + continue; + } + LocatedFileStatus lfs = next.makeQualifiedLocated(fsUri, p); + return new HdfsFileStatusWithIdImpl(lfs, next.getFileId()); + } + } + return null; + + } + } + private DistributedFileSystem ensureDfs(FileSystem fs) { if (!(fs instanceof DistributedFileSystem)) { throw new UnsupportedOperationException("Only supported for DFS; got " + fs.getClass()); diff --git shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java index c569b242ae..e4f3465c40 100644 --- shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java +++ shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java @@ -26,6 +26,7 @@ import java.security.NoSuchAlgorithmException; import java.util.Collections; import java.util.Comparator; +import java.util.Iterator; import java.util.List; import java.util.Set; import java.util.TreeMap; @@ -40,6 +41,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; +import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.io.LongWritable; @@ -253,6 +255,9 @@ RecordReader getRecordReader(JobConf job, CombineFileSplit split, Reporter repor List listLocatedHdfsStatus( FileSystem fs, Path path, PathFilter filter) throws IOException; + RemoteIterator listLocatedHdfsStatusIterator( + FileSystem fs, Path path, PathFilter filter) throws IOException; + /** * For file status returned by listLocatedStatus, convert them into a list * of block locations. 
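// An illustrative sketch (not part of the patch) of calling the new shim entry point declared
// above. ShimLoader is the usual way Hive obtains a HadoopShims instance; a null filter returns
// every entry, mirroring how AcidUtils.listIterator invokes it. The path is made up for the example.
Path tableDir = new Path("/warehouse/cws");                           // hypothetical table location
FileSystem fs = tableDir.getFileSystem(conf);
HadoopShims shims = ShimLoader.getHadoopShims();
RemoteIterator<HadoopShims.HdfsFileStatusWithId> statuses =
    shims.listLocatedHdfsStatusIterator(fs, tableDir, null);
while (statuses.hasNext()) {
  HadoopShims.HdfsFileStatusWithId withId = statuses.next();
  // getFileStatus() is what AcidUtils.ToFileStatusIterator forwards; getFileId() is the HDFS inode id when available.
  LOG.debug(withId.getFileStatus().getPath() + " fileId=" + withId.getFileId());
}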
diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java index 9c33229270..af94d64d7a 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java @@ -816,13 +816,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AddDynamicPartition case 5: // PARTITIONNAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list724 = iprot.readListBegin(); - struct.partitionnames = new ArrayList(_list724.size); - String _elem725; - for (int _i726 = 0; _i726 < _list724.size; ++_i726) + org.apache.thrift.protocol.TList _list732 = iprot.readListBegin(); + struct.partitionnames = new ArrayList(_list732.size); + String _elem733; + for (int _i734 = 0; _i734 < _list732.size; ++_i734) { - _elem725 = iprot.readString(); - struct.partitionnames.add(_elem725); + _elem733 = iprot.readString(); + struct.partitionnames.add(_elem733); } iprot.readListEnd(); } @@ -872,9 +872,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AddDynamicPartitio oprot.writeFieldBegin(PARTITIONNAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partitionnames.size())); - for (String _iter727 : struct.partitionnames) + for (String _iter735 : struct.partitionnames) { - oprot.writeString(_iter727); + oprot.writeString(_iter735); } oprot.writeListEnd(); } @@ -910,9 +910,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddDynamicPartition oprot.writeString(struct.tablename); { oprot.writeI32(struct.partitionnames.size()); - for (String _iter728 : struct.partitionnames) + for (String _iter736 : struct.partitionnames) { - oprot.writeString(_iter728); + oprot.writeString(_iter736); } } BitSet optionals = new BitSet(); @@ -937,13 +937,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AddDynamicPartitions struct.tablename = iprot.readString(); struct.setTablenameIsSet(true); { - org.apache.thrift.protocol.TList _list729 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionnames = new ArrayList(_list729.size); - String _elem730; - for (int _i731 = 0; _i731 < _list729.size; ++_i731) + org.apache.thrift.protocol.TList _list737 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionnames = new ArrayList(_list737.size); + String _elem738; + for (int _i739 = 0; _i739 < _list737.size; ++_i739) { - _elem730 = iprot.readString(); - struct.partitionnames.add(_elem730); + _elem738 = iprot.readString(); + struct.partitionnames.add(_elem738); } } struct.setPartitionnamesIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java index f7d9ed2e2e..3d8314542a 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java +++ 
standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java @@ -877,14 +877,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AlterPartitionsRequ case 4: // PARTITIONS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list968 = iprot.readListBegin(); - struct.partitions = new ArrayList(_list968.size); - Partition _elem969; - for (int _i970 = 0; _i970 < _list968.size; ++_i970) + org.apache.thrift.protocol.TList _list976 = iprot.readListBegin(); + struct.partitions = new ArrayList(_list976.size); + Partition _elem977; + for (int _i978 = 0; _i978 < _list976.size; ++_i978) { - _elem969 = new Partition(); - _elem969.read(iprot); - struct.partitions.add(_elem969); + _elem977 = new Partition(); + _elem977.read(iprot); + struct.partitions.add(_elem977); } iprot.readListEnd(); } @@ -952,9 +952,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AlterPartitionsReq oprot.writeFieldBegin(PARTITIONS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitions.size())); - for (Partition _iter971 : struct.partitions) + for (Partition _iter979 : struct.partitions) { - _iter971.write(oprot); + _iter979.write(oprot); } oprot.writeListEnd(); } @@ -1000,9 +1000,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AlterPartitionsRequ oprot.writeString(struct.tableName); { oprot.writeI32(struct.partitions.size()); - for (Partition _iter972 : struct.partitions) + for (Partition _iter980 : struct.partitions) { - _iter972.write(oprot); + _iter980.write(oprot); } } BitSet optionals = new BitSet(); @@ -1041,14 +1041,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AlterPartitionsReque struct.tableName = iprot.readString(); struct.setTableNameIsSet(true); { - org.apache.thrift.protocol.TList _list973 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.partitions = new ArrayList(_list973.size); - Partition _elem974; - for (int _i975 = 0; _i975 < _list973.size; ++_i975) + org.apache.thrift.protocol.TList _list981 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.partitions = new ArrayList(_list981.size); + Partition _elem982; + for (int _i983 = 0; _i983 < _list981.size; ++_i983) { - _elem974 = new Partition(); - _elem974.read(iprot); - struct.partitions.add(_elem974); + _elem982 = new Partition(); + _elem982.read(iprot); + struct.partitions.add(_elem982); } } struct.setPartitionsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java index f4e3d6bd71..fbe6184c4a 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java @@ -351,13 +351,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ClearFileMetadataRe case 1: // FILE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list848 = iprot.readListBegin(); - struct.fileIds = new 
ArrayList(_list848.size); - long _elem849; - for (int _i850 = 0; _i850 < _list848.size; ++_i850) + org.apache.thrift.protocol.TList _list856 = iprot.readListBegin(); + struct.fileIds = new ArrayList(_list856.size); + long _elem857; + for (int _i858 = 0; _i858 < _list856.size; ++_i858) { - _elem849 = iprot.readI64(); - struct.fileIds.add(_elem849); + _elem857 = iprot.readI64(); + struct.fileIds.add(_elem857); } iprot.readListEnd(); } @@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ClearFileMetadataR oprot.writeFieldBegin(FILE_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.fileIds.size())); - for (long _iter851 : struct.fileIds) + for (long _iter859 : struct.fileIds) { - oprot.writeI64(_iter851); + oprot.writeI64(_iter859); } oprot.writeListEnd(); } @@ -410,9 +410,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ClearFileMetadataRe TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.fileIds.size()); - for (long _iter852 : struct.fileIds) + for (long _iter860 : struct.fileIds) { - oprot.writeI64(_iter852); + oprot.writeI64(_iter860); } } } @@ -421,13 +421,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ClearFileMetadataRe public void read(org.apache.thrift.protocol.TProtocol prot, ClearFileMetadataRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list853 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.fileIds = new ArrayList(_list853.size); - long _elem854; - for (int _i855 = 0; _i855 < _list853.size; ++_i855) + org.apache.thrift.protocol.TList _list861 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.fileIds = new ArrayList(_list861.size); + long _elem862; + for (int _i863 = 0; _i863 < _list861.size; ++_i863) { - _elem854 = iprot.readI64(); - struct.fileIds.add(_elem854); + _elem862 = iprot.readI64(); + struct.fileIds.add(_elem862); } } struct.setFileIdsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapabilities.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapabilities.java index 2b394449a3..db88f0f62e 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapabilities.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapabilities.java @@ -354,13 +354,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ClientCapabilities case 1: // VALUES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list864 = iprot.readListBegin(); - struct.values = new ArrayList(_list864.size); - ClientCapability _elem865; - for (int _i866 = 0; _i866 < _list864.size; ++_i866) + org.apache.thrift.protocol.TList _list872 = iprot.readListBegin(); + struct.values = new ArrayList(_list872.size); + ClientCapability _elem873; + for (int _i874 = 0; _i874 < _list872.size; ++_i874) { - _elem865 = org.apache.hadoop.hive.metastore.api.ClientCapability.findByValue(iprot.readI32()); - struct.values.add(_elem865); + _elem873 = org.apache.hadoop.hive.metastore.api.ClientCapability.findByValue(iprot.readI32()); + 
struct.values.add(_elem873); } iprot.readListEnd(); } @@ -386,9 +386,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ClientCapabilities oprot.writeFieldBegin(VALUES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, struct.values.size())); - for (ClientCapability _iter867 : struct.values) + for (ClientCapability _iter875 : struct.values) { - oprot.writeI32(_iter867.getValue()); + oprot.writeI32(_iter875.getValue()); } oprot.writeListEnd(); } @@ -413,9 +413,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ClientCapabilities TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.values.size()); - for (ClientCapability _iter868 : struct.values) + for (ClientCapability _iter876 : struct.values) { - oprot.writeI32(_iter868.getValue()); + oprot.writeI32(_iter876.getValue()); } } } @@ -424,13 +424,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ClientCapabilities public void read(org.apache.thrift.protocol.TProtocol prot, ClientCapabilities struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list869 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, iprot.readI32()); - struct.values = new ArrayList(_list869.size); - ClientCapability _elem870; - for (int _i871 = 0; _i871 < _list869.size; ++_i871) + org.apache.thrift.protocol.TList _list877 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, iprot.readI32()); + struct.values = new ArrayList(_list877.size); + ClientCapability _elem878; + for (int _i879 = 0; _i879 < _list877.size; ++_i879) { - _elem870 = org.apache.hadoop.hive.metastore.api.ClientCapability.findByValue(iprot.readI32()); - struct.values.add(_elem870); + _elem878 = org.apache.hadoop.hive.metastore.api.ClientCapability.findByValue(iprot.readI32()); + struct.values.add(_elem878); } } struct.setValuesIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionInfoStruct.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionInfoStruct.java index 4aee45ce5f..14d8a76e83 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionInfoStruct.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionInfoStruct.java @@ -50,6 +50,7 @@ private static final org.apache.thrift.protocol.TField WORKER_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("workerId", org.apache.thrift.protocol.TType.STRING, (short)10); private static final org.apache.thrift.protocol.TField START_FIELD_DESC = new org.apache.thrift.protocol.TField("start", org.apache.thrift.protocol.TType.I64, (short)11); private static final org.apache.thrift.protocol.TField HIGHEST_WRITE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("highestWriteId", org.apache.thrift.protocol.TType.I64, (short)12); + private static final org.apache.thrift.protocol.TField WRITE_IDS_FIELD_DESC = new org.apache.thrift.protocol.TField("writeIds", org.apache.thrift.protocol.TType.SET, (short)13); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -69,6 +70,7 @@ private String workerId; // optional private long start; // optional private long highestWriteId; // optional + private Set 
writeIds; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -87,7 +89,8 @@ STATE((short)9, "state"), WORKER_ID((short)10, "workerId"), START((short)11, "start"), - HIGHEST_WRITE_ID((short)12, "highestWriteId"); + HIGHEST_WRITE_ID((short)12, "highestWriteId"), + WRITE_IDS((short)13, "writeIds"); private static final Map byName = new HashMap(); @@ -126,6 +129,8 @@ public static _Fields findByThriftId(int fieldId) { return START; case 12: // HIGHEST_WRITE_ID return HIGHEST_WRITE_ID; + case 13: // WRITE_IDS + return WRITE_IDS; default: return null; } @@ -171,7 +176,7 @@ public String getFieldName() { private static final int __START_ISSET_ID = 2; private static final int __HIGHESTWRITEID_ISSET_ID = 3; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.PARTITIONNAME,_Fields.RUNAS,_Fields.PROPERTIES,_Fields.TOOMANYABORTS,_Fields.STATE,_Fields.WORKER_ID,_Fields.START,_Fields.HIGHEST_WRITE_ID}; + private static final _Fields optionals[] = {_Fields.PARTITIONNAME,_Fields.RUNAS,_Fields.PROPERTIES,_Fields.TOOMANYABORTS,_Fields.STATE,_Fields.WORKER_ID,_Fields.START,_Fields.HIGHEST_WRITE_ID,_Fields.WRITE_IDS}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -199,6 +204,9 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); tmpMap.put(_Fields.HIGHEST_WRITE_ID, new org.apache.thrift.meta_data.FieldMetaData("highestWriteId", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.WRITE_IDS, new org.apache.thrift.meta_data.FieldMetaData("writeIds", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.SetMetaData(org.apache.thrift.protocol.TType.SET, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CompactionInfoStruct.class, metaDataMap); } @@ -253,6 +261,10 @@ public CompactionInfoStruct(CompactionInfoStruct other) { } this.start = other.start; this.highestWriteId = other.highestWriteId; + if (other.isSetWriteIds()) { + Set __this__writeIds = new HashSet(other.writeIds); + this.writeIds = __this__writeIds; + } } public CompactionInfoStruct deepCopy() { @@ -277,6 +289,7 @@ public void clear() { this.start = 0; setHighestWriteIdIsSet(false); this.highestWriteId = 0; + this.writeIds = null; } public long getId() { @@ -559,6 +572,44 @@ public void setHighestWriteIdIsSet(boolean value) { __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __HIGHESTWRITEID_ISSET_ID, value); } + public int getWriteIdsSize() { + return (this.writeIds == null) ? 0 : this.writeIds.size(); + } + + public java.util.Iterator getWriteIdsIterator() { + return (this.writeIds == null) ? 
null : this.writeIds.iterator(); + } + + public void addToWriteIds(long elem) { + if (this.writeIds == null) { + this.writeIds = new HashSet(); + } + this.writeIds.add(elem); + } + + public Set getWriteIds() { + return this.writeIds; + } + + public void setWriteIds(Set writeIds) { + this.writeIds = writeIds; + } + + public void unsetWriteIds() { + this.writeIds = null; + } + + /** Returns true if field writeIds is set (has been assigned a value) and false otherwise */ + public boolean isSetWriteIds() { + return this.writeIds != null; + } + + public void setWriteIdsIsSet(boolean value) { + if (!value) { + this.writeIds = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case ID: @@ -657,6 +708,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case WRITE_IDS: + if (value == null) { + unsetWriteIds(); + } else { + setWriteIds((Set)value); + } + break; + } } @@ -698,6 +757,9 @@ public Object getFieldValue(_Fields field) { case HIGHEST_WRITE_ID: return getHighestWriteId(); + case WRITE_IDS: + return getWriteIds(); + } throw new IllegalStateException(); } @@ -733,6 +795,8 @@ public boolean isSet(_Fields field) { return isSetStart(); case HIGHEST_WRITE_ID: return isSetHighestWriteId(); + case WRITE_IDS: + return isSetWriteIds(); } throw new IllegalStateException(); } @@ -858,6 +922,15 @@ public boolean equals(CompactionInfoStruct that) { return false; } + boolean this_present_writeIds = true && this.isSetWriteIds(); + boolean that_present_writeIds = true && that.isSetWriteIds(); + if (this_present_writeIds || that_present_writeIds) { + if (!(this_present_writeIds && that_present_writeIds)) + return false; + if (!this.writeIds.equals(that.writeIds)) + return false; + } + return true; } @@ -925,6 +998,11 @@ public int hashCode() { if (present_highestWriteId) list.add(highestWriteId); + boolean present_writeIds = true && (isSetWriteIds()); + list.add(present_writeIds); + if (present_writeIds) + list.add(writeIds); + return list.hashCode(); } @@ -1056,6 +1134,16 @@ public int compareTo(CompactionInfoStruct other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetWriteIds()).compareTo(other.isSetWriteIds()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetWriteIds()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.writeIds, other.writeIds); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -1171,6 +1259,16 @@ public String toString() { sb.append(this.highestWriteId); first = false; } + if (isSetWriteIds()) { + if (!first) sb.append(", "); + sb.append("writeIds:"); + if (this.writeIds == null) { + sb.append("null"); + } else { + sb.append(this.writeIds); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -1328,6 +1426,24 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, CompactionInfoStruc org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 13: // WRITE_IDS + if (schemeField.type == org.apache.thrift.protocol.TType.SET) { + { + org.apache.thrift.protocol.TSet _set716 = iprot.readSetBegin(); + struct.writeIds = new HashSet(2*_set716.size); + long _elem717; + for (int _i718 = 0; _i718 < _set716.size; ++_i718) + { + _elem717 = iprot.readI64(); + struct.writeIds.add(_elem717); + } + iprot.readSetEnd(); + } + struct.setWriteIdsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: 
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -1409,6 +1525,20 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, CompactionInfoStru oprot.writeI64(struct.highestWriteId); oprot.writeFieldEnd(); } + if (struct.writeIds != null) { + if (struct.isSetWriteIds()) { + oprot.writeFieldBegin(WRITE_IDS_FIELD_DESC); + { + oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, struct.writeIds.size())); + for (long _iter719 : struct.writeIds) + { + oprot.writeI64(_iter719); + } + oprot.writeSetEnd(); + } + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -1455,7 +1585,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, CompactionInfoStruc if (struct.isSetHighestWriteId()) { optionals.set(7); } - oprot.writeBitSet(optionals, 8); + if (struct.isSetWriteIds()) { + optionals.set(8); + } + oprot.writeBitSet(optionals, 9); if (struct.isSetPartitionname()) { oprot.writeString(struct.partitionname); } @@ -1480,6 +1613,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, CompactionInfoStruc if (struct.isSetHighestWriteId()) { oprot.writeI64(struct.highestWriteId); } + if (struct.isSetWriteIds()) { + { + oprot.writeI32(struct.writeIds.size()); + for (long _iter720 : struct.writeIds) + { + oprot.writeI64(_iter720); + } + } + } } @Override @@ -1493,7 +1635,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, CompactionInfoStruct struct.setTablenameIsSet(true); struct.type = org.apache.hadoop.hive.metastore.api.CompactionType.findByValue(iprot.readI32()); struct.setTypeIsSet(true); - BitSet incoming = iprot.readBitSet(8); + BitSet incoming = iprot.readBitSet(9); if (incoming.get(0)) { struct.partitionname = iprot.readString(); struct.setPartitionnameIsSet(true); @@ -1526,6 +1668,19 @@ public void read(org.apache.thrift.protocol.TProtocol prot, CompactionInfoStruct struct.highestWriteId = iprot.readI64(); struct.setHighestWriteIdIsSet(true); } + if (incoming.get(8)) { + { + org.apache.thrift.protocol.TSet _set721 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.writeIds = new HashSet(2*_set721.size); + long _elem722; + for (int _i723 = 0; _i723 < _set721.size; ++_i723) + { + _elem722 = iprot.readI64(); + struct.writeIds.add(_elem722); + } + } + struct.setWriteIdsIsSet(true); + } } } diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionType.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionType.java index 7450b27cf3..b9b4e5d0ba 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionType.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionType.java @@ -13,7 +13,8 @@ public enum CompactionType implements org.apache.thrift.TEnum { MINOR(1), - MAJOR(2); + MAJOR(2), + CLEAN_ABORTED(3); private final int value; @@ -38,6 +39,8 @@ public static CompactionType findByValue(int value) { return MINOR; case 2: return MAJOR; + case 3: + return CLEAN_ABORTED; default: return null; }
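Aside from the mechanical renumbering of generated iterator names, the substantive change in the hunks above is the new optional writeIds set (field 13) on CompactionInfoStruct and the new CLEAN_ABORTED constant (wire value 3) on CompactionType; because the TupleScheme bitset widens from 8 to 9 optional flags, both ends of a connection need the regenerated classes before the new field can be exchanged. The following is a minimal usage sketch, not part of the patch: the class name CleanAbortedExample and the setId/setDbname/setTablename/setType calls are assumed from standard Thrift codegen rather than shown in this diff.

import java.util.Set;
import org.apache.hadoop.hive.metastore.api.CompactionInfoStruct;
import org.apache.hadoop.hive.metastore.api.CompactionType;

public class CleanAbortedExample {
  public static void main(String[] args) {
    // Hypothetical sketch; setId/setDbname/setTablename/setType are assumed
    // standard generated setters and do not appear in the hunks above.
    CompactionInfoStruct ci = new CompactionInfoStruct();
    ci.setId(1L);
    ci.setDbname("default");
    ci.setTablename("cws");
    ci.setType(CompactionType.CLEAN_ABORTED); // new enum constant, wire value 3

    // writeIds is optional: addToWriteIds lazily creates the backing HashSet,
    // and an unset field is simply skipped by the generated write() methods.
    ci.addToWriteIds(17L);
    ci.addToWriteIds(18L);

    if (ci.isSetWriteIds()) {
      Set<Long> ids = ci.getWriteIds();
      System.out.println("aborted write ids: " + ids);
    }
  }
}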
diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CreationMetadata.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CreationMetadata.java index 9595a5dc10..a4058d770a 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CreationMetadata.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CreationMetadata.java @@ -792,13 +792,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, CreationMetadata st case 4: // TABLES_USED if (schemeField.type == org.apache.thrift.protocol.TType.SET) { { - org.apache.thrift.protocol.TSet _set732 = iprot.readSetBegin(); - struct.tablesUsed = new HashSet(2*_set732.size); - String _elem733; - for (int _i734 = 0; _i734 < _set732.size; ++_i734) + org.apache.thrift.protocol.TSet _set740 = iprot.readSetBegin(); + struct.tablesUsed = new HashSet(2*_set740.size); + String _elem741; + for (int _i742 = 0; _i742 < _set740.size; ++_i742) { - _elem733 = iprot.readString(); - struct.tablesUsed.add(_elem733); + _elem741 = iprot.readString(); + struct.tablesUsed.add(_elem741); } iprot.readSetEnd(); } @@ -855,9 +855,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, CreationMetadata s oprot.writeFieldBegin(TABLES_USED_FIELD_DESC); { oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRING, struct.tablesUsed.size())); - for (String _iter735 : struct.tablesUsed) + for (String _iter743 : struct.tablesUsed) { - oprot.writeString(_iter735); + oprot.writeString(_iter743); } oprot.writeSetEnd(); } @@ -897,9 +897,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, CreationMetadata st oprot.writeString(struct.tblName); { oprot.writeI32(struct.tablesUsed.size()); - for (String _iter736 : struct.tablesUsed) + for (String _iter744 : struct.tablesUsed) { - oprot.writeString(_iter736); + oprot.writeString(_iter744); } } BitSet optionals = new BitSet(); @@ -928,13 +928,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, CreationMetadata str struct.tblName = iprot.readString(); struct.setTblNameIsSet(true); { - org.apache.thrift.protocol.TSet _set737 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tablesUsed = new HashSet(2*_set737.size); - String _elem738; - for (int _i739 = 0; _i739 < _set737.size; ++_i739) + org.apache.thrift.protocol.TSet _set745 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tablesUsed = new HashSet(2*_set745.size); + String _elem746; + for (int _i747 = 0; _i747 < _set745.size; ++_i747) { - _elem738 = iprot.readString(); - struct.tablesUsed.add(_elem738); + _elem746 = iprot.readString(); + struct.tablesUsed.add(_elem746); } } struct.setTablesUsedIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FindSchemasByColsResp.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FindSchemasByColsResp.java index 42073db544..3fa764bb29 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FindSchemasByColsResp.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FindSchemasByColsResp.java @@ -350,14 +350,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, FindSchemasByColsRe case 1: // SCHEMA_VERSIONS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list960 = iprot.readListBegin(); -
struct.schemaVersions = new ArrayList(_list960.size); - SchemaVersionDescriptor _elem961; - for (int _i962 = 0; _i962 < _list960.size; ++_i962) + org.apache.thrift.protocol.TList _list968 = iprot.readListBegin(); + struct.schemaVersions = new ArrayList(_list968.size); + SchemaVersionDescriptor _elem969; + for (int _i970 = 0; _i970 < _list968.size; ++_i970) { - _elem961 = new SchemaVersionDescriptor(); - _elem961.read(iprot); - struct.schemaVersions.add(_elem961); + _elem969 = new SchemaVersionDescriptor(); + _elem969.read(iprot); + struct.schemaVersions.add(_elem969); } iprot.readListEnd(); } @@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, FindSchemasByColsR oprot.writeFieldBegin(SCHEMA_VERSIONS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.schemaVersions.size())); - for (SchemaVersionDescriptor _iter963 : struct.schemaVersions) + for (SchemaVersionDescriptor _iter971 : struct.schemaVersions) { - _iter963.write(oprot); + _iter971.write(oprot); } oprot.writeListEnd(); } @@ -416,9 +416,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, FindSchemasByColsRe if (struct.isSetSchemaVersions()) { { oprot.writeI32(struct.schemaVersions.size()); - for (SchemaVersionDescriptor _iter964 : struct.schemaVersions) + for (SchemaVersionDescriptor _iter972 : struct.schemaVersions) { - _iter964.write(oprot); + _iter972.write(oprot); } } } @@ -430,14 +430,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, FindSchemasByColsRes BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list965 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.schemaVersions = new ArrayList(_list965.size); - SchemaVersionDescriptor _elem966; - for (int _i967 = 0; _i967 < _list965.size; ++_i967) + org.apache.thrift.protocol.TList _list973 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.schemaVersions = new ArrayList(_list973.size); + SchemaVersionDescriptor _elem974; + for (int _i975 = 0; _i975 < _list973.size; ++_i975) { - _elem966 = new SchemaVersionDescriptor(); - _elem966.read(iprot); - struct.schemaVersions.add(_elem966); + _elem974 = new SchemaVersionDescriptor(); + _elem974.read(iprot); + struct.schemaVersions.add(_elem974); } } struct.setSchemaVersionsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java index dd6658d636..bb79057cf2 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java @@ -794,13 +794,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, FireEventRequest st case 5: // PARTITION_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list780 = iprot.readListBegin(); - struct.partitionVals = new ArrayList(_list780.size); - String _elem781; - for (int _i782 = 0; _i782 < _list780.size; ++_i782) + org.apache.thrift.protocol.TList _list788 = iprot.readListBegin(); + struct.partitionVals = new ArrayList(_list788.size); + String 
_elem789; + for (int _i790 = 0; _i790 < _list788.size; ++_i790) { - _elem781 = iprot.readString(); - struct.partitionVals.add(_elem781); + _elem789 = iprot.readString(); + struct.partitionVals.add(_elem789); } iprot.readListEnd(); } @@ -857,9 +857,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, FireEventRequest s oprot.writeFieldBegin(PARTITION_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partitionVals.size())); - for (String _iter783 : struct.partitionVals) + for (String _iter791 : struct.partitionVals) { - oprot.writeString(_iter783); + oprot.writeString(_iter791); } oprot.writeListEnd(); } @@ -915,9 +915,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, FireEventRequest st if (struct.isSetPartitionVals()) { { oprot.writeI32(struct.partitionVals.size()); - for (String _iter784 : struct.partitionVals) + for (String _iter792 : struct.partitionVals) { - oprot.writeString(_iter784); + oprot.writeString(_iter792); } } } @@ -945,13 +945,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, FireEventRequest str } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list785 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionVals = new ArrayList(_list785.size); - String _elem786; - for (int _i787 = 0; _i787 < _list785.size; ++_i787) + org.apache.thrift.protocol.TList _list793 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionVals = new ArrayList(_list793.size); + String _elem794; + for (int _i795 = 0; _i795 < _list793.size; ++_i795) { - _elem786 = iprot.readString(); - struct.partitionVals.add(_elem786); + _elem794 = iprot.readString(); + struct.partitionVals.add(_elem794); } } struct.setPartitionValsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java index 68146e4561..22ec476ad8 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java @@ -346,14 +346,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetAllFunctionsResp case 1: // FUNCTIONS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list856 = iprot.readListBegin(); - struct.functions = new ArrayList(_list856.size); - Function _elem857; - for (int _i858 = 0; _i858 < _list856.size; ++_i858) + org.apache.thrift.protocol.TList _list864 = iprot.readListBegin(); + struct.functions = new ArrayList(_list864.size); + Function _elem865; + for (int _i866 = 0; _i866 < _list864.size; ++_i866) { - _elem857 = new Function(); - _elem857.read(iprot); - struct.functions.add(_elem857); + _elem865 = new Function(); + _elem865.read(iprot); + struct.functions.add(_elem865); } iprot.readListEnd(); } @@ -380,9 +380,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetAllFunctionsRes oprot.writeFieldBegin(FUNCTIONS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.functions.size())); - for 
(Function _iter859 : struct.functions) + for (Function _iter867 : struct.functions) { - _iter859.write(oprot); + _iter867.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetAllFunctionsResp if (struct.isSetFunctions()) { { oprot.writeI32(struct.functions.size()); - for (Function _iter860 : struct.functions) + for (Function _iter868 : struct.functions) { - _iter860.write(oprot); + _iter868.write(oprot); } } } @@ -428,14 +428,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetAllFunctionsRespo BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list861 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.functions = new ArrayList(_list861.size); - Function _elem862; - for (int _i863 = 0; _i863 < _list861.size; ++_i863) + org.apache.thrift.protocol.TList _list869 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.functions = new ArrayList(_list869.size); + Function _elem870; + for (int _i871 = 0; _i871 < _list869.size; ++_i871) { - _elem862 = new Function(); - _elem862.read(iprot); - struct.functions.add(_elem862); + _elem870 = new Function(); + _elem870.read(iprot); + struct.functions.add(_elem870); } } struct.setFunctionsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprRequest.java index ee535a0c80..f23838c3c9 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprRequest.java @@ -619,13 +619,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetFileMetadataByEx case 1: // FILE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list806 = iprot.readListBegin(); - struct.fileIds = new ArrayList(_list806.size); - long _elem807; - for (int _i808 = 0; _i808 < _list806.size; ++_i808) + org.apache.thrift.protocol.TList _list814 = iprot.readListBegin(); + struct.fileIds = new ArrayList(_list814.size); + long _elem815; + for (int _i816 = 0; _i816 < _list814.size; ++_i816) { - _elem807 = iprot.readI64(); - struct.fileIds.add(_elem807); + _elem815 = iprot.readI64(); + struct.fileIds.add(_elem815); } iprot.readListEnd(); } @@ -675,9 +675,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetFileMetadataByE oprot.writeFieldBegin(FILE_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.fileIds.size())); - for (long _iter809 : struct.fileIds) + for (long _iter817 : struct.fileIds) { - oprot.writeI64(_iter809); + oprot.writeI64(_iter817); } oprot.writeListEnd(); } @@ -719,9 +719,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByEx TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.fileIds.size()); - for (long _iter810 : struct.fileIds) + for (long _iter818 : struct.fileIds) { - oprot.writeI64(_iter810); + oprot.writeI64(_iter818); } } oprot.writeBinary(struct.expr); @@ -745,13 +745,13 @@ public void 
write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByEx public void read(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByExprRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list811 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.fileIds = new ArrayList(_list811.size); - long _elem812; - for (int _i813 = 0; _i813 < _list811.size; ++_i813) + org.apache.thrift.protocol.TList _list819 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.fileIds = new ArrayList(_list819.size); + long _elem820; + for (int _i821 = 0; _i821 < _list819.size; ++_i821) { - _elem812 = iprot.readI64(); - struct.fileIds.add(_elem812); + _elem820 = iprot.readI64(); + struct.fileIds.add(_elem820); } } struct.setFileIdsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprResult.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprResult.java index 71e92b6c03..5a662bb959 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprResult.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprResult.java @@ -444,16 +444,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetFileMetadataByEx case 1: // METADATA if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map796 = iprot.readMapBegin(); - struct.metadata = new HashMap(2*_map796.size); - long _key797; - MetadataPpdResult _val798; - for (int _i799 = 0; _i799 < _map796.size; ++_i799) + org.apache.thrift.protocol.TMap _map804 = iprot.readMapBegin(); + struct.metadata = new HashMap(2*_map804.size); + long _key805; + MetadataPpdResult _val806; + for (int _i807 = 0; _i807 < _map804.size; ++_i807) { - _key797 = iprot.readI64(); - _val798 = new MetadataPpdResult(); - _val798.read(iprot); - struct.metadata.put(_key797, _val798); + _key805 = iprot.readI64(); + _val806 = new MetadataPpdResult(); + _val806.read(iprot); + struct.metadata.put(_key805, _val806); } iprot.readMapEnd(); } @@ -487,10 +487,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetFileMetadataByE oprot.writeFieldBegin(METADATA_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRUCT, struct.metadata.size())); - for (Map.Entry _iter800 : struct.metadata.entrySet()) + for (Map.Entry _iter808 : struct.metadata.entrySet()) { - oprot.writeI64(_iter800.getKey()); - _iter800.getValue().write(oprot); + oprot.writeI64(_iter808.getKey()); + _iter808.getValue().write(oprot); } oprot.writeMapEnd(); } @@ -518,10 +518,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByEx TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.metadata.size()); - for (Map.Entry _iter801 : struct.metadata.entrySet()) + for (Map.Entry _iter809 : struct.metadata.entrySet()) { - oprot.writeI64(_iter801.getKey()); - _iter801.getValue().write(oprot); + oprot.writeI64(_iter809.getKey()); + _iter809.getValue().write(oprot); } } oprot.writeBool(struct.isSupported); @@ -531,16 +531,16 @@ public void 
write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByEx public void read(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByExprResult struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TMap _map802 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.metadata = new HashMap(2*_map802.size); - long _key803; - MetadataPpdResult _val804; - for (int _i805 = 0; _i805 < _map802.size; ++_i805) + org.apache.thrift.protocol.TMap _map810 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.metadata = new HashMap(2*_map810.size); + long _key811; + MetadataPpdResult _val812; + for (int _i813 = 0; _i813 < _map810.size; ++_i813) { - _key803 = iprot.readI64(); - _val804 = new MetadataPpdResult(); - _val804.read(iprot); - struct.metadata.put(_key803, _val804); + _key811 = iprot.readI64(); + _val812 = new MetadataPpdResult(); + _val812.read(iprot); + struct.metadata.put(_key811, _val812); } } struct.setMetadataIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataRequest.java index 0ea6ef5fb3..378e87756e 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataRequest.java @@ -351,13 +351,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetFileMetadataRequ case 1: // FILE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list824 = iprot.readListBegin(); - struct.fileIds = new ArrayList(_list824.size); - long _elem825; - for (int _i826 = 0; _i826 < _list824.size; ++_i826) + org.apache.thrift.protocol.TList _list832 = iprot.readListBegin(); + struct.fileIds = new ArrayList(_list832.size); + long _elem833; + for (int _i834 = 0; _i834 < _list832.size; ++_i834) { - _elem825 = iprot.readI64(); - struct.fileIds.add(_elem825); + _elem833 = iprot.readI64(); + struct.fileIds.add(_elem833); } iprot.readListEnd(); } @@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetFileMetadataReq oprot.writeFieldBegin(FILE_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.fileIds.size())); - for (long _iter827 : struct.fileIds) + for (long _iter835 : struct.fileIds) { - oprot.writeI64(_iter827); + oprot.writeI64(_iter835); } oprot.writeListEnd(); } @@ -410,9 +410,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataRequ TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.fileIds.size()); - for (long _iter828 : struct.fileIds) + for (long _iter836 : struct.fileIds) { - oprot.writeI64(_iter828); + oprot.writeI64(_iter836); } } } @@ -421,13 +421,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataRequ public void read(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - 
org.apache.thrift.protocol.TList _list829 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.fileIds = new ArrayList(_list829.size); - long _elem830; - for (int _i831 = 0; _i831 < _list829.size; ++_i831) + org.apache.thrift.protocol.TList _list837 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.fileIds = new ArrayList(_list837.size); + long _elem838; + for (int _i839 = 0; _i839 < _list837.size; ++_i839) { - _elem830 = iprot.readI64(); - struct.fileIds.add(_elem830); + _elem838 = iprot.readI64(); + struct.fileIds.add(_elem838); } } struct.setFileIdsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataResult.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataResult.java index 759b495bf6..1892bf9261 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataResult.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataResult.java @@ -433,15 +433,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetFileMetadataResu case 1: // METADATA if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map814 = iprot.readMapBegin(); - struct.metadata = new HashMap(2*_map814.size); - long _key815; - ByteBuffer _val816; - for (int _i817 = 0; _i817 < _map814.size; ++_i817) + org.apache.thrift.protocol.TMap _map822 = iprot.readMapBegin(); + struct.metadata = new HashMap(2*_map822.size); + long _key823; + ByteBuffer _val824; + for (int _i825 = 0; _i825 < _map822.size; ++_i825) { - _key815 = iprot.readI64(); - _val816 = iprot.readBinary(); - struct.metadata.put(_key815, _val816); + _key823 = iprot.readI64(); + _val824 = iprot.readBinary(); + struct.metadata.put(_key823, _val824); } iprot.readMapEnd(); } @@ -475,10 +475,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetFileMetadataRes oprot.writeFieldBegin(METADATA_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRING, struct.metadata.size())); - for (Map.Entry _iter818 : struct.metadata.entrySet()) + for (Map.Entry _iter826 : struct.metadata.entrySet()) { - oprot.writeI64(_iter818.getKey()); - oprot.writeBinary(_iter818.getValue()); + oprot.writeI64(_iter826.getKey()); + oprot.writeBinary(_iter826.getValue()); } oprot.writeMapEnd(); } @@ -506,10 +506,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataResu TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.metadata.size()); - for (Map.Entry _iter819 : struct.metadata.entrySet()) + for (Map.Entry _iter827 : struct.metadata.entrySet()) { - oprot.writeI64(_iter819.getKey()); - oprot.writeBinary(_iter819.getValue()); + oprot.writeI64(_iter827.getKey()); + oprot.writeBinary(_iter827.getValue()); } } oprot.writeBool(struct.isSupported); @@ -519,15 +519,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataResu public void read(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataResult struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TMap _map820 = new 
org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.metadata = new HashMap(2*_map820.size); - long _key821; - ByteBuffer _val822; - for (int _i823 = 0; _i823 < _map820.size; ++_i823) + org.apache.thrift.protocol.TMap _map828 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.metadata = new HashMap(2*_map828.size); + long _key829; + ByteBuffer _val830; + for (int _i831 = 0; _i831 < _map828.size; ++_i831) { - _key821 = iprot.readI64(); - _val822 = iprot.readBinary(); - struct.metadata.put(_key821, _val822); + _key829 = iprot.readI64(); + _val830 = iprot.readBinary(); + struct.metadata.put(_key829, _val830); } } struct.setMetadataIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsFilterSpec.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsFilterSpec.java index b5a2b68efd..d64ab62234 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsFilterSpec.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsFilterSpec.java @@ -444,13 +444,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetPartitionsFilter case 8: // FILTERS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list992 = iprot.readListBegin(); - struct.filters = new ArrayList(_list992.size); - String _elem993; - for (int _i994 = 0; _i994 < _list992.size; ++_i994) + org.apache.thrift.protocol.TList _list1000 = iprot.readListBegin(); + struct.filters = new ArrayList(_list1000.size); + String _elem1001; + for (int _i1002 = 0; _i1002 < _list1000.size; ++_i1002) { - _elem993 = iprot.readString(); - struct.filters.add(_elem993); + _elem1001 = iprot.readString(); + struct.filters.add(_elem1001); } iprot.readListEnd(); } @@ -484,9 +484,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetPartitionsFilte oprot.writeFieldBegin(FILTERS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.filters.size())); - for (String _iter995 : struct.filters) + for (String _iter1003 : struct.filters) { - oprot.writeString(_iter995); + oprot.writeString(_iter1003); } oprot.writeListEnd(); } @@ -524,9 +524,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetPartitionsFilter if (struct.isSetFilters()) { { oprot.writeI32(struct.filters.size()); - for (String _iter996 : struct.filters) + for (String _iter1004 : struct.filters) { - oprot.writeString(_iter996); + oprot.writeString(_iter1004); } } } @@ -542,13 +542,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetPartitionsFilterS } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list997 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.filters = new ArrayList(_list997.size); - String _elem998; - for (int _i999 = 0; _i999 < _list997.size; ++_i999) + org.apache.thrift.protocol.TList _list1005 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.filters = new ArrayList(_list1005.size); + String _elem1006; + for (int _i1007 = 0; _i1007 < 
_list1005.size; ++_i1007) { - _elem998 = iprot.readString(); - struct.filters.add(_elem998); + _elem1006 = iprot.readString(); + struct.filters.add(_elem1006); } } struct.setFiltersIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsProjectionSpec.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsProjectionSpec.java index e6c9c06beb..844e17e49f 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsProjectionSpec.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsProjectionSpec.java @@ -509,13 +509,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetPartitionsProjec case 1: // FIELD_LIST if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list984 = iprot.readListBegin(); - struct.fieldList = new ArrayList(_list984.size); - String _elem985; - for (int _i986 = 0; _i986 < _list984.size; ++_i986) + org.apache.thrift.protocol.TList _list992 = iprot.readListBegin(); + struct.fieldList = new ArrayList(_list992.size); + String _elem993; + for (int _i994 = 0; _i994 < _list992.size; ++_i994) { - _elem985 = iprot.readString(); - struct.fieldList.add(_elem985); + _elem993 = iprot.readString(); + struct.fieldList.add(_elem993); } iprot.readListEnd(); } @@ -557,9 +557,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetPartitionsProje oprot.writeFieldBegin(FIELD_LIST_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.fieldList.size())); - for (String _iter987 : struct.fieldList) + for (String _iter995 : struct.fieldList) { - oprot.writeString(_iter987); + oprot.writeString(_iter995); } oprot.writeListEnd(); } @@ -606,9 +606,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetPartitionsProjec if (struct.isSetFieldList()) { { oprot.writeI32(struct.fieldList.size()); - for (String _iter988 : struct.fieldList) + for (String _iter996 : struct.fieldList) { - oprot.writeString(_iter988); + oprot.writeString(_iter996); } } } @@ -626,13 +626,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetPartitionsProject BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list989 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.fieldList = new ArrayList(_list989.size); - String _elem990; - for (int _i991 = 0; _i991 < _list989.size; ++_i991) + org.apache.thrift.protocol.TList _list997 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.fieldList = new ArrayList(_list997.size); + String _elem998; + for (int _i999 = 0; _i999 < _list997.size; ++_i999) { - _elem990 = iprot.readString(); - struct.fieldList.add(_elem990); + _elem998 = iprot.readString(); + struct.fieldList.add(_elem998); } } struct.setFieldListIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsRequest.java index 7ec107ea6c..54b77be98e 100644 --- 
standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsRequest.java @@ -960,13 +960,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetPartitionsReques case 6: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1008 = iprot.readListBegin(); - struct.groupNames = new ArrayList(_list1008.size); - String _elem1009; - for (int _i1010 = 0; _i1010 < _list1008.size; ++_i1010) + org.apache.thrift.protocol.TList _list1016 = iprot.readListBegin(); + struct.groupNames = new ArrayList(_list1016.size); + String _elem1017; + for (int _i1018 = 0; _i1018 < _list1016.size; ++_i1018) { - _elem1009 = iprot.readString(); - struct.groupNames.add(_elem1009); + _elem1017 = iprot.readString(); + struct.groupNames.add(_elem1017); } iprot.readListEnd(); } @@ -1040,9 +1040,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetPartitionsReque oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.groupNames.size())); - for (String _iter1011 : struct.groupNames) + for (String _iter1019 : struct.groupNames) { - oprot.writeString(_iter1011); + oprot.writeString(_iter1019); } oprot.writeListEnd(); } @@ -1120,9 +1120,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetPartitionsReques if (struct.isSetGroupNames()) { { oprot.writeI32(struct.groupNames.size()); - for (String _iter1012 : struct.groupNames) + for (String _iter1020 : struct.groupNames) { - oprot.writeString(_iter1012); + oprot.writeString(_iter1020); } } } @@ -1160,13 +1160,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetPartitionsRequest } if (incoming.get(5)) { { - org.apache.thrift.protocol.TList _list1013 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.groupNames = new ArrayList(_list1013.size); - String _elem1014; - for (int _i1015 = 0; _i1015 < _list1013.size; ++_i1015) + org.apache.thrift.protocol.TList _list1021 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.groupNames = new ArrayList(_list1021.size); + String _elem1022; + for (int _i1023 = 0; _i1023 < _list1021.size; ++_i1023) { - _elem1014 = iprot.readString(); - struct.groupNames.add(_elem1014); + _elem1022 = iprot.readString(); + struct.groupNames.add(_elem1022); } } struct.setGroupNamesIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsResponse.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsResponse.java index faac848991..fbbc627f8c 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsResponse.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsResponse.java @@ -350,14 +350,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetPartitionsRespon case 1: // PARTITION_SPEC if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1000 = iprot.readListBegin(); - struct.partitionSpec = new 
ArrayList(_list1000.size); - PartitionSpec _elem1001; - for (int _i1002 = 0; _i1002 < _list1000.size; ++_i1002) + org.apache.thrift.protocol.TList _list1008 = iprot.readListBegin(); + struct.partitionSpec = new ArrayList(_list1008.size); + PartitionSpec _elem1009; + for (int _i1010 = 0; _i1010 < _list1008.size; ++_i1010) { - _elem1001 = new PartitionSpec(); - _elem1001.read(iprot); - struct.partitionSpec.add(_elem1001); + _elem1009 = new PartitionSpec(); + _elem1009.read(iprot); + struct.partitionSpec.add(_elem1009); } iprot.readListEnd(); } @@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetPartitionsRespo oprot.writeFieldBegin(PARTITION_SPEC_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitionSpec.size())); - for (PartitionSpec _iter1003 : struct.partitionSpec) + for (PartitionSpec _iter1011 : struct.partitionSpec) { - _iter1003.write(oprot); + _iter1011.write(oprot); } oprot.writeListEnd(); } @@ -416,9 +416,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetPartitionsRespon if (struct.isSetPartitionSpec()) { { oprot.writeI32(struct.partitionSpec.size()); - for (PartitionSpec _iter1004 : struct.partitionSpec) + for (PartitionSpec _iter1012 : struct.partitionSpec) { - _iter1004.write(oprot); + _iter1012.write(oprot); } } } @@ -430,14 +430,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetPartitionsRespons BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1005 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.partitionSpec = new ArrayList(_list1005.size); - PartitionSpec _elem1006; - for (int _i1007 = 0; _i1007 < _list1005.size; ++_i1007) + org.apache.thrift.protocol.TList _list1013 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.partitionSpec = new ArrayList(_list1013.size); + PartitionSpec _elem1014; + for (int _i1015 = 0; _i1015 < _list1013.size; ++_i1015) { - _elem1006 = new PartitionSpec(); - _elem1006.read(iprot); - struct.partitionSpec.add(_elem1006); + _elem1014 = new PartitionSpec(); + _elem1014.read(iprot); + struct.partitionSpec.add(_elem1014); } } struct.setPartitionSpecIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesRequest.java index da361572e5..4989d0bdca 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesRequest.java @@ -606,13 +606,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetTablesRequest st case 2: // TBL_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list872 = iprot.readListBegin(); - struct.tblNames = new ArrayList(_list872.size); - String _elem873; - for (int _i874 = 0; _i874 < _list872.size; ++_i874) + org.apache.thrift.protocol.TList _list880 = iprot.readListBegin(); + struct.tblNames = new ArrayList(_list880.size); + String _elem881; + for (int _i882 = 0; _i882 < _list880.size; ++_i882) { - _elem873 = iprot.readString(); - struct.tblNames.add(_elem873); + 
_elem881 = iprot.readString(); + struct.tblNames.add(_elem881); } iprot.readListEnd(); } @@ -661,9 +661,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetTablesRequest s oprot.writeFieldBegin(TBL_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tblNames.size())); - for (String _iter875 : struct.tblNames) + for (String _iter883 : struct.tblNames) { - oprot.writeString(_iter875); + oprot.writeString(_iter883); } oprot.writeListEnd(); } @@ -716,9 +716,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetTablesRequest st if (struct.isSetTblNames()) { { oprot.writeI32(struct.tblNames.size()); - for (String _iter876 : struct.tblNames) + for (String _iter884 : struct.tblNames) { - oprot.writeString(_iter876); + oprot.writeString(_iter884); } } } @@ -738,13 +738,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetTablesRequest str BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list877 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tblNames = new ArrayList(_list877.size); - String _elem878; - for (int _i879 = 0; _i879 < _list877.size; ++_i879) + org.apache.thrift.protocol.TList _list885 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tblNames = new ArrayList(_list885.size); + String _elem886; + for (int _i887 = 0; _i887 < _list885.size; ++_i887) { - _elem878 = iprot.readString(); - struct.tblNames.add(_elem878); + _elem886 = iprot.readString(); + struct.tblNames.add(_elem886); } } struct.setTblNamesIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesResult.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesResult.java index b3cfc88b34..1e5cb8c03a 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesResult.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesResult.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetTablesResult str case 1: // TABLES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list880 = iprot.readListBegin(); - struct.tables = new ArrayList(_list880.size); - Table _elem881; - for (int _i882 = 0; _i882 < _list880.size; ++_i882) + org.apache.thrift.protocol.TList _list888 = iprot.readListBegin(); + struct.tables = new ArrayList
(_list888.size); + Table _elem889; + for (int _i890 = 0; _i890 < _list888.size; ++_i890) { - _elem881 = new Table(); - _elem881.read(iprot); - struct.tables.add(_elem881); + _elem889 = new Table(); + _elem889.read(iprot); + struct.tables.add(_elem889); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetTablesResult st oprot.writeFieldBegin(TABLES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.tables.size())); - for (Table _iter883 : struct.tables) + for (Table _iter891 : struct.tables) { - _iter883.write(oprot); + _iter891.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetTablesResult str TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.tables.size()); - for (Table _iter884 : struct.tables) + for (Table _iter892 : struct.tables) { - _iter884.write(oprot); + _iter892.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetTablesResult str public void read(org.apache.thrift.protocol.TProtocol prot, GetTablesResult struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list885 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.tables = new ArrayList
<Table>(_list885.size); - Table _elem886; - for (int _i887 = 0; _i887 < _list885.size; ++_i887) + org.apache.thrift.protocol.TList _list893 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.tables = new ArrayList<Table>
(_list893.size); + Table _elem894; + for (int _i895 = 0; _i895 < _list893.size; ++_i895) { - _elem886 = new Table(); - _elem886.read(iprot); - struct.tables.add(_elem886); + _elem894 = new Table(); + _elem894.read(iprot); + struct.tables.add(_elem894); } } struct.setTablesIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java index f1ba64348e..a4f4be472c 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java @@ -636,13 +636,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, InsertEventRequestD case 2: // FILES_ADDED if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list756 = iprot.readListBegin(); - struct.filesAdded = new ArrayList(_list756.size); - String _elem757; - for (int _i758 = 0; _i758 < _list756.size; ++_i758) + org.apache.thrift.protocol.TList _list764 = iprot.readListBegin(); + struct.filesAdded = new ArrayList(_list764.size); + String _elem765; + for (int _i766 = 0; _i766 < _list764.size; ++_i766) { - _elem757 = iprot.readString(); - struct.filesAdded.add(_elem757); + _elem765 = iprot.readString(); + struct.filesAdded.add(_elem765); } iprot.readListEnd(); } @@ -654,13 +654,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, InsertEventRequestD case 3: // FILES_ADDED_CHECKSUM if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list759 = iprot.readListBegin(); - struct.filesAddedChecksum = new ArrayList(_list759.size); - String _elem760; - for (int _i761 = 0; _i761 < _list759.size; ++_i761) + org.apache.thrift.protocol.TList _list767 = iprot.readListBegin(); + struct.filesAddedChecksum = new ArrayList(_list767.size); + String _elem768; + for (int _i769 = 0; _i769 < _list767.size; ++_i769) { - _elem760 = iprot.readString(); - struct.filesAddedChecksum.add(_elem760); + _elem768 = iprot.readString(); + struct.filesAddedChecksum.add(_elem768); } iprot.readListEnd(); } @@ -672,13 +672,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, InsertEventRequestD case 4: // SUB_DIRECTORY_LIST if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list762 = iprot.readListBegin(); - struct.subDirectoryList = new ArrayList(_list762.size); - String _elem763; - for (int _i764 = 0; _i764 < _list762.size; ++_i764) + org.apache.thrift.protocol.TList _list770 = iprot.readListBegin(); + struct.subDirectoryList = new ArrayList(_list770.size); + String _elem771; + for (int _i772 = 0; _i772 < _list770.size; ++_i772) { - _elem763 = iprot.readString(); - struct.subDirectoryList.add(_elem763); + _elem771 = iprot.readString(); + struct.subDirectoryList.add(_elem771); } iprot.readListEnd(); } @@ -709,9 +709,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, InsertEventRequest oprot.writeFieldBegin(FILES_ADDED_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.filesAdded.size())); - for (String _iter765 : struct.filesAdded) + for (String _iter773 : struct.filesAdded) { - 
oprot.writeString(_iter765); + oprot.writeString(_iter773); } oprot.writeListEnd(); } @@ -722,9 +722,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, InsertEventRequest oprot.writeFieldBegin(FILES_ADDED_CHECKSUM_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.filesAddedChecksum.size())); - for (String _iter766 : struct.filesAddedChecksum) + for (String _iter774 : struct.filesAddedChecksum) { - oprot.writeString(_iter766); + oprot.writeString(_iter774); } oprot.writeListEnd(); } @@ -736,9 +736,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, InsertEventRequest oprot.writeFieldBegin(SUB_DIRECTORY_LIST_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.subDirectoryList.size())); - for (String _iter767 : struct.subDirectoryList) + for (String _iter775 : struct.subDirectoryList) { - oprot.writeString(_iter767); + oprot.writeString(_iter775); } oprot.writeListEnd(); } @@ -764,9 +764,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, InsertEventRequestD TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.filesAdded.size()); - for (String _iter768 : struct.filesAdded) + for (String _iter776 : struct.filesAdded) { - oprot.writeString(_iter768); + oprot.writeString(_iter776); } } BitSet optionals = new BitSet(); @@ -786,18 +786,18 @@ public void write(org.apache.thrift.protocol.TProtocol prot, InsertEventRequestD if (struct.isSetFilesAddedChecksum()) { { oprot.writeI32(struct.filesAddedChecksum.size()); - for (String _iter769 : struct.filesAddedChecksum) + for (String _iter777 : struct.filesAddedChecksum) { - oprot.writeString(_iter769); + oprot.writeString(_iter777); } } } if (struct.isSetSubDirectoryList()) { { oprot.writeI32(struct.subDirectoryList.size()); - for (String _iter770 : struct.subDirectoryList) + for (String _iter778 : struct.subDirectoryList) { - oprot.writeString(_iter770); + oprot.writeString(_iter778); } } } @@ -807,13 +807,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, InsertEventRequestD public void read(org.apache.thrift.protocol.TProtocol prot, InsertEventRequestData struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list771 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.filesAdded = new ArrayList(_list771.size); - String _elem772; - for (int _i773 = 0; _i773 < _list771.size; ++_i773) + org.apache.thrift.protocol.TList _list779 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.filesAdded = new ArrayList(_list779.size); + String _elem780; + for (int _i781 = 0; _i781 < _list779.size; ++_i781) { - _elem772 = iprot.readString(); - struct.filesAdded.add(_elem772); + _elem780 = iprot.readString(); + struct.filesAdded.add(_elem780); } } struct.setFilesAddedIsSet(true); @@ -824,26 +824,26 @@ public void read(org.apache.thrift.protocol.TProtocol prot, InsertEventRequestDa } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list774 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.filesAddedChecksum = new ArrayList(_list774.size); - String _elem775; - for (int _i776 = 0; _i776 < _list774.size; ++_i776) + org.apache.thrift.protocol.TList _list782 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.filesAddedChecksum = new ArrayList(_list782.size); + String _elem783; + for (int _i784 = 0; _i784 < _list782.size; ++_i784) { - _elem775 = iprot.readString(); - struct.filesAddedChecksum.add(_elem775); + _elem783 = iprot.readString(); + struct.filesAddedChecksum.add(_elem783); } } struct.setFilesAddedChecksumIsSet(true); } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list777 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.subDirectoryList = new ArrayList(_list777.size); - String _elem778; - for (int _i779 = 0; _i779 < _list777.size; ++_i779) + org.apache.thrift.protocol.TList _list785 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.subDirectoryList = new ArrayList(_list785.size); + String _elem786; + for (int _i787 = 0; _i787 < _list785.size; ++_i787) { - _elem778 = iprot.readString(); - struct.subDirectoryList.add(_elem778); + _elem786 = iprot.readString(); + struct.subDirectoryList.add(_elem786); } } struct.setSubDirectoryListIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventRequest.java index 288c365950..420ed6dedb 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventRequest.java @@ -525,13 +525,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, NotificationEventRe case 3: // EVENT_TYPE_SKIP_LIST if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list740 = iprot.readListBegin(); - struct.eventTypeSkipList = new ArrayList(_list740.size); - String _elem741; - for (int _i742 = 0; _i742 < _list740.size; ++_i742) + org.apache.thrift.protocol.TList _list748 = iprot.readListBegin(); + struct.eventTypeSkipList = new ArrayList(_list748.size); + String _elem749; + for (int _i750 = 0; _i750 < _list748.size; ++_i750) { - _elem741 = iprot.readString(); - struct.eventTypeSkipList.add(_elem741); + _elem749 = iprot.readString(); + struct.eventTypeSkipList.add(_elem749); } iprot.readListEnd(); } @@ -566,9 +566,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, NotificationEventR oprot.writeFieldBegin(EVENT_TYPE_SKIP_LIST_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.eventTypeSkipList.size())); - for (String _iter743 : struct.eventTypeSkipList) + for (String _iter751 : struct.eventTypeSkipList) { - oprot.writeString(_iter743); + oprot.writeString(_iter751); } oprot.writeListEnd(); } @@ -607,9 +607,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, NotificationEventRe if (struct.isSetEventTypeSkipList()) { { oprot.writeI32(struct.eventTypeSkipList.size()); - for (String _iter744 : struct.eventTypeSkipList) + for (String _iter752 : struct.eventTypeSkipList) { - oprot.writeString(_iter744); + oprot.writeString(_iter752); } } } @@ -627,13 +627,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, NotificationEventReq } if (incoming.get(1)) { { - 
org.apache.thrift.protocol.TList _list745 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.eventTypeSkipList = new ArrayList(_list745.size); - String _elem746; - for (int _i747 = 0; _i747 < _list745.size; ++_i747) + org.apache.thrift.protocol.TList _list753 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.eventTypeSkipList = new ArrayList(_list753.size); + String _elem754; + for (int _i755 = 0; _i755 < _list753.size; ++_i755) { - _elem746 = iprot.readString(); - struct.eventTypeSkipList.add(_elem746); + _elem754 = iprot.readString(); + struct.eventTypeSkipList.add(_elem754); } } struct.setEventTypeSkipListIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java index b86f038c1e..f924d820ea 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, NotificationEventRe case 1: // EVENTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list748 = iprot.readListBegin(); - struct.events = new ArrayList(_list748.size); - NotificationEvent _elem749; - for (int _i750 = 0; _i750 < _list748.size; ++_i750) + org.apache.thrift.protocol.TList _list756 = iprot.readListBegin(); + struct.events = new ArrayList(_list756.size); + NotificationEvent _elem757; + for (int _i758 = 0; _i758 < _list756.size; ++_i758) { - _elem749 = new NotificationEvent(); - _elem749.read(iprot); - struct.events.add(_elem749); + _elem757 = new NotificationEvent(); + _elem757.read(iprot); + struct.events.add(_elem757); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, NotificationEventR oprot.writeFieldBegin(EVENTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.events.size())); - for (NotificationEvent _iter751 : struct.events) + for (NotificationEvent _iter759 : struct.events) { - _iter751.write(oprot); + _iter759.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, NotificationEventRe TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.events.size()); - for (NotificationEvent _iter752 : struct.events) + for (NotificationEvent _iter760 : struct.events) { - _iter752.write(oprot); + _iter760.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, NotificationEventRe public void read(org.apache.thrift.protocol.TProtocol prot, NotificationEventResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list753 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.events = new ArrayList(_list753.size); - NotificationEvent _elem754; - for (int _i755 = 0; _i755 < _list753.size; ++_i755) + org.apache.thrift.protocol.TList _list761 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.events = new ArrayList(_list761.size); + NotificationEvent _elem762; + for (int _i763 = 0; _i763 < _list761.size; ++_i763) { - _elem754 = new NotificationEvent(); - _elem754.read(iprot); - struct.events.add(_elem754); + _elem762 = new NotificationEvent(); + _elem762.read(iprot); + struct.events.add(_elem762); } } struct.setEventsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PutFileMetadataRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PutFileMetadataRequest.java index 5cbfe64945..d9df4ff993 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PutFileMetadataRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PutFileMetadataRequest.java @@ -547,13 +547,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PutFileMetadataRequ case 1: // FILE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list832 = iprot.readListBegin(); - struct.fileIds = new ArrayList(_list832.size); - long _elem833; - for (int _i834 = 0; _i834 < _list832.size; ++_i834) + org.apache.thrift.protocol.TList _list840 = iprot.readListBegin(); + struct.fileIds = new ArrayList(_list840.size); + long _elem841; + for (int _i842 = 0; _i842 < _list840.size; ++_i842) { - _elem833 = iprot.readI64(); - struct.fileIds.add(_elem833); + _elem841 = iprot.readI64(); + struct.fileIds.add(_elem841); } iprot.readListEnd(); } @@ -565,13 +565,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PutFileMetadataRequ case 2: // METADATA if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list835 = iprot.readListBegin(); - struct.metadata = new ArrayList(_list835.size); - ByteBuffer _elem836; - for (int _i837 = 0; _i837 < _list835.size; ++_i837) + org.apache.thrift.protocol.TList _list843 = iprot.readListBegin(); + struct.metadata = new ArrayList(_list843.size); + ByteBuffer _elem844; + for (int _i845 = 0; _i845 < _list843.size; ++_i845) { - _elem836 = iprot.readBinary(); - struct.metadata.add(_elem836); + _elem844 = iprot.readBinary(); + struct.metadata.add(_elem844); } iprot.readListEnd(); } @@ -605,9 +605,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PutFileMetadataReq oprot.writeFieldBegin(FILE_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.fileIds.size())); - for (long _iter838 : struct.fileIds) + for (long _iter846 : struct.fileIds) { - oprot.writeI64(_iter838); + oprot.writeI64(_iter846); } oprot.writeListEnd(); } @@ -617,9 +617,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PutFileMetadataReq oprot.writeFieldBegin(METADATA_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.metadata.size())); - for (ByteBuffer _iter839 : struct.metadata) + for (ByteBuffer _iter847 : struct.metadata) { - oprot.writeBinary(_iter839); + oprot.writeBinary(_iter847); } oprot.writeListEnd(); } @@ -651,16 +651,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PutFileMetadataRequ TTupleProtocol oprot = (TTupleProtocol) prot; { 
oprot.writeI32(struct.fileIds.size()); - for (long _iter840 : struct.fileIds) + for (long _iter848 : struct.fileIds) { - oprot.writeI64(_iter840); + oprot.writeI64(_iter848); } } { oprot.writeI32(struct.metadata.size()); - for (ByteBuffer _iter841 : struct.metadata) + for (ByteBuffer _iter849 : struct.metadata) { - oprot.writeBinary(_iter841); + oprot.writeBinary(_iter849); } } BitSet optionals = new BitSet(); @@ -677,24 +677,24 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PutFileMetadataRequ public void read(org.apache.thrift.protocol.TProtocol prot, PutFileMetadataRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list842 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.fileIds = new ArrayList(_list842.size); - long _elem843; - for (int _i844 = 0; _i844 < _list842.size; ++_i844) + org.apache.thrift.protocol.TList _list850 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.fileIds = new ArrayList(_list850.size); + long _elem851; + for (int _i852 = 0; _i852 < _list850.size; ++_i852) { - _elem843 = iprot.readI64(); - struct.fileIds.add(_elem843); + _elem851 = iprot.readI64(); + struct.fileIds.add(_elem851); } } struct.setFileIdsIsSet(true); { - org.apache.thrift.protocol.TList _list845 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.metadata = new ArrayList(_list845.size); - ByteBuffer _elem846; - for (int _i847 = 0; _i847 < _list845.size; ++_i847) + org.apache.thrift.protocol.TList _list853 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.metadata = new ArrayList(_list853.size); + ByteBuffer _elem854; + for (int _i855 = 0; _i855 < _list853.size; ++_i855) { - _elem846 = iprot.readBinary(); - struct.metadata.add(_elem846); + _elem854 = iprot.readBinary(); + struct.metadata.add(_elem854); } } struct.setMetadataIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RenamePartitionRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RenamePartitionRequest.java index ea4cc16af5..8364142090 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RenamePartitionRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RenamePartitionRequest.java @@ -796,13 +796,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, RenamePartitionRequ case 4: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list976 = iprot.readListBegin(); - struct.partVals = new ArrayList(_list976.size); - String _elem977; - for (int _i978 = 0; _i978 < _list976.size; ++_i978) + org.apache.thrift.protocol.TList _list984 = iprot.readListBegin(); + struct.partVals = new ArrayList(_list984.size); + String _elem985; + for (int _i986 = 0; _i986 < _list984.size; ++_i986) { - _elem977 = iprot.readString(); - struct.partVals.add(_elem977); + _elem985 = iprot.readString(); + struct.partVals.add(_elem985); } iprot.readListEnd(); } @@ -862,9 +862,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, RenamePartitionReq oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { 
oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partVals.size())); - for (String _iter979 : struct.partVals) + for (String _iter987 : struct.partVals) { - oprot.writeString(_iter979); + oprot.writeString(_iter987); } oprot.writeListEnd(); } @@ -903,9 +903,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, RenamePartitionRequ oprot.writeString(struct.tableName); { oprot.writeI32(struct.partVals.size()); - for (String _iter980 : struct.partVals) + for (String _iter988 : struct.partVals) { - oprot.writeString(_iter980); + oprot.writeString(_iter988); } } struct.newPart.write(oprot); @@ -933,13 +933,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, RenamePartitionReque struct.tableName = iprot.readString(); struct.setTableNameIsSet(true); { - org.apache.thrift.protocol.TList _list981 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partVals = new ArrayList(_list981.size); - String _elem982; - for (int _i983 = 0; _i983 < _list981.size; ++_i983) + org.apache.thrift.protocol.TList _list989 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partVals = new ArrayList(_list989.size); + String _elem990; + for (int _i991 = 0; _i991 < _list989.size; ++_i991) { - _elem982 = iprot.readString(); - struct.partVals.add(_elem982); + _elem990 = iprot.readString(); + struct.partVals.add(_elem990); } } struct.setPartValsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SchemaVersion.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SchemaVersion.java index b87f65f524..fca11df041 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SchemaVersion.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SchemaVersion.java @@ -1119,14 +1119,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SchemaVersion struc case 4: // COLS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list952 = iprot.readListBegin(); - struct.cols = new ArrayList(_list952.size); - FieldSchema _elem953; - for (int _i954 = 0; _i954 < _list952.size; ++_i954) + org.apache.thrift.protocol.TList _list960 = iprot.readListBegin(); + struct.cols = new ArrayList(_list960.size); + FieldSchema _elem961; + for (int _i962 = 0; _i962 < _list960.size; ++_i962) { - _elem953 = new FieldSchema(); - _elem953.read(iprot); - struct.cols.add(_elem953); + _elem961 = new FieldSchema(); + _elem961.read(iprot); + struct.cols.add(_elem961); } iprot.readListEnd(); } @@ -1212,9 +1212,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, SchemaVersion stru oprot.writeFieldBegin(COLS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.cols.size())); - for (FieldSchema _iter955 : struct.cols) + for (FieldSchema _iter963 : struct.cols) { - _iter955.write(oprot); + _iter963.write(oprot); } oprot.writeListEnd(); } @@ -1323,9 +1323,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, SchemaVersion struc if (struct.isSetCols()) { { oprot.writeI32(struct.cols.size()); - for (FieldSchema _iter956 : struct.cols) + for (FieldSchema _iter964 : struct.cols) { - 
_iter956.write(oprot); + _iter964.write(oprot); } } } @@ -1368,14 +1368,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, SchemaVersion struct } if (incoming.get(3)) { { - org.apache.thrift.protocol.TList _list957 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.cols = new ArrayList(_list957.size); - FieldSchema _elem958; - for (int _i959 = 0; _i959 < _list957.size; ++_i959) + org.apache.thrift.protocol.TList _list965 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.cols = new ArrayList(_list965.size); + FieldSchema _elem966; + for (int _i967 = 0; _i967 < _list965.size; ++_i967) { - _elem958 = new FieldSchema(); - _elem958.read(iprot); - struct.cols.add(_elem958); + _elem966 = new FieldSchema(); + _elem966.read(iprot); + struct.cols.add(_elem966); } } struct.setColsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java index 2a7b3eba2a..b97e3e3a6b 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ShowCompactResponse case 1: // COMPACTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list716 = iprot.readListBegin(); - struct.compacts = new ArrayList(_list716.size); - ShowCompactResponseElement _elem717; - for (int _i718 = 0; _i718 < _list716.size; ++_i718) + org.apache.thrift.protocol.TList _list724 = iprot.readListBegin(); + struct.compacts = new ArrayList(_list724.size); + ShowCompactResponseElement _elem725; + for (int _i726 = 0; _i726 < _list724.size; ++_i726) { - _elem717 = new ShowCompactResponseElement(); - _elem717.read(iprot); - struct.compacts.add(_elem717); + _elem725 = new ShowCompactResponseElement(); + _elem725.read(iprot); + struct.compacts.add(_elem725); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ShowCompactRespons oprot.writeFieldBegin(COMPACTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.compacts.size())); - for (ShowCompactResponseElement _iter719 : struct.compacts) + for (ShowCompactResponseElement _iter727 : struct.compacts) { - _iter719.write(oprot); + _iter727.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9