commit e178a9b9e4cc429208a25ceb6aa59ab27ab10c9c Author: Daniel Dai Date: Sat Aug 3 17:51:22 2019 -0700 HIVE-21637 diff --git a/beeline/pom.xml b/beeline/pom.xml index 19ec53eba6..0bf065d802 100644 --- a/beeline/pom.xml +++ b/beeline/pom.xml @@ -105,6 +105,12 @@ tests test + + org.apache.hive.hcatalog + hive-hcatalog-server-extensions + ${project.version} + test + org.apache.hive hive-service diff --git a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java index 5f9d809ab2..6959febf42 100644 --- a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java +++ b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java @@ -333,7 +333,7 @@ public PartitionFiles next() { Partition p = partitionIter.next(); Iterator fileIterator; //For transactional tables, the actual file copy will be done by acid write event during replay of commit txn. - if (!TxnUtils.isTransactionalTable(t)) { + if (!TxnUtils.isTransactionalTable(t) && p.getSd() != null) { List files = Lists.newArrayList(new FileIterator(p.getSd().getLocation())); fileIterator = files.iterator(); } else { @@ -760,7 +760,8 @@ public void onUpdateTableColumnStat(UpdateTableColumnStatEvent updateTableColumn .buildUpdateTableColumnStatMessage(updateTableColumnStatEvent.getColStats(), updateTableColumnStatEvent.getTableObj(), updateTableColumnStatEvent.getTableParameters(), - updateTableColumnStatEvent.getWriteId()); + updateTableColumnStatEvent.getWriteId(), + updateTableColumnStatEvent.getWriteIds()); NotificationEvent event = new NotificationEvent(0, now(), EventType.UPDATE_TABLE_COLUMN_STAT.toString(), msgEncoder.getSerializer().serialize(msg)); ColumnStatisticsDesc statDesc = updateTableColumnStatEvent.getColStats().getStatsDesc(); @@ -790,7 +791,8 @@ public void onUpdatePartitionColumnStat(UpdatePartitionColumnStatEvent updatePar updatePartColStatEvent.getPartVals(), updatePartColStatEvent.getPartParameters(), updatePartColStatEvent.getTableObj(), - updatePartColStatEvent.getWriteId()); + updatePartColStatEvent.getWriteId(), + updatePartColStatEvent.getWriteIds()); NotificationEvent event = new NotificationEvent(0, now(), EventType.UPDATE_PARTITION_COLUMN_STAT.toString(), msgEncoder.getSerializer().serialize(msg)); ColumnStatisticsDesc statDesc = updatePartColStatEvent.getPartColStats().getStatsDesc(); diff --git a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java index efafe0c641..afa17613fa 100644 --- a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java +++ b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java @@ -208,7 +208,7 @@ public void onCreateTable(CreateTableEvent tableEvent) throws MetaException { Configuration conf = handler.getConf(); Table newTbl; try { - newTbl = handler.get_table_core(tbl.getCatName(), tbl.getDbName(), tbl.getTableName()) + newTbl = handler.get_table_core(tbl.getCatName(), tbl.getDbName(), tbl.getTableName(), null) .deepCopy(); newTbl.getParameters().put( HCatConstants.HCAT_MSGBUS_TOPIC_NAME, diff --git a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java 
b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java index bc67d03078..68612f449d 100644 --- a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java +++ b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java @@ -391,16 +391,16 @@ public void testNoBuckets() throws Exception { Assert.assertEquals("", 0, BucketCodec.determineVersion(536870912).decodeWriterId(536870912)); rs = queryTable(driver,"select ROW__ID, a, b, INPUT__FILE__NAME from default.streamingnobuckets order by ROW__ID"); - Assert.assertTrue(rs.get(0), rs.get(0).startsWith("{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\tfoo\tbar")); - Assert.assertTrue(rs.get(0), rs.get(0).endsWith("streamingnobuckets/delta_0000001_0000001_0000/bucket_00000")); - Assert.assertTrue(rs.get(1), rs.get(1).startsWith("{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\ta1\tb2")); - Assert.assertTrue(rs.get(1), rs.get(1).endsWith("streamingnobuckets/delta_0000002_0000003/bucket_00000")); - Assert.assertTrue(rs.get(2), rs.get(2).startsWith("{\"writeid\":2,\"bucketid\":536870912,\"rowid\":1}\ta3\tb4")); - Assert.assertTrue(rs.get(2), rs.get(2).endsWith("streamingnobuckets/delta_0000002_0000003/bucket_00000")); - Assert.assertTrue(rs.get(3), rs.get(3).startsWith("{\"writeid\":3,\"bucketid\":536870912,\"rowid\":0}\ta5\tb6")); - Assert.assertTrue(rs.get(3), rs.get(3).endsWith("streamingnobuckets/delta_0000002_0000003/bucket_00000")); - Assert.assertTrue(rs.get(4), rs.get(4).startsWith("{\"writeid\":3,\"bucketid\":536870912,\"rowid\":1}\ta7\tb8")); - Assert.assertTrue(rs.get(4), rs.get(4).endsWith("streamingnobuckets/delta_0000002_0000003/bucket_00000")); + Assert.assertTrue(rs.get(0), rs.get(0).startsWith("{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\tfoo\tbar")); + Assert.assertTrue(rs.get(0), rs.get(0).endsWith("streamingnobuckets/delta_0000002_0000002_0000/bucket_00000")); + Assert.assertTrue(rs.get(1), rs.get(1).startsWith("{\"writeid\":3,\"bucketid\":536870912,\"rowid\":0}\ta1\tb2")); + Assert.assertTrue(rs.get(1), rs.get(1).endsWith("streamingnobuckets/delta_0000003_0000004/bucket_00000")); + Assert.assertTrue(rs.get(2), rs.get(2).startsWith("{\"writeid\":3,\"bucketid\":536870912,\"rowid\":1}\ta3\tb4")); + Assert.assertTrue(rs.get(2), rs.get(2).endsWith("streamingnobuckets/delta_0000003_0000004/bucket_00000")); + Assert.assertTrue(rs.get(3), rs.get(3).startsWith("{\"writeid\":4,\"bucketid\":536870912,\"rowid\":0}\ta5\tb6")); + Assert.assertTrue(rs.get(3), rs.get(3).endsWith("streamingnobuckets/delta_0000003_0000004/bucket_00000")); + Assert.assertTrue(rs.get(4), rs.get(4).startsWith("{\"writeid\":4,\"bucketid\":536870912,\"rowid\":1}\ta7\tb8")); + Assert.assertTrue(rs.get(4), rs.get(4).endsWith("streamingnobuckets/delta_0000003_0000004/bucket_00000")); queryTable(driver, "update default.streamingnobuckets set a=0, b=0 where a='a7'"); queryTable(driver, "delete from default.streamingnobuckets where a='a1'"); @@ -415,14 +415,14 @@ public void testNoBuckets() throws Exception { runWorker(conf); rs = queryTable(driver,"select ROW__ID, a, b, INPUT__FILE__NAME from default.streamingnobuckets order by ROW__ID"); - Assert.assertTrue(rs.get(0), rs.get(0).startsWith("{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\tfoo\tbar")); - Assert.assertTrue(rs.get(0), rs.get(0).endsWith("streamingnobuckets/base_0000005_v0000025/bucket_00000")); - Assert.assertTrue(rs.get(1), rs.get(1).startsWith("{\"writeid\":2,\"bucketid\":536870912,\"rowid\":1}\ta3\tb4")); - 
Assert.assertTrue(rs.get(1), rs.get(1).endsWith("streamingnobuckets/base_0000005_v0000025/bucket_00000")); - Assert.assertTrue(rs.get(2), rs.get(2).startsWith("{\"writeid\":3,\"bucketid\":536870912,\"rowid\":0}\ta5\tb6")); - Assert.assertTrue(rs.get(2), rs.get(2).endsWith("streamingnobuckets/base_0000005_v0000025/bucket_00000")); - Assert.assertTrue(rs.get(3), rs.get(3).startsWith("{\"writeid\":4,\"bucketid\":536870912,\"rowid\":0}\t0\t0")); - Assert.assertTrue(rs.get(3), rs.get(3).endsWith("streamingnobuckets/base_0000005_v0000025/bucket_00000")); + Assert.assertTrue(rs.get(0), rs.get(0).startsWith("{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\tfoo\tbar")); + Assert.assertTrue(rs.get(0), rs.get(0).endsWith("streamingnobuckets/base_0000006_v0000025/bucket_00000")); + Assert.assertTrue(rs.get(1), rs.get(1).startsWith("{\"writeid\":3,\"bucketid\":536870912,\"rowid\":1}\ta3\tb4")); + Assert.assertTrue(rs.get(1), rs.get(1).endsWith("streamingnobuckets/base_0000006_v0000025/bucket_00000")); + Assert.assertTrue(rs.get(2), rs.get(2).startsWith("{\"writeid\":4,\"bucketid\":536870912,\"rowid\":0}\ta5\tb6")); + Assert.assertTrue(rs.get(2), rs.get(2).endsWith("streamingnobuckets/base_0000006_v0000025/bucket_00000")); + Assert.assertTrue(rs.get(3), rs.get(3).startsWith("{\"writeid\":5,\"bucketid\":536870912,\"rowid\":0}\t0\t0")); + Assert.assertTrue(rs.get(3), rs.get(3).endsWith("streamingnobuckets/base_0000006_v0000025/bucket_00000")); } /** @@ -908,7 +908,7 @@ private void testTransactionBatchCommit_Delimited(UserGroupInformation ugi) thro txnBatch.write("1,Hello streaming".getBytes()); txnBatch.commit(); - checkDataWritten(partLoc, 1, 10, 1, 1, "{1, Hello streaming}"); + checkDataWritten(partLoc, 3, 12, 1, 1, "{1, Hello streaming}"); Assert.assertEquals(TransactionBatch.TxnState.COMMITTED , txnBatch.getCurrentTransactionState()); @@ -920,11 +920,11 @@ private void testTransactionBatchCommit_Delimited(UserGroupInformation ugi) thro txnBatch.write("2,Welcome to streaming".getBytes()); // data should not be visible - checkDataWritten(partLoc, 1, 10, 1, 1, "{1, Hello streaming}"); + checkDataWritten(partLoc, 3, 12, 1, 1, "{1, Hello streaming}"); txnBatch.commit(); - checkDataWritten(partLoc, 1, 10, 1, 1, "{1, Hello streaming}", + checkDataWritten(partLoc, 3, 12, 1, 1, "{1, Hello streaming}", "{2, Welcome to streaming}"); txnBatch.close(); @@ -976,7 +976,7 @@ private void testTransactionBatchCommit_Regex(UserGroupInformation ugi) throws E txnBatch.write("1,Hello streaming".getBytes()); txnBatch.commit(); - checkDataWritten(partLoc, 1, 10, 1, 1, "{1, Hello streaming}"); + checkDataWritten(partLoc, 3, 12, 1, 1, "{1, Hello streaming}"); Assert.assertEquals(TransactionBatch.TxnState.COMMITTED , txnBatch.getCurrentTransactionState()); @@ -988,11 +988,11 @@ private void testTransactionBatchCommit_Regex(UserGroupInformation ugi) throws E txnBatch.write("2,Welcome to streaming".getBytes()); // data should not be visible - checkDataWritten(partLoc, 1, 10, 1, 1, "{1, Hello streaming}"); + checkDataWritten(partLoc, 3, 12, 1, 1, "{1, Hello streaming}"); txnBatch.commit(); - checkDataWritten(partLoc, 1, 10, 1, 1, "{1, Hello streaming}", + checkDataWritten(partLoc, 3, 12, 1, 1, "{1, Hello streaming}", "{2, Welcome to streaming}"); txnBatch.close(); @@ -1038,7 +1038,7 @@ public void testTransactionBatchCommit_Json() throws Exception { txnBatch.write(rec1.getBytes()); txnBatch.commit(); - checkDataWritten(partLoc, 1, 10, 1, 1, "{1, Hello streaming}"); + checkDataWritten(partLoc, 3, 12, 1, 1, "{1, Hello 
streaming}"); Assert.assertEquals(TransactionBatch.TxnState.COMMITTED , txnBatch.getCurrentTransactionState()); @@ -1165,7 +1165,7 @@ public void testTransactionBatchAbortAndCommit() throws Exception { txnBatch.write("2,Welcome to streaming".getBytes()); txnBatch.commit(); - checkDataWritten(partLoc, 1, 10, 1, 1, "{1, Hello streaming}", + checkDataWritten(partLoc, 3, 12, 1, 1, "{1, Hello streaming}", "{2, Welcome to streaming}"); txnBatch.close(); @@ -1184,13 +1184,13 @@ public void testMultipleTransactionBatchCommits() throws Exception { txnBatch.write("1,Hello streaming".getBytes()); txnBatch.commit(); String validationQuery = "select id, msg from " + dbName + "." + tblName + " order by id, msg"; - checkDataWritten2(partLoc, 1, 10, 1, validationQuery, false, "1\tHello streaming"); + checkDataWritten2(partLoc, 3, 12, 1, validationQuery, false, "1\tHello streaming"); txnBatch.beginNextTransaction(); txnBatch.write("2,Welcome to streaming".getBytes()); txnBatch.commit(); - checkDataWritten2(partLoc, 1, 10, 1, validationQuery, true, "1\tHello streaming", + checkDataWritten2(partLoc, 3, 12, 1, validationQuery, true, "1\tHello streaming", "2\tWelcome to streaming"); txnBatch.close(); @@ -1201,14 +1201,14 @@ public void testMultipleTransactionBatchCommits() throws Exception { txnBatch.write("3,Hello streaming - once again".getBytes()); txnBatch.commit(); - checkDataWritten2(partLoc, 1, 20, 2, validationQuery, false, "1\tHello streaming", + checkDataWritten2(partLoc, 3, 22, 2, validationQuery, false, "1\tHello streaming", "2\tWelcome to streaming", "3\tHello streaming - once again"); txnBatch.beginNextTransaction(); txnBatch.write("4,Welcome to streaming - once again".getBytes()); txnBatch.commit(); - checkDataWritten2(partLoc, 1, 20, 2, validationQuery, true, "1\tHello streaming", + checkDataWritten2(partLoc, 3, 22, 2, validationQuery, true, "1\tHello streaming", "2\tWelcome to streaming", "3\tHello streaming - once again", "4\tWelcome to streaming - once again"); @@ -1245,7 +1245,7 @@ public void testInterleavedTransactionBatchCommits() throws Exception { txnBatch2.commit(); String validationQuery = "select id, msg from " + dbName + "." 
+ tblName + " order by id, msg"; - checkDataWritten2(partLoc, 11, 20, 1, + checkDataWritten2(partLoc, 13, 22, 1, validationQuery, true, "3\tHello streaming - once again"); txnBatch1.commit(); @@ -1265,7 +1265,7 @@ public void testInterleavedTransactionBatchCommits() throws Exception { Assert.assertTrue("", logicalLength == actualLength); } } - checkDataWritten2(partLoc, 1, 20, 2, + checkDataWritten2(partLoc, 3, 22, 2, validationQuery, false,"1\tHello streaming", "3\tHello streaming - once again"); txnBatch1.beginNextTransaction(); @@ -1290,19 +1290,19 @@ public void testInterleavedTransactionBatchCommits() throws Exception { Assert.assertTrue("", logicalLength <= actualLength); } } - checkDataWritten2(partLoc, 1, 20, 2, + checkDataWritten2(partLoc, 3, 22, 2, validationQuery, true,"1\tHello streaming", "3\tHello streaming - once again"); txnBatch1.commit(); - checkDataWritten2(partLoc, 1, 20, 2, + checkDataWritten2(partLoc, 3, 22, 2, validationQuery, false, "1\tHello streaming", "2\tWelcome to streaming", "3\tHello streaming - once again"); txnBatch2.commit(); - checkDataWritten2(partLoc, 1, 20, 2, + checkDataWritten2(partLoc, 3, 22, 2, validationQuery, true, "1\tHello streaming", "2\tWelcome to streaming", "3\tHello streaming - once again", diff --git a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java index 0212e076cd..0e1df69656 100644 --- a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java +++ b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java @@ -263,11 +263,6 @@ public boolean dropTable(String catName, String dbName, String tableName) } } - @Override - public Table getTable(String catName, String dbName, String tableName) throws MetaException { - return objectStore.getTable(catName, dbName, tableName); - } - @Override public Table getTable(String catName, String dbName, String tableName, String writeIdList) throws MetaException { @@ -280,12 +275,6 @@ public boolean addPartition(Partition part) return objectStore.addPartition(part); } - @Override - public Partition getPartition(String catName, String dbName, String tableName, List partVals) - throws MetaException, NoSuchObjectException { - return objectStore.getPartition(catName, dbName, tableName, partVals); - } - @Override public Partition getPartition(String catName, String dbName, String tableName, List partVals, String writeIdList) @@ -305,15 +294,15 @@ public boolean dropPartition(String catName, String dbName, String tableName, Li } @Override - public List getPartitions(String catName, String dbName, String tableName, int max) + public List getPartitions(String catName, String dbName, String tableName, int max, String writeIdList) throws MetaException, NoSuchObjectException { - return objectStore.getPartitions(catName, dbName, tableName, max); + return objectStore.getPartitions(catName, dbName, tableName, max, writeIdList); } @Override public Map getPartitionLocations(String catName, String dbName, String tblName, - String baseLocationToNotShow, int max) { - return objectStore.getPartitionLocations(catName, dbName, tblName, baseLocationToNotShow, max); + String baseLocationToNotShow, int max, String writeIdList) { + return objectStore.getPartitionLocations(catName, dbName, tblName, baseLocationToNotShow, max, writeIdList); } @Override @@ -378,9 +367,9 @@ public Table alterTable(String 
catName, String dbName, String name, Table newTab } @Override - public List listPartitionNames(String catName, String dbName, String tblName, short maxParts) + public List listPartitionNames(String catName, String dbName, String tblName, short maxParts, String writeIdList) throws MetaException { - return objectStore.listPartitionNames(catName, dbName, tblName, maxParts); + return objectStore.listPartitionNames(catName, dbName, tblName, maxParts, writeIdList); } @Override @@ -388,7 +377,7 @@ public PartitionValuesResponse listPartitionValues(String catName, String db_nam String tbl_name, List cols, boolean applyDistinct, String filter, boolean ascending, List order, - long maxParts) throws MetaException { + long maxParts, String writeIdList) throws MetaException { return null; } @@ -416,42 +405,43 @@ public Partition alterPartition(String catName, String dbName, String tblName, L @Override public List getPartitionsByFilter(String catName, String dbName, String tblName, - String filter, short maxParts) throws MetaException, NoSuchObjectException { - return objectStore.getPartitionsByFilter(catName, dbName, tblName, filter, maxParts); + String filter, short maxParts, String writeIdList) throws MetaException, NoSuchObjectException { + return objectStore.getPartitionsByFilter(catName, dbName, tblName, filter, maxParts, writeIdList); } @Override public List getPartitionSpecsByFilterAndProjection(Table table, - GetPartitionsProjectionSpec projectionSpec, GetPartitionsFilterSpec filterSpec) + GetPartitionsProjectionSpec projectionSpec, GetPartitionsFilterSpec filterSpec, String writeIdList) throws MetaException, NoSuchObjectException { - return objectStore.getPartitionSpecsByFilterAndProjection(table, projectionSpec, filterSpec); + return objectStore.getPartitionSpecsByFilterAndProjection(table, projectionSpec, filterSpec, writeIdList); } @Override public int getNumPartitionsByFilter(String catName, String dbName, String tblName, - String filter) throws MetaException, NoSuchObjectException { - return objectStore.getNumPartitionsByFilter(catName, dbName, tblName, filter); + String filter, String writeIdList) throws MetaException, NoSuchObjectException { + return objectStore.getNumPartitionsByFilter(catName, dbName, tblName, filter, writeIdList); } @Override public int getNumPartitionsByExpr(String catName, String dbName, String tblName, - byte[] expr) throws MetaException, NoSuchObjectException { - return objectStore.getNumPartitionsByExpr(catName, dbName, tblName, expr); + byte[] expr, String writeIdList) throws MetaException, NoSuchObjectException { + return objectStore.getNumPartitionsByExpr(catName, dbName, tblName, expr, writeIdList); } @Override public List getPartitionsByNames(String catName, String dbName, String tblName, - List partNames) + List partNames, String writeIdList) throws MetaException, NoSuchObjectException { return objectStore.getPartitionsByNames( - catName, dbName, tblName, partNames); + catName, dbName, tblName, partNames, writeIdList); } @Override public boolean getPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr, - String defaultPartitionName, short maxParts, List result) throws TException { + String defaultPartitionName, short maxParts, List result, + String writeIdList) throws TException { return objectStore.getPartitionsByExpr(catName, - dbName, tblName, expr, defaultPartitionName, maxParts, result); + dbName, tblName, expr, defaultPartitionName, maxParts, result, writeIdList); } @Override @@ -622,34 +612,36 @@ public Role getRole(String 
roleName) throws NoSuchObjectException { @Override public Partition getPartitionWithAuth(String catName, String dbName, String tblName, - List partVals, String userName, List groupNames) + List partVals, String userName, + List groupNames, String writeIdList) throws MetaException, NoSuchObjectException, InvalidObjectException { return objectStore.getPartitionWithAuth(catName, dbName, tblName, partVals, userName, - groupNames); + groupNames, writeIdList); } @Override public List getPartitionsWithAuth(String catName, String dbName, String tblName, - short maxParts, String userName, List groupNames) + short maxParts, String userName, + List groupNames, String writeIdList) throws MetaException, NoSuchObjectException, InvalidObjectException { return objectStore.getPartitionsWithAuth(catName, dbName, tblName, maxParts, userName, - groupNames); + groupNames, writeIdList); } @Override public List listPartitionNamesPs(String catName, String dbName, String tblName, - List partVals, short maxParts) + List partVals, short maxParts, String writeIdList) throws MetaException, NoSuchObjectException { - return objectStore.listPartitionNamesPs(catName, dbName, tblName, partVals, maxParts); + return objectStore.listPartitionNamesPs(catName, dbName, tblName, partVals, maxParts, writeIdList); } @Override public List listPartitionsPsWithAuth(String catName, String dbName, String tblName, List partVals, short maxParts, String userName, - List groupNames) + List groupNames, String writeIdList) throws MetaException, InvalidObjectException, NoSuchObjectException { return objectStore.listPartitionsPsWithAuth(catName, dbName, tblName, partVals, maxParts, - userName, groupNames); + userName, groupNames, writeIdList); } @Override @@ -720,12 +712,6 @@ public long cleanupEvents() { return objectStore.listTableColumnGrantsAll(catName, dbName, tableName, columnName); } - @Override - public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tableName, - List colNames) throws MetaException, NoSuchObjectException { - return objectStore.getTableColumnStatistics(catName, dbName, tableName, colNames); - } - @Override public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tableName, List colNames, @@ -817,14 +803,6 @@ public void setMetaStoreSchemaVersion(String schemaVersion, String comment) thro } - @Override - public List getPartitionColumnStatistics(String catName, String dbName, - String tblName, List colNames, - List partNames) - throws MetaException, NoSuchObjectException { - return objectStore.getPartitionColumnStatistics(catName, dbName, tblName , colNames, partNames); - } - @Override public List getPartitionColumnStatistics(String catName, String dbName, String tblName, List colNames, @@ -837,9 +815,9 @@ public void setMetaStoreSchemaVersion(String schemaVersion, String comment) thro @Override public boolean doesPartitionExist(String catName, String dbName, String tableName, - List partKeys, List partVals) + List partKeys, List partVals, String writeIdList) throws MetaException, NoSuchObjectException { - return objectStore.doesPartitionExist(catName, dbName, tableName, partKeys, partVals); + return objectStore.doesPartitionExist(catName, dbName, tableName, partKeys, partVals, writeIdList); } @Override @@ -905,13 +883,6 @@ public Function getFunction(String catName, String dbName, String funcName) return objectStore.getFunctions(catName, dbName, pattern); } - @Override - public AggrStats get_aggr_stats_for(String catName, String dbName, - String tblName, List 
partNames, List colNames) - throws MetaException { - return null; - } - @Override public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, List partNames, List colNames, @@ -1324,5 +1295,4 @@ public int deleteRuntimeStats(int maxRetainSecs) throws MetaException { NoSuchObjectException { return null; } - } diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStoreUpdateUsingEvents.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStoreUpdateUsingEvents.java index 285f30b008..a971ef3699 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStoreUpdateUsingEvents.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStoreUpdateUsingEvents.java @@ -10,6 +10,7 @@ import org.apache.hadoop.hive.metastore.*; import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; import org.apache.hadoop.hive.metastore.api.*; +import org.apache.hadoop.hive.metastore.cache.CachedStore.MergedColumnStatsForPartitions; import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; @@ -44,7 +45,6 @@ public void setUp() throws Exception { MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CACHED_RAW_STORE_MAX_CACHE_MEMORY, "-1Kb"); MetastoreConf.setVar(conf, ConfVars.TRANSACTIONAL_EVENT_LISTENERS, DbNotificationListener.class.getName()); MetastoreConf.setVar(conf, ConfVars.RAW_STORE_IMPL, "org.apache.hadoop.hive.metastore.cache.CachedStore"); - MetastoreConf.setBoolVar(conf, ConfVars.METASTORE_CACHE_CAN_USE_EVENT, true); MetastoreConf.setBoolVar(conf, ConfVars.HIVE_TXN_STATS_ENABLED, true); MetastoreConf.setBoolVar(conf, ConfVars.AGGREGATE_STATS_CACHE_ENABLED, false); MetaStoreTestUtils.setConfForStandloneMode(conf); @@ -120,84 +120,6 @@ private void comparePartitions(Partition part1, Partition part2) { Assert.assertEquals(part1.getLastAccessTime(), part2.getLastAccessTime()); } - @Test - public void testDatabaseOpsForUpdateUsingEvents() throws Exception { - RawStore rawStore = hmsHandler.getMS(); - - // Prewarm CachedStore - CachedStore.setCachePrewarmedState(false); - CachedStore.prewarm(rawStore); - - // Add a db via rawStore - String dbName = "testDatabaseOps"; - String dbOwner = "user1"; - Database db = createTestDb(dbName, dbOwner); - - hmsHandler.create_database(db); - db = rawStore.getDatabase(DEFAULT_CATALOG_NAME, dbName); - - // Read database via CachedStore - Database dbRead = sharedCache.getDatabaseFromCache(DEFAULT_CATALOG_NAME, dbName); - Assert.assertEquals(db, dbRead); - - // Add another db via rawStore - final String dbName1 = "testDatabaseOps1"; - Database db1 = createTestDb(dbName1, dbOwner); - hmsHandler.create_database(db1); - db1 = rawStore.getDatabase(DEFAULT_CATALOG_NAME, dbName1); - - // Read database via CachedStore - dbRead = sharedCache.getDatabaseFromCache(DEFAULT_CATALOG_NAME, dbName1); - Assert.assertEquals(db1, dbRead); - - // Alter the db via rawStore (can only alter owner or parameters) - dbOwner = "user2"; - Database newdb = new Database(db); - newdb.setOwnerName(dbOwner); - hmsHandler.alter_database(dbName, newdb); - newdb = rawStore.getDatabase(DEFAULT_CATALOG_NAME, dbName); - - // Read db via cachedStore - dbRead = sharedCache.getDatabaseFromCache(DEFAULT_CATALOG_NAME, dbName); - Assert.assertEquals(newdb, dbRead); - - // Add another db via rawStore - final String 
dbName2 = "testDatabaseOps2"; - Database db2 = createTestDb(dbName2, dbOwner); - hmsHandler.create_database(db2); - db2 = rawStore.getDatabase(DEFAULT_CATALOG_NAME, dbName2); - - // Alter db "testDatabaseOps" via rawStore - dbOwner = "user1"; - newdb = new Database(db); - newdb.setOwnerName(dbOwner); - hmsHandler.alter_database(dbName, newdb); - newdb = rawStore.getDatabase(DEFAULT_CATALOG_NAME, dbName); - - // Drop db "testDatabaseOps1" via rawStore - Database dropDb = rawStore.getDatabase(DEFAULT_CATALOG_NAME, dbName1); - hmsHandler.drop_database(dbName1, true, true); - - // Read the newly added db via CachedStore - dbRead = sharedCache.getDatabaseFromCache(DEFAULT_CATALOG_NAME, dbName2); - Assert.assertEquals(db2, dbRead); - - // Read the altered db via CachedStore (altered user from "user2" to "user1") - dbRead = sharedCache.getDatabaseFromCache(DEFAULT_CATALOG_NAME, dbName); - Assert.assertEquals(newdb, dbRead); - - // Try to read the dropped db after cache update - dbRead = sharedCache.getDatabaseFromCache(DEFAULT_CATALOG_NAME, dbName1); - Assert.assertEquals(null, dbRead); - - // Clean up - hmsHandler.drop_database(dbName, true, true); - hmsHandler.drop_database(dbName2, true, true); - sharedCache.getDatabaseCache().clear(); - sharedCache.clearTableCache(); - sharedCache.getSdCache().clear(); - } - @Test public void testTableOpsForUpdateUsingEvents() throws Exception { long lastEventId = -1; @@ -205,7 +127,7 @@ public void testTableOpsForUpdateUsingEvents() throws Exception { // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); - CachedStore.prewarm(rawStore); + CachedStore.prewarm(rawStore, conf); // Add a db via rawStore String dbName = "test_table_ops"; @@ -225,19 +147,18 @@ public void testTableOpsForUpdateUsingEvents() throws Exception { List ptnCols = new ArrayList(); Table tbl = createTestTbl(dbName, tblName, tblOwner, cols, ptnCols); hmsHandler.create_table(tbl); - tbl = rawStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName); + tbl = rawStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName, null); - // Read database, table via CachedStore - Database dbRead= sharedCache.getDatabaseFromCache(DEFAULT_CATALOG_NAME, dbName); - Assert.assertEquals(db, dbRead); - Table tblRead = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbName, tblName); + lastEventId = CachedStore.updateUsingNotificationEvents(rawStore, lastEventId, conf); + // Read table via CachedStore + Table tblRead = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbName, tblName, null); compareTables(tblRead, tbl); // Add a new table via rawStore String tblName2 = "tbl2"; Table tbl2 = createTestTbl(dbName, tblName2, tblOwner, cols, ptnCols); hmsHandler.create_table(tbl2); - tbl2 = rawStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName2); + tbl2 = rawStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName2, null); // Alter table "tbl" via rawStore tblOwner = "role1"; @@ -245,7 +166,7 @@ public void testTableOpsForUpdateUsingEvents() throws Exception { newTable.setOwner(tblOwner); newTable.setOwnerType(PrincipalType.ROLE); hmsHandler.alter_table(dbName, tblName, newTable); - newTable = rawStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName); + newTable = rawStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName, null); Assert.assertEquals("Owner of the table did not change.", tblOwner, newTable.getOwner()); Assert.assertEquals("Owner type of the table did not change", PrincipalType.ROLE, newTable.getOwnerType()); @@ -253,24 +174,25 @@ public void testTableOpsForUpdateUsingEvents() throws Exception { // 
Drop table "tbl2" via rawStore hmsHandler.drop_table(dbName, tblName2, true); + lastEventId = CachedStore.updateUsingNotificationEvents(rawStore, lastEventId, conf); // Read the altered "tbl" via CachedStore - tblRead = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbName, tblName); + tblRead = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbName, tblName, null); compareTables(tblRead, newTable); // Try to read the dropped "tbl2" via CachedStore (should throw exception) - tblRead = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbName, tblName2); + tblRead = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbName, tblName2, null); Assert.assertNull(tblRead); // Clean up hmsHandler.drop_database(dbName, true, true); - tblRead = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbName, tblName2); + lastEventId = CachedStore.updateUsingNotificationEvents(rawStore, lastEventId, conf); + tblRead = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbName, tblName2, null); Assert.assertNull(tblRead); - tblRead = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbName, tblName); + tblRead = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbName, tblName, null); Assert.assertNull(tblRead); - sharedCache.getDatabaseCache().clear(); sharedCache.clearTableCache(); sharedCache.getSdCache().clear(); } @@ -282,7 +204,7 @@ public void testPartitionOpsForUpdateUsingEvents() throws Exception { // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); - CachedStore.prewarm(rawStore); + CachedStore.prewarm(rawStore, conf); // Add a db via rawStore String dbName = "test_partition_ops"; @@ -304,7 +226,7 @@ public void testPartitionOpsForUpdateUsingEvents() throws Exception { ptnCols.add(ptnCol1); Table tbl = createTestTbl(dbName, tblName, tblOwner, cols, ptnCols); hmsHandler.create_table(tbl); - tbl = rawStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName); + tbl = rawStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName, null); final String ptnColVal1 = "aaa"; Map partParams = new HashMap(); @@ -313,7 +235,7 @@ public void testPartitionOpsForUpdateUsingEvents() throws Exception { 0, tbl.getSd(), partParams); ptn1.setCatName(DEFAULT_CATALOG_NAME); hmsHandler.add_partition(ptn1); - ptn1 = rawStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1)); + ptn1 = rawStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1), null); final String ptnColVal2 = "bbb"; Partition ptn2 = @@ -321,13 +243,11 @@ public void testPartitionOpsForUpdateUsingEvents() throws Exception { 0, tbl.getSd(), partParams); ptn2.setCatName(DEFAULT_CATALOG_NAME); hmsHandler.add_partition(ptn2); - ptn2 = rawStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal2)); + ptn2 = rawStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal2), null); - // Read database, table, partition via CachedStore - Database dbRead = sharedCache.getDatabaseFromCache(DEFAULT_CATALOG_NAME.toLowerCase(), dbName.toLowerCase()); - Assert.assertEquals(db, dbRead); + lastEventId = CachedStore.updateUsingNotificationEvents(rawStore, lastEventId, conf); Table tblRead = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME.toLowerCase(), - dbName.toLowerCase(), tblName.toLowerCase()); + dbName.toLowerCase(), tblName.toLowerCase(), null); compareTables(tbl, tblRead); Partition ptn1Read = sharedCache.getPartitionFromCache(DEFAULT_CATALOG_NAME.toLowerCase(), dbName.toLowerCase(), tblName.toLowerCase(), Arrays.asList(ptnColVal1)); @@ 
-343,22 +263,23 @@ public void testPartitionOpsForUpdateUsingEvents() throws Exception { 0, tbl.getSd(), partParams); ptn3.setCatName(DEFAULT_CATALOG_NAME); hmsHandler.add_partition(ptn3); - ptn3 = rawStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal3)); + ptn3 = rawStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal3), null); // Alter an existing partition ("aaa") via rawStore - ptn1 = rawStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1)); + ptn1 = rawStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1), null); final String ptnColVal1Alt = "aaa"; Partition ptn1Atl = new Partition(Arrays.asList(ptnColVal1Alt), dbName, tblName, 0, 0, tbl.getSd(), partParams); ptn1Atl.setCatName(DEFAULT_CATALOG_NAME); hmsHandler.alter_partitions(dbName, tblName, Arrays.asList(ptn1Atl)); - ptn1Atl = rawStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1Alt)); + ptn1Atl = rawStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1Alt), null); // Drop an existing partition ("bbb") via rawStore - Partition ptnDrop = rawStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal2)); + Partition ptnDrop = rawStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal2), null); hmsHandler.drop_partition(dbName, tblName, Arrays.asList(ptnColVal2), false); + lastEventId = CachedStore.updateUsingNotificationEvents(rawStore, lastEventId, conf); // Read the newly added partition via CachedStore Partition ptnRead = sharedCache.getPartitionFromCache(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal3)); @@ -374,6 +295,7 @@ public void testPartitionOpsForUpdateUsingEvents() throws Exception { // Drop table "tbl" via rawStore, it should remove the partition also hmsHandler.drop_table(dbName, tblName, true); + lastEventId = CachedStore.updateUsingNotificationEvents(rawStore, lastEventId, conf); ptnRead = sharedCache.getPartitionFromCache(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1Alt)); Assert.assertEquals(null, ptnRead); @@ -382,13 +304,12 @@ public void testPartitionOpsForUpdateUsingEvents() throws Exception { // Clean up rawStore.dropDatabase(DEFAULT_CATALOG_NAME, dbName); - sharedCache.getDatabaseCache().clear(); sharedCache.clearTableCache(); sharedCache.getSdCache().clear(); } - private void updateTableColStats(String dbName, String tblName, String[] colName, - double highValue, double avgColLen, boolean isTxnTable) throws Throwable { + private long updateTableColStats(String dbName, String tblName, String[] colName, + double highValue, double avgColLen, boolean isTxnTable, long lastEventId) throws Throwable { long writeId = -1; String validWriteIds = null; if (isTxnTable) { @@ -412,6 +333,7 @@ private void updateTableColStats(String dbName, String tblName, String[] colName // write stats objs persistently hmsHandler.update_table_column_statistics_req(setTblColStat); + lastEventId = CachedStore.updateUsingNotificationEvents(rawStore, lastEventId, null); validateTablePara(dbName, tblName); ColumnStatistics colStatsCache = sharedCache.getTableColStatsFromCache(DEFAULT_CATALOG_NAME, @@ -423,10 +345,11 @@ private void updateTableColStats(String dbName, String tblName, String[] colName dbName, tblName, Lists.newArrayList(colName[1]), validWriteIds, true); Assert.assertEquals(colStatsCache.getStatsObj().get(0).getColName(), colName[1]); 
verifyStatString(colStatsCache.getStatsObj().get(0), colName[1], avgColLen); + return lastEventId; } - private void updatePartColStats(String dbName, String tblName, boolean isTxnTable, String[] colName, - String partName, double highValue, double avgColLen) throws Throwable { + private long updatePartColStats(String dbName, String tblName, boolean isTxnTable, String[] colName, + String partName, double highValue, double avgColLen, long lastEventId) throws Throwable { long writeId = -1; String validWriteIds = null; List txnIds = null; @@ -471,7 +394,7 @@ private void updatePartColStats(String dbName, String tblName, boolean isTxnTabl } else { Assert.assertEquals(statRowStore.get(0).isIsStatsCompliant(), false); } - + lastEventId = CachedStore.updateUsingNotificationEvents(rawStore, lastEventId, conf); List statSharedCache = sharedCache.getPartitionColStatsListFromCache(DEFAULT_CATALOG_NAME, dbName, tblName, Collections.singletonList(partName), Collections.singletonList(colName[1]), validWriteIds, true); @@ -489,6 +412,8 @@ private void updatePartColStats(String dbName, String tblName, boolean isTxnTabl statPartCache = sharedCache.getPartitionColStatsFromCache(DEFAULT_CATALOG_NAME, dbName, tblName, CachedStore.partNameToVals(partName), colName[1], validWriteIds); verifyStatString(statPartCache.getColumnStatisticsObj(), colName[1], avgColLen); + + return lastEventId; } private List getStatsObjects(String dbName, String tblName, String[] colName, @@ -572,7 +497,7 @@ private void setUpBeforeTest(String dbName, String tblName, String[] colName, bo // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); - CachedStore.prewarm(rawStore); + CachedStore.prewarm(rawStore, conf); // Add a db via rawStore Database db = createTestDb(dbName, dbOwner); @@ -670,8 +595,8 @@ private String getValidWriteIds(String dbName, String tblName) throws Throwable } private void validateTablePara(String dbName, String tblName) throws Throwable { - Table tblRead = rawStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName); - Table tblRead1 = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbName, tblName); + Table tblRead = rawStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName, null); + Table tblRead1 = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbName, tblName, null); Assert.assertEquals(tblRead.getParameters(), tblRead1.getParameters()); } @@ -681,45 +606,53 @@ private void validatePartPara(String dbName, String tblName, String partName) th //Assert.assertEquals(part1.getParameters(), part2.getParameters()); } - private void deleteColStats(String dbName, String tblName, String[] colName) throws Throwable { + private long deleteColStats(String dbName, String tblName, String[] colName, long lastEventId) throws Throwable { boolean status = hmsHandler.delete_table_column_statistics(dbName, tblName, null); Assert.assertEquals(status, true); + + lastEventId = CachedStore.updateUsingNotificationEvents(rawStore, lastEventId, conf); Assert.assertEquals(sharedCache.getTableColStatsFromCache(DEFAULT_CATALOG_NAME, dbName, tblName, Lists.newArrayList(colName[0]), null, true).getStatsObj().isEmpty(), true); Assert.assertEquals(sharedCache.getTableColStatsFromCache(DEFAULT_CATALOG_NAME, dbName, tblName, Lists.newArrayList(colName[1]), null, true).getStatsObj().isEmpty(), true); validateTablePara(dbName, tblName); + + return lastEventId; } - private void deletePartColStats(String dbName, String tblName, String[] colName, - String partName) throws Throwable { + private long deletePartColStats(String dbName, 
String tblName, String[] colName, + String partName, long lastEventId) throws Throwable { boolean status = hmsHandler.delete_partition_column_statistics(dbName, tblName, partName, colName[1]); Assert.assertEquals(status, true); + lastEventId = CachedStore.updateUsingNotificationEvents(rawStore, lastEventId, conf); SharedCache.ColumStatsWithWriteId colStats = sharedCache.getPartitionColStatsFromCache(DEFAULT_CATALOG_NAME, dbName, tblName, CachedStore.partNameToVals(partName), colName[1], null); Assert.assertEquals(colStats.getColumnStatisticsObj(), null); validateTablePara(dbName, tblName); + + return lastEventId; } private void testTableColStatInternal(String dbName, String tblName, boolean isTxnTable) throws Throwable { String[] colName = new String[]{"income", "name"}; double highValue = 1200000.4525; double avgColLen = 50.30; + long lastEventId = 0; setUpBeforeTest(dbName, tblName, colName, isTxnTable); - updateTableColStats(dbName, tblName, colName, highValue, avgColLen, isTxnTable); + lastEventId = updateTableColStats(dbName, tblName, colName, highValue, avgColLen, isTxnTable, lastEventId); if (!isTxnTable) { - deleteColStats(dbName, tblName, colName); + lastEventId = deleteColStats(dbName, tblName, colName, lastEventId); } tblName = "tbl_part"; createTableWithPart(dbName, tblName, colName, isTxnTable); - List partitions = hmsHandler.get_partition_names(dbName, tblName, (short)-1); + List partitions = hmsHandler.get_partition_names(dbName, tblName, (short)-1, null); String partName = partitions.get(0); - updatePartColStats(dbName, tblName, isTxnTable, colName, partName, highValue, avgColLen); + lastEventId = updatePartColStats(dbName, tblName, isTxnTable, colName, partName, highValue, avgColLen, lastEventId); if (!isTxnTable) { - deletePartColStats(dbName, tblName, colName, partName); + lastEventId = deletePartColStats(dbName, tblName, colName, partName, lastEventId); } } @@ -747,11 +680,12 @@ public void testTableColumnStatisticsTxnTableMulti() throws Throwable { setUpBeforeTest(dbName, null, colName, true); createTableWithPart(dbName, tblName, colName, true); - List partitions = hmsHandler.get_partition_names(dbName, tblName, (short)-1); + List partitions = hmsHandler.get_partition_names(dbName, tblName, (short)-1, null); String partName = partitions.get(0); - updatePartColStats(dbName, tblName, true, colName, partName, highValue, avgColLen); - updatePartColStats(dbName, tblName, true, colName, partName, 1200000.4521, avgColLen); - updatePartColStats(dbName, tblName, true, colName, partName, highValue, 34.78); + long lastEventId = 0; + lastEventId = updatePartColStats(dbName, tblName, true, colName, partName, highValue, avgColLen, lastEventId); + lastEventId = updatePartColStats(dbName, tblName, true, colName, partName, 1200000.4521, avgColLen, lastEventId); + lastEventId = updatePartColStats(dbName, tblName, true, colName, partName, highValue, 34.78, lastEventId); } @Test @@ -761,10 +695,11 @@ public void testTableColumnStatisticsTxnTableMultiAbort() throws Throwable { String[] colName = new String[]{"income", "name"}; double highValue = 1200000.4525; double avgColLen = 50.30; + long lastEventId = 0; setUpBeforeTest(dbName, null, colName, true); createTableWithPart(dbName, tblName, colName, true); - List partitions = hmsHandler.get_partition_names(dbName, tblName, (short)-1); + List partitions = hmsHandler.get_partition_names(dbName, tblName, (short)-1, null); String partName = partitions.get(0); List txnIds = allocateTxns(1); @@ -804,6 +739,7 @@ public void 
testTableColumnStatisticsTxnTableMultiAbort() throws Throwable { verifyStat(statRawStore.get(0).getStatsObj(), colName, highValue, avgColLen); Assert.assertEquals(statRawStore.get(0).isIsStatsCompliant(), false); + lastEventId = CachedStore.updateUsingNotificationEvents(rawStore, lastEventId, conf); List statsListFromCache = sharedCache.getPartitionColStatsListFromCache(DEFAULT_CATALOG_NAME, dbName, tblName, Collections.singletonList(partName), Collections.singletonList(colName[1]), validWriteIds, true); @@ -824,14 +760,15 @@ public void testTableColumnStatisticsTxnTableOpenTxn() throws Throwable { String[] colName = new String[]{"income", "name"}; double highValue = 1200000.4121; double avgColLen = 23.30; + long lastEventId = 0; setUpBeforeTest(dbName, null, colName, true); createTableWithPart(dbName, tblName, colName, true); - List partitions = hmsHandler.get_partition_names(dbName, tblName, (short)-1); + List partitions = hmsHandler.get_partition_names(dbName, tblName, (short)-1, null); String partName = partitions.get(0); // update part col stats successfully. - updatePartColStats(dbName, tblName, true, colName, partName, 1.2, 12.2); + lastEventId = updatePartColStats(dbName, tblName, true, colName, partName, 1.2, 12.2, lastEventId); List txnIds = allocateTxns(1); long writeId = allocateWriteIds(txnIds, dbName, tblName).get(0).getWriteId(); @@ -854,6 +791,7 @@ public void testTableColumnStatisticsTxnTableOpenTxn() throws Throwable { // write stats objs persistently hmsHandler.update_partition_column_statistics_req(setTblColStat); + lastEventId = CachedStore.updateUsingNotificationEvents(rawStore, lastEventId, conf); // keep the txn open and verify that the stats got is not compliant. @@ -904,9 +842,9 @@ private void verifyAggrStat(String dbName, String tblName, String[] colName, Lis Assert.assertEquals(aggrStatsCached, aggrStats); //Assert.assertEquals(aggrStatsCached.isIsStatsCompliant(), true); - List stats = sharedCache.getAggrStatsFromCache(DEFAULT_CATALOG_NAME, dbName, tblName, - Collections.singletonList(colName[0]), SharedCache.StatsType.ALL); - Assert.assertEquals(stats.get(0).getStatsData().getDoubleStats().getHighValue(), highValue, 0.01); + MergedColumnStatsForPartitions stats = CachedStore.mergeColStatsForPartitions(DEFAULT_CATALOG_NAME, dbName, tblName, Lists.newArrayList("income=1", "income=2"), + Collections.singletonList(colName[0]), sharedCache, SharedCache.StatsType.ALL, validWriteIds, false, 0.0); + Assert.assertEquals(stats.colStats.get(0).getStatsData().getDoubleStats().getHighValue(), highValue, 0.01); } @Test @@ -917,15 +855,17 @@ public void testAggrStat() throws Throwable { setUpBeforeTest(dbName, null, colName, false); createTableWithPart(dbName, tblName, colName, false); - List partitions = hmsHandler.get_partition_names(dbName, tblName, (short) -1); + List partitions = hmsHandler.get_partition_names(dbName, tblName, (short) -1, null); String partName = partitions.get(0); // update part col stats successfully. 
- updatePartColStats(dbName, tblName, false, colName, partitions.get(0), 2, 12); - updatePartColStats(dbName, tblName, false, colName, partitions.get(1), 4, 10); + long lastEventId = 0; + lastEventId = updatePartColStats(dbName, tblName, false, colName, partitions.get(0), 2, 12, lastEventId); + lastEventId = updatePartColStats(dbName, tblName, false, colName, partitions.get(1), 4, 10, lastEventId); + lastEventId = CachedStore.updateUsingNotificationEvents(rawStore, lastEventId, conf); verifyAggrStat(dbName, tblName, colName, partitions, false, 4); - updatePartColStats(dbName, tblName, false, colName, partitions.get(1), 3, 10); + lastEventId = updatePartColStats(dbName, tblName, false, colName, partitions.get(1), 3, 10, lastEventId); verifyAggrStat(dbName, tblName, colName, partitions, false, 3); } @@ -934,18 +874,19 @@ public void testAggrStatTxnTable() throws Throwable { String dbName = "aggr_stats_test_db_txn"; String tblName = "tbl_part"; String[] colName = new String[]{"income", "name"}; + long lastEventId = 0; setUpBeforeTest(dbName, null, colName, true); createTableWithPart(dbName, tblName, colName, true); - List partitions = hmsHandler.get_partition_names(dbName, tblName, (short)-1); + List partitions = hmsHandler.get_partition_names(dbName, tblName, (short)-1, null); String partName = partitions.get(0); // update part col stats successfully. - updatePartColStats(dbName, tblName, true, colName, partitions.get(0), 2, 12); - updatePartColStats(dbName, tblName, true, colName, partitions.get(1), 4, 10); + lastEventId = updatePartColStats(dbName, tblName, true, colName, partitions.get(0), 2, 12, lastEventId); + lastEventId = updatePartColStats(dbName, tblName, true, colName, partitions.get(1), 4, 10, lastEventId); verifyAggrStat(dbName, tblName, colName, partitions, true, 4); - updatePartColStats(dbName, tblName, true, colName, partitions.get(1), 3, 10); + lastEventId = updatePartColStats(dbName, tblName, true, colName, partitions.get(1), 3, 10, lastEventId); verifyAggrStat(dbName, tblName, colName, partitions, true, 3); List txnIds = allocateTxns(1); @@ -988,15 +929,16 @@ public void testAggrStatAbortTxn() throws Throwable { String dbName = "aggr_stats_test_db_txn_abort"; String tblName = "tbl_part"; String[] colName = new String[]{"income", "name"}; + long lastEventId = 0; setUpBeforeTest(dbName, null, colName, true); createTableWithPart(dbName, tblName, colName, true); - List partitions = hmsHandler.get_partition_names(dbName, tblName, (short)-1); + List partitions = hmsHandler.get_partition_names(dbName, tblName, (short)-1, null); String partName = partitions.get(0); // update part col stats successfully. 
- updatePartColStats(dbName, tblName, true, colName, partitions.get(0), 2, 12); - updatePartColStats(dbName, tblName, true, colName, partitions.get(1), 4, 10); + lastEventId = updatePartColStats(dbName, tblName, true, colName, partitions.get(0), 2, 12, lastEventId); + lastEventId = updatePartColStats(dbName, tblName, true, colName, partitions.get(1), 4, 10, lastEventId); verifyAggrStat(dbName, tblName, colName, partitions, true, 4); List txnIds = allocateTxns(4); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java index ff8a84fb39..22dbb4ba4a 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java @@ -683,11 +683,11 @@ public void testAcidInsertWithRemoveUnion() throws Exception { } String[][] expected2 = { - {"{\"writeid\":1,\"bucketid\":536870913,\"rowid\":0}\t1\t2", "warehouse/t/delta_0000001_0000001_0001/bucket_00000"}, - {"{\"writeid\":1,\"bucketid\":536870913,\"rowid\":1}\t3\t4", "warehouse/t/delta_0000001_0000001_0001/bucket_00000"}, - {"{\"writeid\":1,\"bucketid\":536870914,\"rowid\":0}\t5\t6", "warehouse/t/delta_0000001_0000001_0002/bucket_00000"}, - {"{\"writeid\":1,\"bucketid\":536870914,\"rowid\":1}\t7\t8", "warehouse/t/delta_0000001_0000001_0002/bucket_00000"}, - {"{\"writeid\":1,\"bucketid\":536870915,\"rowid\":0}\t9\t10", "warehouse/t/delta_0000001_0000001_0003/bucket_00000"} + {"{\"writeid\":2,\"bucketid\":536870913,\"rowid\":0}\t1\t2", "warehouse/t/delta_0000002_0000002_0001/bucket_00000"}, + {"{\"writeid\":2,\"bucketid\":536870913,\"rowid\":1}\t3\t4", "warehouse/t/delta_0000002_0000002_0001/bucket_00000"}, + {"{\"writeid\":2,\"bucketid\":536870914,\"rowid\":0}\t5\t6", "warehouse/t/delta_0000002_0000002_0002/bucket_00000"}, + {"{\"writeid\":2,\"bucketid\":536870914,\"rowid\":1}\t7\t8", "warehouse/t/delta_0000002_0000002_0002/bucket_00000"}, + {"{\"writeid\":2,\"bucketid\":536870915,\"rowid\":0}\t9\t10", "warehouse/t/delta_0000002_0000002_0003/bucket_00000"} }; Assert.assertEquals("Unexpected row count", expected2.length, rs.size()); for(int i = 0; i < expected2.length; i++) { @@ -728,11 +728,11 @@ public void testBucketedAcidInsertWithRemoveUnion() throws Exception { LOG.warn(s); } String[][] expected2 = { - {"{\"writeid\":1,\"bucketid\":536936448,\"rowid\":0}\t1\t2", "warehouse/t/delta_0000001_0000001_0000/bucket_00001"}, - {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t2\t4", "warehouse/t/delta_0000001_0000001_0000/bucket_00000"}, - {"{\"writeid\":1,\"bucketid\":536936448,\"rowid\":1}\t5\t6", "warehouse/t/delta_0000001_0000001_0000/bucket_00001"}, - {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":1}\t6\t8", "warehouse/t/delta_0000001_0000001_0000/bucket_00000"}, - {"{\"writeid\":1,\"bucketid\":536936448,\"rowid\":2}\t9\t10", "warehouse/t/delta_0000001_0000001_0000/bucket_00001"} + {"{\"writeid\":2,\"bucketid\":536936448,\"rowid\":0}\t1\t2", "warehouse/t/delta_0000002_0000002_0000/bucket_00001"}, + {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\t2\t4", "warehouse/t/delta_0000002_0000002_0000/bucket_00000"}, + {"{\"writeid\":2,\"bucketid\":536936448,\"rowid\":1}\t5\t6", "warehouse/t/delta_0000002_0000002_0000/bucket_00001"}, + {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":1}\t6\t8", "warehouse/t/delta_0000002_0000002_0000/bucket_00000"}, + {"{\"writeid\":2,\"bucketid\":536936448,\"rowid\":2}\t9\t10", 
"warehouse/t/delta_0000002_0000002_0000/bucket_00001"} }; Assert.assertEquals("Unexpected row count", expected2.length, rs.size()); for(int i = 0; i < expected2.length; i++) { @@ -866,14 +866,14 @@ public void testCrudMajorCompactionSplitGrouper() throws Exception { runStatementOnDriver("insert into " + tblName + " values(3,2),(3,3),(3,4),(4,2),(4,3),(4,4)", confForTez); runStatementOnDriver("delete from " + tblName + " where b = 2"); List expectedRs = new ArrayList<>(); - expectedRs.add("{\"writeid\":1,\"bucketid\":536870912,\"rowid\":1}\t2\t3"); - expectedRs.add("{\"writeid\":1,\"bucketid\":536870912,\"rowid\":2}\t2\t4"); - expectedRs.add("{\"writeid\":2,\"bucketid\":536870912,\"rowid\":1}\t3\t3"); - expectedRs.add("{\"writeid\":2,\"bucketid\":536870912,\"rowid\":2}\t3\t4"); - expectedRs.add("{\"writeid\":1,\"bucketid\":536936448,\"rowid\":1}\t1\t3"); - expectedRs.add("{\"writeid\":1,\"bucketid\":536936448,\"rowid\":2}\t1\t4"); - expectedRs.add("{\"writeid\":2,\"bucketid\":536936448,\"rowid\":1}\t4\t3"); - expectedRs.add("{\"writeid\":2,\"bucketid\":536936448,\"rowid\":2}\t4\t4"); + expectedRs.add("{\"writeid\":2,\"bucketid\":536870912,\"rowid\":1}\t2\t3"); + expectedRs.add("{\"writeid\":2,\"bucketid\":536870912,\"rowid\":2}\t2\t4"); + expectedRs.add("{\"writeid\":3,\"bucketid\":536870912,\"rowid\":1}\t3\t3"); + expectedRs.add("{\"writeid\":3,\"bucketid\":536870912,\"rowid\":2}\t3\t4"); + expectedRs.add("{\"writeid\":2,\"bucketid\":536936448,\"rowid\":1}\t1\t3"); + expectedRs.add("{\"writeid\":2,\"bucketid\":536936448,\"rowid\":2}\t1\t4"); + expectedRs.add("{\"writeid\":3,\"bucketid\":536936448,\"rowid\":1}\t4\t3"); + expectedRs.add("{\"writeid\":3,\"bucketid\":536936448,\"rowid\":2}\t4\t4"); List rs = runStatementOnDriver("select ROW__ID, * from " + tblName + " order by ROW__ID.bucketid, ROW__ID", confForTez); HiveConf.setVar(confForTez, HiveConf.ConfVars.SPLIT_GROUPING_MODE, "compactor"); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestMetaStoreEventListenerInRepl.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestMetaStoreEventListenerInRepl.java index 7121dfbb7f..1c07c61470 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestMetaStoreEventListenerInRepl.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestMetaStoreEventListenerInRepl.java @@ -164,7 +164,7 @@ public void tearDown() throws Throwable { // alter table events as well. 
eventsMap.put(AlterDatabaseEvent.class.getName(), null); eventsMap.put(CreateTableEvent.class.getName(), new HashSet<>(Arrays.asList("t7"))); - eventsMap.put(AlterTableEvent.class.getName(), new HashSet<>(Arrays.asList("t4", "t7"))); + eventsMap.put(AlterTableEvent.class.getName(), new HashSet<>(Arrays.asList("t4", "t7", "t1"))); eventsMap.put(DropTableEvent.class.getName(), new HashSet<>(Arrays.asList("t1"))); return eventsMap; diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java index e23fdd82a4..9358e50e1e 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java @@ -181,8 +181,8 @@ public void testAcidTablesBootstrapWithOpenTxnsTimeout() throws Throwable { // Allocate write ids for both tables t1 and t2 for all txns // t1=5+1(insert) and t2=5+2(insert) Map tables = new HashMap<>(); - tables.put("t1", numTxns+1L); - tables.put("t2", numTxns+2L); + tables.put("t1", numTxns+2L); + tables.put("t2", numTxns+3L); allocateWriteIdsForTables(primaryDbName, tables, txnHandler, txns, primaryConf); // Bootstrap dump with open txn timeout as 1s. @@ -398,7 +398,7 @@ public void testOpenTxnEvent() throws Throwable { primary.dump(primaryDbName, bootStrapDump.lastReplicationId); long lastReplId = Long.parseLong(bootStrapDump.lastReplicationId); - primary.testEventCounts(primaryDbName, lastReplId, null, null, 22); + primary.testEventCounts(primaryDbName, lastReplId, null, null, 23); // Test load replica.load(replicatedDbName, incrementalDump.dumpLocation) diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTablesBootstrap.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTablesBootstrap.java index f475b1e55d..a61af09be6 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTablesBootstrap.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTablesBootstrap.java @@ -211,10 +211,10 @@ public void testAcidTablesBootstrapDuringIncrementalWithOpenTxnsTimeout() throws prepareIncNonAcidData(primaryDbName); prepareIncAcidData(primaryDbName); // Allocate write ids for tables t1 and t2 for all txns - // t1=5+2(insert) and t2=5+5(insert, alter add column) + // t1=5+3(create, insert) and t2=5+7(create, insert, alter add column) Map tables = new HashMap<>(); - tables.put("t1", numTxns+2L); - tables.put("t2", numTxns+5L); + tables.put("t1", numTxns+3L); + tables.put("t2", numTxns+7L); allocateWriteIdsForTables(primaryDbName, tables, txnHandler, txns, primaryConf); // Bootstrap dump with open txn timeout as 1s. 
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java index 61be5a3a5b..f609e7caef 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java @@ -643,18 +643,18 @@ public void minorCompactWhileStreaming() throws Exception { Path resultFile = null; for (int i = 0; i < names.length; i++) { names[i] = stat[i].getPath().getName(); - if (names[i].equals("delta_0000001_0000004_v0000009")) { + if (names[i].equals("delta_0000002_0000005_v0000009")) { resultFile = stat[i].getPath(); } } Arrays.sort(names); - String[] expected = new String[]{"delta_0000001_0000002", - "delta_0000001_0000004_v0000009", "delta_0000003_0000004", "delta_0000005_0000006"}; + String[] expected = new String[]{"delta_0000002_0000003", + "delta_0000002_0000005_v0000009", "delta_0000004_0000005", "delta_0000006_0000007"}; if (!Arrays.deepEquals(expected, names)) { Assert.fail("Expected: " + Arrays.toString(expected) + ", found: " + Arrays.toString(names) + ",stat=" + toString(stat)); } checkExpectedTxnsPresent(null, new Path[]{resultFile}, columnNamesProperty, columnTypesProperty, - 0, 1L, 4L, 1); + 0, 2L, 5L, 1); } finally { if (connection != null) { @@ -697,9 +697,9 @@ public void majorCompactWhileStreaming() throws Exception { - Assert.fail("Expecting 1 file \"base_0000004\" and found " + stat.length + " files " + Arrays.toString(stat)); + Assert.fail("Expecting 1 file \"base_0000005\" and found " + stat.length + " files " + Arrays.toString(stat)); } String name = stat[0].getPath().getName(); - Assert.assertEquals("base_0000004_v0000009", name); - checkExpectedTxnsPresent(stat[0].getPath(), null, columnNamesProperty, columnTypesProperty, 0, 1L, 4L, 1); + Assert.assertEquals("base_0000005_v0000009", name); + checkExpectedTxnsPresent(stat[0].getPath(), null, columnNamesProperty, columnTypesProperty, 0, 2L, 5L, 1); } finally { if (connection != null) { connection.close(); @@ -740,17 +740,17 @@ private void minorCompactAfterAbort(boolean newStreamingAPI) throws Exception { Path resultDelta = null; for (int i = 0; i < names.length; i++) { names[i] = stat[i].getPath().getName(); - if (names[i].equals("delta_0000001_0000004_v0000009")) { + if (names[i].equals("delta_0000002_0000005_v0000009")) { resultDelta = stat[i].getPath(); } } Arrays.sort(names); - String[] expected = new String[]{"delta_0000001_0000002", - "delta_0000001_0000004_v0000009", "delta_0000003_0000004"}; + String[] expected = new String[]{"delta_0000002_0000003", + "delta_0000002_0000005_v0000009", "delta_0000004_0000005"}; if (!Arrays.deepEquals(expected, names)) { Assert.fail("Expected: " + Arrays.toString(expected) + ", found: " + Arrays.toString(names)); } - checkExpectedTxnsPresent(null, new Path[]{resultDelta}, columnNamesProperty, columnTypesProperty, 0, 1L, 4L, 1); + checkExpectedTxnsPresent(null, new Path[]{resultDelta}, columnNamesProperty, columnTypesProperty, 0, 2L, 5L, 1); } @Test @@ -787,12 +787,12 @@ private void majorCompactAfterAbort(boolean newStreamingAPI) throws Exception { - Assert.fail("Expecting 1 file \"base_0000004\" and found " + stat.length + " files " + Arrays.toString(stat)); + Assert.fail("Expecting 1 file \"base_0000005\" and found " + stat.length + " files " + Arrays.toString(stat)); } String name = stat[0].getPath().getName(); - if (!name.equals("base_0000004_v0000009")) { + if (!name.equals("base_0000005_v0000009")) { - Assert.fail("majorCompactAfterAbort name " + name + " not equals to base_0000004"); + Assert.fail("majorCompactAfterAbort name " + name + " not equals to base_0000005"); } - checkExpectedTxnsPresent(stat[0].getPath(), null, columnNamesProperty,
columnTypesProperty, 0, 1L, 4L, 1); + checkExpectedTxnsPresent(stat[0].getPath(), null, columnNamesProperty, columnTypesProperty, 0, 2L, 5L, 1); } @@ -817,12 +817,12 @@ public void mmTable() throws Exception { runMajorCompaction(dbName, tblName); verifyFooBarResult(tblName, 1); - verifyHasBase(table.getSd(), fs, "base_0000002_v0000006"); + verifyHasBase(table.getSd(), fs, "base_0000003_v0000006"); // Make sure we don't compact if we don't need to compact. runMajorCompaction(dbName, tblName); verifyFooBarResult(tblName, 1); - verifyHasBase(table.getSd(), fs, "base_0000002_v0000006"); + verifyHasBase(table.getSd(), fs, "base_0000003_v0000006"); } @Test @@ -938,7 +938,7 @@ public void mmTableBucketed() throws Exception { runMajorCompaction(dbName, tblName); verifyFooBarResult(tblName, 1); - String baseDir = "base_0000002_v0000006"; + String baseDir = "base_0000003_v0000006"; verifyHasBase(table.getSd(), fs, baseDir); FileStatus[] files = fs.listStatus(new Path(table.getSd().getLocation(), baseDir), @@ -965,7 +965,7 @@ public void mmTableOpenWriteId() throws Exception { long openTxnId = msClient.openTxn("test"); long openWriteId = msClient.allocateTableWriteId(openTxnId, dbName, tblName); - Assert.assertEquals(3, openWriteId); // Just check to make sure base_5 below is not new. + Assert.assertEquals(4, openWriteId); // Just check to make sure base_6 below is not new. executeStatementOnDriver("INSERT INTO " + tblName +"(a,b) VALUES(1, 'foo')", driver); executeStatementOnDriver("INSERT INTO " + tblName +"(a,b) VALUES(2, 'bar')", driver); @@ -974,22 +974,23 @@ public void mmTableOpenWriteId() throws Exception { - runMajorCompaction(dbName, tblName); // Don't compact 4 and 5; 3 is opened. + runMajorCompaction(dbName, tblName); // Don't compact 5 and 6; 4 is open. FileSystem fs = FileSystem.get(conf); - verifyHasBase(table.getSd(), fs, "base_0000002_v0000010"); + verifyHasBase(table.getSd(), fs, "base_0000003_v0000010"); verifyDirCount(table.getSd(), fs, 1, AcidUtils.baseFileFilter); verifyFooBarResult(tblName, 2); runCleaner(conf); - verifyHasDir(table.getSd(), fs, "delta_0000004_0000004_0000", AcidUtils.deltaFileFilter); verifyHasDir(table.getSd(), fs, "delta_0000005_0000005_0000", AcidUtils.deltaFileFilter); + verifyHasDir(table.getSd(), fs, "delta_0000006_0000006_0000", AcidUtils.deltaFileFilter); verifyFooBarResult(tblName, 2); - msClient.abortTxns(Lists.newArrayList(openTxnId)); // Now abort 3. + msClient.abortTxns(Lists.newArrayList(openTxnId)); // Now abort 4. - runMajorCompaction(dbName, tblName); // Compact 4 and 5. + runMajorCompaction(dbName, tblName); // Compact 5 and 6. + verifyFooBarResult(tblName, 2); - verifyHasBase(table.getSd(), fs, "base_0000005_v0000016"); + verifyHasBase(table.getSd(), fs, "base_0000006_v0000016"); runCleaner(conf); verifyDeltaCount(table.getSd(), fs, 0); } @@ -1050,8 +1051,8 @@ public void mmTablePartitioned() throws Exception { verifyFooBarResult(tblName, 3); verifyDeltaCount(p3.getSd(), fs, 1); - verifyHasBase(p1.getSd(), fs, "base_0000006_v0000010"); - verifyHasBase(p2.getSd(), fs, "base_0000006_v0000014"); + verifyHasBase(p1.getSd(), fs, "base_0000007_v0000010"); + verifyHasBase(p2.getSd(), fs, "base_0000007_v0000014"); executeStatementOnDriver("INSERT INTO " + tblName + " partition (ds) VALUES(1, 'foo', 2)", driver); executeStatementOnDriver("INSERT INTO " + tblName + " partition (ds) VALUES(2, 'bar', 2)", driver); @@ -1061,8 +1062,8 @@ public void mmTablePartitioned() throws Exception { // Make sure we don't compact if we don't need to compact; but do if we do.
verifyFooBarResult(tblName, 4); verifyDeltaCount(p3.getSd(), fs, 1); - verifyHasBase(p1.getSd(), fs, "base_0000006_v0000010"); - verifyHasBase(p2.getSd(), fs, "base_0000008_v0000023"); + verifyHasBase(p1.getSd(), fs, "base_0000007_v0000010"); + verifyHasBase(p2.getSd(), fs, "base_0000009_v0000023"); } @@ -1159,9 +1160,9 @@ private void majorCompactWhileStreamingForSplitUpdate(boolean newStreamingAPI) t - Assert.fail("Expecting 1 file \"base_0000004\" and found " + stat.length + " files " + Arrays.toString(stat)); + Assert.fail("Expecting 1 file \"base_0000005\" and found " + stat.length + " files " + Arrays.toString(stat)); } String name = stat[0].getPath().getName(); - Assert.assertEquals("base_0000004_v0000009", name); - checkExpectedTxnsPresent(stat[0].getPath(), null, columnNamesProperty, columnTypesProperty, 1, 1L, 4L, 2); + Assert.assertEquals("base_0000005_v0000009", name); + checkExpectedTxnsPresent(stat[0].getPath(), null, columnNamesProperty, columnTypesProperty, 1, 2L, 5L, 2); if (connection1 != null) { connection1.close(); } @@ -1209,18 +1210,18 @@ public void testMinorCompactionForSplitUpdateWithInsertsAndDeletes() throws Exce Path minorCompactedDelta = null; for (int i = 0; i < deltas.length; i++) { deltas[i] = stat[i].getPath().getName(); - if (deltas[i].equals("delta_0000001_0000003_v0000006")) { + if (deltas[i].equals("delta_0000002_0000004_v0000006")) { minorCompactedDelta = stat[i].getPath(); } } Arrays.sort(deltas); - String[] expectedDeltas = new String[]{"delta_0000001_0000001_0000", "delta_0000001_0000003_v0000006", - "delta_0000002_0000002_0000"}; + String[] expectedDeltas = new String[]{"delta_0000002_0000002_0000", "delta_0000002_0000004_v0000006", + "delta_0000003_0000003_0000"}; if (!Arrays.deepEquals(expectedDeltas, deltas)) { Assert.fail("Expected: " + Arrays.toString(expectedDeltas) + ", found: " + Arrays.toString(deltas)); } checkExpectedTxnsPresent(null, new Path[]{minorCompactedDelta}, columnNamesProperty, columnTypesProperty, - 0, 1L, 2L, 1); + 0, 2L, 3L, 1); // Verify that we have got correct set of delete_deltas.
FileStatus[] deleteDeltaStat = @@ -1229,17 +1230,17 @@ public void testMinorCompactionForSplitUpdateWithInsertsAndDeletes() throws Exce Path minorCompactedDeleteDelta = null; for (int i = 0; i < deleteDeltas.length; i++) { deleteDeltas[i] = deleteDeltaStat[i].getPath().getName(); - if (deleteDeltas[i].equals("delete_delta_0000001_0000003_v0000006")) { + if (deleteDeltas[i].equals("delete_delta_0000002_0000004_v0000006")) { minorCompactedDeleteDelta = deleteDeltaStat[i].getPath(); } } Arrays.sort(deleteDeltas); - String[] expectedDeleteDeltas = new String[]{"delete_delta_0000001_0000003_v0000006", "delete_delta_0000003_0000003_0000"}; + String[] expectedDeleteDeltas = new String[]{"delete_delta_0000002_0000004_v0000006", "delete_delta_0000004_0000004_0000"}; if (!Arrays.deepEquals(expectedDeleteDeltas, deleteDeltas)) { Assert.fail("Expected: " + Arrays.toString(expectedDeleteDeltas) + ", found: " + Arrays.toString(deleteDeltas)); } checkExpectedTxnsPresent(null, new Path[]{minorCompactedDeleteDelta}, columnNamesProperty, columnTypesProperty, - 0, 2L, 2L, 1); + 0, 3L, 3L, 1); } @Test @@ -1281,18 +1282,18 @@ public void testMinorCompactionForSplitUpdateWithOnlyInserts() throws Exception Path minorCompactedDelta = null; for (int i = 0; i < deltas.length; i++) { deltas[i] = stat[i].getPath().getName(); - if (deltas[i].equals("delta_0000001_0000002_v0000005")) { + if (deltas[i].equals("delta_0000002_0000003_v0000005")) { minorCompactedDelta = stat[i].getPath(); } } Arrays.sort(deltas); - String[] expectedDeltas = new String[]{"delta_0000001_0000001_0000", "delta_0000001_0000002_v0000005", - "delta_0000002_0000002_0000"}; + String[] expectedDeltas = new String[]{"delta_0000002_0000002_0000", "delta_0000002_0000003_v0000005", + "delta_0000003_0000003_0000"}; if (!Arrays.deepEquals(expectedDeltas, deltas)) { Assert.fail("Expected: " + Arrays.toString(expectedDeltas) + ", found: " + Arrays.toString(deltas)); } checkExpectedTxnsPresent(null, new Path[]{minorCompactedDelta}, columnNamesProperty, columnTypesProperty, - 0, 1L, 2L, 1); + 0, 2L, 3L, 1); //Assert that we have no delete deltas if there are no input delete events. FileStatus[] deleteDeltaStat = @@ -1358,18 +1359,18 @@ private void minorCompactWhileStreamingWithSplitUpdate(boolean newStreamingAPI) Path resultFile = null; for (int i = 0; i < names.length; i++) { names[i] = stat[i].getPath().getName(); - if (names[i].equals("delta_0000001_0000004_v0000009")) { + if (names[i].equals("delta_0000002_0000005_v0000009")) { resultFile = stat[i].getPath(); } } Arrays.sort(names); - String[] expected = new String[]{"delta_0000001_0000002", - "delta_0000001_0000004_v0000009", "delta_0000003_0000004", "delta_0000005_0000006"}; + String[] expected = new String[]{"delta_0000002_0000003", + "delta_0000002_0000005_v0000009", "delta_0000004_0000005", "delta_0000006_0000007"}; if (!Arrays.deepEquals(expected, names)) { Assert.fail("Expected: " + Arrays.toString(expected) + ", found: " + Arrays.toString(names)); } checkExpectedTxnsPresent(null, new Path[]{resultFile}, columnNamesProperty, columnTypesProperty, - 0, 1L, 4L, 1); + 0, 2L, 5L, 1); //Assert that we have no delete deltas if there are no input delete events. 
FileStatus[] deleteDeltaStat = @@ -1699,6 +1700,9 @@ public boolean isWriteIdAborted(long writeid) { public RangeResponse isWriteIdRangeAborted(long minWriteId, long maxWriteId) { return RangeResponse.ALL; } + + @Override + public void commitWriteId(long writeId) {}; }; OrcInputFormat aif = new OrcInputFormat(); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCrudCompactorOnTez.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCrudCompactorOnTez.java index 3b0d045be5..981b35f3f3 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCrudCompactorOnTez.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCrudCompactorOnTez.java @@ -149,7 +149,7 @@ public void testMajorCompaction() throws Exception { deltas[i] = filestatus[i].getPath().getName(); } Arrays.sort(deltas); - String[] expectedDeltas = new String[] { "delta_0000001_0000001_0000", "delta_0000002_0000002_0000" }; + String[] expectedDeltas = new String[] { "delta_0000002_0000002_0000", "delta_0000003_0000003_0000" }; if (!Arrays.deepEquals(expectedDeltas, deltas)) { Assert.fail("Expected: " + Arrays.toString(expectedDeltas) + ", found: " + Arrays.toString(deltas)); } @@ -161,20 +161,20 @@ public void testMajorCompaction() throws Exception { deleteDeltas[i] = deleteDeltaStat[i].getPath().getName(); } Arrays.sort(deleteDeltas); - String[] expectedDeleteDeltas = new String[] { "delete_delta_0000003_0000003_0000" }; + String[] expectedDeleteDeltas = new String[] { "delete_delta_0000004_0000004_0000" }; if (!Arrays.deepEquals(expectedDeleteDeltas, deleteDeltas)) { Assert.fail("Expected: " + Arrays.toString(expectedDeleteDeltas) + ", found: " + Arrays.toString(deleteDeltas)); } List expectedRsBucket0 = new ArrayList<>(); - expectedRsBucket0.add("{\"writeid\":1,\"bucketid\":536870912,\"rowid\":1}\t2\t3"); - expectedRsBucket0.add("{\"writeid\":1,\"bucketid\":536870912,\"rowid\":2}\t2\t4"); - expectedRsBucket0.add("{\"writeid\":2,\"bucketid\":536870912,\"rowid\":1}\t3\t3"); - expectedRsBucket0.add("{\"writeid\":2,\"bucketid\":536870912,\"rowid\":2}\t3\t4"); + expectedRsBucket0.add("{\"writeid\":2,\"bucketid\":536870912,\"rowid\":1}\t2\t3"); + expectedRsBucket0.add("{\"writeid\":2,\"bucketid\":536870912,\"rowid\":2}\t2\t4"); + expectedRsBucket0.add("{\"writeid\":3,\"bucketid\":536870912,\"rowid\":1}\t3\t3"); + expectedRsBucket0.add("{\"writeid\":3,\"bucketid\":536870912,\"rowid\":2}\t3\t4"); List expectedRsBucket1 = new ArrayList<>(); - expectedRsBucket1.add("{\"writeid\":1,\"bucketid\":536936448,\"rowid\":1}\t1\t3"); - expectedRsBucket1.add("{\"writeid\":1,\"bucketid\":536936448,\"rowid\":2}\t1\t4"); - expectedRsBucket1.add("{\"writeid\":2,\"bucketid\":536936448,\"rowid\":1}\t4\t3"); - expectedRsBucket1.add("{\"writeid\":2,\"bucketid\":536936448,\"rowid\":2}\t4\t4"); + expectedRsBucket1.add("{\"writeid\":2,\"bucketid\":536936448,\"rowid\":1}\t1\t3"); + expectedRsBucket1.add("{\"writeid\":2,\"bucketid\":536936448,\"rowid\":2}\t1\t4"); + expectedRsBucket1.add("{\"writeid\":3,\"bucketid\":536936448,\"rowid\":1}\t4\t3"); + expectedRsBucket1.add("{\"writeid\":3,\"bucketid\":536936448,\"rowid\":2}\t4\t4"); // Bucket 0 List rsBucket0 = executeStatementOnDriverAndReturnResults("select ROW__ID, * from " + tblName + " where ROW__ID.bucketid = 536870912 order by ROW__ID", driver); @@ -193,7 +193,7 @@ public void testMajorCompaction() throws Exception { bases[i] = filestatus[i].getPath().getName(); } 
Arrays.sort(bases); - String[] expectedBases = new String[] { "base_0000003_v0000008" }; + String[] expectedBases = new String[] { "base_0000004_v0000008" }; if (!Arrays.deepEquals(expectedBases, bases)) { Assert.fail("Expected: " + Arrays.toString(expectedBases) + ", found: " + Arrays.toString(bases)); } @@ -231,7 +231,7 @@ public void testMinorCompactionDisabled() throws Exception { deltas[i] = filestatus[i].getPath().getName(); } Arrays.sort(deltas); - String[] expectedDeltas = new String[] { "delta_0000001_0000001_0000", "delta_0000002_0000002_0000" }; + String[] expectedDeltas = new String[] { "delta_0000002_0000002_0000", "delta_0000003_0000003_0000" }; if (!Arrays.deepEquals(expectedDeltas, deltas)) { Assert.fail("Expected: " + Arrays.toString(expectedDeltas) + ", found: " + Arrays.toString(deltas)); } @@ -243,7 +243,7 @@ public void testMinorCompactionDisabled() throws Exception { deleteDeltas[i] = deleteDeltaStat[i].getPath().getName(); } Arrays.sort(deleteDeltas); - String[] expectedDeleteDeltas = new String[] { "delete_delta_0000003_0000003_0000" }; + String[] expectedDeleteDeltas = new String[] { "delete_delta_0000004_0000004_0000" }; if (!Arrays.deepEquals(expectedDeleteDeltas, deleteDeltas)) { Assert.fail("Expected: " + Arrays.toString(expectedDeleteDeltas) + ", found: " + Arrays.toString(deleteDeltas)); } @@ -281,12 +281,12 @@ public void testCompactionWithSchemaEvolutionAndBuckets() throws Exception { runCompaction(dbName, tblName, CompactionType.MAJOR, "ds=yesterday", "ds=today"); runCleaner(conf); List expectedRsBucket0PtnToday = new ArrayList<>(); - expectedRsBucket0PtnToday.add("{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t2\t3\tNULL\ttoday"); - expectedRsBucket0PtnToday.add("{\"writeid\":1,\"bucketid\":536870912,\"rowid\":1}\t2\t4\tNULL\ttoday"); - expectedRsBucket0PtnToday.add("{\"writeid\":3,\"bucketid\":536870912,\"rowid\":0}\t3\t3\t1001\ttoday"); + expectedRsBucket0PtnToday.add("{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\t2\t3\tNULL\ttoday"); + expectedRsBucket0PtnToday.add("{\"writeid\":2,\"bucketid\":536870912,\"rowid\":1}\t2\t4\tNULL\ttoday"); + expectedRsBucket0PtnToday.add("{\"writeid\":4,\"bucketid\":536870912,\"rowid\":0}\t3\t3\t1001\ttoday"); List expectedRsBucket1PtnToday = new ArrayList<>(); - expectedRsBucket1PtnToday.add("{\"writeid\":1,\"bucketid\":536936448,\"rowid\":1}\t1\t3\tNULL\ttoday"); - expectedRsBucket1PtnToday.add("{\"writeid\":3,\"bucketid\":536936448,\"rowid\":1}\t4\t4\t1005\ttoday"); + expectedRsBucket1PtnToday.add("{\"writeid\":2,\"bucketid\":536936448,\"rowid\":1}\t1\t3\tNULL\ttoday"); + expectedRsBucket1PtnToday.add("{\"writeid\":4,\"bucketid\":536936448,\"rowid\":1}\t4\t4\t1005\ttoday"); // Bucket 0, partition 'today' List rsCompactBucket0PtnToday = executeStatementOnDriverAndReturnResults("select ROW__ID, * from " + tblName + " where ROW__ID.bucketid = 536870912 and ds='today'", driver); @@ -325,15 +325,15 @@ public void testCompactionWithSchemaEvolutionNoBucketsMultipleReducers() throws runCompaction(dbName, tblName, CompactionType.MAJOR, "ds=yesterday", "ds=today"); runCleaner(hiveConf); List expectedRsPtnToday = new ArrayList<>(); - expectedRsPtnToday.add("{\"writeid\":1,\"bucketid\":536870912,\"rowid\":1}\t1\t3\tNULL\ttoday"); - expectedRsPtnToday.add("{\"writeid\":1,\"bucketid\":536870912,\"rowid\":2}\t2\t3\tNULL\ttoday"); - expectedRsPtnToday.add("{\"writeid\":1,\"bucketid\":536870912,\"rowid\":3}\t2\t4\tNULL\ttoday"); - 
expectedRsPtnToday.add("{\"writeid\":3,\"bucketid\":536870912,\"rowid\":0}\t3\t3\t1001\ttoday"); - expectedRsPtnToday.add("{\"writeid\":3,\"bucketid\":536870912,\"rowid\":2}\t4\t4\t1005\ttoday"); + expectedRsPtnToday.add("{\"writeid\":2,\"bucketid\":536870912,\"rowid\":1}\t1\t3\tNULL\ttoday"); + expectedRsPtnToday.add("{\"writeid\":2,\"bucketid\":536870912,\"rowid\":2}\t2\t3\tNULL\ttoday"); + expectedRsPtnToday.add("{\"writeid\":2,\"bucketid\":536870912,\"rowid\":3}\t2\t4\tNULL\ttoday"); + expectedRsPtnToday.add("{\"writeid\":4,\"bucketid\":536870912,\"rowid\":0}\t3\t3\t1001\ttoday"); + expectedRsPtnToday.add("{\"writeid\":4,\"bucketid\":536870912,\"rowid\":2}\t4\t4\t1005\ttoday"); List expectedRsPtnYesterday = new ArrayList<>(); - expectedRsPtnYesterday.add("{\"writeid\":1,\"bucketid\":536936448,\"rowid\":0}\t1\t4\tNULL\tyesterday"); - expectedRsPtnYesterday.add("{\"writeid\":3,\"bucketid\":536936448,\"rowid\":1}\t3\t4\t1002\tyesterday"); - expectedRsPtnYesterday.add("{\"writeid\":3,\"bucketid\":536936448,\"rowid\":2}\t4\t3\t1004\tyesterday"); + expectedRsPtnYesterday.add("{\"writeid\":2,\"bucketid\":536936448,\"rowid\":0}\t1\t4\tNULL\tyesterday"); + expectedRsPtnYesterday.add("{\"writeid\":4,\"bucketid\":536936448,\"rowid\":1}\t3\t4\t1002\tyesterday"); + expectedRsPtnYesterday.add("{\"writeid\":4,\"bucketid\":536936448,\"rowid\":2}\t4\t3\t1004\tyesterday"); // Partition 'today' List rsCompactPtnToday = executeStatementOnDriverAndReturnResults("select ROW__ID, * from " + tblName + " where ds='today'", driver); diff --git a/itests/qtest-accumulo/pom.xml b/itests/qtest-accumulo/pom.xml index abfb5cb413..3d0a23df59 100644 --- a/itests/qtest-accumulo/pom.xml +++ b/itests/qtest-accumulo/pom.xml @@ -188,6 +188,12 @@ + + org.apache.hive.hcatalog + hive-hcatalog-server-extensions + ${project.version} + test + org.apache.hadoop hadoop-common diff --git a/itests/qtest-spark/pom.xml b/itests/qtest-spark/pom.xml index 43b087e1d4..952ead5936 100644 --- a/itests/qtest-spark/pom.xml +++ b/itests/qtest-spark/pom.xml @@ -177,6 +177,12 @@ + + org.apache.hive.hcatalog + hive-hcatalog-server-extensions + ${project.version} + test + com.sun.jersey jersey-servlet diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/MetaStoreDumpUtility.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/MetaStoreDumpUtility.java index 2389c3bc68..e72c5216af 100644 --- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/MetaStoreDumpUtility.java +++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/MetaStoreDumpUtility.java @@ -41,7 +41,9 @@ import org.apache.commons.compress.compressors.bzip2.BZip2CompressorInputStream; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.ObjectStore; import org.apache.hadoop.hive.metastore.Warehouse; +import org.apache.hadoop.hive.metastore.cache.CachedStore; import org.apache.hive.testutils.HiveTestEnvSetup; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -223,6 +225,12 @@ public int compare(String str1, String str2) { conn.close(); + CachedStore.clearSharedCache(); + ObjectStore objStore = new ObjectStore(); + objStore.setConf(conf); + CachedStore.getSharedCache().initialize(conf); + CachedStore.prewarm(objStore, conf); + } catch (Exception e) { throw new RuntimeException("error while loading tpcds metastore dump", e); } diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java index d2c2ccd5ea..85ee9c41de 100644 --- 
a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java +++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java @@ -158,7 +158,6 @@ public void initConf() throws Exception { // Plug verifying metastore in for testing DirectSQL. conf.setVar(ConfVars.METASTORE_RAW_STORE_IMPL, "org.apache.hadoop.hive.metastore.VerifyingObjectStore"); - miniClusters.initConf(conf); } @@ -378,6 +377,7 @@ public void newSession(boolean canReuseSession) throws Exception { miniClusters.restartSessions(canReuseSession, ss, oldSs); closeSession(oldSs); + ss.initTxnMgr(conf); SessionState.start(ss); cliDriver = new CliDriver(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java index 2eb65918c9..ad03439cac 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java @@ -42,6 +42,7 @@ import org.apache.commons.lang.StringUtils; import org.apache.commons.lang3.tuple.ImmutablePair; import org.apache.commons.lang3.tuple.Pair; +import org.apache.curator.shaded.com.google.common.collect.Lists; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.hive.common.JavaUtils; @@ -1090,22 +1091,35 @@ private ValidTxnWriteIdList recordValidWriteIds(HiveTxnManager txnMgr) throws Lo } List txnTables = getTransactionalTableList(plan); ValidTxnWriteIdList txnWriteIds = null; - if (compactionWriteIds != null) { - /** - * This is kludgy: here we need to read with Compactor's snapshot/txn - * rather than the snapshot of the current {@code txnMgr}, in effect - * simulating a "flashback query" but can't actually share compactor's - * txn since it would run multiple statements. See more comments in - * {@link org.apache.hadoop.hive.ql.txn.compactor.Worker} where it start - * the compactor txn*/ - if (txnTables.size() != 1) { - throw new LockException("Unexpected tables in compaction: " + txnTables); - } - String fullTableName = txnTables.get(0); - txnWriteIds = new ValidTxnWriteIdList(compactorTxnId); - txnWriteIds.addTableValidWriteIdList(compactionWriteIds); - } else { - txnWriteIds = txnMgr.getValidWriteIds(txnTables, txnString); + + // If we have collected all required table writeid (in SemanticAnalyzer), skip fetch again + if (conf.get(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY) != null) { + txnWriteIds = new ValidTxnWriteIdList(conf.get(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY)); + for (String txnTable : txnTables) { + if (txnWriteIds.getTableValidWriteIdList(txnTable) == null) { + txnWriteIds = null; + break; + } + } + } + if (txnWriteIds == null) { + if (compactionWriteIds != null) { + /** + * This is kludgy: here we need to read with Compactor's snapshot/txn + * rather than the snapshot of the current {@code txnMgr}, in effect + * simulating a "flashback query" but can't actually share compactor's + * txn since it would run multiple statements. 
See more comments in + * {@link org.apache.hadoop.hive.ql.txn.compactor.Worker} where it starts + * the compactor txn*/ + if (txnTables.size() != 1) { + throw new LockException("Unexpected tables in compaction: " + txnTables); + } + String fullTableName = txnTables.get(0); + txnWriteIds = new ValidTxnWriteIdList(compactorTxnId); + txnWriteIds.addTableValidWriteIdList(compactionWriteIds); + } else { + txnWriteIds = txnMgr.getValidWriteIds(txnTables, txnString); + } } String writeIdStr = txnWriteIds.toString(); conf.set(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY, writeIdStr); @@ -1339,35 +1353,47 @@ public void releaseLocksAndCommitOrRollback(boolean commit, HiveTxnManager txnMa } // If we've opened a transaction we need to commit or rollback rather than explicitly // releasing the locks. - conf.unset(ValidTxnList.VALID_TXNS_KEY); - conf.unset(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY); if(!checkConcurrency()) { return; } - if (txnMgr.isTxnOpen()) { - if (commit) { - if(conf.getBoolVar(ConfVars.HIVE_IN_TEST) && conf.getBoolVar(ConfVars.HIVETESTMODEROLLBACKTXN)) { + try { + if (txnMgr.isTxnOpen()) { + if (commit) { + if(conf.getBoolVar(ConfVars.HIVE_IN_TEST) && conf.getBoolVar(ConfVars.HIVETESTMODEROLLBACKTXN)) { + txnMgr.rollbackTxn(); + } + else { + txnMgr.commitTxn();//both commit & rollback clear ALL locks for this tx + } + } else { txnMgr.rollbackTxn(); } - else { - txnMgr.commitTxn();//both commit & rollback clear ALL locks for this tx - } } else { - txnMgr.rollbackTxn(); + //since there is no tx, we only have locks for current query (if any) + if (ctx != null && ctx.getHiveLocks() != null) { + hiveLocks.addAll(ctx.getHiveLocks()); + } + txnMgr.releaseLocks(hiveLocks); } - } else { - //since there is no tx, we only have locks for current query (if any) - if (ctx != null && ctx.getHiveLocks() != null) { - hiveLocks.addAll(ctx.getHiveLocks()); + } finally { + hiveLocks.clear(); + if (ctx != null) { + ctx.setHiveLocks(null); + } + + for (String key : new String[] {ValidTxnList.VALID_TXNS_KEY, ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY, + ValidTxnList.COMPACTOR_VALID_TXNS_ID_KEY, ValidTxnWriteIdList.COMPACTOR_VALID_TABLES_WRITEIDS_KEY}) { + conf.unset(key); + SessionState.get().getConf().unset(key); } - txnMgr.releaseLocks(hiveLocks); - } - hiveLocks.clear(); - if (ctx != null) { - ctx.setHiveLocks(null); - } - perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.RELEASE_LOCKS); + try { + Hive.get().clearValidWriteIdList(); + } catch (HiveException e) { + LOG.error("Error clearing ValidWriteIdList; this should never happen: " + e); + } + perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.RELEASE_LOCKS); + } } /** @@ -2645,5 +2671,9 @@ public boolean hasResultSet() { public void setCompactionWriteIds(ValidWriteIdList val, long compactorTxnId) { this.compactionWriteIds = val; this.compactorTxnId = compactorTxnId; + if (val != null) { + conf.set(ValidTxnWriteIdList.COMPACTOR_VALID_TABLES_WRITEIDS_KEY, val.toString()); + } + conf.setLong(ValidTxnList.COMPACTOR_VALID_TXNS_ID_KEY, compactorTxnId); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/QueryState.java b/ql/src/java/org/apache/hadoop/hive/ql/QueryState.java index 267f7d041f..1077421ac4 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/QueryState.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/QueryState.java @@ -21,6 +21,8 @@ import java.util.Map; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager; +import org.apache.hadoop.hive.ql.metadata.Hive; +import
org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.plan.HiveOperation; import org.apache.hadoop.hive.ql.session.LineageState; import org.apache.hadoop.hive.ql.session.SessionState; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java index f207bf2fdb..c569b87161 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java @@ -2305,6 +2305,42 @@ public static TableSnapshot getTableSnapshot(Configuration conf, validWriteIdList != null ? validWriteIdList.toString() : null); } + /** + * Called by Hive.java for all DDL write operations. Advances the write id for + * the table via the transaction manager and stores it in the config. The write + * id is marked as committed in the config immediately: all DDL is + * auto-committed, so there is no chance of a rollback. + */ + public static ValidWriteIdList advanceWriteId(HiveConf conf, Table tbl) throws LockException { + if (!isTransactionalTable(tbl)) { + return null; + } + HiveTxnManager txnMgr = SessionState.get().getTxnMgr(); + long writeId = txnMgr.getTableWriteId(tbl.getDbName(), tbl.getTableName()); + List<String> txnTables = new ArrayList<>(); + String fullTableName = getFullTableName(tbl.getDbName(), tbl.getTableName()); + txnTables.add(fullTableName); + ValidTxnWriteIdList txnWriteIds; + if (conf.get(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY) != null) { + txnWriteIds = new ValidTxnWriteIdList(conf.get(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY)); + } else { + String txnString; + if (conf.get(ValidTxnList.VALID_TXNS_KEY) != null) { + txnString = conf.get(ValidTxnList.VALID_TXNS_KEY); + } else { + ValidTxnList txnIds = txnMgr.getValidTxns(); + txnString = txnIds.toString(); + } + txnWriteIds = txnMgr.getValidWriteIds(txnTables, txnString); + } + ValidWriteIdList writeIds = txnWriteIds.getTableValidWriteIdList(fullTableName); + if (writeIds != null) { + writeIds.commitWriteId(writeId); + conf.set(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY, txnWriteIds.toString()); + } + return writeIds; + } + /** * Returns ValidWriteIdList for the table with the given "dbName" and "tableName". * This is called when HiveConf has no list for the table.
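Reviewer note: advanceWriteId above is the heart of HIVE-21637. A DDL statement on a transactional table now allocates a write id, marks it committed in the cached ValidTxnWriteIdList, and re-serializes that list into the conf; the new branch in Driver.recordValidWriteIds (earlier in this patch) then reuses that cached list instead of fetching a fresh snapshot per query. A self-contained sketch of that cache-and-reuse flow, using a plain Map as a stand-in for HiveConf and a toy TableWriteIds in place of ValidTxnWriteIdList/ValidWriteIdList (all names below are illustrative, not Hive's API):

    import java.util.HashMap;
    import java.util.Map;

    public class WriteIdCacheSketch {
      // Toy stand-in for a per-table ValidWriteIdList: just a committed high-water mark.
      static class TableWriteIds {
        final String table;
        long highWatermark;
        TableWriteIds(String table, long hwm) { this.table = table; this.highWatermark = hwm; }
        void commitWriteId(long writeId) { highWatermark = Math.max(highWatermark, writeId); }
        @Override public String toString() { return table + ":" + highWatermark; }
        static TableWriteIds parse(String s) {
          int i = s.lastIndexOf(':');
          return new TableWriteIds(s.substring(0, i), Long.parseLong(s.substring(i + 1)));
        }
      }

      // Stand-in for ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY.
      static final String KEY = "hive.txn.tables.valid.writeids";

      // DDL path (cf. AcidUtils.advanceWriteId): allocate, mark committed, cache in conf.
      static void advanceWriteId(Map<String, String> conf, String table, long newWriteId) {
        TableWriteIds ids = conf.containsKey(KEY)
            ? TableWriteIds.parse(conf.get(KEY))   // reuse the cached snapshot
            : new TableWriteIds(table, 0);         // else start from a fresh one
        ids.commitWriteId(newWriteId);             // DDL auto-commits; no rollback path
        conf.put(KEY, ids.toString());
      }

      // Query path (cf. Driver.recordValidWriteIds): reuse the cache when it covers the table.
      static TableWriteIds recordValidWriteIds(Map<String, String> conf, String table) {
        if (conf.containsKey(KEY)) {
          TableWriteIds ids = TableWriteIds.parse(conf.get(KEY));
          if (ids.table.equals(table)) {
            return ids;                            // skip the metastore round trip
          }
        }
        return new TableWriteIds(table, 0);        // in Hive: txnMgr.getValidWriteIds(...)
      }

      public static void main(String[] args) {
        Map<String, String> conf = new HashMap<>();
        advanceWriteId(conf, "default.t", 1);      // CREATE TABLE takes write id 1
        advanceWriteId(conf, "default.t", 2);      // ALTER TABLE takes write id 2
        System.out.println(recordValidWriteIds(conf, "default.t")); // default.t:2
      }
    }

This also explains why releaseLocksAndCommitOrRollback (earlier in this patch) now unsets the cached keys in a finally block: a stale cached list leaking into the next query would hide writes committed by other transactions.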
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java index d412dd72d1..82fb21f2ce 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java @@ -680,7 +680,6 @@ private void stopHeartbeat() throws LockException { @Override public ValidTxnList getValidTxns() throws LockException { - assert isTxnOpen(); init(); try { return getMS().getValidTxns(txnId); @@ -692,7 +691,6 @@ public ValidTxnList getValidTxns() throws LockException { @Override public ValidTxnWriteIdList getValidWriteIds(List<String> tableList, String validTxnList) throws LockException { - assert isTxnOpen(); assert validTxnList != null && !validTxnList.isEmpty(); try { return TxnCommonUtils.createValidTxnWriteIdList( diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java index 17a2d20a00..77efa676e2 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java @@ -53,10 +53,13 @@ private HiveLockManagerCtx lockManagerCtx; + private long txnId = 0; + private int numTxn = 0; + @Override public long openTxn(Context ctx, String user) throws LockException { - // No-op - return 0L; + numTxn++; // keep isTxnOpen() consistent with commitTxn()/rollbackTxn() below + return ++txnId; // pre-increment so getCurrentTxnId() matches the id just handed out } @Override public List<Long> replOpenTxn(String replPolicy, List<Long> srcTxnIds, String user) throws LockException { @@ -65,11 +67,11 @@ public long openTxn(Context ctx, String user) throws LockException { @Override public boolean isTxnOpen() { - return false; + return numTxn != 0; } @Override public long getCurrentTxnId() { - return 0L; + return txnId; } @Override public int getStmtIdAndIncrement() { @@ -228,7 +230,7 @@ public void releaseLocks(List<HiveLock> hiveLocks) throws LockException { @Override public void commitTxn() throws LockException { - // No-op + numTxn--; } @Override @@ -238,7 +240,7 @@ public void replCommitTxn(CommitTxnRequest rqst) throws LockException { @Override public void rollbackTxn() throws LockException { - // No-op + numTxn--; } @Override diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index 53208cce0d..ebe1de4fb0 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -113,6 +113,7 @@ import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils; import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; +import org.apache.hadoop.hive.ql.Context; import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.ddl.table.partition.AlterTableAddPartitionDesc; import org.apache.hadoop.hive.ql.ddl.table.partition.AlterTableDropPartitionDesc; @@ -121,6 +122,7 @@ import org.apache.hadoop.hive.ql.exec.FunctionUtils; import org.apache.hadoop.hive.ql.exec.SerializationUtilities; import org.apache.hadoop.hive.ql.exec.Utilities; +import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hadoop.hive.ql.io.AcidUtils.TableSnapshot; import org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; @@ -329,7 +331,12 @@ private static Hive getInternal(HiveConf c, boolean needsRefresh, boolean isFast } db = create(c, doRegisterAllFns); } - if (c != null) { + if (c != null && db.conf != null && db.conf != c) { +
if (db.conf.get(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY) != null) { + c.set(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY, db.conf.get(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY)); + } else { + c.unset(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY); + } db.conf = c; } return db; @@ -703,6 +710,7 @@ public void alterTable(String catName, String dbName, String tblName, Table newT EnvironmentContext environmentContext, boolean transactional, long replWriteId) throws HiveException { + boolean txnOpened = false; if (catName == null) { catName = getDefaultCatalog(conf); } @@ -722,7 +730,13 @@ public void alterTable(String catName, String dbName, String tblName, Table newT // Take a table snapshot and set it to newTbl. AcidUtils.TableSnapshot tableSnapshot = null; if (transactional) { - if (replWriteId > 0) { + if (AcidUtils.isTransactionalTable(newTbl) && !inReplication(newTbl)) { + txnOpened = openTxnIfNeeded(); + // Advance writeId for ddl on transactional table + AcidUtils.advanceWriteId(conf, newTbl); + } + + if (inReplication(newTbl) && replWriteId > 0) { // We need a valid writeId list for a transactional table modification. During // replication we do not have a valid writeId list which was used to modify the table // on the source. But we know for sure that the writeId associated with it was valid @@ -751,6 +765,12 @@ public void alterTable(String catName, String dbName, String tblName, Table newT throw new HiveException("Unable to alter table. " + e.getMessage(), e); } catch (TException e) { throw new HiveException("Unable to alter table. " + e.getMessage(), e); + } finally { + if (txnOpened) { + if (SessionState.get().getTxnMgr().isTxnOpen()) { + SessionState.get().getTxnMgr().commitTxn(); + } + } } } @@ -802,6 +822,7 @@ public void alterPartition(String tblName, Partition newPart, public void alterPartition(String catName, String dbName, String tblName, Partition newPart, EnvironmentContext environmentContext, boolean transactional) throws InvalidOperationException, HiveException { + boolean txnOpened = false; try { if (catName == null) { catName = getDefaultCatalog(conf); @@ -815,8 +836,14 @@ public void alterPartition(String catName, String dbName, String tblName, Partit if (environmentContext == null) { environmentContext = new EnvironmentContext(); } + AcidUtils.TableSnapshot tableSnapshot = null; if (transactional) { + if (AcidUtils.isTransactionalTable(newPart.getTable()) && !inReplication(newPart.getTable())) { + txnOpened = openTxnIfNeeded(); + // Advance writeId for ddl on transactional table + AcidUtils.advanceWriteId(conf, newPart.getTable()); + } tableSnapshot = AcidUtils.getTableSnapshot(conf, newPart.getTable(), true); if (tableSnapshot != null) { newPart.getTPartition().setWriteId(tableSnapshot.getWriteId()); @@ -832,6 +859,12 @@ public void alterPartition(String catName, String dbName, String tblName, Partit throw new HiveException("Unable to alter partition. " + e.getMessage(), e); } catch (TException e) { throw new HiveException("Unable to alter partition. 
" + e.getMessage(), e); + } finally { + if (txnOpened) { + if (SessionState.get().getTxnMgr().isTxnOpen()) { + SessionState.get().getTxnMgr().commitTxn(); + } + } } } @@ -859,12 +892,18 @@ private void validatePartition(Partition newPart) throws HiveException { public void alterPartitions(String tblName, List newParts, EnvironmentContext environmentContext, boolean transactional) throws InvalidOperationException, HiveException { + boolean txnOpened = false; String[] names = Utilities.getDbTableName(tblName); List newTParts = new ArrayList(); try { AcidUtils.TableSnapshot tableSnapshot = null; if (transactional) { + if (AcidUtils.isTransactionalTable(newParts.get(0).getTable()) && !inReplication(newParts.get(0).getTable())) { + // Advance writeId for ddl on transactional table + txnOpened = openTxnIfNeeded(); + AcidUtils.advanceWriteId(conf, newParts.get(0).getTable()); + } tableSnapshot = AcidUtils.getTableSnapshot(conf, newParts.get(0).getTable(), true); } // Remove the DDL time so that it gets refreshed @@ -886,6 +925,12 @@ public void alterPartitions(String tblName, List newParts, throw new HiveException("Unable to alter partition. " + e.getMessage(), e); } catch (TException e) { throw new HiveException("Unable to alter partition. " + e.getMessage(), e); + } finally { + if (txnOpened) { + if (SessionState.get().getTxnMgr().isTxnOpen()) { + SessionState.get().getTxnMgr().commitTxn(); + } + } } } /** @@ -902,6 +947,7 @@ public void alterPartitions(String tblName, List newParts, public void renamePartition(Table tbl, Map oldPartSpec, Partition newPart, long replWriteId) throws HiveException { + boolean txnOpened = false; try { Map newPartSpec = newPart.getSpec(); if (oldPartSpec.keySet().size() != tbl.getPartCols().size() @@ -924,8 +970,13 @@ public void renamePartition(Table tbl, Map oldPartSpec, Partitio } String validWriteIds = null; if (AcidUtils.isTransactionalTable(tbl)) { + if (!inReplication(tbl)) { + // Advance writeId for ddl on transactional table + txnOpened = openTxnIfNeeded(); + AcidUtils.advanceWriteId(conf, tbl); + } TableSnapshot tableSnapshot; - if (replWriteId > 0) { + if (inReplication(tbl)) { // We need a valid writeId list for a transactional table modification. During // replication we do not have a valid writeId list which was used to modify the table // on the source. But we know for sure that the writeId associated with it was valid @@ -954,6 +1005,12 @@ public void renamePartition(Table tbl, Map oldPartSpec, Partitio throw new HiveException("Unable to rename partition. " + e.getMessage(), e); } catch (TException e) { throw new HiveException("Unable to rename partition. " + e.getMessage(), e); + } finally { + if (txnOpened) { + if (SessionState.get().getTxnMgr().isTxnOpen()) { + SessionState.get().getTxnMgr().commitTxn(); + } + } } } @@ -1013,6 +1070,7 @@ public void createTable(Table tbl, boolean ifNotExists, List defaultConstraints, List checkConstraints) throws HiveException { + boolean txnOpened = false; try { if (tbl.getDbName() == null || "".equals(tbl.getDbName().trim())) { tbl.setDbName(SessionState.get().getCurrentDatabase()); @@ -1037,6 +1095,11 @@ public void createTable(Table tbl, boolean ifNotExists, tTbl.setPrivileges(principalPrivs); } } + if (AcidUtils.isTransactionalTable(tbl) && !inReplication(tbl)) { + txnOpened = openTxnIfNeeded(); + // Advance writeId for ddl on transactional table + AcidUtils.advanceWriteId(conf, tbl); + } // Set table snapshot to api.Table to make it persistent. 
A transactional table being // replicated may have a valid write Id copied from the source. Use that instead of // crafting one on the replica. @@ -1062,6 +1125,12 @@ public void createTable(Table tbl, boolean ifNotExists, } } catch (Exception e) { throw new HiveException(e); + } finally { + if (txnOpened) { + if (SessionState.get().getTxnMgr().isTxnOpen()) { + SessionState.get().getTxnMgr().commitTxn(); + } + } } } @@ -1156,7 +1225,18 @@ public void dropTable(String dbName, String tableName, boolean deleteData, */ public void dropTable(String dbName, String tableName, boolean deleteData, boolean ignoreUnknownTab, boolean ifPurge) throws HiveException { + boolean txnOpened = false; try { + Table tbl = null; + try { + tbl = getTable(dbName, tableName); + } catch (InvalidTableException e) { + } + if (tbl != null && AcidUtils.isTransactionalTable(tbl) && !inReplication(tbl)) { + txnOpened = openTxnIfNeeded(); + // Advance writeId for ddl on transactional table + AcidUtils.advanceWriteId(conf, tbl); + } getMSC().dropTable(dbName, tableName, deleteData, ignoreUnknownTab, ifPurge); } catch (NoSuchObjectException e) { if (!ignoreUnknownTab) { @@ -1171,6 +1251,12 @@ public void dropTable(String dbName, String tableName, boolean deleteData, throw new HiveException(e); } catch (Exception e) { throw new HiveException(e); + } finally { + if (txnOpened) { + if (SessionState.get().getTxnMgr().isTxnOpen()) { + SessionState.get().getTxnMgr().commitTxn(); + } + } } } @@ -1184,10 +1270,19 @@ public void dropTable(String dbName, String tableName, boolean deleteData, * @throws HiveException */ public void truncateTable(String dbDotTableName, Map partSpec, Long writeId) throws HiveException { + boolean txnOpened = false; try { Table table = getTable(dbDotTableName, true); + AcidUtils.TableSnapshot snapshot = null; if (AcidUtils.isTransactionalTable(table)) { + + if (!inReplication(table)) { + txnOpened = openTxnIfNeeded(); + // Advance writeId for ddl on transactional table + AcidUtils.advanceWriteId(conf, table); + } + if (writeId <= 0) { snapshot = AcidUtils.getTableSnapshot(conf, table, true); } else { @@ -1208,6 +1303,12 @@ public void truncateTable(String dbDotTableName, Map partSpec, L } } catch (Exception e) { throw new HiveException(e); + } finally { + if (txnOpened) { + if (SessionState.get().getTxnMgr().isTxnOpen()) { + SessionState.get().getTxnMgr().commitTxn(); + } + } } } @@ -1274,7 +1375,7 @@ public Table getTable(final String dbName, final String tableName) throws HiveEx */ public Table getTable(final String dbName, final String tableName, boolean throwException) throws HiveException { - return this.getTable(dbName, tableName, throwException, false); + return this.getTable(dbName, tableName, throwException, true); } /** @@ -1324,20 +1425,7 @@ public Table getTable(final String dbName, final String tableName, boolean throw // Get the table from metastore org.apache.hadoop.hive.metastore.api.Table tTable = null; try { - // Note: this is currently called w/true from StatsOptimizer only. - if (checkTransactional) { - ValidWriteIdList validWriteIdList = null; - long txnId = SessionState.get().getTxnMgr() != null ? - SessionState.get().getTxnMgr().getCurrentTxnId() : 0; - if (txnId > 0) { - validWriteIdList = AcidUtils.getTableValidWriteIdListWithTxnList(conf, - dbName, tableName); - } - tTable = getMSC().getTable(getDefaultCatalog(conf), dbName, tableName, - validWriteIdList != null ? 
validWriteIdList.toString() : null, getColumnStats); - } else { - tTable = getMSC().getTable(dbName, tableName, getColumnStats); - } + tTable = getMSC().getTable(getDefaultCatalog(conf), dbName, tableName, checkTransactional, getColumnStats); } catch (NoSuchObjectException e) { if (throwException) { throw new InvalidTableException(tableName); @@ -1997,48 +2085,61 @@ public Partition loadPartition(Path loadPath, Table tbl, Map par boolean isSrcLocal, boolean isAcidIUDoperation, boolean resetStatistics, Long writeId, int stmtId, boolean isInsertOverwrite) throws HiveException { + boolean txnOpened = false; + try { + PerfLogger perfLogger = SessionState.getPerfLogger(); + perfLogger.PerfLogBegin("MoveTask", PerfLogger.LOAD_PARTITION); - PerfLogger perfLogger = SessionState.getPerfLogger(); - perfLogger.PerfLogBegin("MoveTask", PerfLogger.LOAD_PARTITION); - - // Get the partition object if it already exists - Partition oldPart = getPartition(tbl, partSpec, false); - boolean isTxnTable = AcidUtils.isTransactionalTable(tbl); - - // If config is set, table is not temporary and partition being inserted exists, capture - // the list of files added. For not yet existing partitions (insert overwrite to new partition - // or dynamic partition inserts), the add partition event will capture the list of files added. - List newFiles = Collections.synchronizedList(new ArrayList<>()); + // Get the partition object if it already exists + Partition oldPart = getPartition(tbl, partSpec, false); + boolean isTxnTable = AcidUtils.isTransactionalTable(tbl); - Partition newTPart = loadPartitionInternal(loadPath, tbl, partSpec, oldPart, - loadFileType, inheritTableSpecs, - inheritLocation, isSkewedStoreAsSubdir, isSrcLocal, isAcidIUDoperation, - resetStatistics, writeId, stmtId, isInsertOverwrite, isTxnTable, newFiles); + // If config is set, table is not temporary and partition being inserted exists, capture + // the list of files added. For not yet existing partitions (insert overwrite to new partition + // or dynamic partition inserts), the add partition event will capture the list of files added. + List newFiles = Collections.synchronizedList(new ArrayList<>()); - AcidUtils.TableSnapshot tableSnapshot = isTxnTable ? getTableSnapshot(tbl, writeId) : null; - if (tableSnapshot != null) { - newTPart.getTPartition().setWriteId(tableSnapshot.getWriteId()); - } + Partition newTPart = loadPartitionInternal(loadPath, tbl, partSpec, oldPart, + loadFileType, inheritTableSpecs, + inheritLocation, isSkewedStoreAsSubdir, isSrcLocal, isAcidIUDoperation, + resetStatistics, writeId, stmtId, isInsertOverwrite, isTxnTable, newFiles); - if (oldPart == null) { - addPartitionToMetastore(newTPart, resetStatistics, tbl, tableSnapshot); - // For acid table, add the acid_write event with file list at the time of load itself. But - // it should be done after partition is created. - if (isTxnTable && (null != newFiles)) { - addWriteNotificationLog(tbl, partSpec, newFiles, writeId); + if (AcidUtils.isTransactionalTable(tbl) && !inReplication(tbl)) { + txnOpened = openTxnIfNeeded(); + // Advance writeId for ddl on transactional table + AcidUtils.advanceWriteId(conf, tbl); } - } else { - try { - setStatsPropAndAlterPartition(resetStatistics, tbl, newTPart, tableSnapshot); - } catch (TException e) { - LOG.error(StringUtils.stringifyException(e)); - throw new HiveException(e); + AcidUtils.TableSnapshot tableSnapshot = isTxnTable ? 
getTableSnapshot(tbl, writeId) : null; + if (tableSnapshot != null) { + newTPart.getTPartition().setWriteId(tableSnapshot.getWriteId()); } - } - perfLogger.PerfLogEnd("MoveTask", PerfLogger.LOAD_PARTITION); + if (oldPart == null) { + addPartitionToMetastore(newTPart, resetStatistics, tbl, tableSnapshot); + // For acid table, add the acid_write event with file list at the time of load itself. But + // it should be done after partition is created. + if (isTxnTable && (null != newFiles)) { + addWriteNotificationLog(tbl, partSpec, newFiles, writeId); + } + } else { + try { + setStatsPropAndAlterPartition(resetStatistics, tbl, newTPart, tableSnapshot); + } catch (TException e) { + LOG.error(StringUtils.stringifyException(e)); + throw new HiveException(e); + } + } + + perfLogger.PerfLogEnd("MoveTask", PerfLogger.LOAD_PARTITION); - return newTPart; + return newTPart; + } finally { + if (txnOpened) { + if (SessionState.get().getTxnMgr().isTxnOpen()) { + SessionState.get().getTxnMgr().commitTxn(); + } + } + } } /** @@ -2646,206 +2747,219 @@ private void constructOneLBLocationMap(FileStatus fSta, final int numDP, final int numLB, final boolean isAcid, final long writeId, final int stmtId, final boolean resetStatistics, final AcidUtils.Operation operation, boolean isInsertOverwrite) throws HiveException { + boolean txnOpened = false; + try { + PerfLogger perfLogger = SessionState.getPerfLogger(); + perfLogger.PerfLogBegin("MoveTask", PerfLogger.LOAD_DYNAMIC_PARTITIONS); - PerfLogger perfLogger = SessionState.getPerfLogger(); - perfLogger.PerfLogBegin("MoveTask", PerfLogger.LOAD_DYNAMIC_PARTITIONS); - - // Get all valid partition paths and existing partitions for them (if any) - final Table tbl = getTable(tableName); - final Set validPartitions = getValidPartitionsInPath(numDP, numLB, loadPath, writeId, stmtId, - AcidUtils.isInsertOnlyTable(tbl.getParameters()), isInsertOverwrite); - - final int partsToLoad = validPartitions.size(); - final AtomicInteger partitionsLoaded = new AtomicInteger(0); - final boolean inPlaceEligible = conf.getLong("fs.trash.interval", 0) <= 0 - && InPlaceUpdate.canRenderInPlace(conf) && !SessionState.getConsole().getIsSilent(); - final PrintStream ps = (inPlaceEligible) ? SessionState.getConsole().getInfoStream() : null; - - final SessionState parentSession = SessionState.get(); - List> tasks = Lists.newLinkedList(); + // Get all valid partition paths and existing partitions for them (if any) + final Table tbl = getTable(tableName); + final Set validPartitions = getValidPartitionsInPath(numDP, numLB, loadPath, writeId, stmtId, + AcidUtils.isInsertOnlyTable(tbl.getParameters()), isInsertOverwrite); - final class PartitionDetails { - Map fullSpec; - Partition partition; - List newFiles; - boolean hasOldPartition = false; - AcidUtils.TableSnapshot tableSnapshot; - } + final int partsToLoad = validPartitions.size(); + final AtomicInteger partitionsLoaded = new AtomicInteger(0); + final boolean inPlaceEligible = conf.getLong("fs.trash.interval", 0) <= 0 + && InPlaceUpdate.canRenderInPlace(conf) && !SessionState.getConsole().getIsSilent(); + final PrintStream ps = (inPlaceEligible) ? 
SessionState.getConsole().getInfoStream() : null; - Map partitionDetailsMap = - Collections.synchronizedMap(new LinkedHashMap<>()); + final SessionState parentSession = SessionState.get(); + List> tasks = Lists.newLinkedList(); - // calculate full path spec for each valid partition path - validPartitions.forEach(partPath -> { - Map fullPartSpec = Maps.newLinkedHashMap(partSpec); - if (!Warehouse.makeSpecFromName(fullPartSpec, partPath, new HashSet<>(partSpec.keySet()))) { - Utilities.FILE_OP_LOGGER.warn("Ignoring invalid DP directory " + partPath); - } else { - PartitionDetails details = new PartitionDetails(); - details.fullSpec = fullPartSpec; - partitionDetailsMap.put(partPath, details); + final class PartitionDetails { + Map fullSpec; + Partition partition; + List newFiles; + boolean hasOldPartition = false; + AcidUtils.TableSnapshot tableSnapshot; } - }); - // fetch all the partitions matching the part spec using the partition iterable - // this way the maximum batch size configuration parameter is considered - PartitionIterable partitionIterable = new PartitionIterable(Hive.get(), tbl, partSpec, - conf.getInt(MetastoreConf.ConfVars.BATCH_RETRIEVE_MAX.getVarname(), 300)); - Iterator iterator = partitionIterable.iterator(); + Map partitionDetailsMap = + Collections.synchronizedMap(new LinkedHashMap<>()); - // Match valid partition path to partitions - while (iterator.hasNext()) { - Partition partition = iterator.next(); - partitionDetailsMap.entrySet().stream() - .filter(entry -> entry.getValue().fullSpec.equals(partition.getSpec())) - .findAny().ifPresent(entry -> { - entry.getValue().partition = partition; - entry.getValue().hasOldPartition = true; - }); - } + // calculate full path spec for each valid partition path + validPartitions.forEach(partPath -> { + Map fullPartSpec = Maps.newLinkedHashMap(partSpec); + if (!Warehouse.makeSpecFromName(fullPartSpec, partPath, new HashSet<>(partSpec.keySet()))) { + Utilities.FILE_OP_LOGGER.warn("Ignoring invalid DP directory " + partPath); + } else { + PartitionDetails details = new PartitionDetails(); + details.fullSpec = fullPartSpec; + partitionDetailsMap.put(partPath, details); + } + }); - boolean isTxnTable = AcidUtils.isTransactionalTable(tbl); - AcidUtils.TableSnapshot tableSnapshot = isTxnTable ? 
getTableSnapshot(tbl, writeId) : null; + // fetch all the partitions matching the part spec using the partition iterable + // this way the maximum batch size configuration parameter is considered + PartitionIterable partitionIterable = new PartitionIterable(Hive.get(), tbl, partSpec, + conf.getInt(MetastoreConf.ConfVars.BATCH_RETRIEVE_MAX.getVarname(), 300)); + Iterator iterator = partitionIterable.iterator(); - for (Entry entry : partitionDetailsMap.entrySet()) { - tasks.add(() -> { - PartitionDetails partitionDetails = entry.getValue(); - Map fullPartSpec = partitionDetails.fullSpec; - try { + // Match valid partition path to partitions + while (iterator.hasNext()) { + Partition partition = iterator.next(); + partitionDetailsMap.entrySet().stream() + .filter(entry -> entry.getValue().fullSpec.equals(partition.getSpec())) + .findAny().ifPresent(entry -> { + entry.getValue().partition = partition; + entry.getValue().hasOldPartition = true; + }); + } - SessionState.setCurrentSessionState(parentSession); - LOG.info("New loading path = " + entry.getKey() + " withPartSpec " + fullPartSpec); + boolean isTxnTable = AcidUtils.isTransactionalTable(tbl); + if (isTxnTable && !inReplication(tbl)) { + txnOpened = openTxnIfNeeded(); + // Advance writeId for ddl on transactional table + AcidUtils.advanceWriteId(conf, tbl); + } + AcidUtils.TableSnapshot tableSnapshot = isTxnTable ? getTableSnapshot(tbl, writeId) : null; - List newFiles = Lists.newArrayList(); - Partition oldPartition = partitionDetails.partition; - // load the partition - Partition partition = loadPartitionInternal(entry.getKey(), tbl, - fullPartSpec, oldPartition, loadFileType, true, false, numLB > 0, false, isAcid, - resetStatistics, writeId, stmtId, isInsertOverwrite, isTxnTable, newFiles); - // if the partition already existed before the loading, no need to add it again to the - // metastore + for (Entry entry : partitionDetailsMap.entrySet()) { + tasks.add(() -> { + PartitionDetails partitionDetails = entry.getValue(); + Map fullPartSpec = partitionDetails.fullSpec; + try { - if (tableSnapshot != null) { - partition.getTPartition().setWriteId(tableSnapshot.getWriteId()); - } - partitionDetails.tableSnapshot = tableSnapshot; - if (oldPartition == null) { - partitionDetails.newFiles = newFiles; - partitionDetails.partition = partition; - } + SessionState.setCurrentSessionState(parentSession); + LOG.info("New loading path = " + entry.getKey() + " withPartSpec " + fullPartSpec); + + List newFiles = Lists.newArrayList(); + Partition oldPartition = partitionDetails.partition; + // load the partition + Partition partition = loadPartitionInternal(entry.getKey(), tbl, + fullPartSpec, oldPartition, loadFileType, true, false, numLB > 0, false, isAcid, + resetStatistics, writeId, stmtId, isInsertOverwrite, isTxnTable, newFiles); + // if the partition already existed before the loading, no need to add it again to the + // metastore + + if (tableSnapshot != null) { + partition.getTPartition().setWriteId(tableSnapshot.getWriteId()); + } + partitionDetails.tableSnapshot = tableSnapshot; + if (oldPartition == null) { + partitionDetails.newFiles = newFiles; + partitionDetails.partition = partition; + } - if (inPlaceEligible) { - synchronized (ps) { - InPlaceUpdate.rePositionCursor(ps); - partitionsLoaded.incrementAndGet(); - InPlaceUpdate.reprintLine(ps, "Loaded : " + partitionsLoaded.get() + "/" - + partsToLoad + " partitions."); + if (inPlaceEligible) { + synchronized (ps) { + InPlaceUpdate.rePositionCursor(ps); + 
partitionsLoaded.incrementAndGet(); + InPlaceUpdate.reprintLine(ps, "Loaded : " + partitionsLoaded.get() + "/" + + partsToLoad + " partitions."); + } } + + return partition; + } catch (Exception e) { + LOG.error("Exception when loading partition with parameters " + + " partPath=" + entry.getKey() + ", " + + " table=" + tbl.getTableName() + ", " + + " partSpec=" + fullPartSpec + ", " + + " loadFileType=" + loadFileType.toString() + ", " + + " listBucketingLevel=" + numLB + ", " + + " isAcid=" + isAcid + ", " + + " resetStatistics=" + resetStatistics, e); + throw e; } + }); + } - return partition; - } catch (Exception e) { - LOG.error("Exception when loading partition with parameters " - + " partPath=" + entry.getKey() + ", " - + " table=" + tbl.getTableName() + ", " - + " partSpec=" + fullPartSpec + ", " - + " loadFileType=" + loadFileType.toString() + ", " - + " listBucketingLevel=" + numLB + ", " - + " isAcid=" + isAcid + ", " - + " resetStatistics=" + resetStatistics, e); - throw e; + int poolSize = conf.getInt(ConfVars.HIVE_LOAD_DYNAMIC_PARTITIONS_THREAD_COUNT.varname, 1); + ExecutorService executor = Executors.newFixedThreadPool(poolSize, + new ThreadFactoryBuilder().setDaemon(true).setNameFormat("load-dynamic-partitions-%d").build()); + + List<Future<Partition>> futures = Lists.newLinkedList(); + Map<Map<String, String>, Partition> result = Maps.newLinkedHashMap(); + try { + futures = executor.invokeAll(tasks); + LOG.debug("Number of partitions to be added is " + futures.size()); + for (Future<Partition> future : futures) { + Partition partition = future.get(); + result.put(partition.getSpec(), partition); + } + // add new partitions in batch + + addPartitionsToMetastore( + partitionDetailsMap.entrySet() + .stream() + .filter(entry -> !entry.getValue().hasOldPartition) + .map(entry -> entry.getValue().partition) + .collect(Collectors.toList()), + resetStatistics, + tbl, + partitionDetailsMap.entrySet() + .stream() + .filter(entry -> !entry.getValue().hasOldPartition) + .map(entry -> entry.getValue().tableSnapshot) + .collect(Collectors.toList())); + // For acid table, add the acid_write event with file list at the time of load itself. But + // it should be done after partition is created.
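A note on the hunk above before the diff continues: the rewritten loader builds one Callable per partition directory, runs them all through a single bounded daemon pool (sized by HIVE_LOAD_DYNAMIC_PARTITIONS_THREAD_COUNT), and lets Future.get() rethrow any per-partition failure. A minimal, self-contained sketch of that same pattern, where the class name and the string results are illustrative stand-ins, not Hive code:

```java
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import com.google.common.util.concurrent.ThreadFactoryBuilder;

// One Callable per partition, a bounded daemon pool, invokeAll to run them,
// and shutdownNow in finally, mirroring the structure of the patch above.
public class ParallelLoadSketch {
  public static void main(String[] args) throws Exception {
    List<Callable<String>> tasks = List.of(
        () -> "p=0 loaded",   // stand-ins for the loadPartitionInternal(...) calls
        () -> "p=1 loaded");
    ExecutorService executor = Executors.newFixedThreadPool(2,
        new ThreadFactoryBuilder().setDaemon(true)
            .setNameFormat("load-dynamic-partitions-%d").build());
    try {
      // invokeAll blocks until every task completes
      for (Future<String> f : executor.invokeAll(tasks)) {
        System.out.println(f.get()); // rethrows any task failure as ExecutionException
      }
    } finally {
      executor.shutdownNow(); // mirrors the finally block in the patch
    }
  }
}
```

Because invokeAll(), unlike a submit() loop, returns only after every task has finished, the patch can safely batch the metastore add-partitions call immediately afterwards.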
+ + for (Entry<String, PartitionDetails> entry : partitionDetailsMap.entrySet()) { + PartitionDetails partitionDetails = entry.getValue(); + if (isTxnTable && partitionDetails.newFiles != null) { + addWriteNotificationLog(tbl, partitionDetails.fullSpec, partitionDetails.newFiles, writeId); + } } - }); - } - int poolSize = conf.getInt(ConfVars.HIVE_LOAD_DYNAMIC_PARTITIONS_THREAD_COUNT.varname, 1); - ExecutorService executor = Executors.newFixedThreadPool(poolSize, - new ThreadFactoryBuilder().setDaemon(true).setNameFormat("load-dynamic-partitions-%d").build()); - - List<Future<Partition>> futures = Lists.newLinkedList(); - Map<Map<String, String>, Partition> result = Maps.newLinkedHashMap(); - try { - futures = executor.invokeAll(tasks); - LOG.debug("Number of partitions to be added is " + futures.size()); - for (Future<Partition> future : futures) { - Partition partition = future.get(); - result.put(partition.getSpec(), partition); - } - // add new partitions in batch - - addPartitionsToMetastore( - partitionDetailsMap.entrySet() - .stream() - .filter(entry -> !entry.getValue().hasOldPartition) - .map(entry -> entry.getValue().partition) - .collect(Collectors.toList()), - resetStatistics, - tbl, - partitionDetailsMap.entrySet() - .stream() - .filter(entry -> !entry.getValue().hasOldPartition) - .map(entry -> entry.getValue().tableSnapshot) - .collect(Collectors.toList())); - // For acid table, add the acid_write event with file list at the time of load itself. But - // it should be done after partition is created. + setStatsPropAndAlterPartitions(resetStatistics, tbl, + partitionDetailsMap.entrySet().stream() + .filter(entry -> entry.getValue().hasOldPartition) + .map(entry -> entry.getValue().partition) + .collect(Collectors.toList()), tableSnapshot); - for (Entry<String, PartitionDetails> entry : partitionDetailsMap.entrySet()) { - PartitionDetails partitionDetails = entry.getValue(); - if (isTxnTable && partitionDetails.newFiles != null) { - addWriteNotificationLog(tbl, partitionDetails.fullSpec, partitionDetails.newFiles, writeId); - } + } catch (InterruptedException | ExecutionException e) { + throw new HiveException("Exception when loading " + validPartitions.size() + + " partitions in table " + tbl.getTableName() + + " with loadPath=" + loadPath, e); + } catch (TException e) { + LOG.error(StringUtils.stringifyException(e)); + throw new HiveException(e); + } catch (Exception e) { + + StringBuffer logMsg = new StringBuffer(); + logMsg.append("Exception when loading partitions with parameters "); + logMsg.append("partPaths="); + validPartitions.forEach(path -> logMsg.append(path + ", ")); + logMsg.append("table=" + tbl.getTableName() + ", "). + append("partSpec=" + partSpec + ", "). + append("loadFileType=" + loadFileType.toString() + ", "). + append("listBucketingLevel=" + numLB + ", "). + append("isAcid=" + isAcid + ", ").
+ append("resetStatistics=" + resetStatistics); + + LOG.error(logMsg.toString(), e); + throw e; + } finally { + LOG.debug("Cancelling " + futures.size() + " dynamic loading tasks"); + executor.shutdownNow(); } - setStatsPropAndAlterPartitions(resetStatistics, tbl, - partitionDetailsMap.entrySet().stream() - .filter(entry -> entry.getValue().hasOldPartition) - .map(entry -> entry.getValue().partition) - .collect(Collectors.toList()), tableSnapshot); + try { + if (isAcid) { + List<String> partNames = + result.values().stream().map(Partition::getName).collect(Collectors.toList()); + getMSC().addDynamicPartitions(parentSession.getTxnMgr().getCurrentTxnId(), writeId, + tbl.getDbName(), tbl.getTableName(), partNames, + AcidUtils.toDataOperationType(operation)); + } + LOG.info("Loaded " + result.size() + " partitions"); - } catch (InterruptedException | ExecutionException e) { - throw new HiveException("Exception when loading " + validPartitions.size() - + " in table " + tbl.getTableName() - + " with loadPath=" + loadPath); - } catch (TException e) { - LOG.error(StringUtils.stringifyException(e)); - throw new HiveException(e); - } catch (Exception e) { + perfLogger.PerfLogEnd("MoveTask", PerfLogger.LOAD_DYNAMIC_PARTITIONS); - StringBuffer logMsg = new StringBuffer(); - logMsg.append("Exception when loading partitions with parameters "); - logMsg.append("partPaths="); - validPartitions.forEach(path -> logMsg.append(path + ", ")); - logMsg.append("table=" + tbl.getTableName() + ", "). - append("partSpec=" + partSpec + ", "). - append("loadFileType=" + loadFileType.toString() + ", "). - append("listBucketingLevel=" + numLB + ", "). - append("isAcid=" + isAcid + ", "). - append("resetStatistics=" + resetStatistics); - - LOG.error(logMsg.toString(), e); - throw e; + return result; + } catch (TException te) { + LOG.error(StringUtils.stringifyException(te)); + throw new HiveException("Exception updating metastore for acid table " + + tableName + " with partitions " + result.values(), te); + } } finally { - LOG.debug("Cancelling " + futures.size() + " dynamic loading tasks"); - executor.shutdownNow(); - } - - try { - if (isAcid) { - List<String> partNames = - result.values().stream().map(Partition::getName).collect(Collectors.toList()); - getMSC().addDynamicPartitions(parentSession.getTxnMgr().getCurrentTxnId(), writeId, - tbl.getDbName(), tbl.getTableName(), partNames, - AcidUtils.toDataOperationType(operation)); + if (txnOpened) { + if (SessionState.get().getTxnMgr().isTxnOpen()) { + SessionState.get().getTxnMgr().commitTxn(); + } } - LOG.info("Loaded " + result.size() + " partitions"); - - perfLogger.PerfLogEnd("MoveTask", PerfLogger.LOAD_DYNAMIC_PARTITIONS); - - return result; - } catch (TException te) { - LOG.error(StringUtils.stringifyException(te)); - throw new HiveException("Exception updating metastore for acid table " - + tableName + " with partitions " + result.values(), te); + } } } @@ -3000,109 +3114,144 @@ public void loadTable(Path loadPath, String tableName, LoadFileType loadFileType */ @VisibleForTesting public Partition createPartition(Table tbl, Map<String, String> partSpec) throws HiveException { + boolean txnOpened = false; try { org.apache.hadoop.hive.metastore.api.Partition part = Partition.createMetaPartitionObject(tbl, partSpec, null); + if (AcidUtils.isTransactionalTable(tbl) && !inReplication(tbl)) { + txnOpened = openTxnIfNeeded(); + // Advance writeId for ddl on transactional table + AcidUtils.advanceWriteId(conf, tbl); + } AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf,
tbl); part.setWriteId(tableSnapshot != null ? tableSnapshot.getWriteId() : 0); return new Partition(tbl, getMSC().add_partition(part)); } catch (Exception e) { LOG.error(StringUtils.stringifyException(e)); throw new HiveException(e); + } finally { + if (txnOpened) { + if (SessionState.get().getTxnMgr().isTxnOpen()) { + SessionState.get().getTxnMgr().commitTxn(); + } + } } } public List<Partition> createPartitions(AlterTableAddPartitionDesc addPartitionDesc) throws HiveException { - // TODO: catalog name everywhere in this method - Table tbl = getTable(addPartitionDesc.getDbName(), addPartitionDesc.getTableName()); - int size = addPartitionDesc.getPartitionCount(); - List<org.apache.hadoop.hive.metastore.api.Partition> in = - new ArrayList<>(size); - long writeId; - String validWriteIdList; - - // In case of replication, get the writeId from the source and use valid write Id list - // for replication. - if (addPartitionDesc.getReplicationSpec().isInReplicationScope() && - addPartitionDesc.getPartition(0).getWriteId() > 0) { - writeId = addPartitionDesc.getPartition(0).getWriteId(); - // We need a valid writeId list for a transactional change. During replication we do not - // have a valid writeId list which was used for this on the source. But we know for sure - // that the writeId associated with it was valid then (otherwise the change would have - // failed on the source). So use a valid transaction list with only that writeId. - validWriteIdList = new ValidReaderWriteIdList(TableName.getDbTable(tbl.getDbName(), - tbl.getTableName()), - new long[0], new BitSet(), writeId).writeToString(); - } else { - AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, tbl, true); - if (tableSnapshot != null && tableSnapshot.getWriteId() > 0) { - writeId = tableSnapshot.getWriteId(); - validWriteIdList = tableSnapshot.getValidWriteIdList(); + boolean txnOpened = false; + try { + // TODO: catalog name everywhere in this method + Table tbl = getTable(addPartitionDesc.getDbName(), addPartitionDesc.getTableName()); + int size = addPartitionDesc.getPartitionCount(); + List<org.apache.hadoop.hive.metastore.api.Partition> in = + new ArrayList<>(size); + long writeId; + String validWriteIdList; + + // In case of replication, get the writeId from the source and use valid write Id list + // for replication. + if (addPartitionDesc.getReplicationSpec().isInReplicationScope()) { + if (addPartitionDesc.getPartition(0).getWriteId() > 0) { + writeId = addPartitionDesc.getPartition(0).getWriteId(); + } else { + AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, tbl, true); + if (tableSnapshot != null && tableSnapshot.getWriteId() > 0) { + writeId = tableSnapshot.getWriteId(); + } else { + writeId = -1; + validWriteIdList = null; + } + } + // We need a valid writeId list for a transactional change. During replication we do not + // have a valid writeId list which was used for this on the source. But we know for sure + // that the writeId associated with it was valid then (otherwise the change would have - // failed on the source). So use a valid transaction list with only that writeId.
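The comment above describes the construction that the diff resumes with below: a ValidReaderWriteIdList whose only content is the writeId replicated from the source. As a hedged illustration of what that constructor call produces, assuming a hypothetical table default.t and a replicated writeId of 7 (the serialized format itself is internal to storage-api):

```java
import java.util.BitSet;
import org.apache.hadoop.hive.common.ValidReaderWriteIdList;

// Minimal illustration: a write-id list whose high-water mark is the replicated
// writeId, with no open or aborted writeIds below it. The serialized form is
// what the patch passes to the metastore as validWriteIdList.
public class WriteIdListSketch {
  public static void main(String[] args) {
    long writeId = 7L; // hypothetical writeId received from the source cluster
    String serialized = new ValidReaderWriteIdList(
        "default.t",   // db.table, as TableName.getDbTable() would produce
        new long[0],   // no invalid (open/aborted) writeIds
        new BitSet(),  // hence no aborted bits either
        writeId)       // high-water mark = the replicated writeId
        .writeToString();
    System.out.println(serialized); // serialized list; exact layout is storage-api internal
  }
}
```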
+ validWriteIdList = new ValidReaderWriteIdList(TableName.getDbTable(tbl.getDbName(), + tbl.getTableName()), + new long[0], new BitSet(), writeId).writeToString(); } else { - writeId = -1; - validWriteIdList = null; - } - } - for (int i = 0; i < size; ++i) { - org.apache.hadoop.hive.metastore.api.Partition tmpPart = - convertAddSpecToMetaPartition(tbl, addPartitionDesc.getPartition(i), conf); - if (tmpPart != null && writeId > 0) { - tmpPart.setWriteId(writeId); + if (AcidUtils.isTransactionalTable(tbl)) { + txnOpened = openTxnIfNeeded(); + // Advance writeId for ddl on transactional table + AcidUtils.advanceWriteId(conf, tbl); + } + AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, tbl, true); + if (tableSnapshot != null && tableSnapshot.getWriteId() > 0) { + writeId = tableSnapshot.getWriteId(); + validWriteIdList = tableSnapshot.getValidWriteIdList(); + } else { + writeId = -1; + validWriteIdList = null; + } } - in.add(tmpPart); - } - List<Partition> out = new ArrayList<>(); - try { - if (!addPartitionDesc.getReplicationSpec().isInReplicationScope()){ - // TODO: normally, the result is not necessary; might make sense to pass false - for (org.apache.hadoop.hive.metastore.api.Partition outPart - : getMSC().add_partitions(in, addPartitionDesc.isIfNotExists(), true)) { - out.add(new Partition(tbl, outPart)); + for (int i = 0; i < size; ++i) { + org.apache.hadoop.hive.metastore.api.Partition tmpPart = + convertAddSpecToMetaPartition(tbl, addPartitionDesc.getPartition(i), conf); + if (tmpPart != null && writeId > 0) { + tmpPart.setWriteId(writeId); } - } else { - - // For replication add-ptns, we need to follow an insert-if-not-exists, alter-if-exists scenario. - // TODO : ideally, we should push this mechanism to the metastore, because, otherwise, we have - // no choice but to iterate over the partitions here. + in.add(tmpPart); + } + List<Partition> out = new ArrayList<>(); + try { + if (!addPartitionDesc.getReplicationSpec().isInReplicationScope()){ + // TODO: normally, the result is not necessary; might make sense to pass false + for (org.apache.hadoop.hive.metastore.api.Partition outPart + : getMSC().add_partitions(in, addPartitionDesc.isIfNotExists(), true)) { + out.add(new Partition(tbl, outPart)); + } + } else { - List<org.apache.hadoop.hive.metastore.api.Partition> partsToAdd = new ArrayList<>(); - List<org.apache.hadoop.hive.metastore.api.Partition> partsToAlter = new ArrayList<>(); - List<String> part_names = new ArrayList<>(); - for (org.apache.hadoop.hive.metastore.api.Partition p: in){ - part_names.add(Warehouse.makePartName(tbl.getPartitionKeys(), p.getValues())); - try { - org.apache.hadoop.hive.metastore.api.Partition ptn = - getMSC().getPartition(addPartitionDesc.getDbName(), addPartitionDesc.getTableName(), p.getValues()); - if (addPartitionDesc.getReplicationSpec().allowReplacementInto(ptn.getParameters())){ - ReplicationSpec.copyLastReplId(ptn.getParameters(), p.getParameters()); - partsToAlter.add(p); - } // else ptn already exists, but we do nothing with it. - } catch (NoSuchObjectException nsoe){ - // if the object does not exist, we want to add it. - partsToAdd.add(p); + // For replication add-ptns, we need to follow an insert-if-not-exists, alter-if-exists scenario. + // TODO : ideally, we should push this mechanism to the metastore, because, otherwise, we have + // no choice but to iterate over the partitions here.
+ + List<org.apache.hadoop.hive.metastore.api.Partition> partsToAdd = new ArrayList<>(); + List<org.apache.hadoop.hive.metastore.api.Partition> partsToAlter = new ArrayList<>(); + List<String> part_names = new ArrayList<>(); + for (org.apache.hadoop.hive.metastore.api.Partition p : in){ + part_names.add(Warehouse.makePartName(tbl.getPartitionKeys(), p.getValues())); + try { + org.apache.hadoop.hive.metastore.api.Partition ptn = + getMSC().getPartition(addPartitionDesc.getDbName(), addPartitionDesc.getTableName(), p.getValues()); + if (addPartitionDesc.getReplicationSpec().allowReplacementInto(ptn.getParameters())){ + ReplicationSpec.copyLastReplId(ptn.getParameters(), p.getParameters()); + partsToAlter.add(p); + } // else ptn already exists, but we do nothing with it. + } catch (NoSuchObjectException nsoe){ + // if the object does not exist, we want to add it. + partsToAdd.add(p); + } + } + for (org.apache.hadoop.hive.metastore.api.Partition outPart + : getMSC().add_partitions(partsToAdd, addPartitionDesc.isIfNotExists(), true)) { + out.add(new Partition(tbl, outPart)); + } + EnvironmentContext ec = new EnvironmentContext(); + // In case of replication, statistics are obtained from the source, so do not update those + // on replica. + ec.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE); + getMSC().alter_partitions(addPartitionDesc.getDbName(), addPartitionDesc.getTableName(), + partsToAlter, ec, validWriteIdList, writeId); + + for (org.apache.hadoop.hive.metastore.api.Partition outPart : + getMSC().getPartitionsByNames(addPartitionDesc.getDbName(), addPartitionDesc.getTableName(), part_names)){ + out.add(new Partition(tbl, outPart)); + } } - for (org.apache.hadoop.hive.metastore.api.Partition outPart - : getMSC().add_partitions(partsToAdd, addPartitionDesc.isIfNotExists(), true)) { - out.add(new Partition(tbl, outPart)); - } - EnvironmentContext ec = new EnvironmentContext(); - // In case of replication, statistics are obtained from the source, so do not update those - // on replica.
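The EnvironmentContext built in the hunk above is the standard metastore mechanism for suppressing stats recomputation: replicated partitions carry statistics captured on the source, so the alter_partitions call on the replica must not overwrite them. A minimal sketch of just that flag (the class name is illustrative):

```java
import org.apache.hadoop.hive.common.StatsSetupConst;
import org.apache.hadoop.hive.metastore.api.EnvironmentContext;

// An EnvironmentContext carrying DO_NOT_UPDATE_STATS=true tells the metastore's
// alter path to leave the replicated statistics untouched.
public class NoStatsUpdateSketch {
  static EnvironmentContext noStatsUpdate() {
    EnvironmentContext ec = new EnvironmentContext();
    ec.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE);
    return ec;
  }

  public static void main(String[] args) {
    // prints the single suppression property, e.g. {DO_NOT_UPDATE_STATS=true}
    System.out.println(noStatsUpdate().getProperties());
  }
}
```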
- ec.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE); - getMSC().alter_partitions(addPartitionDesc.getDbName(), addPartitionDesc.getTableName(), - partsToAlter, ec, validWriteIdList, writeId); - - for ( org.apache.hadoop.hive.metastore.api.Partition outPart : - getMSC().getPartitionsByNames(addPartitionDesc.getDbName(), addPartitionDesc.getTableName(),part_names)){ - out.add(new Partition(tbl,outPart)); + } catch (Exception e) { + LOG.error(StringUtils.stringifyException(e)); + throw new HiveException(e); + } + return out; + } finally { + if (txnOpened) { + if (SessionState.get().getTxnMgr().isTxnOpen()) { + SessionState.get().getTxnMgr().commitTxn(); } } - } catch (Exception e) { - LOG.error(StringUtils.stringifyException(e)); - throw new HiveException(e); } - return out; } public static org.apache.hadoop.hive.metastore.api.Partition convertAddSpecToMetaPartition( @@ -3441,12 +3590,25 @@ public boolean dropPartition(String db_name, String tbl_name, public boolean dropPartition(String dbName, String tableName, List<String> partVals, PartitionDropOptions options) throws HiveException { + boolean txnOpened = false; try { + Table tbl = getTable(dbName, tableName); + if (AcidUtils.isTransactionalTable(tbl) && !inReplication(tbl)) { + // Advance writeId for ddl on transactional table + txnOpened = openTxnIfNeeded(); + AcidUtils.advanceWriteId(conf, tbl); + } return getMSC().dropPartition(dbName, tableName, partVals, options); } catch (NoSuchObjectException e) { throw new HiveException("Partition or table doesn't exist.", e); } catch (Exception e) { throw new HiveException(e.getMessage(), e); + } finally { + if (txnOpened) { + if (SessionState.get().getTxnMgr().isTxnOpen()) { + SessionState.get().getTxnMgr().commitTxn(); + } + } } } @@ -3549,8 +3711,14 @@ public boolean dropPartition(String dbName, String tableName, List partV public List<Partition> dropPartitions(String dbName, String tblName, List<AlterTableDropPartitionDesc.PartitionDesc> partSpecs, PartitionDropOptions dropOptions) throws HiveException { + boolean txnOpened = false; try { Table tbl = getTable(dbName, tblName); + if (AcidUtils.isTransactionalTable(tbl) && !inReplication(tbl)) { + txnOpened = openTxnIfNeeded(); + // Advance writeId for ddl on transactional table + AcidUtils.advanceWriteId(conf, tbl); + } List<ObjectPair<Integer, byte[]>> partExprs = new ArrayList<>(partSpecs.size()); for (AlterTableDropPartitionDesc.PartitionDesc partSpec : partSpecs) { @@ -3564,6 +3732,12 @@ public boolean dropPartition(String dbName, String tableName, List partV throw new HiveException("Partition or table doesn't exist.", e); } catch (Exception e) { throw new HiveException(e.getMessage(), e); + } finally { + if (txnOpened) { + if (SessionState.get().getTxnMgr().isTxnOpen()) { + SessionState.get().getTxnMgr().commitTxn(); + } + } } } @@ -4994,7 +5168,20 @@ public static boolean isHadoop1() { public List<Partition> exchangeTablePartitions(Map<String, String> partitionSpecs, String sourceDb, String sourceTable, String destDb, String destinationTableName) throws HiveException { + boolean txnOpened = false; try { + Table srcTbl = getTable(sourceDb, sourceTable); + if (AcidUtils.isTransactionalTable(srcTbl) && !inReplication(srcTbl)) { + txnOpened = openTxnIfNeeded(); + // Advance writeId for ddl on transactional table + AcidUtils.advanceWriteId(conf, srcTbl); + } + Table destTbl = getTable(destDb, destinationTableName); + if (AcidUtils.isTransactionalTable(destTbl) && !inReplication(destTbl)) { + txnOpened |= openTxnIfNeeded(); + // Advance writeId for ddl on transactional table + AcidUtils.advanceWriteId(conf, destTbl); + } List<org.apache.hadoop.hive.metastore.api.Partition>
partitions = getMSC().exchange_partitions(partitionSpecs, sourceDb, sourceTable, destDb, destinationTableName); @@ -5003,6 +5190,12 @@ public static boolean isHadoop1() { } catch (Exception ex) { LOG.error(StringUtils.stringifyException(ex)); throw new HiveException(ex); + } finally { + if (txnOpened) { + if (SessionState.get().getTxnMgr().isTxnOpen()) { + SessionState.get().getTxnMgr().rollbackTxn(); + } + } } } @@ -5120,6 +5313,7 @@ public synchronized IMetaStoreClient getMSC( metaStoreClient = HiveMetaStoreClient.newSynchronizedClient(metaStoreClient); } } + metaStoreClient.setValidWriteIdList(conf.get(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY)); return metaStoreClient; } @@ -5229,21 +5423,47 @@ public AggrStats getAggrColStatsFor(String dbName, String tblName, public boolean deleteTableColumnStatistics(String dbName, String tableName, String colName) throws HiveException { + boolean txnOpened = false; try { + Table tbl = getTable(dbName, tableName); + if (AcidUtils.isTransactionalTable(tbl) && !inReplication(tbl)) { + txnOpened = openTxnIfNeeded(); + // Advance writeId for ddl on transactional table + AcidUtils.advanceWriteId(conf, tbl); + } return getMSC().deleteTableColumnStatistics(dbName, tableName, colName); } catch(Exception e) { LOG.debug(StringUtils.stringifyException(e)); throw new HiveException(e); + } finally { + if (txnOpened) { + if (SessionState.get().getTxnMgr().isTxnOpen()) { + SessionState.get().getTxnMgr().rollbackTxn(); + } + } } } public boolean deletePartitionColumnStatistics(String dbName, String tableName, String partName, String colName) throws HiveException { + boolean txnOpened = false; try { + Table tbl = getTable(dbName, tableName); + if (AcidUtils.isTransactionalTable(tbl) && !inReplication(tbl)) { + txnOpened = openTxnIfNeeded(); + // Advance writeId for ddl on transactional table + AcidUtils.advanceWriteId(conf, tbl); + } return getMSC().deletePartitionColumnStatistics(dbName, tableName, partName, colName); } catch(Exception e) { LOG.debug(StringUtils.stringifyException(e)); throw new HiveException(e); + } finally { + if (txnOpened) { + if (SessionState.get().getTxnMgr().isTxnOpen()) { + SessionState.get().getTxnMgr().rollbackTxn(); + } + } } } @@ -5488,12 +5708,25 @@ public void cacheFileMetadata( public void dropConstraint(String dbName, String tableName, String constraintName) throws HiveException, NoSuchObjectException { + boolean txnOpened = false; try { + Table tbl = getTable(dbName, tableName); + if (AcidUtils.isTransactionalTable(tbl) && !inReplication(tbl)) { + txnOpened = openTxnIfNeeded(); + // Advance writeId for ddl on transactional table + AcidUtils.advanceWriteId(conf, tbl); + } getMSC().dropConstraint(dbName, tableName, constraintName); } catch (NoSuchObjectException e) { throw e; } catch (Exception e) { throw new HiveException(e); + } finally { + if (txnOpened) { + if (SessionState.get().getTxnMgr().isTxnOpen()) { + SessionState.get().getTxnMgr().rollbackTxn(); + } + } } } @@ -5819,55 +6052,133 @@ public CheckConstraint getCheckConstraints(String dbName, String tblName) public void addPrimaryKey(List<SQLPrimaryKey> primaryKeyCols) throws HiveException, NoSuchObjectException { + boolean txnOpened = false; try { + Table tbl = getTable(primaryKeyCols.get(0).getTable_db(), primaryKeyCols.get(0).getTable_name()); + if (AcidUtils.isTransactionalTable(tbl) && !inReplication(tbl)) { + txnOpened = openTxnIfNeeded(); + // Advance writeId for ddl on transactional table + AcidUtils.advanceWriteId(conf, tbl); + } getMSC().addPrimaryKey(primaryKeyCols); } catch
(Exception e) { throw new HiveException(e); + } finally { + if (txnOpened) { + if (SessionState.get().getTxnMgr().isTxnOpen()) { + SessionState.get().getTxnMgr().rollbackTxn(); + } + } } } public void addForeignKey(List<SQLForeignKey> foreignKeyCols) throws HiveException, NoSuchObjectException { + boolean txnOpened = false; try { + Table tbl = getTable(foreignKeyCols.get(0).getFktable_db(), foreignKeyCols.get(0).getFktable_name()); + if (AcidUtils.isTransactionalTable(tbl) && !inReplication(tbl)) { + txnOpened = openTxnIfNeeded(); + // Advance writeId for ddl on transactional table + AcidUtils.advanceWriteId(conf, tbl); + } getMSC().addForeignKey(foreignKeyCols); } catch (Exception e) { throw new HiveException(e); + } finally { + if (txnOpened) { + if (SessionState.get().getTxnMgr().isTxnOpen()) { + SessionState.get().getTxnMgr().rollbackTxn(); + } + } } } public void addUniqueConstraint(List<SQLUniqueConstraint> uniqueConstraintCols) throws HiveException, NoSuchObjectException { + boolean txnOpened = false; try { + Table tbl = getTable(uniqueConstraintCols.get(0).getTable_db(), uniqueConstraintCols.get(0).getTable_name()); + if (AcidUtils.isTransactionalTable(tbl) && !inReplication(tbl)) { + txnOpened = openTxnIfNeeded(); + // Advance writeId for ddl on transactional table + AcidUtils.advanceWriteId(conf, tbl); + } getMSC().addUniqueConstraint(uniqueConstraintCols); } catch (Exception e) { throw new HiveException(e); + } finally { + if (txnOpened) { + if (SessionState.get().getTxnMgr().isTxnOpen()) { + SessionState.get().getTxnMgr().rollbackTxn(); + } + } } } public void addNotNullConstraint(List<SQLNotNullConstraint> notNullConstraintCols) throws HiveException, NoSuchObjectException { + boolean txnOpened = false; try { + Table tbl = getTable(notNullConstraintCols.get(0).getTable_db(), notNullConstraintCols.get(0).getTable_name()); + if (AcidUtils.isTransactionalTable(tbl) && !inReplication(tbl)) { + txnOpened = openTxnIfNeeded(); + // Advance writeId for ddl on transactional table + AcidUtils.advanceWriteId(conf, tbl); + } getMSC().addNotNullConstraint(notNullConstraintCols); } catch (Exception e) { throw new HiveException(e); + } finally { + if (txnOpened) { + if (SessionState.get().getTxnMgr().isTxnOpen()) { + SessionState.get().getTxnMgr().rollbackTxn(); + } + } } } public void addDefaultConstraint(List<SQLDefaultConstraint> defaultConstraints) throws HiveException, NoSuchObjectException { + boolean txnOpened = false; try { + Table tbl = getTable(defaultConstraints.get(0).getTable_db(), defaultConstraints.get(0).getTable_name()); + if (AcidUtils.isTransactionalTable(tbl) && !inReplication(tbl)) { + txnOpened = openTxnIfNeeded(); + // Advance writeId for ddl on transactional table + AcidUtils.advanceWriteId(conf, tbl); + } getMSC().addDefaultConstraint(defaultConstraints); } catch (Exception e) { throw new HiveException(e); + } finally { + if (txnOpened) { + if (SessionState.get().getTxnMgr().isTxnOpen()) { + SessionState.get().getTxnMgr().rollbackTxn(); + } + } } } public void addCheckConstraint(List<SQLCheckConstraint> checkConstraints) throws HiveException, NoSuchObjectException { + boolean txnOpened = false; try { + Table tbl = getTable(checkConstraints.get(0).getTable_db(), checkConstraints.get(0).getTable_name()); + if (AcidUtils.isTransactionalTable(tbl) && !inReplication(tbl)) { + txnOpened = openTxnIfNeeded(); + // Advance writeId for ddl on transactional table + AcidUtils.advanceWriteId(conf, tbl); + } getMSC().addCheckConstraint(checkConstraints); } catch (Exception e) { throw new HiveException(e); + } finally { + if (txnOpened) { + if
(SessionState.get().getTxnMgr().isTxnOpen()) { + SessionState.get().getTxnMgr().rollbackTxn(); + } + } } } @@ -6079,5 +6390,36 @@ public StorageHandlerInfo getStorageHandlerInfo(Table table) throw new HiveException(e); } } + + private boolean openTxnIfNeeded() throws HiveException { + try { + if (SessionState.get().getTxnMgr() == null) { + SessionState.get().initTxnMgr(conf); + } + HiveTxnManager txnMgr = SessionState.get().getTxnMgr(); + if (!txnMgr.isTxnOpen()) { + Context ctx = new Context(conf); + txnMgr.openTxn(ctx, SessionState.getUserFromAuthenticator()); + return true; + } + return false; + } catch (Exception e) { + throw new HiveException(e); + } + } + + public void clearValidWriteIdList() { + if (metaStoreClient != null) { + metaStoreClient.clearValidWriteIdList(); + } + } + + boolean inReplication(Table tbl) { + if (tbl.getParameters().get(ReplicationSpec.KEY.CURR_STATE_ID.toString()) != null) { + return true; + } else { + return false; + } + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java index d39a0b487f..0d944993b4 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java @@ -178,43 +178,20 @@ public void truncateTable(String dbName, String tableName, super.truncateTable(dbName, tableName, partNames, validWriteIds, writeId); } - @Override - public org.apache.hadoop.hive.metastore.api.Table getTable(String dbname, String name) throws MetaException, - TException, NoSuchObjectException { - return getTable(dbname, name, false); - } - - @Override - public org.apache.hadoop.hive.metastore.api.Table getTable(String dbname, String name, - boolean getColStats) throws MetaException, - TException, NoSuchObjectException { - // First check temp tables - org.apache.hadoop.hive.metastore.api.Table table = getTempTable(dbname, name); - if (table != null) { - return deepCopy(table); // Original method used deepCopy(), do the same here. - } - // Try underlying client - return super.getTable(MetaStoreUtils.getDefaultCatalog(conf), dbname, name, getColStats); - } - // Need to override this one too or dropTable breaks because it doesn't find the table when checks // before the drop. @Override public org.apache.hadoop.hive.metastore.api.Table getTable(String catName, String dbName, - String tableName) throws TException { - return getTable(catName, dbName, tableName, false); - } - - // Need to override this one too or dropTable breaks because it doesn't find the table when checks - // before the drop. - @Override - public org.apache.hadoop.hive.metastore.api.Table getTable(String catName, String dbName, - String tableName, boolean getColStats) + String tableName, String validWriteIdList, boolean getColStats) throws TException { if (!DEFAULT_CATALOG_NAME.equals(catName)) { - return super.getTable(catName, dbName, tableName, getColStats); + return super.getTable(catName, dbName, tableName, validWriteIdList, getColStats); } else { - return getTable(dbName, tableName, getColStats); + org.apache.hadoop.hive.metastore.api.Table table = getTempTable(dbName, tableName); + if (table != null) { + return deepCopy(table); // Original method used deepCopy(), do the same here. 
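Stepping back from the individual hunks for a moment: openTxnIfNeeded() above, together with the txnOpened flag and the finally blocks threaded through every DDL entry point in this file, forms one recurring wrapper. A condensed, non-Hive sketch of that choreography, with the TxnManager interface standing in for HiveTxnManager and the two Runnables standing in for AcidUtils.advanceWriteId and the actual metastore call:

```java
// Condensed sketch (not Hive code) of the wrapper this patch repeats around each
// DDL entry point: open a txn only if the session does not already have one,
// advance the table's writeId so snapshots see the DDL, and close the txn in
// finally only when this call opened it.
interface TxnManager {
  boolean isTxnOpen();
  void openTxn();
  void commitTxn();
}

final class DdlTxnWrapper {
  private final TxnManager txnMgr;

  DdlTxnWrapper(TxnManager txnMgr) { this.txnMgr = txnMgr; }

  /** @return true only if this call opened the txn, so the caller must close it. */
  boolean openTxnIfNeeded() {
    if (txnMgr.isTxnOpen()) {
      return false; // reuse the caller's txn; never commit it from here
    }
    txnMgr.openTxn();
    return true;
  }

  void runDdl(Runnable advanceWriteId, Runnable ddl) {
    boolean txnOpened = false;
    try {
      txnOpened = openTxnIfNeeded();
      advanceWriteId.run(); // AcidUtils.advanceWriteId(conf, tbl) in the patch
      ddl.run();            // e.g. getMSC().dropConstraint(...)
    } finally {
      if (txnOpened && txnMgr.isTxnOpen()) {
        txnMgr.commitTxn(); // some call sites roll back instead, e.g. exchangeTablePartitions
      }
    }
  }
}
```

Returning false when a transaction is already open is what keeps a caller-owned transaction from being committed, or rolled back, out from under the caller.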
+ } + return super.getTable(catName, dbName, tableName, validWriteIdList, getColStats); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/RewriteSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/RewriteSemanticAnalyzer.java index 33247f0745..65dc3062f4 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/RewriteSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/RewriteSemanticAnalyzer.java @@ -408,7 +408,14 @@ private boolean isTargetTable(Entity entity, Table targetTable) { * is this the right way to compare? Should it just compare paths? * equals() impl looks heavy weight */ - return targetTable.equals(entity.getTable()); + long targetWriteId = targetTable.getTTable().getWriteId(); + long entityWriteId = entity.getTable().getTTable().getWriteId(); + targetTable.getTTable().setWriteId(0L); + entity.getTable().getTTable().setWriteId(0L); + boolean result = targetTable.equals(entity.getTable()); + targetTable.getTTable().setWriteId(targetWriteId); + entity.getTable().getTTable().setWriteId(entityWriteId); + return result; } /** diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index 826b23e5fa..d469b3d2c5 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -67,10 +67,13 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.hive.common.FileUtils; +import org.apache.hadoop.hive.common.JavaUtils; import org.apache.hadoop.hive.common.ObjectPair; import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.common.StatsSetupConst.StatDB; import org.apache.hadoop.hive.common.StringInternUtils; +import org.apache.hadoop.hive.common.TableName; +import org.apache.hadoop.hive.common.ValidReaderWriteIdList; import org.apache.hadoop.hive.common.ValidTxnList; import org.apache.hadoop.hive.common.ValidTxnWriteIdList; import org.apache.hadoop.hive.common.metrics.common.MetricsConstant; @@ -12489,9 +12492,53 @@ else if(ast.getChild(0).getType() == HiveParser.TOK_FALSE) { // if phase1Result false return return false; } + + // 5. 
Set write id for HMS client + if (getTxnMgr().supportsAcid() && conf.get(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY) == null) { + + ValidTxnWriteIdList txnWriteIds = null; + + if (conf.get(ValidTxnWriteIdList.COMPACTOR_VALID_TABLES_WRITEIDS_KEY) != null) { + txnWriteIds = new ValidTxnWriteIdList(conf.getLong(ValidTxnList.COMPACTOR_VALID_TXNS_ID_KEY, 0)); + txnWriteIds.addTableValidWriteIdList(new ValidReaderWriteIdList(conf.get(ValidTxnWriteIdList.COMPACTOR_VALID_TABLES_WRITEIDS_KEY))); + } + else { + List<String> tabNames = new ArrayList<>(); + for (String tabName : collectTables(qb)) { + String fullName = TableName.fromString(tabName, SessionState.get().getCurrentCatalog(), SessionState.get().getCurrentDatabase()).getDbTable(); + tabNames.add(fullName); + } + + if (!tabNames.isEmpty()) { + String txnString = conf.get(ValidTxnList.VALID_TXNS_KEY); + + try { + if ((txnString == null) || (txnString.isEmpty())) { + txnString = getTxnMgr().getValidTxns().toString(); + conf.set(ValidTxnList.VALID_TXNS_KEY, txnString); + } + + txnWriteIds = getTxnMgr().getValidWriteIds(tabNames, txnString); + } catch (LockException e) { + throw new SemanticException("Failed to fetch write Id from TxnManager", e); + } + } + } + + if (txnWriteIds != null) { + conf.set(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY, txnWriteIds.toString()); + try { + db.getMSC().setValidWriteIdList(txnWriteIds.toString()); + Hive.get().getMSC().setValidWriteIdList(txnWriteIds.toString()); + } catch (HiveException|MetaException e) { + throw new SemanticException("Failed to set write Id for HMS client", e); + } + } + } + LOG.info("Completed phase 1 of Semantic Analysis"); - // 5. Resolve Parse Tree + // 6. Resolve Parse Tree // Materialization is allowed if it is not a view definition getMetaData(qb, createVwDesc == null); LOG.info("Completed getting MetaData in Semantic Analysis"); @@ -12499,6 +12546,42 @@ else if(ast.getChild(0).getType() == HiveParser.TOK_FALSE) { return true; } + private Set<String> collectTables(QBExpr qbExpr) { + Set<String> result = new HashSet<>(); + if (qbExpr.getQB() != null) { + result.addAll(collectTables(qbExpr.getQB())); + } else { + if (qbExpr.getQBExpr1() != null) { + result.addAll(collectTables(qbExpr.getQBExpr1())); + } + if (qbExpr.getQBExpr2() != null) { + result.addAll(collectTables(qbExpr.getQBExpr2())); + } + } + return result; + } + + private Set<String> collectTables(QB qb) { + Set<String> result = new HashSet<>(); + for (String alias : qb.getTabAliases()) { + result.add(qb.getTabNameForAlias(alias)); + } + for (String alias : qb.getSubqAliases()) { + QBExpr qbExpr = qb.getSubqForAlias(alias); + if (qbExpr.getQB() != null) { + result.addAll(collectTables(qbExpr.getQB())); + } else { + if (qbExpr.getQBExpr1() != null) { + result.addAll(collectTables(qbExpr.getQBExpr1())); + } + if (qbExpr.getQBExpr2() != null) { + result.addAll(collectTables(qbExpr.getQBExpr2())); + } + } + } + return result; + } + public void getHintsFromQB(QB qb, List<ASTNode> hints) { if (qb.getParseInfo().getHints() != null) { hints.add(qb.getParseInfo().getHints()); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationPreEventListener.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationPreEventListener.java index 2cc057ee6e..b4da7d4354 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationPreEventListener.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationPreEventListener.java @@ -467,7 +467,7 @@ public
PartitionWrapper(org.apache.hadoop.hive.metastore.api.Partition mapiPart, String catName = mapiPart.isSetCatName() ? mapiPart.getCatName() : MetaStoreUtils.getDefaultCatalog(context.getHandler().getConf()); org.apache.hadoop.hive.metastore.api.Table t = context.getHandler().get_table_core( - catName, mapiPart.getDbName(), mapiPart.getTableName()); + catName, mapiPart.getDbName(), mapiPart.getTableName(), null); if (wrapperApiPart.getSd() == null){ // In the cases of create partition, by the time this event fires, the partition // object has not yet come into existence, and thus will not yet have a diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java index 2e25ecef65..4f3c7ef119 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java @@ -23,6 +23,7 @@ import java.util.Collection; import java.util.List; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.Warehouse; @@ -182,9 +183,11 @@ public int persistColumnStats(Hive db, Table tbl) throws HiveException, MetaExce HiveTxnManager txnMgr = AcidUtils.isTransactionalTable(tbl) ? SessionState.get().getTxnMgr() : null; if (txnMgr != null) { - request.setValidWriteIdList(AcidUtils.getTableValidWriteIdList(conf, - AcidUtils.getFullTableName(tbl.getDbName(), tbl.getTableName())).toString()); request.setWriteId(txnMgr.getAllocatedTableWriteId(tbl.getDbName(), tbl.getTableName())); + ValidWriteIdList writeId = AcidUtils.getTableValidWriteIdList(conf, + AcidUtils.getFullTableName(tbl.getDbName(), tbl.getTableName())); + writeId.commitWriteId(request.getWriteId()); + request.setValidWriteIdList(writeId.toString()); } db.setPartitionColumnStatistics(request); return 0; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUpdaterThread.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUpdaterThread.java index 444c7add2f..0cb109a387 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUpdaterThread.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUpdaterThread.java @@ -214,7 +214,7 @@ private void stopWorkers() { throws MetaException, NoSuchTxnException, NoSuchObjectException { if (isAnalyzeTableInProgress(fullTableName)) return null; String cat = fullTableName.getCat(), db = fullTableName.getDb(), tbl = fullTableName.getTable(); - Table table = rs.getTable(cat, db, tbl); + Table table = rs.getTable(cat, db, tbl, null); LOG.debug("Processing table {}", table); // Check if the table should be skipped. 
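On the ColStatsProcessor hunk above: the writeId allocated for the ANALYZE statement is still open at the moment the stats request is sent, so the patch marks it committed inside the ValidWriteIdList that travels with the request (the commitWriteId(...) call is part of this change, not a long-standing storage-api method). The sketch below reproduces the intended effect using only the public ValidReaderWriteIdList constructor, by rebuilding the list without the writer's own id in the open set; names are illustrative:

```java
import java.util.Arrays;
import java.util.BitSet;
import org.apache.hadoop.hive.common.ValidReaderWriteIdList;

// The writer's own (still-uncommitted) writeId must count as valid in the list
// sent with the stats request, otherwise the metastore would treat the stats
// written under it as not-yet-visible.
public class CommitOwnWriteIdSketch {
  static ValidReaderWriteIdList treatAsCommitted(String table, long hwm,
                                                 long[] openWriteIds, long ownWriteId) {
    long[] stillOpen = Arrays.stream(openWriteIds)
        .filter(w -> w != ownWriteId)  // drop our own id from the "invalid" set
        .toArray();
    return new ValidReaderWriteIdList(table, stillOpen, new BitSet(), hwm);
  }

  public static void main(String[] args) {
    ValidReaderWriteIdList before =
        new ValidReaderWriteIdList("default.t", new long[]{5L}, new BitSet(), 5L);
    System.out.println(before.isWriteIdValid(5L));                          // false
    System.out.println(treatAsCommitted("default.t", 5L, new long[]{5L}, 5L)
        .isWriteIdValid(5L));                                               // true
  }
}
```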
@@ -309,7 +309,7 @@ private void stopWorkers() { try { colsPerPartition = rs.getPartitionColsWithStats(cat, db, tbl); partNames = Lists.newArrayList(colsPerPartition.keySet()); - int partitionCount = rs.getNumPartitionsByFilter(cat, db, tbl, ""); + int partitionCount = rs.getNumPartitionsByFilter(cat, db, tbl, "", null); isAllParts = partitionCount == partNames.size(); isOk = true; } finally { @@ -320,10 +320,10 @@ private void stopWorkers() { } } } else { - partNames = rs.listPartitionNames(cat, db, tbl, (short) -1); + partNames = rs.listPartitionNames(cat, db, tbl, (short) -1, null); isAllParts = true; } - Table t = rs.getTable(cat, db, tbl); + Table t = rs.getTable(cat, db, tbl, null); List currentBatch = null; int nextBatchStart = 0, nextIxInBatch = -1, currentBatchStart = 0; List colsToUpdateForAll = null; @@ -337,7 +337,7 @@ private void stopWorkers() { currentBatchStart = nextBatchStart; nextBatchStart = nextBatchEnd; try { - currentBatch = rs.getPartitionsByNames(cat, db, tbl, currentNames); + currentBatch = rs.getPartitionsByNames(cat, db, tbl, currentNames, null); } catch (NoSuchObjectException e) { LOG.error("Failed to get partitions for " + fullTableName + ", skipping some partitions", e); currentBatch = null; @@ -456,7 +456,7 @@ private String buildPartColStr(Table table) { try { // Note: this should NOT do txn verification - we want to get outdated stats, to // see if we need to update anything. - existingStats = rs.getTableColumnStatistics(cat, db, tbl, allCols); + existingStats = rs.getTableColumnStatistics(cat, db, tbl, allCols, null); } catch (NoSuchObjectException e) { LOG.error("Cannot retrieve existing stats, skipping " + fullTableName, e); return null; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/MetaStoreCompactorThread.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/MetaStoreCompactorThread.java index a6dd4fa003..582c4bfe48 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/MetaStoreCompactorThread.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/MetaStoreCompactorThread.java @@ -67,7 +67,7 @@ public void init(AtomicBoolean stop, AtomicBoolean looped) throws Exception { @Override Table resolveTable(CompactionInfo ci) throws MetaException { try { - return rs.getTable(getDefaultCatalog(conf), ci.dbname, ci.tableName); + return rs.getTable(getDefaultCatalog(conf), ci.dbname, ci.tableName, null); } catch (MetaException e) { LOG.error("Unable to find table " + ci.getFullTableName() + ", " + e.getMessage()); throw e; @@ -88,7 +88,7 @@ public void init(AtomicBoolean stop, AtomicBoolean looped) throws Exception { @Override List getPartitionsByNames(CompactionInfo ci) throws MetaException { try { return rs.getPartitionsByNames(getDefaultCatalog(conf), ci.dbname, ci.tableName, - Collections.singletonList(ci.partName)); + Collections.singletonList(ci.partName), null); } catch (MetaException e) { LOG.error("Unable to get partitions by name for CompactionInfo=" + ci); throw e; diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnAddPartition.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnAddPartition.java index e6bc11e6aa..0260a1195f 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnAddPartition.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnAddPartition.java @@ -104,14 +104,14 @@ private void addPartition(boolean isVectorized) throws Exception { String testQuery = isVectorized ? 
"select ROW__ID, p, a, b from T order by p, ROW__ID" : "select ROW__ID, p, a, b, INPUT__FILE__NAME from T order by p, ROW__ID"; String[][] expected = new String[][]{ - {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t0\t0\t2", - "warehouse/t/p=0/delta_0000001_0000001_0000/000000_0"}, - {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":1}\t0\t0\t4", - "warehouse/t/p=0/delta_0000001_0000001_0000/000000_0"}, - {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t0\t2", - "warehouse/t/p=1/delta_0000001_0000001_0000/000000_0"}, - {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":1}\t1\t0\t4", - "warehouse/t/p=1/delta_0000001_0000001_0000/000000_0"}}; + {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\t0\t0\t2", + "warehouse/t/p=0/delta_0000002_0000002_0000/000000_0"}, + {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":1}\t0\t0\t4", + "warehouse/t/p=0/delta_0000002_0000002_0000/000000_0"}, + {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\t1\t0\t2", + "warehouse/t/p=1/delta_0000002_0000002_0000/000000_0"}, + {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":1}\t1\t0\t4", + "warehouse/t/p=1/delta_0000002_0000002_0000/000000_0"}}; checkResult(expected, testQuery, isVectorized, "add 2 parts w/data and 1 empty", LOG); runStatementOnDriver("export table Tstage to '" + getWarehouseDir() + "/3'"); @@ -128,18 +128,18 @@ private void addPartition(boolean isVectorized) throws Exception { + "PARTITION (p=3) location '" + getWarehouseDir() + "/3/data'";//p=3 doesn't exist runStatementOnDriver(stmt); String[][] expected2 = new String[][]{ - {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t0\t0\t2", - "warehouse/t/p=0/delta_0000001_0000001_0000/000000_0"}, - {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":1}\t0\t0\t4", - "warehouse/t/p=0/delta_0000001_0000001_0000/000000_0"}, - {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t0\t2", - "warehouse/t/p=1/delta_0000001_0000001_0000/000000_0"}, - {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":1}\t1\t0\t4", - "warehouse/t/p=1/delta_0000001_0000001_0000/000000_0"}, - {"{\"writeid\":3,\"bucketid\":536870912,\"rowid\":0}\t3\t0\t2", - "warehouse/t/p=3/delta_0000003_0000003_0000/000000_0"}, - {"{\"writeid\":3,\"bucketid\":536870912,\"rowid\":1}\t3\t0\t4", - "warehouse/t/p=3/delta_0000003_0000003_0000/000000_0"}}; + {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\t0\t0\t2", + "warehouse/t/p=0/delta_0000002_0000002_0000/000000_0"}, + {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":1}\t0\t0\t4", + "warehouse/t/p=0/delta_0000002_0000002_0000/000000_0"}, + {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\t1\t0\t2", + "warehouse/t/p=1/delta_0000002_0000002_0000/000000_0"}, + {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":1}\t1\t0\t4", + "warehouse/t/p=1/delta_0000002_0000002_0000/000000_0"}, + {"{\"writeid\":4,\"bucketid\":536870912,\"rowid\":0}\t3\t0\t2", + "warehouse/t/p=3/delta_0000004_0000004_0000/000000_0"}, + {"{\"writeid\":4,\"bucketid\":536870912,\"rowid\":1}\t3\t0\t4", + "warehouse/t/p=3/delta_0000004_0000004_0000/000000_0"}}; checkResult(expected2, testQuery, isVectorized, "add 2 existing parts and 1 empty", LOG); } @@ -182,10 +182,10 @@ private void addPartitionMM(boolean isVectorized) throws Exception { String testQuery = isVectorized ? 
"select p, a, b from T order by p, a, b" : "select p, a, b, INPUT__FILE__NAME from T order by p, a, b"; String[][] expected = new String[][]{ - {"0\t0\t2", "warehouse/t/p=0/delta_0000001_0000001_0000/000000_0"}, - {"0\t0\t4", "warehouse/t/p=0/delta_0000001_0000001_0000/000000_0"}, - {"1\t0\t2", "warehouse/t/p=1/delta_0000001_0000001_0000/000000_0"}, - {"1\t0\t4", "warehouse/t/p=1/delta_0000001_0000001_0000/000000_0"}}; + {"0\t0\t2", "warehouse/t/p=0/delta_0000002_0000002_0000/000000_0"}, + {"0\t0\t4", "warehouse/t/p=0/delta_0000002_0000002_0000/000000_0"}, + {"1\t0\t2", "warehouse/t/p=1/delta_0000002_0000002_0000/000000_0"}, + {"1\t0\t4", "warehouse/t/p=1/delta_0000002_0000002_0000/000000_0"}}; checkResult(expected, testQuery, isVectorized, "add 2 parts w/data and 1 empty", LOG); runStatementOnDriver("export table Tstage to '" + getWarehouseDir() + "/3'"); @@ -201,12 +201,12 @@ private void addPartitionMM(boolean isVectorized) throws Exception { + "PARTITION (p=2) location '" + getWarehouseDir() + "/3/data'"//p=2 exists and is empty + "PARTITION (p=3) location '" + getWarehouseDir() + "/3/data'");//p=3 doesn't exist String[][] expected2 = new String[][]{ - {"0\t0\t2", "warehouse/t/p=0/delta_0000001_0000001_0000/000000_0"}, - {"0\t0\t4", "warehouse/t/p=0/delta_0000001_0000001_0000/000000_0"}, - {"1\t0\t2", "warehouse/t/p=1/delta_0000001_0000001_0000/000000_0"}, - {"1\t0\t4", "warehouse/t/p=1/delta_0000001_0000001_0000/000000_0"}, - {"3\t0\t2", "warehouse/t/p=3/delta_0000003_0000003_0000/000000_0"}, - {"3\t0\t4", "warehouse/t/p=3/delta_0000003_0000003_0000/000000_0"}}; + {"0\t0\t2", "warehouse/t/p=0/delta_0000002_0000002_0000/000000_0"}, + {"0\t0\t4", "warehouse/t/p=0/delta_0000002_0000002_0000/000000_0"}, + {"1\t0\t2", "warehouse/t/p=1/delta_0000002_0000002_0000/000000_0"}, + {"1\t0\t4", "warehouse/t/p=1/delta_0000002_0000002_0000/000000_0"}, + {"3\t0\t2", "warehouse/t/p=3/delta_0000004_0000004_0000/000000_0"}, + {"3\t0\t4", "warehouse/t/p=3/delta_0000004_0000004_0000/000000_0"}}; checkResult(expected2, testQuery, isVectorized, "add 2 existing parts and 1 empty", LOG); } @@ -228,10 +228,10 @@ public void addPartitionBucketed() throws Exception { List rs = runStatementOnDriver( "select ROW__ID, p, a, b, INPUT__FILE__NAME from T order by p, ROW__ID"); String[][] expected = new String[][]{ - {"{\"writeid\":1,\"bucketid\":536936448,\"rowid\":0}\t0\t0\t2", - "warehouse/t/p=0/delta_0000001_0000001_0000/000001_0"}, - {"{\"writeid\":1,\"bucketid\":536936448,\"rowid\":1}\t0\t1\t4", - "warehouse/t/p=0/delta_0000001_0000001_0000/000001_0"}}; + {"{\"writeid\":2,\"bucketid\":536936448,\"rowid\":0}\t0\t0\t2", + "warehouse/t/p=0/delta_0000002_0000002_0000/000001_0"}, + {"{\"writeid\":2,\"bucketid\":536936448,\"rowid\":1}\t0\t1\t4", + "warehouse/t/p=0/delta_0000002_0000002_0000/000001_0"}}; checkExpected(rs, expected, "add partition (p=0)"); } @@ -267,10 +267,10 @@ public void addPartitionRename() throws Exception { List rs = runStatementOnDriver( "select ROW__ID, p, a, b, INPUT__FILE__NAME from T order by p, ROW__ID"); String[][] expected = new String[][]{ - {"{\"writeid\":1,\"bucketid\":536936448,\"rowid\":0}\t0\t0\t2", - "warehouse/t/p=0/delta_0000001_0000001_0000/000001_0"}, - {"{\"writeid\":1,\"bucketid\":536936448,\"rowid\":1}\t0\t1\t4", - "warehouse/t/p=0/delta_0000001_0000001_0000/000001_0"}}; + {"{\"writeid\":2,\"bucketid\":536936448,\"rowid\":0}\t0\t0\t2", + "warehouse/t/p=0/delta_0000002_0000002_0000/000001_0"}, + {"{\"writeid\":2,\"bucketid\":536936448,\"rowid\":1}\t0\t1\t4", + 
"warehouse/t/p=0/delta_0000002_0000002_0000/000001_0"}}; checkExpected(rs, expected, "add partition (p=0)"); } @@ -298,6 +298,6 @@ public void addPartitionTransactional() throws Exception { runStatementOnDriver("insert into Tstage partition(p=1) values(0,2),(1,4)"); runStatementOnDriver("ALTER TABLE T ADD PARTITION (p=0) location '" - + getWarehouseDir() + "/tstage/p=1/delta_0000001_0000001_0000/bucket_00001'"); + + getWarehouseDir() + "/tstage/p=1/delta_0000002_0000002_0000/bucket_00001'"); } } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java index 62793311e4..8be11fb1c2 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java @@ -1243,8 +1243,8 @@ private void writeBetweenWorkerAndCleanerForVariousTblProperties(String tblPrope FileStatus[] status = fs.listStatus(new Path(TEST_WAREHOUSE_DIR + "/" + tblName.toLowerCase()), FileUtils.HIDDEN_FILES_PATH_FILTER); Set expectedDeltas = new HashSet<>(); - expectedDeltas.add("delete_delta_0000001_0000002_v0000019"); - expectedDeltas.add("delta_0000001_0000002_v0000019"); + expectedDeltas.add("delete_delta_0000002_0000003_v0000019"); + expectedDeltas.add("delta_0000002_0000003_v0000019"); Set actualDeltas = new HashSet<>(); for(FileStatus file : status) { actualDeltas.add(file.getPath().getName()); @@ -2046,9 +2046,9 @@ public void testCleanerForTxnToWriteId() throws Exception { Assert.assertEquals(TxnDbUtil.queryToString(hiveConf, "select * from MIN_HISTORY_LEVEL"), 0, TxnDbUtil.countQueryAgent(hiveConf, "select count(*) from MIN_HISTORY_LEVEL")); Assert.assertEquals(TxnDbUtil.queryToString(hiveConf, "select * from TXN_TO_WRITE_ID" + acidTblWhereClause), - 3, TxnDbUtil.countQueryAgent(hiveConf, "select count(*) from TXN_TO_WRITE_ID" + acidTblWhereClause)); + 4, TxnDbUtil.countQueryAgent(hiveConf, "select count(*) from TXN_TO_WRITE_ID" + acidTblWhereClause)); Assert.assertEquals(TxnDbUtil.queryToString(hiveConf, "select * from TXN_TO_WRITE_ID" + acidTblPartWhereClause), - 2, TxnDbUtil.countQueryAgent(hiveConf, "select count(*) from TXN_TO_WRITE_ID" + acidTblPartWhereClause)); + 3, TxnDbUtil.countQueryAgent(hiveConf, "select count(*) from TXN_TO_WRITE_ID" + acidTblPartWhereClause)); TxnStore txnHandler = TxnUtils.getTxnStore(hiveConf); txnHandler.compact(new CompactionRequest("default", Table.ACIDTBL.name().toLowerCase(), CompactionType.MAJOR)); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands3.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands3.java index 7b3ab282d2..dbad73cfc5 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands3.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands3.java @@ -75,10 +75,10 @@ public void testRenameTable() throws Exception { String testQuery = "select ROW__ID, a, b, INPUT__FILE__NAME from mydb1.S"; String[][] expected = new String[][] { - {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t2", - "s/delta_0000001_0000001_0000/bucket_00000"}, - {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\t4\t6", - "s/delta_0000002_0000002_0000/bucket_00000"}}; + {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\t1\t2", + "s/delta_0000002_0000002_0000/bucket_00000"}, + {"{\"writeid\":3,\"bucketid\":536870912,\"rowid\":0}\t4\t6", + "s/delta_0000003_0000003_0000/bucket_00000"}}; checkResult(expected, testQuery, false, "check data", LOG); @@ -101,9 +101,9 @@ public void testRenameTable() throws Exception 
{ "select count(*) from COMPACTION_QUEUE where CQ_TABLE='s'")); Assert.assertEquals(1, TxnDbUtil.countQueryAgent(hiveConf, "select count(*) from WRITE_SET where WS_TABLE='s'")); - Assert.assertEquals(3, TxnDbUtil.countQueryAgent(hiveConf, + Assert.assertEquals(5, TxnDbUtil.countQueryAgent(hiveConf, "select count(*) from TXN_TO_WRITE_ID where T2W_TABLE='s'")); - Assert.assertEquals(1, TxnDbUtil.countQueryAgent(hiveConf, + Assert.assertEquals(2, TxnDbUtil.countQueryAgent(hiveConf, "select count(*) from NEXT_WRITE_ID where NWI_TABLE='s'")); runStatementOnDriver("alter table mydb1.S RENAME TO mydb2.bar"); @@ -116,9 +116,9 @@ public void testRenameTable() throws Exception { "select count(*) from COMPACTION_QUEUE where CQ_TABLE='bar'")); Assert.assertEquals(1, TxnDbUtil.countQueryAgent(hiveConf, "select count(*) from WRITE_SET where WS_TABLE='bar'")); - Assert.assertEquals(4, TxnDbUtil.countQueryAgent(hiveConf, + Assert.assertEquals(7, TxnDbUtil.countQueryAgent(hiveConf, "select count(*) from TXN_TO_WRITE_ID where T2W_TABLE='bar'")); - Assert.assertEquals(1, TxnDbUtil.countQueryAgent(hiveConf, + Assert.assertEquals(3, TxnDbUtil.countQueryAgent(hiveConf, "select count(*) from NEXT_WRITE_ID where NWI_TABLE='bar'")); } @@ -166,10 +166,10 @@ private void testDeleteEventPruning() throws Exception { "select ROW__ID, a, b from T order by a, b" : "select ROW__ID, a, b, INPUT__FILE__NAME from T order by a, b"; String[][] expected = new String[][]{ - {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":1}\t4\t5", - "warehouse/t/delta_0000001_0000001_0000/bucket_00000"}, - {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\t4\t6", - "warehouse/t/delta_0000002_0000002_0000/bucket_00000"}}; + {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":1}\t4\t5", + "warehouse/t/delta_0000002_0000002_0000/bucket_00000"}, + {"{\"writeid\":3,\"bucketid\":536870912,\"rowid\":0}\t4\t6", + "warehouse/t/delta_0000003_0000003_0000/bucket_00000"}}; checkResult(expected, testQuery, isVectorized, "after delete", LOG); runStatementOnDriver("alter table T compact 'MAJOR'"); @@ -184,9 +184,9 @@ private void testDeleteEventPruning() throws Exception { .startsWith("job_local")); String[][] expected2 = new String[][]{ - {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":1}\t4\t5", + {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":1}\t4\t5", "warehouse/t/base_0000001/bucket_00000"}, - {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\t4\t6", + {"{\"writeid\":3,\"bucketid\":536870912,\"rowid\":0}\t4\t6", "warehouse/t/base_0000002/bucket_00000"}}; checkResult(expected2, testQuery, isVectorized, "after compaction", LOG); } @@ -272,29 +272,29 @@ private void testSdpoBucketed(boolean isVectorized, boolean isSdpo, int bucketin "select ROW__ID, a, b, ds from acid_uap order by ds, a, b" : "select ROW__ID, a, b, ds, INPUT__FILE__NAME from acid_uap order by ds, a, b"; String[][] expected = new String[][]{ - {"{\"writeid\":2,\"bucketid\":536936448,\"rowid\":0}\t1\tbah\ttoday", - "warehouse/acid_uap/ds=today/delta_0000002_0000002_0000/bucket_00001"}, - {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\t2\tyah\ttoday", - "warehouse/acid_uap/ds=today/delta_0000002_0000002_0000/bucket_00000"}, - - {"{\"writeid\":1,\"bucketid\":536936448,\"rowid\":0}\t1\tbah\ttomorrow", - "warehouse/acid_uap/ds=tomorrow/delta_0000001_0000001_0000/bucket_00001"}, - {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t2\tyah\ttomorrow", - "warehouse/acid_uap/ds=tomorrow/delta_0000001_0000001_0000/bucket_00000"}}; + 
{"{\"writeid\":3,\"bucketid\":536936448,\"rowid\":0}\t1\tbah\ttoday", + "warehouse/acid_uap/ds=today/delta_0000003_0000003_0000/bucket_00001"}, + {"{\"writeid\":3,\"bucketid\":536870912,\"rowid\":0}\t2\tyah\ttoday", + "warehouse/acid_uap/ds=today/delta_0000003_0000003_0000/bucket_00000"}, + + {"{\"writeid\":2,\"bucketid\":536936448,\"rowid\":0}\t1\tbah\ttomorrow", + "warehouse/acid_uap/ds=tomorrow/delta_0000002_0000002_0000/bucket_00001"}, + {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\t2\tyah\ttomorrow", + "warehouse/acid_uap/ds=tomorrow/delta_0000002_0000002_0000/bucket_00000"}}; checkResult(expected, testQuery, isVectorized, "after insert", LOG); runStatementOnDriver("update acid_uap set b = 'fred'"); String[][] expected2 = new String[][]{ - {"{\"writeid\":3,\"bucketid\":536936448,\"rowid\":0}\t1\tfred\ttoday", - "warehouse/acid_uap/ds=today/delta_0000003_0000003_0000/bucket_00001"}, - {"{\"writeid\":3,\"bucketid\":536870912,\"rowid\":0}\t2\tfred\ttoday", - "warehouse/acid_uap/ds=today/delta_0000003_0000003_0000/bucket_00000"}, - - {"{\"writeid\":3,\"bucketid\":536936448,\"rowid\":0}\t1\tfred\ttomorrow", - "warehouse/acid_uap/ds=tomorrow/delta_0000003_0000003_0000/bucket_00001"}, - {"{\"writeid\":3,\"bucketid\":536870912,\"rowid\":0}\t2\tfred\ttomorrow", - "warehouse/acid_uap/ds=tomorrow/delta_0000003_0000003_0000/bucket_00000"}}; + {"{\"writeid\":4,\"bucketid\":536936448,\"rowid\":0}\t1\tfred\ttoday", + "warehouse/acid_uap/ds=today/delta_0000004_0000004_0000/bucket_00001"}, + {"{\"writeid\":4,\"bucketid\":536870912,\"rowid\":0}\t2\tfred\ttoday", + "warehouse/acid_uap/ds=today/delta_0000004_0000004_0000/bucket_00000"}, + + {"{\"writeid\":4,\"bucketid\":536936448,\"rowid\":0}\t1\tfred\ttomorrow", + "warehouse/acid_uap/ds=tomorrow/delta_0000004_0000004_0000/bucket_00001"}, + {"{\"writeid\":4,\"bucketid\":536870912,\"rowid\":0}\t2\tfred\ttomorrow", + "warehouse/acid_uap/ds=tomorrow/delta_0000004_0000004_0000/bucket_00000"}}; checkResult(expected2, testQuery, isVectorized, "after update", LOG); } @Test @@ -323,10 +323,10 @@ public void testCleaner2() throws Exception { └── bucket_00000*/ String testQuery = "select ROW__ID, a, b, INPUT__FILE__NAME from T"; String[][] expected = new String[][] { - {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t0\t2", - "t/delta_0000001_0000001_0000/bucket_00000"}, - {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\t1\t4", - "t/delta_0000002_0000002_0000/bucket_00000"}}; + {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\t0\t2", + "t/delta_0000002_0000002_0000/bucket_00000"}, + {"{\"writeid\":3,\"bucketid\":536870912,\"rowid\":0}\t1\t4", + "t/delta_0000003_0000003_0000/bucket_00000"}}; checkResult(expected, testQuery, false, "check data", LOG); @@ -351,9 +351,9 @@ public void testCleaner2() throws Exception { FileUtils.HIDDEN_FILES_PATH_FILTER); String[] expectedList = new String[] { - "/t/delta_0000001_0000002_v0000019", - "/t/delta_0000001_0000001_0000", + "/t/delta_0000002_0000003_v0000019", "/t/delta_0000002_0000002_0000", + "/t/delta_0000003_0000003_0000", }; checkExpectedFiles(actualList, expectedList, warehousePath.toString()); @@ -384,7 +384,7 @@ so cleaner removes all files shadowed by it (which is everything in this case) runCleaner(hiveConf); expectedList = new String[] { - "/t/delta_0000001_0000003_v0000022" + "/t/delta_0000002_0000004_v0000022" }; actualList = fs.listStatus(new Path(warehousePath + "/t"), FileUtils.HIDDEN_FILES_PATH_FILTER); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnConcatenate.java 
b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnConcatenate.java index 8676e0db11..5deff50bb0 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnConcatenate.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnConcatenate.java @@ -56,14 +56,14 @@ public void testConcatenate() throws Exception { runStatementOnDriver("insert into " + Table.ACIDTBL + " values(5,6),(8,8)"); String testQuery = "select ROW__ID, a, b, INPUT__FILE__NAME from " + Table.ACIDTBL + " order by a, b"; String[][] expected = new String[][] { - {"{\"writeid\":2,\"bucketid\":536936448,\"rowid\":0}\t1\t4", - "acidtbl/delta_0000002_0000002_0000/bucket_00001"}, - {"{\"writeid\":2,\"bucketid\":536936448,\"rowid\":1}\t4\t4", - "acidtbl/delta_0000002_0000002_0000/bucket_00001"}, - {"{\"writeid\":3,\"bucketid\":536936448,\"rowid\":0}\t5\t6", + {"{\"writeid\":3,\"bucketid\":536936448,\"rowid\":0}\t1\t4", "acidtbl/delta_0000003_0000003_0000/bucket_00001"}, - {"{\"writeid\":3,\"bucketid\":536936448,\"rowid\":1}\t8\t8", - "acidtbl/delta_0000003_0000003_0000/bucket_00001"}}; + {"{\"writeid\":3,\"bucketid\":536936448,\"rowid\":1}\t4\t4", + "acidtbl/delta_0000003_0000003_0000/bucket_00001"}, + {"{\"writeid\":4,\"bucketid\":536936448,\"rowid\":0}\t5\t6", + "acidtbl/delta_0000004_0000004_0000/bucket_00001"}, + {"{\"writeid\":4,\"bucketid\":536936448,\"rowid\":1}\t8\t8", + "acidtbl/delta_0000004_0000004_0000/bucket_00001"}}; checkResult(expected, testQuery, false, "check data", LOG); /*in UTs, there is no standalone HMS running to kick off compaction so it's done via runWorker() @@ -80,14 +80,14 @@ public void testConcatenate() throws Exception { Assert.assertEquals(1, rsp.getCompactsSize()); Assert.assertEquals(TxnStore.CLEANING_RESPONSE, rsp.getCompacts().get(0).getState()); String[][] expected2 = new String[][] { - {"{\"writeid\":2,\"bucketid\":536936448,\"rowid\":0}\t1\t4", - "acidtbl/base_0000003_v0000019/bucket_00001"}, - {"{\"writeid\":2,\"bucketid\":536936448,\"rowid\":1}\t4\t4", - "acidtbl/base_0000003_v0000019/bucket_00001"}, - {"{\"writeid\":3,\"bucketid\":536936448,\"rowid\":0}\t5\t6", - "acidtbl/base_0000003_v0000019/bucket_00001"}, - {"{\"writeid\":3,\"bucketid\":536936448,\"rowid\":1}\t8\t8", - "acidtbl/base_0000003_v0000019/bucket_00001"}}; + {"{\"writeid\":3,\"bucketid\":536936448,\"rowid\":0}\t1\t4", + "acidtbl/base_0000004_v0000019/bucket_00001"}, + {"{\"writeid\":3,\"bucketid\":536936448,\"rowid\":1}\t4\t4", + "acidtbl/base_0000004_v0000019/bucket_00001"}, + {"{\"writeid\":4,\"bucketid\":536936448,\"rowid\":0}\t5\t6", + "acidtbl/base_0000004_v0000019/bucket_00001"}, + {"{\"writeid\":4,\"bucketid\":536936448,\"rowid\":1}\t8\t8", + "acidtbl/base_0000004_v0000019/bucket_00001"}}; checkResult(expected2, testQuery, false, "check data after concatenate", LOG); } @Test @@ -97,14 +97,14 @@ public void testConcatenatePart() throws Exception { runStatementOnDriver("insert into " + Table.ACIDTBLPART + " values(5,6,'p1'),(8,8,'p2')"); String testQuery = "select ROW__ID, a, b, INPUT__FILE__NAME from " + Table.ACIDTBLPART + " order by a, b"; String[][] expected = new String[][] { - {"{\"writeid\":2,\"bucketid\":536936448,\"rowid\":0}\t1\t4", - "acidtblpart/p=p1/delta_0000002_0000002_0000/bucket_00001"}, - {"{\"writeid\":1,\"bucketid\":536936448,\"rowid\":0}\t4\t5", - "acidtblpart/p=p2/delta_0000001_0000001_0000/bucket_00001"}, - {"{\"writeid\":3,\"bucketid\":536936448,\"rowid\":0}\t5\t6", + {"{\"writeid\":3,\"bucketid\":536936448,\"rowid\":0}\t1\t4", "acidtblpart/p=p1/delta_0000003_0000003_0000/bucket_00001"}, - 
{"{\"writeid\":3,\"bucketid\":536936448,\"rowid\":0}\t8\t8", - "acidtblpart/p=p2/delta_0000003_0000003_0000/bucket_00001"}}; + {"{\"writeid\":2,\"bucketid\":536936448,\"rowid\":0}\t4\t5", + "acidtblpart/p=p2/delta_0000002_0000002_0000/bucket_00001"}, + {"{\"writeid\":4,\"bucketid\":536936448,\"rowid\":0}\t5\t6", + "acidtblpart/p=p1/delta_0000004_0000004_0000/bucket_00001"}, + {"{\"writeid\":4,\"bucketid\":536936448,\"rowid\":0}\t8\t8", + "acidtblpart/p=p2/delta_0000004_0000004_0000/bucket_00001"}}; checkResult(expected, testQuery, false, "check data", LOG); /*in UTs, there is no standalone HMS running to kick off compaction so it's done via runWorker() @@ -121,14 +121,14 @@ public void testConcatenatePart() throws Exception { Assert.assertEquals(1, rsp.getCompactsSize()); Assert.assertEquals(TxnStore.CLEANING_RESPONSE, rsp.getCompacts().get(0).getState()); String[][] expected2 = new String[][] { - {"{\"writeid\":2,\"bucketid\":536936448,\"rowid\":0}\t1\t4", - "acidtblpart/p=p1/base_0000003_v0000019/bucket_00001"}, - {"{\"writeid\":1,\"bucketid\":536936448,\"rowid\":0}\t4\t5", - "acidtblpart/p=p2/delta_0000001_0000001_0000/bucket_00001"}, - {"{\"writeid\":3,\"bucketid\":536936448,\"rowid\":0}\t5\t6", - "acidtblpart/p=p1/base_0000003_v0000019/bucket_00001"}, - {"{\"writeid\":3,\"bucketid\":536936448,\"rowid\":0}\t8\t8", - "acidtblpart/p=p2/delta_0000003_0000003_0000/bucket_00001"}}; + {"{\"writeid\":3,\"bucketid\":536936448,\"rowid\":0}\t1\t4", + "acidtblpart/p=p1/base_0000004_v0000019/bucket_00001"}, + {"{\"writeid\":2,\"bucketid\":536936448,\"rowid\":0}\t4\t5", + "acidtblpart/p=p2/delta_0000002_0000002_0000/bucket_00001"}, + {"{\"writeid\":4,\"bucketid\":536936448,\"rowid\":0}\t5\t6", + "acidtblpart/p=p1/base_0000004_v0000019/bucket_00001"}, + {"{\"writeid\":4,\"bucketid\":536936448,\"rowid\":0}\t8\t8", + "acidtblpart/p=p2/delta_0000004_0000004_0000/bucket_00001"}}; checkResult(expected2, testQuery, false, "check data after concatenate", LOG); } @@ -142,10 +142,10 @@ public void testConcatenateMM() throws Exception { runStatementOnDriver("insert into T values(5,6),(8,8)"); String testQuery = "select a, b, INPUT__FILE__NAME from T order by a, b"; String[][] expected = new String[][] { - {"1\t2", "t/delta_0000001_0000001_0000/000000_0"}, - {"4\t5", "t/delta_0000001_0000001_0000/000000_0"}, - {"5\t6", "t/delta_0000002_0000002_0000/000000_0"}, - {"8\t8", "t/delta_0000002_0000002_0000/000000_0"}}; + {"1\t2", "t/delta_0000002_0000002_0000/000000_0"}, + {"4\t5", "t/delta_0000002_0000002_0000/000000_0"}, + {"5\t6", "t/delta_0000003_0000003_0000/000000_0"}, + {"8\t8", "t/delta_0000003_0000003_0000/000000_0"}}; checkResult(expected, testQuery, false, "check data", LOG); /*in UTs, there is no standalone HMS running to kick off compaction so it's done via runWorker() @@ -162,10 +162,10 @@ public void testConcatenateMM() throws Exception { Assert.assertEquals(1, rsp.getCompactsSize()); Assert.assertEquals(TxnStore.CLEANING_RESPONSE, rsp.getCompacts().get(0).getState()); String[][] expected2 = new String[][] { - {"1\t2", "t/base_0000002_v0000020/000000_0"}, - {"4\t5", "t/base_0000002_v0000020/000000_0"}, - {"5\t6", "t/base_0000002_v0000020/000000_0"}, - {"8\t8", "t/base_0000002_v0000020/000000_0"}}; + {"1\t2", "t/base_0000003_v0000020/000000_0"}, + {"4\t5", "t/base_0000003_v0000020/000000_0"}, + {"5\t6", "t/base_0000003_v0000020/000000_0"}, + {"8\t8", "t/base_0000003_v0000020/000000_0"}}; checkResult(expected2, testQuery, false, "check data after concatenate", LOG); } } diff --git 
a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnExIm.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnExIm.java index fe9f90047e..e5b4272e87 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnExIm.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnExIm.java @@ -327,34 +327,67 @@ private void testImport(boolean isVectorized, boolean existingTarget) throws Exc String testQuery = isVectorized ? "select ROW__ID, a, b from T order by ROW__ID" : "select ROW__ID, a, b, INPUT__FILE__NAME from T order by ROW__ID"; - String[][] expected = new String[][] { - {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t2", - "t/delta_0000001_0000001_0000/000000_0"}, - {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":1}\t3\t4", - "t/delta_0000001_0000001_0000/000000_0"}, - {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":2}\t5\t6", - "t/delta_0000001_0000001_0000/000000_0"}}; + String[][] expected; + if (existingTarget) { + expected = new String[][] { + {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\t1\t2", + "t/delta_0000002_0000002_0000/000000_0"}, + {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":1}\t3\t4", + "t/delta_0000002_0000002_0000/000000_0"}, + {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":2}\t5\t6", + "t/delta_0000002_0000002_0000/000000_0"}}; + } else { + expected = new String[][] { + {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t2", + "t/delta_0000001_0000001_0000/000000_0"}, + {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":1}\t3\t4", + "t/delta_0000001_0000001_0000/000000_0"}, + {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":2}\t5\t6", + "t/delta_0000001_0000001_0000/000000_0"}}; + } checkResult(expected, testQuery, isVectorized, "import existing table"); runStatementOnDriver("update T set a = 0 where b = 6"); - String[][] expected2 = new String[][] { - {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t2", - "t/delta_0000001_0000001_0000/000000_0"}, - {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":1}\t3\t4", - "t/delta_0000001_0000001_0000/000000_0"}, - {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\t0\t6", - "t/delta_0000002_0000002_0000/bucket_00000"}}; + String[][] expected2; + if (existingTarget) { + expected2 = new String[][] { + {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\t1\t2", + "t/delta_0000002_0000002_0000/000000_0"}, + {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":1}\t3\t4", + "t/delta_0000002_0000002_0000/000000_0"}, + {"{\"writeid\":3,\"bucketid\":536870912,\"rowid\":0}\t0\t6", + "t/delta_0000003_0000003_0000/bucket_00000"}}; + } else { + expected2 = new String[][] { + {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t2", + "t/delta_0000001_0000001_0000/000000_0"}, + {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":1}\t3\t4", + "t/delta_0000001_0000001_0000/000000_0"}, + {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\t0\t6", + "t/delta_0000002_0000002_0000/bucket_00000"}}; + } checkResult(expected2, testQuery, isVectorized, "update imported table"); runStatementOnDriver("alter table T compact 'minor'"); TestTxnCommands2.runWorker(hiveConf); - String[][] expected3 = new String[][] { - {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t2", - ".*t/delta_0000001_0000002_v000002[5-6]/bucket_00000"}, - {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":1}\t3\t4", - ".*t/delta_0000001_0000002_v000002[5-6]/bucket_00000"}, - {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\t0\t6", - ".*t/delta_0000001_0000002_v000002[5-6]/bucket_00000"}}; + String[][] expected3; + if (existingTarget) { + 
expected3 = new String[][] { + {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\t1\t2", + ".*t/delta_0000002_0000003_v000002[5-6]/bucket_00000"}, + {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":1}\t3\t4", + ".*t/delta_0000002_0000003_v000002[5-6]/bucket_00000"}, + {"{\"writeid\":3,\"bucketid\":536870912,\"rowid\":0}\t0\t6", + ".*t/delta_0000002_0000003_v000002[5-6]/bucket_00000"}}; + } else { + expected3 = new String[][] { + {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t2", + ".*t/delta_0000001_0000002_v000002[5-6]/bucket_00000"}, + {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":1}\t3\t4", + ".*t/delta_0000001_0000002_v000002[5-6]/bucket_00000"}, + {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\t0\t6", + ".*t/delta_0000001_0000002_v000002[5-6]/bucket_00000"}}; + } checkResult(expected3, testQuery, isVectorized, "minor compact imported table"); } @@ -382,12 +415,12 @@ public void testImportPartitioned() throws Exception { String testQuery = isVectorized ? "select ROW__ID, a, b from T order by ROW__ID" : "select ROW__ID, a, b, INPUT__FILE__NAME from T order by ROW__ID"; String[][] expected = new String[][] { - {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t0\t0", - "t/p=10/delta_0000001_0000001_0000/bucket_00000"}, - {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\t3\t4", - "t/p=11/delta_0000002_0000002_0000/000000_0"}, - {"{\"writeid\":3,\"bucketid\":536870912,\"rowid\":0}\t5\t6", - "t/p=12/delta_0000003_0000003_0000/000000_0"}}; + {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\t0\t0", + "t/p=10/delta_0000002_0000002_0000/bucket_00000"}, + {"{\"writeid\":3,\"bucketid\":536870912,\"rowid\":0}\t3\t4", + "t/p=11/delta_0000003_0000003_0000/000000_0"}, + {"{\"writeid\":4,\"bucketid\":536870912,\"rowid\":0}\t5\t6", + "t/p=12/delta_0000004_0000004_0000/000000_0"}}; checkResult(expected, testQuery, isVectorized, "import existing table"); } @@ -533,7 +566,11 @@ private void testMM(boolean existingTable, boolean isSourceMM) throws Exception rs = runStatementOnDriver("select INPUT__FILE__NAME from T order by INPUT__FILE__NAME"); Assert.assertEquals(3, rs.size()); for (String s : rs) { - Assert.assertTrue(s, s.contains("/delta_0000001_0000001_0000/")); + if (existingTable) { + Assert.assertTrue(s, s.contains("/delta_0000002_0000002_0000/")); + } else { + Assert.assertTrue(s, s.contains("/delta_0000001_0000001_0000/")); + } Assert.assertTrue(s, s.endsWith("/000000_0")); } } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnLoadData.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnLoadData.java index 3231a97009..60868f9cb7 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnLoadData.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnLoadData.java @@ -112,13 +112,13 @@ private void loadDataUpdate(boolean isVectorized) throws Exception { String testQuery = isVectorized ? 
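
// The new existingTarget branches in testImport encode a single fact: when IMPORT
// targets a pre-created table, the table's own DDL has (under this patch) already
// consumed write ID 1, while an import that creates the table starts fresh.
// A sketch of the expected first data write ID under that assumption:
class ImportWriteIdSketch {
  static long firstDataWriteId(boolean existingTarget) {
    return existingTarget ? 2L : 1L; // DDL used ID 1 only if the table pre-existed
  }

  public static void main(String[] args) {
    System.out.println(firstDataWriteId(true));  // 2, matching the existingTarget rows
    System.out.println(firstDataWriteId(false)); // 1, matching the created-on-import rows
  }
}
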
"select ROW__ID, a, b from T order by ROW__ID" : "select ROW__ID, a, b, INPUT__FILE__NAME from T order by ROW__ID"; String[][] expected = new String[][]{ - {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/delta_0000001_0000001_0000/000000_0"}, - {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/delta_0000001_0000001_0000/000000_0"}}; + {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/delta_0000002_0000002_0000/000000_0"}, + {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/delta_0000002_0000002_0000/000000_0"}}; checkResult(expected, testQuery, isVectorized, "load data inpath"); runStatementOnDriver("update T set b = 17 where a = 1"); String[][] expected2 = new String[][]{ - {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/delta_0000001_0000001_0000/000000_0"}, - {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\t1\t17", "t/delta_0000002_0000002_0000/bucket_00000"} + {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/delta_0000002_0000002_0000/000000_0"}, + {"{\"writeid\":3,\"bucketid\":536870912,\"rowid\":0}\t1\t17", "t/delta_0000003_0000003_0000/bucket_00000"} }; checkResult(expected2, testQuery, isVectorized, "update"); @@ -128,15 +128,15 @@ private void loadDataUpdate(boolean isVectorized) throws Exception { runStatementOnDriver("alter table T compact 'minor'"); TestTxnCommands2.runWorker(hiveConf); String[][] expected3 = new String[][] { - {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\t1\t17", "t/delta_0000001_0000004_v0000029/bucket_00000"}, - {"{\"writeid\":3,\"bucketid\":536870912,\"rowid\":0}\t2\t2", "t/delta_0000001_0000004_v0000029/bucket_00000"} + {"{\"writeid\":3,\"bucketid\":536870912,\"rowid\":0}\t1\t17", "t/delta_0000002_0000005_v0000029/bucket_00000"}, + {"{\"writeid\":4,\"bucketid\":536870912,\"rowid\":0}\t2\t2", "t/delta_0000002_0000005_v0000029/bucket_00000"} }; checkResult(expected3, testQuery, isVectorized, "delete compact minor"); runStatementOnDriver("load data local inpath '" + getWarehouseDir() + "/1/data' overwrite into table T"); String[][] expected4 = new String[][]{ - {"{\"writeid\":5,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/base_0000005/000000_0"}, - {"{\"writeid\":5,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/base_0000005/000000_0"}}; + {"{\"writeid\":6,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/base_0000006/000000_0"}, + {"{\"writeid\":6,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/base_0000006/000000_0"}}; checkResult(expected4, testQuery, isVectorized, "load data inpath overwrite"); //load same data again (additive) @@ -145,9 +145,9 @@ private void loadDataUpdate(boolean isVectorized) throws Exception { runStatementOnDriver("delete from T where a = 3");//matches 2 rows runStatementOnDriver("insert into T values(2,2)"); String[][] expected5 = new String[][]{ - {"{\"writeid\":7,\"bucketid\":536870912,\"rowid\":0}\t1\t17", "t/delta_0000007_0000007_0000/bucket_00000"}, - {"{\"writeid\":7,\"bucketid\":536870912,\"rowid\":1}\t1\t17", "t/delta_0000007_0000007_0000/bucket_00000"}, - {"{\"writeid\":9,\"bucketid\":536870912,\"rowid\":0}\t2\t2", "t/delta_0000009_0000009_0000/bucket_00000"} + {"{\"writeid\":8,\"bucketid\":536870912,\"rowid\":0}\t1\t17", "t/delta_0000008_0000008_0000/bucket_00000"}, + {"{\"writeid\":8,\"bucketid\":536870912,\"rowid\":1}\t1\t17", "t/delta_0000008_0000008_0000/bucket_00000"}, + {"{\"writeid\":10,\"bucketid\":536870912,\"rowid\":0}\t2\t2", "t/delta_0000010_0000010_0000/bucket_00000"} }; checkResult(expected5, testQuery, 
isVectorized, "load data inpath overwrite update"); @@ -155,9 +155,9 @@ private void loadDataUpdate(boolean isVectorized) throws Exception { runStatementOnDriver("alter table T compact 'major'"); TestTxnCommands2.runWorker(hiveConf); String[][] expected6 = new String[][]{ - {"{\"writeid\":7,\"bucketid\":536870912,\"rowid\":0}\t1\t17", "t/base_0000009_v0000042/bucket_00000"}, - {"{\"writeid\":7,\"bucketid\":536870912,\"rowid\":1}\t1\t17", "t/base_0000009_v0000042/bucket_00000"}, - {"{\"writeid\":9,\"bucketid\":536870912,\"rowid\":0}\t2\t2", "t/base_0000009_v0000042/bucket_00000"} + {"{\"writeid\":8,\"bucketid\":536870912,\"rowid\":0}\t1\t17", "t/base_0000010_v0000042/bucket_00000"}, + {"{\"writeid\":8,\"bucketid\":536870912,\"rowid\":1}\t1\t17", "t/base_0000010_v0000042/bucket_00000"}, + {"{\"writeid\":10,\"bucketid\":536870912,\"rowid\":0}\t2\t2", "t/base_0000010_v0000042/bucket_00000"} }; checkResult(expected6, testQuery, isVectorized, "load data inpath compact major"); } @@ -181,21 +181,21 @@ private void loadData(boolean isVectorized) throws Exception { "select ROW__ID, a, b, INPUT__FILE__NAME from T order by ROW__ID"; String[][] expected = new String[][] { //normal insert - {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t0\t2", "t/delta_0000001_0000001_0000/bucket_00000"}, - {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":1}\t0\t4", "t/delta_0000001_0000001_0000/bucket_00000"}, + {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\t0\t2", "t/delta_0000002_0000002_0000/bucket_00000"}, + {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":1}\t0\t4", "t/delta_0000002_0000002_0000/bucket_00000"}, //Load Data - {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/delta_0000002_0000002_0000/000000_0"}, - {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/delta_0000002_0000002_0000/000000_0"}}; + {"{\"writeid\":3,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/delta_0000003_0000003_0000/000000_0"}, + {"{\"writeid\":3,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/delta_0000003_0000003_0000/000000_0"}}; checkResult(expected, testQuery, isVectorized, "load data inpath"); //test minor compaction runStatementOnDriver("alter table T compact 'minor'"); TestTxnCommands2.runWorker(hiveConf); String[][] expected1 = new String[][] { - {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t0\t2", "t/delta_0000001_0000002_v0000025/bucket_00000"}, - {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":1}\t0\t4", "t/delta_0000001_0000002_v0000025/bucket_00000"}, - {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/delta_0000001_0000002_v0000025/bucket_00000"}, - {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/delta_0000001_0000002_v0000025/bucket_00000"} + {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\t0\t2", "t/delta_0000002_0000003_v0000025/bucket_00000"}, + {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":1}\t0\t4", "t/delta_0000002_0000003_v0000025/bucket_00000"}, + {"{\"writeid\":3,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/delta_0000002_0000003_v0000025/bucket_00000"}, + {"{\"writeid\":3,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/delta_0000002_0000003_v0000025/bucket_00000"} }; checkResult(expected1, testQuery, isVectorized, "load data inpath (minor)"); @@ -204,11 +204,11 @@ private void loadData(boolean isVectorized) throws Exception { runStatementOnDriver("alter table T compact 'major'"); TestTxnCommands2.runWorker(hiveConf); String[][] expected2 = new String[][] { - {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t0\t2", 
"t/base_0000003_v0000030/bucket_00000"}, - {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":1}\t0\t4", "t/base_0000003_v0000030/bucket_00000"}, - {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/base_0000003_v0000030/bucket_00000"}, - {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/base_0000003_v0000030/bucket_00000"}, - {"{\"writeid\":3,\"bucketid\":536870912,\"rowid\":0}\t2\t2", "t/base_0000003_v0000030/bucket_00000"} + {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\t0\t2", "t/base_0000004_v0000030/bucket_00000"}, + {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":1}\t0\t4", "t/base_0000004_v0000030/bucket_00000"}, + {"{\"writeid\":3,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/base_0000004_v0000030/bucket_00000"}, + {"{\"writeid\":3,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/base_0000004_v0000030/bucket_00000"}, + {"{\"writeid\":4,\"bucketid\":536870912,\"rowid\":0}\t2\t2", "t/base_0000004_v0000030/bucket_00000"} }; checkResult(expected2, testQuery, isVectorized, "load data inpath (major)"); @@ -217,8 +217,8 @@ private void loadData(boolean isVectorized) throws Exception { runStatementOnDriver("export table Tstage to '" + getWarehouseDir() +"/2'"); runStatementOnDriver("load data inpath '" + getWarehouseDir() + "/2/data' overwrite into table T"); String[][] expected3 = new String[][] { - {"{\"writeid\":4,\"bucketid\":536870912,\"rowid\":0}\t5\t6", "t/base_0000004/000000_0"}, - {"{\"writeid\":4,\"bucketid\":536870912,\"rowid\":1}\t7\t8", "t/base_0000004/000000_0"}}; + {"{\"writeid\":5,\"bucketid\":536870912,\"rowid\":0}\t5\t6", "t/base_0000005/000000_0"}, + {"{\"writeid\":5,\"bucketid\":536870912,\"rowid\":1}\t7\t8", "t/base_0000005/000000_0"}}; checkResult(expected3, testQuery, isVectorized, "load data inpath overwrite"); //one more major compaction @@ -226,9 +226,9 @@ private void loadData(boolean isVectorized) throws Exception { runStatementOnDriver("alter table T compact 'major'"); TestTxnCommands2.runWorker(hiveConf); String[][] expected4 = new String[][] { - {"{\"writeid\":4,\"bucketid\":536870912,\"rowid\":0}\t5\t6", "t/base_0000005_v0000040/bucket_00000"}, - {"{\"writeid\":4,\"bucketid\":536870912,\"rowid\":1}\t7\t8", "t/base_0000005_v0000040/bucket_00000"}, - {"{\"writeid\":5,\"bucketid\":536870912,\"rowid\":0}\t6\t6", "t/base_0000005_v0000040/bucket_00000"}}; + {"{\"writeid\":5,\"bucketid\":536870912,\"rowid\":0}\t5\t6", "t/base_0000006_v0000040/bucket_00000"}, + {"{\"writeid\":5,\"bucketid\":536870912,\"rowid\":1}\t7\t8", "t/base_0000006_v0000040/bucket_00000"}, + {"{\"writeid\":6,\"bucketid\":536870912,\"rowid\":0}\t6\t6", "t/base_0000006_v0000040/bucket_00000"}}; checkResult(expected4, testQuery, isVectorized, "load data inpath overwrite (major)"); } /** @@ -344,12 +344,12 @@ public void loadDataPartitioned() throws Exception { List rs = runStatementOnDriver("select ROW__ID, p, a, b, INPUT__FILE__NAME from T order by p, ROW__ID"); String[][] expected = new String[][] { - {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t0\t0\t2", "t/p=0/delta_0000001_0000001_0000/000000_0"}, - {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":1}\t0\t0\t4", "t/p=0/delta_0000001_0000001_0000/000000_0"}, - {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\t1\t1\t2", "t/p=1/delta_0000002_0000002_0000/000000_0"}, - {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":1}\t1\t1\t4", "t/p=1/delta_0000002_0000002_0000/000000_0"}, - {"{\"writeid\":3,\"bucketid\":536870912,\"rowid\":0}\t1\t2\t2", "t/p=1/delta_0000003_0000003_0000/000000_0"}, - 
{"{\"writeid\":3,\"bucketid\":536870912,\"rowid\":1}\t1\t2\t4", "t/p=1/delta_0000003_0000003_0000/000000_0"}}; + {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\t0\t0\t2", "t/p=0/delta_0000002_0000002_0000/000000_0"}, + {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":1}\t0\t0\t4", "t/p=0/delta_0000002_0000002_0000/000000_0"}, + {"{\"writeid\":3,\"bucketid\":536870912,\"rowid\":0}\t1\t1\t2", "t/p=1/delta_0000003_0000003_0000/000000_0"}, + {"{\"writeid\":3,\"bucketid\":536870912,\"rowid\":1}\t1\t1\t4", "t/p=1/delta_0000003_0000003_0000/000000_0"}, + {"{\"writeid\":4,\"bucketid\":536870912,\"rowid\":0}\t1\t2\t2", "t/p=1/delta_0000004_0000004_0000/000000_0"}, + {"{\"writeid\":4,\"bucketid\":536870912,\"rowid\":1}\t1\t2\t4", "t/p=1/delta_0000004_0000004_0000/000000_0"}}; checkExpected(rs, expected, "load data inpath partitioned"); @@ -358,10 +358,10 @@ public void loadDataPartitioned() throws Exception { runStatementOnDriver("truncate table Tstage"); runStatementOnDriver("load data inpath '" + getWarehouseDir() + "/4/data' overwrite into table T partition(p=1)"); String[][] expected2 = new String[][] { - {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t0\t0\t2", "t/p=0/delta_0000001_0000001_0000/000000_0"}, - {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":1}\t0\t0\t4", "t/p=0/delta_0000001_0000001_0000/000000_0"}, - {"{\"writeid\":4,\"bucketid\":536870912,\"rowid\":0}\t1\t5\t2", "t/p=1/base_0000004/000000_0"}, - {"{\"writeid\":4,\"bucketid\":536870912,\"rowid\":1}\t1\t5\t4", "t/p=1/base_0000004/000000_0"}}; + {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\t0\t0\t2", "t/p=0/delta_0000002_0000002_0000/000000_0"}, + {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":1}\t0\t0\t4", "t/p=0/delta_0000002_0000002_0000/000000_0"}, + {"{\"writeid\":5,\"bucketid\":536870912,\"rowid\":0}\t1\t5\t2", "t/p=1/base_0000005/000000_0"}, + {"{\"writeid\":5,\"bucketid\":536870912,\"rowid\":1}\t1\t5\t4", "t/p=1/base_0000005/000000_0"}}; rs = runStatementOnDriver("select ROW__ID, p, a, b, INPUT__FILE__NAME from T order by p, ROW__ID"); checkExpected(rs, expected2, "load data inpath partitioned overwrite"); } @@ -426,20 +426,20 @@ private void testMultiStatement(boolean isVectorized) throws Exception { String testQuery = isVectorized ? 
"select ROW__ID, a, b from T order by ROW__ID" : "select ROW__ID, a, b, INPUT__FILE__NAME from T order by ROW__ID"; String[][] expected = new String[][] { - {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/delta_0000001_0000001_0000/bucket_00000"}, - {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/delta_0000001_0000001_0000/bucket_00000"}, - {"{\"writeid\":1,\"bucketid\":536870913,\"rowid\":0}\t5\t5", "t/delta_0000001_0000001_0001/000000_0"}, - {"{\"writeid\":1,\"bucketid\":536870913,\"rowid\":1}\t6\t6", "t/delta_0000001_0000001_0001/000000_0"} + {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/delta_0000002_0000002_0000/bucket_00000"}, + {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/delta_0000002_0000002_0000/bucket_00000"}, + {"{\"writeid\":2,\"bucketid\":536870913,\"rowid\":0}\t5\t5", "t/delta_0000002_0000002_0001/000000_0"}, + {"{\"writeid\":2,\"bucketid\":536870913,\"rowid\":1}\t6\t6", "t/delta_0000002_0000002_0001/000000_0"} }; checkResult(expected, testQuery, isVectorized, "load data inpath"); runStatementOnDriver("alter table T compact 'major'"); TestTxnCommands2.runWorker(hiveConf); String[][] expected2 = new String[][] { - {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/base_0000001_v0000023/bucket_00000"}, - {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/base_0000001_v0000023/bucket_00000"}, - {"{\"writeid\":1,\"bucketid\":536870913,\"rowid\":0}\t5\t5", "t/base_0000001_v0000023/bucket_00000"}, - {"{\"writeid\":1,\"bucketid\":536870913,\"rowid\":1}\t6\t6", "t/base_0000001_v0000023/bucket_00000"} + {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/base_0000002_v0000023/bucket_00000"}, + {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/base_0000002_v0000023/bucket_00000"}, + {"{\"writeid\":2,\"bucketid\":536870913,\"rowid\":0}\t5\t5", "t/base_0000002_v0000023/bucket_00000"}, + {"{\"writeid\":2,\"bucketid\":536870913,\"rowid\":1}\t6\t6", "t/base_0000002_v0000023/bucket_00000"} }; checkResult(expected2, testQuery, isVectorized, "load data inpath (major)"); //at lest for now, Load Data w/Overwrite is not allowed in a txn: HIVE-18154 @@ -465,8 +465,8 @@ public void testAbort() throws Exception { String testQuery = isVectorized ? 
"select ROW__ID, a, b from T order by ROW__ID" : "select ROW__ID, a, b, INPUT__FILE__NAME from T order by ROW__ID"; String[][] expected = new String[][] { - {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/delta_0000001_0000001_0000/bucket_00000"}, - {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/delta_0000001_0000001_0000/bucket_00000"} + {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/delta_0000002_0000002_0000/bucket_00000"}, + {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/delta_0000002_0000002_0000/bucket_00000"} }; checkResult(expected, testQuery, isVectorized, "load data inpath"); } @@ -487,7 +487,7 @@ public void testLoadAcidFile() throws Exception { List rs = runStatementOnDriver("select INPUT__FILE__NAME from T"); Assert.assertEquals(1, rs.size()); Assert.assertTrue("Unexpcted file name", rs.get(0) - .endsWith("t/delta_0000001_0000001_0000/bucket_00000")); + .endsWith("t/delta_0000002_0000002_0000/bucket_00000")); //T2 is an acid table so this should fail CommandProcessorResponse cpr = runStatementOnDriverNegative( "load data local inpath '" + rs.get(0) + "' into table T2"); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnNoBuckets.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnNoBuckets.java index 0db926acfb..ff76bb7451 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnNoBuckets.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnNoBuckets.java @@ -95,14 +95,14 @@ public void testNoBuckets() throws Exception { /**the insert creates 2 output files (presumably because there are 2 input files) * The number in the file name is writerId. This is the number encoded in ROW__ID.bucketId - * see {@link org.apache.hadoop.hive.ql.io.BucketCodec}*/ - Assert.assertTrue(rs.get(0), rs.get(0).startsWith("{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t0\t0\t0\t")); - Assert.assertTrue(rs.get(0), rs.get(0).endsWith("nobuckets/delta_0000001_0000001_0000/bucket_00000")); - Assert.assertTrue(rs.get(1), rs.get(1).startsWith("{\"writeid\":1,\"bucketid\":536870912,\"rowid\":1}\t3\t3\t3\t")); - Assert.assertTrue(rs.get(1), rs.get(1).endsWith("nobuckets/delta_0000001_0000001_0000/bucket_00000")); - Assert.assertTrue(rs.get(2), rs.get(2).startsWith("{\"writeid\":1,\"bucketid\":536936448,\"rowid\":0}\t1\t1\t1\t")); - Assert.assertTrue(rs.get(2), rs.get(2).endsWith("nobuckets/delta_0000001_0000001_0000/bucket_00001")); - Assert.assertTrue(rs.get(3), rs.get(3).startsWith("{\"writeid\":1,\"bucketid\":536936448,\"rowid\":1}\t2\t2\t2\t")); - Assert.assertTrue(rs.get(3), rs.get(3).endsWith("nobuckets/delta_0000001_0000001_0000/bucket_00001")); + Assert.assertTrue(rs.get(0), rs.get(0).startsWith("{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\t0\t0\t0\t")); + Assert.assertTrue(rs.get(0), rs.get(0).endsWith("nobuckets/delta_0000002_0000002_0000/bucket_00000")); + Assert.assertTrue(rs.get(1), rs.get(1).startsWith("{\"writeid\":2,\"bucketid\":536870912,\"rowid\":1}\t3\t3\t3\t")); + Assert.assertTrue(rs.get(1), rs.get(1).endsWith("nobuckets/delta_0000002_0000002_0000/bucket_00000")); + Assert.assertTrue(rs.get(2), rs.get(2).startsWith("{\"writeid\":2,\"bucketid\":536936448,\"rowid\":0}\t1\t1\t1\t")); + Assert.assertTrue(rs.get(2), rs.get(2).endsWith("nobuckets/delta_0000002_0000002_0000/bucket_00001")); + Assert.assertTrue(rs.get(3), rs.get(3).startsWith("{\"writeid\":2,\"bucketid\":536936448,\"rowid\":1}\t2\t2\t2\t")); + Assert.assertTrue(rs.get(3), 
rs.get(3).endsWith("nobuckets/delta_0000002_0000002_0000/bucket_00001")); hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_EXPLAIN_USER, false); rs = runStatementOnDriver("explain update nobuckets set c3 = 17 where c3 in(0,1)"); @@ -117,25 +117,25 @@ public void testNoBuckets() throws Exception { for(String s : rs) { LOG.warn(s); } - Assert.assertTrue(rs.get(0), rs.get(0).startsWith("{\"writeid\":1,\"bucketid\":536870912,\"rowid\":1}\t3\t3\t3\t")); - Assert.assertTrue(rs.get(0), rs.get(0).endsWith("nobuckets/delta_0000001_0000001_0000/bucket_00000")); - Assert.assertTrue(rs.get(1), rs.get(1).startsWith("{\"writeid\":1,\"bucketid\":536936448,\"rowid\":1}\t2\t2\t2\t")); - Assert.assertTrue(rs.get(1), rs.get(1).endsWith("nobuckets/delta_0000001_0000001_0000/bucket_00001")); + Assert.assertTrue(rs.get(0), rs.get(0).startsWith("{\"writeid\":2,\"bucketid\":536870912,\"rowid\":1}\t3\t3\t3\t")); + Assert.assertTrue(rs.get(0), rs.get(0).endsWith("nobuckets/delta_0000002_0000002_0000/bucket_00000")); + Assert.assertTrue(rs.get(1), rs.get(1).startsWith("{\"writeid\":2,\"bucketid\":536936448,\"rowid\":1}\t2\t2\t2\t")); + Assert.assertTrue(rs.get(1), rs.get(1).endsWith("nobuckets/delta_0000002_0000002_0000/bucket_00001")); //so update has 1 writer, but which creates buckets where the new rows land - Assert.assertTrue(rs.get(2), rs.get(2).startsWith("{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\t0\t0\t17\t")); - Assert.assertTrue(rs.get(2), rs.get(2).endsWith("nobuckets/delta_0000002_0000002_0000/bucket_00000")); + Assert.assertTrue(rs.get(2), rs.get(2).startsWith("{\"writeid\":3,\"bucketid\":536870912,\"rowid\":0}\t0\t0\t17\t")); + Assert.assertTrue(rs.get(2), rs.get(2).endsWith("nobuckets/delta_0000003_0000003_0000/bucket_00000")); // update for "{\"writeid\":1,\"bucketid\":536936448,\"rowid\":0}\t1\t1\t1\t" - Assert.assertTrue(rs.get(3), rs.get(3).startsWith("{\"writeid\":2,\"bucketid\":536936448,\"rowid\":0}\t1\t1\t17\t")); - Assert.assertTrue(rs.get(3), rs.get(3).endsWith("nobuckets/delta_0000002_0000002_0000/bucket_00001")); + Assert.assertTrue(rs.get(3), rs.get(3).startsWith("{\"writeid\":3,\"bucketid\":536936448,\"rowid\":0}\t1\t1\t17\t")); + Assert.assertTrue(rs.get(3), rs.get(3).endsWith("nobuckets/delta_0000003_0000003_0000/bucket_00001")); Set expectedFiles = new HashSet<>(); //both delete events land in corresponding buckets to the original row-ids - expectedFiles.add("ts/delete_delta_0000002_0000002_0000/bucket_00000"); - expectedFiles.add("ts/delete_delta_0000002_0000002_0000/bucket_00001"); - expectedFiles.add("nobuckets/delta_0000001_0000001_0000/bucket_00000"); - expectedFiles.add("nobuckets/delta_0000001_0000001_0000/bucket_00001"); + expectedFiles.add("ts/delete_delta_0000003_0000003_0000/bucket_00000"); + expectedFiles.add("ts/delete_delta_0000003_0000003_0000/bucket_00001"); expectedFiles.add("nobuckets/delta_0000002_0000002_0000/bucket_00000"); expectedFiles.add("nobuckets/delta_0000002_0000002_0000/bucket_00001"); + expectedFiles.add("nobuckets/delta_0000003_0000003_0000/bucket_00000"); + expectedFiles.add("nobuckets/delta_0000003_0000003_0000/bucket_00001"); //check that we get the right files on disk assertExpectedFileSet(expectedFiles, getWarehouseDir() + "/nobuckets"); //todo: it would be nice to check the contents of the files... 
could use orc.FileDump - it has @@ -163,10 +163,10 @@ public void testNoBuckets() throws Exception { */ String expected[][] = { - {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\t0\t0\t17", "nobuckets/base_0000002_v0000025/bucket_00000"}, - {"{\"writeid\":2,\"bucketid\":536936448,\"rowid\":0}\t1\t1\t17", "nobuckets/base_0000002_v0000025/bucket_00001"}, - {"{\"writeid\":1,\"bucketid\":536936448,\"rowid\":1}\t2\t2\t2", "nobuckets/base_0000002_v0000025/bucket_00001"}, - {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":1}\t3\t3\t3", "nobuckets/base_0000002_v0000025/bucket_00000"} + {"{\"writeid\":3,\"bucketid\":536870912,\"rowid\":0}\t0\t0\t17", "nobuckets/base_0000003_v0000025/bucket_00000"}, + {"{\"writeid\":3,\"bucketid\":536936448,\"rowid\":0}\t1\t1\t17", "nobuckets/base_0000003_v0000025/bucket_00001"}, + {"{\"writeid\":2,\"bucketid\":536936448,\"rowid\":1}\t2\t2\t2", "nobuckets/base_0000003_v0000025/bucket_00001"}, + {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":1}\t3\t3\t3", "nobuckets/base_0000003_v0000025/bucket_00000"} }; checkResult(expected, "select ROW__ID, c1, c2, c3" + (shouldVectorize() ? "" : ", INPUT__FILE__NAME") @@ -175,14 +175,14 @@ public void testNoBuckets() throws Exception { "After Major Compaction", LOG); expectedFiles.clear(); - expectedFiles.add("obuckets/delete_delta_0000002_0000002_0000/bucket_00000"); - expectedFiles.add("obuckets/delete_delta_0000002_0000002_0000/bucket_00001"); - expectedFiles.add("house/nobuckets/delta_0000001_0000001_0000/bucket_00000"); - expectedFiles.add("house/nobuckets/delta_0000001_0000001_0000/bucket_00001"); + expectedFiles.add("obuckets/delete_delta_0000003_0000003_0000/bucket_00000"); + expectedFiles.add("obuckets/delete_delta_0000003_0000003_0000/bucket_00001"); expectedFiles.add("house/nobuckets/delta_0000002_0000002_0000/bucket_00000"); expectedFiles.add("house/nobuckets/delta_0000002_0000002_0000/bucket_00001"); - expectedFiles.add("/warehouse/nobuckets/base_0000002_v0000025/bucket_00000"); - expectedFiles.add("/warehouse/nobuckets/base_0000002_v0000025/bucket_00001"); + expectedFiles.add("house/nobuckets/delta_0000003_0000003_0000/bucket_00000"); + expectedFiles.add("house/nobuckets/delta_0000003_0000003_0000/bucket_00001"); + expectedFiles.add("/warehouse/nobuckets/base_0000003_v0000025/bucket_00000"); + expectedFiles.add("/warehouse/nobuckets/base_0000003_v0000025/bucket_00001"); assertExpectedFileSet(expectedFiles, getWarehouseDir() + "/nobuckets"); TestTxnCommands2.runCleaner(hiveConf); @@ -191,8 +191,8 @@ public void testNoBuckets() throws Exception { Assert.assertEquals("Unexpected result after clean", stringifyValues(result), rs); expectedFiles.clear(); - expectedFiles.add("/warehouse/nobuckets/base_0000002_v0000025/bucket_00000"); - expectedFiles.add("/warehouse/nobuckets/base_0000002_v0000025/bucket_00001"); + expectedFiles.add("/warehouse/nobuckets/base_0000003_v0000025/bucket_00000"); + expectedFiles.add("/warehouse/nobuckets/base_0000003_v0000025/bucket_00001"); assertExpectedFileSet(expectedFiles, getWarehouseDir() + "/nobuckets"); } @@ -260,8 +260,8 @@ public void testCTAS() throws Exception { "'='true', 'transactional_properties'='default') as select a, b from " + Table.ACIDTBL); rs = runStatementOnDriver("select ROW__ID, a, b, INPUT__FILE__NAME from myctas2 order by ROW__ID"); String expected2[][] = { - {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "warehouse/myctas2/delta_0000001_0000001_0000/bucket_00000"}, - {"{\"writeid\":1,\"bucketid\":536936448,\"rowid\":0}\t3\t4", 
"warehouse/myctas2/delta_0000001_0000001_0000/bucket_00001"} + {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t3\t4", "warehouse/myctas2/delta_0000001_0000001_0000/bucket_00000"}, + {"{\"writeid\":1,\"bucketid\":536936448,\"rowid\":0}\t1\t2", "warehouse/myctas2/delta_0000001_0000001_0000/bucket_00001"} }; checkExpected(rs, expected2, "Unexpected row count after ctas from acid table"); @@ -333,11 +333,11 @@ public void testInsertToAcidWithUnionRemove() throws Exception { List rs = runStatementOnDriver("select ROW__ID, a, b, INPUT__FILE__NAME from T order by ROW__ID"); String expected[][] = { - {"{\"writeid\":1,\"bucketid\":536870913,\"rowid\":0}\t1\t2", "/delta_0000001_0000001_0001/bucket_00000"}, - {"{\"writeid\":1,\"bucketid\":536870913,\"rowid\":1}\t3\t4", "/delta_0000001_0000001_0001/bucket_00000"}, - {"{\"writeid\":1,\"bucketid\":536870914,\"rowid\":0}\t5\t6", "/delta_0000001_0000001_0002/bucket_00000"}, - {"{\"writeid\":1,\"bucketid\":536870915,\"rowid\":0}\t9\t10", "/delta_0000001_0000001_0003/bucket_00000"}, - {"{\"writeid\":1,\"bucketid\":536936450,\"rowid\":0}\t7\t8", "/delta_0000001_0000001_0002/bucket_00001"}, + {"{\"writeid\":2,\"bucketid\":536870913,\"rowid\":0}\t1\t2", "/delta_0000002_0000002_0001/bucket_00000"}, + {"{\"writeid\":2,\"bucketid\":536870913,\"rowid\":1}\t3\t4", "/delta_0000002_0000002_0001/bucket_00000"}, + {"{\"writeid\":2,\"bucketid\":536870914,\"rowid\":0}\t5\t6", "/delta_0000002_0000002_0002/bucket_00000"}, + {"{\"writeid\":2,\"bucketid\":536870915,\"rowid\":0}\t9\t10", "/delta_0000002_0000002_0003/bucket_00000"}, + {"{\"writeid\":2,\"bucketid\":536936450,\"rowid\":0}\t7\t8", "/delta_0000002_0000002_0002/bucket_00001"}, }; checkExpected(rs, expected, "Unexpected row count after ctas"); } @@ -794,14 +794,14 @@ public void testCompactStatsGather() throws Exception { String query = "select ROW__ID, p, q, a, b, INPUT__FILE__NAME from T order by p, q, a, b"; List rs = runStatementOnDriver(query); String[][] expected = { - {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t1\t4\t1", "t/p=1/q=1/delta_0000001_0000001_0000/bucket_00000"}, - {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":1}\t1\t1\t4\t3", "t/p=1/q=1/delta_0000001_0000001_0000/bucket_00000"}, - {"{\"writeid\":3,\"bucketid\":536870912,\"rowid\":0}\t1\t1\t5\t1", "t/p=1/q=1/delta_0000003_0000003_0000/bucket_00000"}, - {"{\"writeid\":3,\"bucketid\":536870912,\"rowid\":1}\t1\t1\t5\t3", "t/p=1/q=1/delta_0000003_0000003_0000/bucket_00000"}, - {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t2\t4\t2", "t/p=1/q=2/delta_0000001_0000001_0000/bucket_00000"}, - {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":1}\t1\t2\t4\t4", "t/p=1/q=2/delta_0000001_0000001_0000/bucket_00000"}, - {"{\"writeid\":3,\"bucketid\":536870912,\"rowid\":0}\t1\t2\t5\t2", "t/p=1/q=2/delta_0000003_0000003_0000/bucket_00000"}, - {"{\"writeid\":3,\"bucketid\":536870912,\"rowid\":1}\t1\t2\t5\t4", "t/p=1/q=2/delta_0000003_0000003_0000/bucket_00000"} + {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\t1\t1\t4\t1", "t/p=1/q=1/delta_0000002_0000002_0000/bucket_00000"}, + {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":1}\t1\t1\t4\t3", "t/p=1/q=1/delta_0000002_0000002_0000/bucket_00000"}, + {"{\"writeid\":4,\"bucketid\":536870912,\"rowid\":0}\t1\t1\t5\t1", "t/p=1/q=1/delta_0000004_0000004_0000/bucket_00000"}, + {"{\"writeid\":4,\"bucketid\":536870912,\"rowid\":1}\t1\t1\t5\t3", "t/p=1/q=1/delta_0000004_0000004_0000/bucket_00000"}, + {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\t1\t2\t4\t2", 
"t/p=1/q=2/delta_0000002_0000002_0000/bucket_00000"}, + {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":1}\t1\t2\t4\t4", "t/p=1/q=2/delta_0000002_0000002_0000/bucket_00000"}, + {"{\"writeid\":4,\"bucketid\":536870912,\"rowid\":0}\t1\t2\t5\t2", "t/p=1/q=2/delta_0000004_0000004_0000/bucket_00000"}, + {"{\"writeid\":4,\"bucketid\":536870912,\"rowid\":1}\t1\t2\t5\t4", "t/p=1/q=2/delta_0000004_0000004_0000/bucket_00000"} }; checkExpected(rs, expected, "insert data"); @@ -812,14 +812,14 @@ public void testCompactStatsGather() throws Exception { query = "select ROW__ID, p, q, a, b, INPUT__FILE__NAME from T order by p, q, a, b"; rs = runStatementOnDriver(query); String[][] expected2 = { - {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t1\t4\t1", "t/p=1/q=1/delta_0000001_0000001_0000/bucket_00000"}, - {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":1}\t1\t1\t4\t3", "t/p=1/q=1/delta_0000001_0000001_0000/bucket_00000"}, - {"{\"writeid\":3,\"bucketid\":536870912,\"rowid\":0}\t1\t1\t5\t1", "t/p=1/q=1/delta_0000003_0000003_0000/bucket_00000"}, - {"{\"writeid\":3,\"bucketid\":536870912,\"rowid\":1}\t1\t1\t5\t3", "t/p=1/q=1/delta_0000003_0000003_0000/bucket_00000"}, - {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t2\t4\t2", "t/p=1/q=2/base_0000003_v0000020/bucket_00000"}, - {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":1}\t1\t2\t4\t4", "t/p=1/q=2/base_0000003_v0000020/bucket_00000"}, - {"{\"writeid\":3,\"bucketid\":536870912,\"rowid\":0}\t1\t2\t5\t2", "t/p=1/q=2/base_0000003_v0000020/bucket_00000"}, - {"{\"writeid\":3,\"bucketid\":536870912,\"rowid\":1}\t1\t2\t5\t4", "t/p=1/q=2/base_0000003_v0000020/bucket_00000"} + {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\t1\t1\t4\t1", "t/p=1/q=1/delta_0000002_0000002_0000/bucket_00000"}, + {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":1}\t1\t1\t4\t3", "t/p=1/q=1/delta_0000002_0000002_0000/bucket_00000"}, + {"{\"writeid\":4,\"bucketid\":536870912,\"rowid\":0}\t1\t1\t5\t1", "t/p=1/q=1/delta_0000004_0000004_0000/bucket_00000"}, + {"{\"writeid\":4,\"bucketid\":536870912,\"rowid\":1}\t1\t1\t5\t3", "t/p=1/q=1/delta_0000004_0000004_0000/bucket_00000"}, + {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\t1\t2\t4\t2", "t/p=1/q=2/base_0000004_v0000020/bucket_00000"}, + {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":1}\t1\t2\t4\t4", "t/p=1/q=2/base_0000004_v0000020/bucket_00000"}, + {"{\"writeid\":4,\"bucketid\":536870912,\"rowid\":0}\t1\t2\t5\t2", "t/p=1/q=2/base_0000004_v0000020/bucket_00000"}, + {"{\"writeid\":4,\"bucketid\":536870912,\"rowid\":1}\t1\t2\t5\t4", "t/p=1/q=2/base_0000004_v0000020/bucket_00000"} }; checkExpected(rs, expected2, "after major compaction"); @@ -844,8 +844,8 @@ public void testDefault() throws Exception { List rs = runStatementOnDriver(query); String[][] expected = { //this proves data is written in Acid layout so T was made Acid - {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/delta_0000001_0000001_0000/bucket_00000"}, - {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/delta_0000001_0000001_0000/bucket_00000"} + {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/delta_0000002_0000002_0000/bucket_00000"}, + {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/delta_0000002_0000002_0000/bucket_00000"} }; checkExpected(rs, expected, "insert data"); } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java index 43a3047f87..7646e1c1f2 100644 --- 
a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java @@ -2432,7 +2432,7 @@ public void testValidWriteIdListSnapshot() throws Exception { // Open a base txn which allocates write ID and then committed. long baseTxnId = txnMgr.openTxn(ctx, "u0"); long baseWriteId = txnMgr.getTableWriteId("temp", "T7"); - Assert.assertEquals(1, baseWriteId); + Assert.assertEquals(2, baseWriteId); txnMgr.commitTxn(); // committed baseTxnId // Open a txn with no writes. @@ -2457,11 +2457,11 @@ public void testValidWriteIdListSnapshot() throws Exception { long aboveHwmOpenTxnId = txnMgr3.openTxn(ctx, "u3"); Assert.assertTrue("Invalid txn ID", aboveHwmOpenTxnId > testTxnId); long aboveHwmOpenWriteId = txnMgr3.getTableWriteId("temp", "T7"); - Assert.assertEquals(2, aboveHwmOpenWriteId); + Assert.assertEquals(3, aboveHwmOpenWriteId); // Allocate writeId to txn under HWM. This will get Id greater than a txn > HWM. long underHwmOpenWriteId = txnMgr1.getTableWriteId("temp", "T7"); - Assert.assertEquals(3, underHwmOpenWriteId); + Assert.assertEquals(4, underHwmOpenWriteId); // Verify the ValidWriteIdList with one open txn on this table. Write ID of open txn should be invalid. testValidWriteIds = txnMgr2.getValidWriteIds(Collections.singletonList("temp.t7"), testValidTxns) @@ -2485,7 +2485,7 @@ public void testValidWriteIdListSnapshot() throws Exception { // Write Ids of committed and self test txn should be valid but writeId of open txn should be invalid. // WriteId of recently committed txn which was open when get ValidTxnList snapshot should be invalid as well. long testWriteId = txnMgr2.getTableWriteId("temp", "T7"); - Assert.assertEquals(4, testWriteId); + Assert.assertEquals(5, testWriteId); testValidWriteIds = txnMgr2.getValidWriteIds(Collections.singletonList("temp.t7"), testValidTxns) .getTableValidWriteIdList("temp.t7"); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java index 5626dbefbf..6cdf666d29 100755 --- a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java @@ -189,6 +189,7 @@ public void testTable() throws Throwable { tbl.setSerdeParam(serdeConstants.FIELD_DELIM, "1"); tbl.setSerializationLib(LazySimpleSerDe.class.getName()); tbl.setStoredAsSubDirectories(false); + tbl.setTemporary(false); tbl.setRewriteEnabled(false); @@ -251,6 +252,7 @@ public void testThriftTable() throws Throwable { tbl.setSerdeParam(serdeConstants.SERIALIZATION_FORMAT, TBinaryProtocol.class .getName()); tbl.setStoredAsSubDirectories(false); + tbl.setTemporary(false); tbl.setRewriteEnabled(false); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDTFGetSQLSchema.java b/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDTFGetSQLSchema.java index 3615d2b778..bce6f6c048 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDTFGetSQLSchema.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDTFGetSQLSchema.java @@ -45,6 +45,7 @@ public static void setUpBeforeClass() throws Exception { conf.set("hive.security.authorization.manager", "org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider"); sessionState = SessionState.start(conf); + sessionState.initTxnMgr(conf); } @AfterClass diff --git a/ql/src/test/queries/clientpositive/compute_stats_date.q 
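
// testValidWriteIdListSnapshot's updated assertions still exercise the same
// visibility rule: a reader's snapshot treats a write ID as valid only if it is
// at or below the snapshot's high-water mark and was not open (or aborted) when
// the snapshot was taken. A compact standalone sketch of that rule (hypothetical
// class, not Hive's ValidWriteIdList):
import java.util.Set;

class WriteIdVisibilitySketch {
  final long highWatermark;
  final Set<Long> openOrAborted;

  WriteIdVisibilitySketch(long highWatermark, Set<Long> openOrAborted) {
    this.highWatermark = highWatermark;
    this.openOrAborted = openOrAborted;
  }

  boolean isValid(long writeId) {
    return writeId <= highWatermark && !openOrAborted.contains(writeId);
  }

  public static void main(String[] args) {
    WriteIdVisibilitySketch snapshot = new WriteIdVisibilitySketch(3L, Set.of(3L));
    System.out.println(snapshot.isValid(2L)); // true: committed below the HWM
    System.out.println(snapshot.isValid(3L)); // false: still open when the snapshot was cut
    System.out.println(snapshot.isValid(4L)); // false: allocated above the HWM
  }
}
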
b/ql/src/test/queries/clientpositive/compute_stats_date.q index bf478526ba..2faabdc33b 100644 --- a/ql/src/test/queries/clientpositive/compute_stats_date.q +++ b/ql/src/test/queries/clientpositive/compute_stats_date.q @@ -23,6 +23,6 @@ analyze table tab_date compute statistics for columns fl_date; describe formatted tab_date fl_date; -- Update stats manually. Try both yyyy-mm-dd and integer value for high/low value -alter table tab_date update statistics for column fl_date set ('numDVs'='19', 'highValue'='2015-01-01', 'lowValue'='0'); +alter table tab_date update statistics for column fl_date set ('numDVs'='19', 'highValue'='2015-01-01', 'lowValue'='0', 'numNulls'='0'); describe formatted tab_date fl_date; diff --git a/ql/src/test/queries/clientpositive/lock4.q b/ql/src/test/queries/clientpositive/lock4.q index 256ca9deb4..1c40c2a798 100644 --- a/ql/src/test/queries/clientpositive/lock4.q +++ b/ql/src/test/queries/clientpositive/lock4.q @@ -1,5 +1,4 @@ --! qt:dataset:srcpart -set hive.lock.mapred.only.operation=true; drop table tstsrcpart_n3; create table tstsrcpart_n3 like srcpart; diff --git a/ql/src/test/queries/clientpositive/perf/query1.q b/ql/src/test/queries/clientpositive/perf/query1.q index a8d70727f1..5f69772b7a 100644 --- a/ql/src/test/queries/clientpositive/perf/query1.q +++ b/ql/src/test/queries/clientpositive/perf/query1.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query1.tpl and seed 2031708268 explain with customer_total_return as diff --git a/ql/src/test/queries/clientpositive/perf/query10.q b/ql/src/test/queries/clientpositive/perf/query10.q index d3b1be7a75..72f7cc35ac 100644 --- a/ql/src/test/queries/clientpositive/perf/query10.q +++ b/ql/src/test/queries/clientpositive/perf/query10.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query10.tpl and seed 797269820 explain select diff --git a/ql/src/test/queries/clientpositive/perf/query11.q b/ql/src/test/queries/clientpositive/perf/query11.q index 6017c89790..0062dcf873 100644 --- a/ql/src/test/queries/clientpositive/perf/query11.q +++ b/ql/src/test/queries/clientpositive/perf/query11.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query11.tpl and seed 1819994127 explain with year_total as ( diff --git a/ql/src/test/queries/clientpositive/perf/query12.q b/ql/src/test/queries/clientpositive/perf/query12.q index 59b50acb46..14225be2fa 100644 --- a/ql/src/test/queries/clientpositive/perf/query12.q +++ b/ql/src/test/queries/clientpositive/perf/query12.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query12.tpl and seed 345591136 explain select i_item_desc diff --git a/ql/src/test/queries/clientpositive/perf/query13.q b/ql/src/test/queries/clientpositive/perf/query13.q index dca19b0161..0b49fb4673 100644 --- a/ql/src/test/queries/clientpositive/perf/query13.q +++ b/ql/src/test/queries/clientpositive/perf/query13.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query13.tpl and seed 622697896 explain select avg(ss_quantity) diff --git a/ql/src/test/queries/clientpositive/perf/query14.q b/ql/src/test/queries/clientpositive/perf/query14.q index c12ecb56c4..9ba2a77d40 100644 --- 
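
// The compute_stats_date.q change above adds 'numNulls' to the manual stats
// update; presumably the column-stats path now expects a complete stat set, so
// the statement must supply it explicitly. The full statement as these q-file
// tests would issue it (string only; no new API implied):
class UpdateColumnStatsSketch {
  public static void main(String[] args) {
    String sql = "alter table tab_date update statistics for column fl_date set ("
        + "'numDVs'='19', 'highValue'='2015-01-01', 'lowValue'='0', 'numNulls'='0')";
    System.out.println(sql);
  }
}
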
a/ql/src/test/queries/clientpositive/perf/query14.q +++ b/ql/src/test/queries/clientpositive/perf/query14.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query14.tpl and seed 1819994127 explain with cross_items as diff --git a/ql/src/test/queries/clientpositive/perf/query15.q b/ql/src/test/queries/clientpositive/perf/query15.q index 9e1711a1d2..385c91c8fc 100644 --- a/ql/src/test/queries/clientpositive/perf/query15.q +++ b/ql/src/test/queries/clientpositive/perf/query15.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query15.tpl and seed 1819994127 explain select ca_zip diff --git a/ql/src/test/queries/clientpositive/perf/query16.q b/ql/src/test/queries/clientpositive/perf/query16.q index 05625f71aa..cae35456bb 100644 --- a/ql/src/test/queries/clientpositive/perf/query16.q +++ b/ql/src/test/queries/clientpositive/perf/query16.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query16.tpl and seed 171719422 explain select diff --git a/ql/src/test/queries/clientpositive/perf/query17.q b/ql/src/test/queries/clientpositive/perf/query17.q index 0cd4201f51..ed1c37c331 100644 --- a/ql/src/test/queries/clientpositive/perf/query17.q +++ b/ql/src/test/queries/clientpositive/perf/query17.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query17.tpl and seed 1819994127 explain select i_item_id diff --git a/ql/src/test/queries/clientpositive/perf/query18.q b/ql/src/test/queries/clientpositive/perf/query18.q index bf1ff5983b..c8960dc7dc 100644 --- a/ql/src/test/queries/clientpositive/perf/query18.q +++ b/ql/src/test/queries/clientpositive/perf/query18.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query18.tpl and seed 1978355063 explain select i_item_id, diff --git a/ql/src/test/queries/clientpositive/perf/query19.q b/ql/src/test/queries/clientpositive/perf/query19.q index 5768e4b04e..351e60a18b 100644 --- a/ql/src/test/queries/clientpositive/perf/query19.q +++ b/ql/src/test/queries/clientpositive/perf/query19.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query19.tpl and seed 1930872976 explain select i_brand_id brand_id, i_brand brand, i_manufact_id, i_manufact, diff --git a/ql/src/test/queries/clientpositive/perf/query2.q b/ql/src/test/queries/clientpositive/perf/query2.q index 26a52ef264..c64ce1e1af 100644 --- a/ql/src/test/queries/clientpositive/perf/query2.q +++ b/ql/src/test/queries/clientpositive/perf/query2.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query2.tpl and seed 1819994127 explain with wscs as diff --git a/ql/src/test/queries/clientpositive/perf/query20.q b/ql/src/test/queries/clientpositive/perf/query20.q index c5f8848fb3..6d116a6859 100644 --- a/ql/src/test/queries/clientpositive/perf/query20.q +++ b/ql/src/test/queries/clientpositive/perf/query20.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query20.tpl and seed 345591136 explain select i_item_desc diff --git 
a/ql/src/test/queries/clientpositive/perf/query21.q b/ql/src/test/queries/clientpositive/perf/query21.q index 34b458b709..e328ee90e9 100644 --- a/ql/src/test/queries/clientpositive/perf/query21.q +++ b/ql/src/test/queries/clientpositive/perf/query21.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query21.tpl and seed 1819994127 explain select * diff --git a/ql/src/test/queries/clientpositive/perf/query22.q b/ql/src/test/queries/clientpositive/perf/query22.q index 70491731f8..29faf7df77 100644 --- a/ql/src/test/queries/clientpositive/perf/query22.q +++ b/ql/src/test/queries/clientpositive/perf/query22.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query22.tpl and seed 1819994127 explain select i_product_name diff --git a/ql/src/test/queries/clientpositive/perf/query23.q b/ql/src/test/queries/clientpositive/perf/query23.q index 1e02655927..ae3e5dfbc3 100644 --- a/ql/src/test/queries/clientpositive/perf/query23.q +++ b/ql/src/test/queries/clientpositive/perf/query23.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query23.tpl and seed 2031708268 explain with frequent_ss_items as diff --git a/ql/src/test/queries/clientpositive/perf/query24.q b/ql/src/test/queries/clientpositive/perf/query24.q index b3cdaef4a5..e9d00828cd 100644 --- a/ql/src/test/queries/clientpositive/perf/query24.q +++ b/ql/src/test/queries/clientpositive/perf/query24.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query24.tpl and seed 1220860970 explain with ssales as diff --git a/ql/src/test/queries/clientpositive/perf/query25.q b/ql/src/test/queries/clientpositive/perf/query25.q index 358cdc58b4..9d1369f617 100644 --- a/ql/src/test/queries/clientpositive/perf/query25.q +++ b/ql/src/test/queries/clientpositive/perf/query25.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query25.tpl and seed 1819994127 explain select diff --git a/ql/src/test/queries/clientpositive/perf/query26.q b/ql/src/test/queries/clientpositive/perf/query26.q index b35d98ccbc..e56416d196 100644 --- a/ql/src/test/queries/clientpositive/perf/query26.q +++ b/ql/src/test/queries/clientpositive/perf/query26.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query26.tpl and seed 1930872976 explain select i_item_id, diff --git a/ql/src/test/queries/clientpositive/perf/query27.q b/ql/src/test/queries/clientpositive/perf/query27.q index ec09e1d3af..b74cc8d092 100644 --- a/ql/src/test/queries/clientpositive/perf/query27.q +++ b/ql/src/test/queries/clientpositive/perf/query27.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query27.tpl and seed 2017787633 explain select i_item_id, diff --git a/ql/src/test/queries/clientpositive/perf/query28.q b/ql/src/test/queries/clientpositive/perf/query28.q index fc3c1b2d40..83caa9037a 100644 --- a/ql/src/test/queries/clientpositive/perf/query28.q +++ b/ql/src/test/queries/clientpositive/perf/query28.q @@ -1,5 +1,6 @@ set hive.mapred.mode=nonstrict; set hive.optimize.metadataonly=true; +set 
hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query28.tpl and seed 444293455 explain diff --git a/ql/src/test/queries/clientpositive/perf/query29.q b/ql/src/test/queries/clientpositive/perf/query29.q index 8bf4d512ed..4d193d0759 100644 --- a/ql/src/test/queries/clientpositive/perf/query29.q +++ b/ql/src/test/queries/clientpositive/perf/query29.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query29.tpl and seed 2031708268 explain select diff --git a/ql/src/test/queries/clientpositive/perf/query3.q b/ql/src/test/queries/clientpositive/perf/query3.q index a70a62fd88..156f0bf1c5 100644 --- a/ql/src/test/queries/clientpositive/perf/query3.q +++ b/ql/src/test/queries/clientpositive/perf/query3.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query3.tpl and seed 2031708268 explain select dt.d_year diff --git a/ql/src/test/queries/clientpositive/perf/query30.q b/ql/src/test/queries/clientpositive/perf/query30.q index 47f0d935ea..af0a1c9b5a 100644 --- a/ql/src/test/queries/clientpositive/perf/query30.q +++ b/ql/src/test/queries/clientpositive/perf/query30.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query30.tpl and seed 1819994127 explain with customer_total_return as diff --git a/ql/src/test/queries/clientpositive/perf/query31.q b/ql/src/test/queries/clientpositive/perf/query31.q index 42c3ca6e27..a62b6c42c9 100644 --- a/ql/src/test/queries/clientpositive/perf/query31.q +++ b/ql/src/test/queries/clientpositive/perf/query31.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query31.tpl and seed 1819994127 explain with ss as diff --git a/ql/src/test/queries/clientpositive/perf/query32.q b/ql/src/test/queries/clientpositive/perf/query32.q index ed43b4d628..e675c859ab 100644 --- a/ql/src/test/queries/clientpositive/perf/query32.q +++ b/ql/src/test/queries/clientpositive/perf/query32.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query32.tpl and seed 2031708268 explain select sum(cs_ext_discount_amt) as `excess discount amount` diff --git a/ql/src/test/queries/clientpositive/perf/query33.q b/ql/src/test/queries/clientpositive/perf/query33.q index 1dfa9bee8f..11d68987d8 100644 --- a/ql/src/test/queries/clientpositive/perf/query33.q +++ b/ql/src/test/queries/clientpositive/perf/query33.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query33.tpl and seed 1930872976 explain with ss as ( diff --git a/ql/src/test/queries/clientpositive/perf/query34.q b/ql/src/test/queries/clientpositive/perf/query34.q index 427eed6e4d..473eddbb92 100644 --- a/ql/src/test/queries/clientpositive/perf/query34.q +++ b/ql/src/test/queries/clientpositive/perf/query34.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query34.tpl and seed 1971067816 explain select c_last_name diff --git a/ql/src/test/queries/clientpositive/perf/query35.q b/ql/src/test/queries/clientpositive/perf/query35.q index 19951ac9c1..7d5e2efe2b 100644 --- a/ql/src/test/queries/clientpositive/perf/query35.q 
+++ b/ql/src/test/queries/clientpositive/perf/query35.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query35.tpl and seed 1930872976 explain select diff --git a/ql/src/test/queries/clientpositive/perf/query36.q b/ql/src/test/queries/clientpositive/perf/query36.q index 789f9324f6..3f1161a4da 100644 --- a/ql/src/test/queries/clientpositive/perf/query36.q +++ b/ql/src/test/queries/clientpositive/perf/query36.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query36.tpl and seed 1544728811 explain select diff --git a/ql/src/test/queries/clientpositive/perf/query37.q b/ql/src/test/queries/clientpositive/perf/query37.q index 811eab0489..db39f7dfc1 100644 --- a/ql/src/test/queries/clientpositive/perf/query37.q +++ b/ql/src/test/queries/clientpositive/perf/query37.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query37.tpl and seed 301843662 explain select i_item_id diff --git a/ql/src/test/queries/clientpositive/perf/query38.q b/ql/src/test/queries/clientpositive/perf/query38.q index 8eade8a363..e6250e9571 100644 --- a/ql/src/test/queries/clientpositive/perf/query38.q +++ b/ql/src/test/queries/clientpositive/perf/query38.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query38.tpl and seed 1819994127 explain select count(*) from ( diff --git a/ql/src/test/queries/clientpositive/perf/query39.q b/ql/src/test/queries/clientpositive/perf/query39.q index d3c806d2d3..e64f693921 100644 --- a/ql/src/test/queries/clientpositive/perf/query39.q +++ b/ql/src/test/queries/clientpositive/perf/query39.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query39.tpl and seed 1327317894 explain with inv as diff --git a/ql/src/test/queries/clientpositive/perf/query4.q b/ql/src/test/queries/clientpositive/perf/query4.q index 631a464028..87845b42c1 100644 --- a/ql/src/test/queries/clientpositive/perf/query4.q +++ b/ql/src/test/queries/clientpositive/perf/query4.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query4.tpl and seed 1819994127 explain with year_total as ( diff --git a/ql/src/test/queries/clientpositive/perf/query40.q b/ql/src/test/queries/clientpositive/perf/query40.q index 61f5ad3c91..da46f4b380 100644 --- a/ql/src/test/queries/clientpositive/perf/query40.q +++ b/ql/src/test/queries/clientpositive/perf/query40.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query40.tpl and seed 1819994127 explain select diff --git a/ql/src/test/queries/clientpositive/perf/query42.q b/ql/src/test/queries/clientpositive/perf/query42.q index 6b8abe090e..4e075f47d3 100644 --- a/ql/src/test/queries/clientpositive/perf/query42.q +++ b/ql/src/test/queries/clientpositive/perf/query42.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query42.tpl and seed 1819994127 explain select dt.d_year diff --git a/ql/src/test/queries/clientpositive/perf/query43.q b/ql/src/test/queries/clientpositive/perf/query43.q index ebdc69d933..9f6cd270f5 
100644 --- a/ql/src/test/queries/clientpositive/perf/query43.q +++ b/ql/src/test/queries/clientpositive/perf/query43.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query43.tpl and seed 1819994127 explain select s_store_name, s_store_id, diff --git a/ql/src/test/queries/clientpositive/perf/query44.q b/ql/src/test/queries/clientpositive/perf/query44.q index 712bbfb32d..cd074dadcd 100644 --- a/ql/src/test/queries/clientpositive/perf/query44.q +++ b/ql/src/test/queries/clientpositive/perf/query44.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query44.tpl and seed 1819994127 explain select asceding.rnk, i1.i_product_name best_performing, i2.i_product_name worst_performing diff --git a/ql/src/test/queries/clientpositive/perf/query45.q b/ql/src/test/queries/clientpositive/perf/query45.q index 4db3fb2248..c4f8bf4d9b 100644 --- a/ql/src/test/queries/clientpositive/perf/query45.q +++ b/ql/src/test/queries/clientpositive/perf/query45.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query45.tpl and seed 2031708268 explain select ca_zip, ca_county, sum(ws_sales_price) diff --git a/ql/src/test/queries/clientpositive/perf/query46.q b/ql/src/test/queries/clientpositive/perf/query46.q index 46f8be34bd..62adbd03de 100644 --- a/ql/src/test/queries/clientpositive/perf/query46.q +++ b/ql/src/test/queries/clientpositive/perf/query46.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query46.tpl and seed 803547492 explain select c_last_name diff --git a/ql/src/test/queries/clientpositive/perf/query47.q b/ql/src/test/queries/clientpositive/perf/query47.q index 5c26ba5a22..c0b999f601 100644 --- a/ql/src/test/queries/clientpositive/perf/query47.q +++ b/ql/src/test/queries/clientpositive/perf/query47.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query47.tpl and seed 2031708268 explain with v1 as( diff --git a/ql/src/test/queries/clientpositive/perf/query48.q b/ql/src/test/queries/clientpositive/perf/query48.q index cfff1d7857..bab2935ca6 100644 --- a/ql/src/test/queries/clientpositive/perf/query48.q +++ b/ql/src/test/queries/clientpositive/perf/query48.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query48.tpl and seed 622697896 explain select sum (ss_quantity) diff --git a/ql/src/test/queries/clientpositive/perf/query49.q b/ql/src/test/queries/clientpositive/perf/query49.q index 6c62e1f13c..e53e3de712 100644 --- a/ql/src/test/queries/clientpositive/perf/query49.q +++ b/ql/src/test/queries/clientpositive/perf/query49.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query49.tpl and seed 1819994127 explain select diff --git a/ql/src/test/queries/clientpositive/perf/query5.q b/ql/src/test/queries/clientpositive/perf/query5.q index bf61fb2ed4..216210a545 100644 --- a/ql/src/test/queries/clientpositive/perf/query5.q +++ b/ql/src/test/queries/clientpositive/perf/query5.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template 
query5.tpl and seed 1819994127 explain with ssr as diff --git a/ql/src/test/queries/clientpositive/perf/query50.q b/ql/src/test/queries/clientpositive/perf/query50.q index 0e2caf6b86..644aa66c56 100644 --- a/ql/src/test/queries/clientpositive/perf/query50.q +++ b/ql/src/test/queries/clientpositive/perf/query50.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query50.tpl and seed 1819994127 explain select diff --git a/ql/src/test/queries/clientpositive/perf/query51.q b/ql/src/test/queries/clientpositive/perf/query51.q index 9f90525ef7..52a8ae97a5 100644 --- a/ql/src/test/queries/clientpositive/perf/query51.q +++ b/ql/src/test/queries/clientpositive/perf/query51.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query51.tpl and seed 1819994127 explain WITH web_v1 as ( diff --git a/ql/src/test/queries/clientpositive/perf/query52.q b/ql/src/test/queries/clientpositive/perf/query52.q index 1fee84674a..9b7f9dacaa 100644 --- a/ql/src/test/queries/clientpositive/perf/query52.q +++ b/ql/src/test/queries/clientpositive/perf/query52.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query52.tpl and seed 1819994127 explain select dt.d_year diff --git a/ql/src/test/queries/clientpositive/perf/query53.q b/ql/src/test/queries/clientpositive/perf/query53.q index 0b81574c13..1491d5bfdb 100644 --- a/ql/src/test/queries/clientpositive/perf/query53.q +++ b/ql/src/test/queries/clientpositive/perf/query53.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query53.tpl and seed 1819994127 explain select * from diff --git a/ql/src/test/queries/clientpositive/perf/query54.q b/ql/src/test/queries/clientpositive/perf/query54.q index 424f3855d2..54db5c64c9 100644 --- a/ql/src/test/queries/clientpositive/perf/query54.q +++ b/ql/src/test/queries/clientpositive/perf/query54.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query54.tpl and seed 1930872976 explain with my_customers as ( diff --git a/ql/src/test/queries/clientpositive/perf/query55.q b/ql/src/test/queries/clientpositive/perf/query55.q index f953f117af..0f0325d39b 100644 --- a/ql/src/test/queries/clientpositive/perf/query55.q +++ b/ql/src/test/queries/clientpositive/perf/query55.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query55.tpl and seed 2031708268 explain select i_brand_id brand_id, i_brand brand, diff --git a/ql/src/test/queries/clientpositive/perf/query56.q b/ql/src/test/queries/clientpositive/perf/query56.q index f3c83236ac..0027d1d12c 100644 --- a/ql/src/test/queries/clientpositive/perf/query56.q +++ b/ql/src/test/queries/clientpositive/perf/query56.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query56.tpl and seed 1951559352 explain with ss as ( diff --git a/ql/src/test/queries/clientpositive/perf/query57.q b/ql/src/test/queries/clientpositive/perf/query57.q index 4dc6e63257..e352648371 100644 --- a/ql/src/test/queries/clientpositive/perf/query57.q +++ b/ql/src/test/queries/clientpositive/perf/query57.q @@ -1,4 +1,5 @@ set 
hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query57.tpl and seed 2031708268 explain with v1 as( diff --git a/ql/src/test/queries/clientpositive/perf/query58.q b/ql/src/test/queries/clientpositive/perf/query58.q index 8d918ef4cb..a5ca9a0738 100644 --- a/ql/src/test/queries/clientpositive/perf/query58.q +++ b/ql/src/test/queries/clientpositive/perf/query58.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query58.tpl and seed 1819994127 explain with ss_items as diff --git a/ql/src/test/queries/clientpositive/perf/query59.q b/ql/src/test/queries/clientpositive/perf/query59.q index 099965306b..4a75731c04 100644 --- a/ql/src/test/queries/clientpositive/perf/query59.q +++ b/ql/src/test/queries/clientpositive/perf/query59.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query59.tpl and seed 1819994127 explain with wss as diff --git a/ql/src/test/queries/clientpositive/perf/query6.q b/ql/src/test/queries/clientpositive/perf/query6.q index aabce5202e..10e8d8f852 100644 --- a/ql/src/test/queries/clientpositive/perf/query6.q +++ b/ql/src/test/queries/clientpositive/perf/query6.q @@ -1,6 +1,7 @@ set hive.auto.convert.join=true; set hive.tez.cartesian-product.enabled=true; set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query6.tpl and seed 1819994127 explain select a.ca_state state, count(*) cnt diff --git a/ql/src/test/queries/clientpositive/perf/query60.q b/ql/src/test/queries/clientpositive/perf/query60.q index a5ab248cd1..2e3faf6dad 100644 --- a/ql/src/test/queries/clientpositive/perf/query60.q +++ b/ql/src/test/queries/clientpositive/perf/query60.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query60.tpl and seed 1930872976 explain with ss as ( diff --git a/ql/src/test/queries/clientpositive/perf/query61.q b/ql/src/test/queries/clientpositive/perf/query61.q index edaf6f6e8e..01c4218568 100644 --- a/ql/src/test/queries/clientpositive/perf/query61.q +++ b/ql/src/test/queries/clientpositive/perf/query61.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query61.tpl and seed 1930872976 explain select promotions,total,cast(promotions as decimal(15,4))/cast(total as decimal(15,4))*100 diff --git a/ql/src/test/queries/clientpositive/perf/query63.q b/ql/src/test/queries/clientpositive/perf/query63.q index 49e513c786..3d5a735dfb 100644 --- a/ql/src/test/queries/clientpositive/perf/query63.q +++ b/ql/src/test/queries/clientpositive/perf/query63.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query63.tpl and seed 1819994127 explain select * diff --git a/ql/src/test/queries/clientpositive/perf/query64.q b/ql/src/test/queries/clientpositive/perf/query64.q index b069c2ace9..45e167b717 100644 --- a/ql/src/test/queries/clientpositive/perf/query64.q +++ b/ql/src/test/queries/clientpositive/perf/query64.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query64.tpl and seed 1220860970 explain with cs_ui as diff --git 
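
As query6.q above shows, files that already carry extra session settings keep them; the new line is simply appended to the existing preamble, so a file may end up with several set statements before its query. The query6.q preamble after this patch, copied from the hunk above:

  set hive.auto.convert.join=true;
  set hive.tez.cartesian-product.enabled=true;
  set hive.mapred.mode=nonstrict;
  set hive.materializedview.rewriting=false;
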
a/ql/src/test/queries/clientpositive/perf/query65.q b/ql/src/test/queries/clientpositive/perf/query65.q index d5b53a25c5..4612c10205 100644 --- a/ql/src/test/queries/clientpositive/perf/query65.q +++ b/ql/src/test/queries/clientpositive/perf/query65.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query65.tpl and seed 1819994127 explain select diff --git a/ql/src/test/queries/clientpositive/perf/query66.q b/ql/src/test/queries/clientpositive/perf/query66.q index 280bac8df3..753e973678 100644 --- a/ql/src/test/queries/clientpositive/perf/query66.q +++ b/ql/src/test/queries/clientpositive/perf/query66.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query66.tpl and seed 2042478054 explain select diff --git a/ql/src/test/queries/clientpositive/perf/query67.q b/ql/src/test/queries/clientpositive/perf/query67.q index c3ecf2a177..ce3fee04a6 100644 --- a/ql/src/test/queries/clientpositive/perf/query67.q +++ b/ql/src/test/queries/clientpositive/perf/query67.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query67.tpl and seed 1819994127 explain select * diff --git a/ql/src/test/queries/clientpositive/perf/query68.q b/ql/src/test/queries/clientpositive/perf/query68.q index 964dc8a0a7..298f3e7821 100644 --- a/ql/src/test/queries/clientpositive/perf/query68.q +++ b/ql/src/test/queries/clientpositive/perf/query68.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query68.tpl and seed 803547492 explain select c_last_name diff --git a/ql/src/test/queries/clientpositive/perf/query69.q b/ql/src/test/queries/clientpositive/perf/query69.q index ce2d19cc5d..4d1dbceaf2 100644 --- a/ql/src/test/queries/clientpositive/perf/query69.q +++ b/ql/src/test/queries/clientpositive/perf/query69.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query69.tpl and seed 797269820 explain select diff --git a/ql/src/test/queries/clientpositive/perf/query7.q b/ql/src/test/queries/clientpositive/perf/query7.q index 7bc1a00a3f..8efcd65427 100644 --- a/ql/src/test/queries/clientpositive/perf/query7.q +++ b/ql/src/test/queries/clientpositive/perf/query7.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query7.tpl and seed 1930872976 explain select i_item_id, diff --git a/ql/src/test/queries/clientpositive/perf/query70.q b/ql/src/test/queries/clientpositive/perf/query70.q index 7974976c34..9c8df7144e 100644 --- a/ql/src/test/queries/clientpositive/perf/query70.q +++ b/ql/src/test/queries/clientpositive/perf/query70.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query70.tpl and seed 1819994127 explain select diff --git a/ql/src/test/queries/clientpositive/perf/query71.q b/ql/src/test/queries/clientpositive/perf/query71.q index ea6548ec4d..cc1e3de3e0 100644 --- a/ql/src/test/queries/clientpositive/perf/query71.q +++ b/ql/src/test/queries/clientpositive/perf/query71.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query71.tpl and seed 
2031708268 explain select i_brand_id brand_id, i_brand brand,t_hour,t_minute, diff --git a/ql/src/test/queries/clientpositive/perf/query72.q b/ql/src/test/queries/clientpositive/perf/query72.q index 20fbcb1242..f111bbacb0 100644 --- a/ql/src/test/queries/clientpositive/perf/query72.q +++ b/ql/src/test/queries/clientpositive/perf/query72.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query72.tpl and seed 2031708268 explain select i_item_desc diff --git a/ql/src/test/queries/clientpositive/perf/query73.q b/ql/src/test/queries/clientpositive/perf/query73.q index 42ccaa19be..cf8ceb4fff 100644 --- a/ql/src/test/queries/clientpositive/perf/query73.q +++ b/ql/src/test/queries/clientpositive/perf/query73.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query73.tpl and seed 1971067816 explain select c_last_name diff --git a/ql/src/test/queries/clientpositive/perf/query74.q b/ql/src/test/queries/clientpositive/perf/query74.q index b25db9c0e0..11d1712e47 100644 --- a/ql/src/test/queries/clientpositive/perf/query74.q +++ b/ql/src/test/queries/clientpositive/perf/query74.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query74.tpl and seed 1556717815 explain with year_total as ( diff --git a/ql/src/test/queries/clientpositive/perf/query75.q b/ql/src/test/queries/clientpositive/perf/query75.q index ac1fc381c4..cb8d5163f3 100644 --- a/ql/src/test/queries/clientpositive/perf/query75.q +++ b/ql/src/test/queries/clientpositive/perf/query75.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query75.tpl and seed 1819994127 explain WITH all_sales AS ( diff --git a/ql/src/test/queries/clientpositive/perf/query76.q b/ql/src/test/queries/clientpositive/perf/query76.q index ca943ce967..a342013a70 100644 --- a/ql/src/test/queries/clientpositive/perf/query76.q +++ b/ql/src/test/queries/clientpositive/perf/query76.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query76.tpl and seed 2031708268 explain select channel, col_name, d_year, d_qoy, i_category, COUNT(*) sales_cnt, SUM(ext_sales_price) sales_amt FROM ( diff --git a/ql/src/test/queries/clientpositive/perf/query77.q b/ql/src/test/queries/clientpositive/perf/query77.q index 28578133fe..ecc31fc419 100644 --- a/ql/src/test/queries/clientpositive/perf/query77.q +++ b/ql/src/test/queries/clientpositive/perf/query77.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query77.tpl and seed 1819994127 explain with ss as diff --git a/ql/src/test/queries/clientpositive/perf/query78.q b/ql/src/test/queries/clientpositive/perf/query78.q index ca9e6d6cb1..ae50db5698 100644 --- a/ql/src/test/queries/clientpositive/perf/query78.q +++ b/ql/src/test/queries/clientpositive/perf/query78.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query78.tpl and seed 1819994127 explain with ws as diff --git a/ql/src/test/queries/clientpositive/perf/query79.q b/ql/src/test/queries/clientpositive/perf/query79.q index dfa7017c13..350d7c6664 100644 --- 
a/ql/src/test/queries/clientpositive/perf/query79.q +++ b/ql/src/test/queries/clientpositive/perf/query79.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query79.tpl and seed 2031708268 explain select diff --git a/ql/src/test/queries/clientpositive/perf/query8.q b/ql/src/test/queries/clientpositive/perf/query8.q index cfce36618b..35287a2cfe 100644 --- a/ql/src/test/queries/clientpositive/perf/query8.q +++ b/ql/src/test/queries/clientpositive/perf/query8.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query8.tpl and seed 1766988859 explain select s_store_name diff --git a/ql/src/test/queries/clientpositive/perf/query80.q b/ql/src/test/queries/clientpositive/perf/query80.q index 651c5d7ff5..c5c37d468b 100644 --- a/ql/src/test/queries/clientpositive/perf/query80.q +++ b/ql/src/test/queries/clientpositive/perf/query80.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query80.tpl and seed 1819994127 explain with ssr as diff --git a/ql/src/test/queries/clientpositive/perf/query81.q b/ql/src/test/queries/clientpositive/perf/query81.q index fd072c398d..f3b4d949ea 100644 --- a/ql/src/test/queries/clientpositive/perf/query81.q +++ b/ql/src/test/queries/clientpositive/perf/query81.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query81.tpl and seed 1819994127 explain with customer_total_return as diff --git a/ql/src/test/queries/clientpositive/perf/query82.q b/ql/src/test/queries/clientpositive/perf/query82.q index 9aec0cbd68..83598288af 100644 --- a/ql/src/test/queries/clientpositive/perf/query82.q +++ b/ql/src/test/queries/clientpositive/perf/query82.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query82.tpl and seed 55585014 explain select i_item_id diff --git a/ql/src/test/queries/clientpositive/perf/query83.q b/ql/src/test/queries/clientpositive/perf/query83.q index fd9184ccb9..f9eef5b2aa 100644 --- a/ql/src/test/queries/clientpositive/perf/query83.q +++ b/ql/src/test/queries/clientpositive/perf/query83.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query83.tpl and seed 1930872976 explain with sr_items as diff --git a/ql/src/test/queries/clientpositive/perf/query84.q b/ql/src/test/queries/clientpositive/perf/query84.q index 4ab59457d2..44574df8d0 100644 --- a/ql/src/test/queries/clientpositive/perf/query84.q +++ b/ql/src/test/queries/clientpositive/perf/query84.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query84.tpl and seed 1819994127 explain select c_customer_id as customer_id diff --git a/ql/src/test/queries/clientpositive/perf/query85.q b/ql/src/test/queries/clientpositive/perf/query85.q index 2e67e728bf..cccbbf2944 100644 --- a/ql/src/test/queries/clientpositive/perf/query85.q +++ b/ql/src/test/queries/clientpositive/perf/query85.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query85.tpl and seed 622697896 explain select substr(r_reason_desc,1,20) diff --git 
a/ql/src/test/queries/clientpositive/perf/query86.q b/ql/src/test/queries/clientpositive/perf/query86.q index 6670868962..ca8a8cc927 100644 --- a/ql/src/test/queries/clientpositive/perf/query86.q +++ b/ql/src/test/queries/clientpositive/perf/query86.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query86.tpl and seed 1819994127 explain select diff --git a/ql/src/test/queries/clientpositive/perf/query87.q b/ql/src/test/queries/clientpositive/perf/query87.q index e4562c23fe..6a514a6ad3 100644 --- a/ql/src/test/queries/clientpositive/perf/query87.q +++ b/ql/src/test/queries/clientpositive/perf/query87.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query87.tpl and seed 1819994127 explain select count(*) diff --git a/ql/src/test/queries/clientpositive/perf/query88.q b/ql/src/test/queries/clientpositive/perf/query88.q index 265cc7c4f2..1c78501ad9 100644 --- a/ql/src/test/queries/clientpositive/perf/query88.q +++ b/ql/src/test/queries/clientpositive/perf/query88.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query88.tpl and seed 318176889 explain select * diff --git a/ql/src/test/queries/clientpositive/perf/query89.q b/ql/src/test/queries/clientpositive/perf/query89.q index 31592295c0..9ddc125ecb 100644 --- a/ql/src/test/queries/clientpositive/perf/query89.q +++ b/ql/src/test/queries/clientpositive/perf/query89.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query89.tpl and seed 1719819282 explain select * diff --git a/ql/src/test/queries/clientpositive/perf/query9.q b/ql/src/test/queries/clientpositive/perf/query9.q index 421f5e1f43..190b12f5f2 100644 --- a/ql/src/test/queries/clientpositive/perf/query9.q +++ b/ql/src/test/queries/clientpositive/perf/query9.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query9.tpl and seed 1490436826 explain select case when (select count(*) diff --git a/ql/src/test/queries/clientpositive/perf/query90.q b/ql/src/test/queries/clientpositive/perf/query90.q index d17cbc4c21..8fa8c3f652 100644 --- a/ql/src/test/queries/clientpositive/perf/query90.q +++ b/ql/src/test/queries/clientpositive/perf/query90.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query90.tpl and seed 2031708268 explain select cast(amc as decimal(15,4))/cast(pmc as decimal(15,4)) am_pm_ratio diff --git a/ql/src/test/queries/clientpositive/perf/query91.q b/ql/src/test/queries/clientpositive/perf/query91.q index 79ca713dd0..bd54aeee83 100644 --- a/ql/src/test/queries/clientpositive/perf/query91.q +++ b/ql/src/test/queries/clientpositive/perf/query91.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query91.tpl and seed 1930872976 explain select diff --git a/ql/src/test/queries/clientpositive/perf/query92.q b/ql/src/test/queries/clientpositive/perf/query92.q index f26fa5e46f..4fc25e80b8 100644 --- a/ql/src/test/queries/clientpositive/perf/query92.q +++ b/ql/src/test/queries/clientpositive/perf/query92.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set 
hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query92.tpl and seed 2031708268 explain select diff --git a/ql/src/test/queries/clientpositive/perf/query93.q b/ql/src/test/queries/clientpositive/perf/query93.q index 7f4a093df7..d5e9168426 100644 --- a/ql/src/test/queries/clientpositive/perf/query93.q +++ b/ql/src/test/queries/clientpositive/perf/query93.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query93.tpl and seed 1200409435 explain select ss_customer_sk diff --git a/ql/src/test/queries/clientpositive/perf/query94.q b/ql/src/test/queries/clientpositive/perf/query94.q index 18253fa7d6..0557982e29 100644 --- a/ql/src/test/queries/clientpositive/perf/query94.q +++ b/ql/src/test/queries/clientpositive/perf/query94.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query94.tpl and seed 2031708268 explain select diff --git a/ql/src/test/queries/clientpositive/perf/query95.q b/ql/src/test/queries/clientpositive/perf/query95.q index e9024a8c0b..9c79975e52 100644 --- a/ql/src/test/queries/clientpositive/perf/query95.q +++ b/ql/src/test/queries/clientpositive/perf/query95.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query95.tpl and seed 2031708268 explain with ws_wh as diff --git a/ql/src/test/queries/clientpositive/perf/query96.q b/ql/src/test/queries/clientpositive/perf/query96.q index a306d6cdfd..3dd14b77cf 100644 --- a/ql/src/test/queries/clientpositive/perf/query96.q +++ b/ql/src/test/queries/clientpositive/perf/query96.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query96.tpl and seed 1819994127 explain select count(*) diff --git a/ql/src/test/queries/clientpositive/perf/query97.q b/ql/src/test/queries/clientpositive/perf/query97.q index 7203e5243c..d782ebaf53 100644 --- a/ql/src/test/queries/clientpositive/perf/query97.q +++ b/ql/src/test/queries/clientpositive/perf/query97.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query97.tpl and seed 1819994127 explain with ssci as ( diff --git a/ql/src/test/queries/clientpositive/perf/query98.q b/ql/src/test/queries/clientpositive/perf/query98.q index 6168f2af86..dea8478c93 100644 --- a/ql/src/test/queries/clientpositive/perf/query98.q +++ b/ql/src/test/queries/clientpositive/perf/query98.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query98.tpl and seed 345591136 explain select i_item_desc diff --git a/ql/src/test/queries/clientpositive/perf/query99.q b/ql/src/test/queries/clientpositive/perf/query99.q index 83be1d0e71..191967b399 100644 --- a/ql/src/test/queries/clientpositive/perf/query99.q +++ b/ql/src/test/queries/clientpositive/perf/query99.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query99.tpl and seed 1819994127 explain select diff --git a/ql/src/test/results/clientnegative/alter_table_constraint_invalid_fk_tbl1.q.out b/ql/src/test/results/clientnegative/alter_table_constraint_invalid_fk_tbl1.q.out index 869f1edc02..6568429869 100644 --- 
a/ql/src/test/results/clientnegative/alter_table_constraint_invalid_fk_tbl1.q.out +++ b/ql/src/test/results/clientnegative/alter_table_constraint_invalid_fk_tbl1.q.out @@ -16,4 +16,4 @@ POSTHOOK: Output: database:default POSTHOOK: Output: default@table2 PREHOOK: query: alter table table3 add constraint fk1 foreign key (c) references table1(a) disable novalidate PREHOOK: type: ALTERTABLE_ADDCONSTRAINT -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidObjectException(message:Child table not found: table3) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.hive.ql.metadata.InvalidTableException: Table not found table3 diff --git a/ql/src/test/results/clientnegative/alter_table_constraint_invalid_pk_tbl.q.out b/ql/src/test/results/clientnegative/alter_table_constraint_invalid_pk_tbl.q.out index 56d87d5c48..728691906a 100644 --- a/ql/src/test/results/clientnegative/alter_table_constraint_invalid_pk_tbl.q.out +++ b/ql/src/test/results/clientnegative/alter_table_constraint_invalid_pk_tbl.q.out @@ -16,4 +16,4 @@ POSTHOOK: Output: database:default POSTHOOK: Output: default@table2 PREHOOK: query: alter table table3 add constraint pk3 primary key (a) disable novalidate rely PREHOOK: type: ALTERTABLE_ADDCONSTRAINT -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidObjectException(message:Parent table not found: table3) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.hive.ql.metadata.InvalidTableException: Table not found table3 diff --git a/ql/src/test/results/clientnegative/drop_invalid_constraint2.q.out b/ql/src/test/results/clientnegative/drop_invalid_constraint2.q.out index 7f42c4cb99..40493905db 100644 --- a/ql/src/test/results/clientnegative/drop_invalid_constraint2.q.out +++ b/ql/src/test/results/clientnegative/drop_invalid_constraint2.q.out @@ -8,4 +8,4 @@ POSTHOOK: Output: database:default POSTHOOK: Output: default@table2 PREHOOK: query: ALTER TABLE table1 DROP CONSTRAINT pk1 PREHOOK: type: ALTERTABLE_DROPCONSTRAINT -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidObjectException(message:The constraint: pk1 does not exist for the associated table: default.table1) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. 
org.apache.hadoop.hive.ql.metadata.InvalidTableException: Table not found table1 diff --git a/ql/src/test/results/clientpositive/acid_nullscan.q.out b/ql/src/test/results/clientpositive/acid_nullscan.q.out index 0db7d34a3d..b50b15d5ab 100644 --- a/ql/src/test/results/clientpositive/acid_nullscan.q.out +++ b/ql/src/test/results/clientpositive/acid_nullscan.q.out @@ -93,7 +93,7 @@ STAGE PLANS: serialization.ddl struct acid_vectorized_n1 { i32 a, string b} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.NullStructSerDe - totalSize 2596 + totalSize 2592 transactional true transactional_properties default #### A masked pattern was here #### @@ -118,7 +118,7 @@ STAGE PLANS: serialization.ddl struct acid_vectorized_n1 { i32 a, string b} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2596 + totalSize 2592 transactional true transactional_properties default #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/acid_stats2.q.out b/ql/src/test/results/clientpositive/acid_stats2.q.out index dbc35fd1b9..aceac2a63f 100644 --- a/ql/src/test/results/clientpositive/acid_stats2.q.out +++ b/ql/src/test/results/clientpositive/acid_stats2.q.out @@ -311,7 +311,7 @@ Table Parameters: numPartitions 2 numRows 3 rawDataSize 0 - totalSize 1544 + totalSize 1542 transactional true transactional_properties default #### A masked pattern was here #### @@ -387,7 +387,7 @@ Partition Parameters: numFiles 1 numRows 2 rawDataSize 0 - totalSize 797 + totalSize 795 #### A masked pattern was here #### # Storage Information @@ -464,7 +464,7 @@ Table Parameters: numPartitions 2 numRows 2 rawDataSize 0 - totalSize 2241 + totalSize 2238 transactional true transactional_properties default #### A masked pattern was here #### @@ -503,7 +503,7 @@ Partition Parameters: numFiles 2 numRows 0 rawDataSize 0 - totalSize 1444 + totalSize 1443 #### A masked pattern was here #### # Storage Information @@ -540,7 +540,7 @@ Partition Parameters: numFiles 1 numRows 2 rawDataSize 0 - totalSize 797 + totalSize 795 #### A masked pattern was here #### # Storage Information @@ -644,7 +644,7 @@ Table Parameters: numPartitions 2 numRows 1 rawDataSize 0 - totalSize 2937 + totalSize 2927 transactional true transactional_properties default #### A masked pattern was here #### @@ -683,7 +683,7 @@ Partition Parameters: numFiles 2 numRows 0 rawDataSize 0 - totalSize 1444 + totalSize 1443 #### A masked pattern was here #### # Storage Information @@ -720,7 +720,7 @@ Partition Parameters: numFiles 2 numRows 1 rawDataSize 0 - totalSize 1493 + totalSize 1484 #### A masked pattern was here #### # Storage Information diff --git a/ql/src/test/results/clientpositive/acid_table_stats.q.out b/ql/src/test/results/clientpositive/acid_table_stats.q.out index 488096d635..f9963e07c8 100644 --- a/ql/src/test/results/clientpositive/acid_table_stats.q.out +++ b/ql/src/test/results/clientpositive/acid_table_stats.q.out @@ -97,7 +97,7 @@ Partition Parameters: numFiles 2 numRows 1000 rawDataSize 0 - totalSize 4380 + totalSize 4372 #### A masked pattern was here #### # Storage Information @@ -184,7 +184,7 @@ Partition Parameters: numFiles 2 numRows 1000 rawDataSize 0 - totalSize 4380 + totalSize 4372 #### A masked pattern was here #### # Storage Information @@ -235,7 +235,7 @@ Partition Parameters: numFiles 2 numRows 1000 rawDataSize 0 - totalSize 4380 + totalSize 4372 #### A masked pattern was here #### # Storage Information @@ -331,7 +331,7 @@ Partition Parameters: numFiles 4 numRows 
2000 rawDataSize 0 - totalSize 8761 + totalSize 8754 #### A masked pattern was here #### # Storage Information @@ -380,7 +380,7 @@ Partition Parameters: numFiles 4 numRows 2000 rawDataSize 0 - totalSize 8761 + totalSize 8754 #### A masked pattern was here #### # Storage Information diff --git a/ql/src/test/results/clientpositive/autoColumnStats_4.q.out b/ql/src/test/results/clientpositive/autoColumnStats_4.q.out index 0ca11dfa6e..a6fadff0a8 100644 --- a/ql/src/test/results/clientpositive/autoColumnStats_4.q.out +++ b/ql/src/test/results/clientpositive/autoColumnStats_4.q.out @@ -211,7 +211,7 @@ Table Parameters: numFiles 2 numRows 10 rawDataSize 0 - totalSize 1899 + totalSize 1898 transactional true transactional_properties default #### A masked pattern was here #### @@ -256,7 +256,7 @@ Table Parameters: numFiles 4 numRows 8 rawDataSize 0 - totalSize 3285 + totalSize 3287 transactional true transactional_properties default #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/compute_stats_date.q.out b/ql/src/test/results/clientpositive/compute_stats_date.q.out index d5eaf2099a..e638e5cdfe 100644 --- a/ql/src/test/results/clientpositive/compute_stats_date.q.out +++ b/ql/src/test/results/clientpositive/compute_stats_date.q.out @@ -132,11 +132,11 @@ num_falses bitVector HL comment from deserializer COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"fl_date\":\"true\"}} -PREHOOK: query: alter table tab_date update statistics for column fl_date set ('numDVs'='19', 'highValue'='2015-01-01', 'lowValue'='0') +PREHOOK: query: alter table tab_date update statistics for column fl_date set ('numDVs'='19', 'highValue'='2015-01-01', 'lowValue'='0', 'numNulls'='0') PREHOOK: type: ALTERTABLE_UPDATETABLESTATS PREHOOK: Input: default@tab_date PREHOOK: Output: default@tab_date -POSTHOOK: query: alter table tab_date update statistics for column fl_date set ('numDVs'='19', 'highValue'='2015-01-01', 'lowValue'='0') +POSTHOOK: query: alter table tab_date update statistics for column fl_date set ('numDVs'='19', 'highValue'='2015-01-01', 'lowValue'='0', 'numNulls'='0') POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS POSTHOOK: Input: default@tab_date POSTHOOK: Output: default@tab_date diff --git a/ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out b/ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out index 892d291de8..e50634ccb2 100644 --- a/ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out +++ b/ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out @@ -112,7 +112,7 @@ STAGE PLANS: serialization.ddl struct acidtbldefault { i32 a} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 34777 + totalSize 34734 transactional true transactional_properties default #### A masked pattern was here #### @@ -138,7 +138,7 @@ STAGE PLANS: serialization.ddl struct acidtbldefault { i32 a} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 34777 + totalSize 34734 transactional true transactional_properties default #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/llap/acid_no_buckets.q.out b/ql/src/test/results/clientpositive/llap/acid_no_buckets.q.out index 8270c724b2..39acaaaebc 100644 --- a/ql/src/test/results/clientpositive/llap/acid_no_buckets.q.out +++ b/ql/src/test/results/clientpositive/llap/acid_no_buckets.q.out @@ -285,7 +285,7 @@ Table Parameters: numPartitions 4 numRows 2003 rawDataSize 0 - totalSize 
18013 + totalSize 17981 transactional true transactional_properties default #### A masked pattern was here #### @@ -397,7 +397,7 @@ Table Parameters: numPartitions 4 numRows 2003 rawDataSize 0 - totalSize 18013 + totalSize 17981 transactional true transactional_properties default #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/llap/default_constraint.q.out b/ql/src/test/results/clientpositive/llap/default_constraint.q.out index a8db5aefc3..16c399d027 100644 --- a/ql/src/test/results/clientpositive/llap/default_constraint.q.out +++ b/ql/src/test/results/clientpositive/llap/default_constraint.q.out @@ -1986,7 +1986,7 @@ Table Parameters: bucketing_version 2 #### A masked pattern was here #### numFiles 3 - totalSize 3281 + totalSize 3282 transactional true transactional_properties default #### A masked pattern was here #### @@ -2065,7 +2065,7 @@ Table Parameters: bucketing_version 2 #### A masked pattern was here #### numFiles 3 - totalSize 3281 + totalSize 3282 transactional true transactional_properties default #### A masked pattern was here #### @@ -2145,7 +2145,7 @@ Table Parameters: bucketing_version 2 #### A masked pattern was here #### numFiles 3 - totalSize 3281 + totalSize 3282 transactional true transactional_properties default #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization_acid.q.out b/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization_acid.q.out index 3f18e520dd..2ae6312077 100644 --- a/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization_acid.q.out +++ b/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization_acid.q.out @@ -1416,7 +1416,7 @@ STAGE PLANS: TableScan alias: acid_2l_part_sdpo filterExpr: (value = 'bar') (type: boolean) - Statistics: Num rows: 4200 Data size: 1247197 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 4200 Data size: 1247037 Basic stats: COMPLETE Column stats: PARTIAL Filter Operator predicate: (value = 'bar') (type: boolean) Statistics: Num rows: 5 Data size: 1375 Basic stats: COMPLETE Column stats: PARTIAL diff --git a/ql/src/test/results/clientpositive/llap/insert_values_orig_table_use_metadata.q.out b/ql/src/test/results/clientpositive/llap/insert_values_orig_table_use_metadata.q.out index aef1053c63..612006a0b7 100644 --- a/ql/src/test/results/clientpositive/llap/insert_values_orig_table_use_metadata.q.out +++ b/ql/src/test/results/clientpositive/llap/insert_values_orig_table_use_metadata.q.out @@ -173,7 +173,7 @@ Table Parameters: numFiles 1 numRows 12288 rawDataSize 0 - totalSize 309558 + totalSize 309553 transactional true transactional_properties default #### A masked pattern was here #### @@ -442,7 +442,7 @@ Table Parameters: numFiles 2 numRows 4 rawDataSize 0 - totalSize 3304 + totalSize 3303 transactional true transactional_properties default #### A masked pattern was here #### @@ -536,7 +536,7 @@ Table Parameters: numFiles 3 numRows 12292 rawDataSize 0 - totalSize 312862 + totalSize 312854 transactional true transactional_properties default #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_3.q.out b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_3.q.out index b68d98b029..d603bf5b70 100644 --- a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_3.q.out +++ b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_3.q.out @@ -525,10 +525,10 @@ STAGE 
PLANS: Map Operator Tree: TableScan alias: cmv_basetable_2 - filterExpr: ((c > 10) and (ROW__ID.writeid > 1L) and a is not null) (type: boolean) + filterExpr: ((c > 10) and (ROW__ID.writeid > 2L) and a is not null) (type: boolean) Statistics: Num rows: 3 Data size: 348 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((c > 10) and (ROW__ID.writeid > 1L) and a is not null) (type: boolean) + predicate: ((c > 10) and (ROW__ID.writeid > 2L) and a is not null) (type: boolean) Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: a (type: int), c (type: decimal(10,2)) diff --git a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_4.q.out b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_4.q.out index 77bc231b11..a185f79c9f 100644 --- a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_4.q.out +++ b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_4.q.out @@ -754,10 +754,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: cmv_basetable_2_n2 - filterExpr: ((c > 10) and (ROW__ID.writeid > 1L) and a is not null) (type: boolean) + filterExpr: ((c > 10) and (ROW__ID.writeid > 2L) and a is not null) (type: boolean) Statistics: Num rows: 3 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((c > 10) and (ROW__ID.writeid > 1L) and a is not null) (type: boolean) + predicate: ((c > 10) and (ROW__ID.writeid > 2L) and a is not null) (type: boolean) Statistics: Num rows: 1 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: a (type: int), c (type: decimal(10,2)), d (type: int) @@ -1731,10 +1731,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: cmv_basetable_2_n2 - filterExpr: ((c > 10) and (ROW__ID.writeid > 4L) and a is not null) (type: boolean) + filterExpr: ((c > 10) and (ROW__ID.writeid > 5L) and a is not null) (type: boolean) Statistics: Num rows: 3 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((c > 10) and (ROW__ID.writeid > 4L) and a is not null) (type: boolean) + predicate: ((c > 10) and (ROW__ID.writeid > 5L) and a is not null) (type: boolean) Statistics: Num rows: 1 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: a (type: int), c (type: decimal(10,2)), d (type: int) diff --git a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_5.q.out b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_5.q.out index 1e59666f29..b0960402da 100644 --- a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_5.q.out +++ b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_5.q.out @@ -250,10 +250,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: cmv_basetable_2_n3 - filterExpr: ((c > 10) and (ROW__ID.writeid > 1L) and a is not null) (type: boolean) + filterExpr: ((c > 10) and (ROW__ID.writeid > 2L) and a is not null) (type: boolean) Statistics: Num rows: 3 Data size: 348 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((c > 10) and (ROW__ID.writeid > 1L) and a is not null) (type: boolean) + predicate: ((c > 10) and (ROW__ID.writeid > 2L) and a is not null) (type: boolean) Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: a (type: int), c (type: decimal(10,2)) @@ -982,10 +982,10 @@ STAGE PLANS: Map 
Operator Tree: TableScan alias: cmv_basetable_2_n3 - filterExpr: ((c > 10) and (ROW__ID.writeid > 4L) and a is not null) (type: boolean) + filterExpr: ((c > 10) and (ROW__ID.writeid > 5L) and a is not null) (type: boolean) Statistics: Num rows: 3 Data size: 348 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((c > 10) and (ROW__ID.writeid > 4L) and a is not null) (type: boolean) + predicate: ((c > 10) and (ROW__ID.writeid > 5L) and a is not null) (type: boolean) Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: a (type: int), c (type: decimal(10,2)) diff --git a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_rebuild_dummy.q.out b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_rebuild_dummy.q.out index e918e2e516..04a8be8f5b 100644 --- a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_rebuild_dummy.q.out +++ b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_rebuild_dummy.q.out @@ -525,10 +525,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: cmv_basetable_2_n0 - filterExpr: ((c > 10) and (ROW__ID.writeid > 1L) and a is not null) (type: boolean) + filterExpr: ((c > 10) and (ROW__ID.writeid > 2L) and a is not null) (type: boolean) Statistics: Num rows: 3 Data size: 348 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((c > 10) and (ROW__ID.writeid > 1L) and a is not null) (type: boolean) + predicate: ((c > 10) and (ROW__ID.writeid > 2L) and a is not null) (type: boolean) Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: a (type: int), c (type: decimal(10,2)) diff --git a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_time_window.q.out b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_time_window.q.out index 7da22c0616..2b00ea0854 100644 --- a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_time_window.q.out +++ b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_time_window.q.out @@ -643,10 +643,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: cmv_basetable_2_n1 - filterExpr: ((c > 10) and (ROW__ID.writeid > 1L) and a is not null) (type: boolean) + filterExpr: ((c > 10) and (ROW__ID.writeid > 2L) and a is not null) (type: boolean) Statistics: Num rows: 3 Data size: 348 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((c > 10) and (ROW__ID.writeid > 1L) and a is not null) (type: boolean) + predicate: ((c > 10) and (ROW__ID.writeid > 2L) and a is not null) (type: boolean) Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: a (type: int), c (type: decimal(10,2)) diff --git a/ql/src/test/results/clientpositive/llap/sqlmerge_stats.q.out b/ql/src/test/results/clientpositive/llap/sqlmerge_stats.q.out index 989b34f3f6..7fb200c997 100644 --- a/ql/src/test/results/clientpositive/llap/sqlmerge_stats.q.out +++ b/ql/src/test/results/clientpositive/llap/sqlmerge_stats.q.out @@ -93,7 +93,7 @@ Table Parameters: numFiles 1 numRows 1 rawDataSize 0 - totalSize 657 + totalSize 667 transactional true transactional_properties default #### A masked pattern was here #### @@ -420,7 +420,7 @@ Table Parameters: numFiles 4 numRows 2 rawDataSize 0 - totalSize 2690 + totalSize 2709 transactional true transactional_properties default #### A masked pattern 
was here #### @@ -486,7 +486,7 @@ Table Parameters: numFiles 6 numRows 0 rawDataSize 0 - totalSize 4052 + totalSize 4084 transactional true transactional_properties default #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/row__id.q.out b/ql/src/test/results/clientpositive/row__id.q.out index 6df961ab1e..d13e238450 100644 --- a/ql/src/test/results/clientpositive/row__id.q.out +++ b/ql/src/test/results/clientpositive/row__id.q.out @@ -72,24 +72,24 @@ STAGE PLANS: Map Operator Tree: TableScan alias: hello_acid - Statistics: Num rows: 3 Data size: 19884 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 20164 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ROW__ID.writeid (type: bigint) outputColumnNames: _col0 - Statistics: Num rows: 3 Data size: 19884 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 20164 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: bigint) sort order: + - Statistics: Num rows: 3 Data size: 19884 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 20164 Basic stats: COMPLETE Column stats: NONE Execution mode: vectorized Reduce Operator Tree: Select Operator expressions: KEY.reducesinkkey0 (type: bigint) outputColumnNames: _col0 - Statistics: Num rows: 3 Data size: 19884 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 20164 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 3 Data size: 19884 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 20164 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -115,9 +115,9 @@ POSTHOOK: Input: default@hello_acid@load_date=2016-03-01 POSTHOOK: Input: default@hello_acid@load_date=2016-03-02 POSTHOOK: Input: default@hello_acid@load_date=2016-03-03 #### A masked pattern was here #### -1 2 3 +4 PREHOOK: query: explain select tid from (select row__id.writeid as tid from hello_acid) sub where tid = 3 PREHOOK: type: QUERY @@ -145,17 +145,17 @@ STAGE PLANS: TableScan alias: hello_acid filterExpr: (ROW__ID.writeid = 3L) (type: boolean) - Statistics: Num rows: 3 Data size: 19884 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 20164 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (ROW__ID.writeid = 3L) (type: boolean) - Statistics: Num rows: 1 Data size: 6628 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 6721 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ROW__ID.writeid (type: bigint) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 6628 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 6721 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 6628 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 6721 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat diff --git a/ql/src/test/results/clientpositive/stats_nonpart.q.out b/ql/src/test/results/clientpositive/stats_nonpart.q.out index ae9f9e5f54..67e30a91ac 100644 --- 
diff --git a/ql/src/test/results/clientpositive/stats_nonpart.q.out b/ql/src/test/results/clientpositive/stats_nonpart.q.out
index ae9f9e5f54..67e30a91ac 100644
--- a/ql/src/test/results/clientpositive/stats_nonpart.q.out
+++ b/ql/src/test/results/clientpositive/stats_nonpart.q.out
@@ -222,7 +222,7 @@ Table Parameters:
     numFiles                1
     numRows                 2
     rawDataSize             0
-    totalSize               719
+    totalSize               717
     transactional           true
     transactional_properties    default
 #### A masked pattern was here ####
diff --git a/ql/src/test/results/clientpositive/stats_part.q.out b/ql/src/test/results/clientpositive/stats_part.q.out
index efc1766755..3c5d12dbe3 100644
--- a/ql/src/test/results/clientpositive/stats_part.q.out
+++ b/ql/src/test/results/clientpositive/stats_part.q.out
@@ -219,7 +219,7 @@ Table Parameters:
     numPartitions           3
     numRows                 6
     rawDataSize             0
-    totalSize               2244
+    totalSize               2245
     transactional           true
     transactional_properties    default
 #### A masked pattern was here ####
@@ -282,7 +282,7 @@ Table Parameters:
     numPartitions           3
     numRows                 8
     rawDataSize             0
-    totalSize               2998
+    totalSize               2999
     transactional           true
     transactional_properties    default
 #### A masked pattern was here ####
@@ -441,7 +441,7 @@ Table Parameters:
     numPartitions           3
     numRows                 8
     rawDataSize             0
-    totalSize               2998
+    totalSize               2999
     transactional           true
     transactional_properties    default
 #### A masked pattern was here ####
@@ -528,7 +528,7 @@ Table Parameters:
     numPartitions           3
     numRows                 8
     rawDataSize             0
-    totalSize               2998
+    totalSize               2999
     transactional           true
     transactional_properties    default
 #### A masked pattern was here ####
diff --git a/ql/src/test/results/clientpositive/stats_part2.q.out b/ql/src/test/results/clientpositive/stats_part2.q.out
index 1411b6eff7..38b44af3f7 100644
--- a/ql/src/test/results/clientpositive/stats_part2.q.out
+++ b/ql/src/test/results/clientpositive/stats_part2.q.out
@@ -356,7 +356,7 @@ Table Parameters:
     numPartitions           3
     numRows                 8
     rawDataSize             0
-    totalSize               3124
+    totalSize               3125
     transactional           true
     transactional_properties    default
 #### A masked pattern was here ####
@@ -512,7 +512,7 @@ Partition Parameters:
     numFiles                1
     numRows                 2
     rawDataSize             0
-    totalSize               756
+    totalSize               755
 #### A masked pattern was here ####
 # Storage Information
@@ -549,7 +549,7 @@ Partition Parameters:
     numFiles                1
     numRows                 2
     rawDataSize             0
-    totalSize               789
+    totalSize               791
 #### A masked pattern was here ####
 # Storage Information
@@ -709,7 +709,7 @@ Partition Parameters:
     numFiles                1
     numRows                 2
     rawDataSize             0
-    totalSize               756
+    totalSize               755
 #### A masked pattern was here ####
 # Storage Information
@@ -746,7 +746,7 @@ Partition Parameters:
     numFiles                3
     numRows                 2
     rawDataSize             0
-    totalSize               2235
+    totalSize               2244
 #### A masked pattern was here ####
 # Storage Information
@@ -849,7 +849,7 @@ Partition Parameters:
     numFiles                1
     numRows                 2
     rawDataSize             0
-    totalSize               756
+    totalSize               755
 #### A masked pattern was here ####
 # Storage Information
@@ -886,7 +886,7 @@ Partition Parameters:
     numFiles                3
     numRows                 2
     rawDataSize             0
-    totalSize               2235
+    totalSize               2244
 #### A masked pattern was here ####
 # Storage Information
@@ -993,7 +993,7 @@ Partition Parameters:
     numFiles                2
     numRows                 1
     rawDataSize             0
-    totalSize               1453
+    totalSize               1445
 #### A masked pattern was here ####
 # Storage Information
@@ -1030,7 +1030,7 @@ Partition Parameters:
     numFiles                4
     numRows                 1
     rawDataSize             0
-    totalSize               2929
+    totalSize               2942
 #### A masked pattern was here ####
 # Storage Information
diff --git a/ql/src/test/results/clientpositive/stats_sizebug.q.out b/ql/src/test/results/clientpositive/stats_sizebug.q.out
index 780745deb8..dbadfc918b 100644
--- a/ql/src/test/results/clientpositive/stats_sizebug.q.out
+++ b/ql/src/test/results/clientpositive/stats_sizebug.q.out
@@ -158,7 +158,7 @@ Table Parameters:
     numFiles                1
     numRows                 2
     rawDataSize             0
-    totalSize               718
+    totalSize               717
     transactional           true
     transactional_properties    default
 #### A masked pattern was here ####
@@ -205,7 +205,7 @@ Table Parameters:
     numFiles                1
     numRows                 2
     rawDataSize             0
-    totalSize               718
+    totalSize               717
     transactional           true
     transactional_properties    default
 #### A masked pattern was here ####
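The stats_* golden files above track numFiles, numRows, and totalSize, which drift by a few bytes because the ACID files written under the shifted write ids come out slightly different in size. These values are ordinary parameters on the metastore Table and Partition objects; below is a minimal sketch of reading them with the metastore client, where the thrift URI, the metastore.thrift.uris key (the standalone-metastore config name), and the table name are all assumptions.

```java
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Table;

// A minimal sketch, assuming a reachable metastore at a hypothetical URI and
// a hypothetical table name. The basic stats shown in the golden files above
// live in the table's parameters map.
public class ReadTableStats {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("metastore.thrift.uris", "thrift://localhost:9083");
    HiveMetaStoreClient client = new HiveMetaStoreClient(conf);
    try {
      Table t = client.getTable("default", "mytable_n0");
      Map<String, String> params = t.getParameters();
      System.out.println("numFiles  = " + params.get("numFiles"));
      System.out.println("numRows   = " + params.get("numRows"));
      System.out.println("totalSize = " + params.get("totalSize"));
    } finally {
      client.close();
    }
  }
}
```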
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java
index 60a2b47b56..d21f1baf50 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java
@@ -816,13 +816,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AddDynamicPartition
           case 5: // PARTITIONNAMES
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list772 = iprot.readListBegin();
-                struct.partitionnames = new ArrayList(_list772.size);
-                String _elem773;
-                for (int _i774 = 0; _i774 < _list772.size; ++_i774)
+                org.apache.thrift.protocol.TList _list780 = iprot.readListBegin();
+                struct.partitionnames = new ArrayList(_list780.size);
+                String _elem781;
+                for (int _i782 = 0; _i782 < _list780.size; ++_i782)
                 {
-                  _elem773 = iprot.readString();
-                  struct.partitionnames.add(_elem773);
+                  _elem781 = iprot.readString();
+                  struct.partitionnames.add(_elem781);
                 }
                 iprot.readListEnd();
               }
@@ -872,9 +872,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AddDynamicPartitio
         oprot.writeFieldBegin(PARTITIONNAMES_FIELD_DESC);
         {
           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partitionnames.size()));
-          for (String _iter775 : struct.partitionnames)
+          for (String _iter783 : struct.partitionnames)
           {
-            oprot.writeString(_iter775);
+            oprot.writeString(_iter783);
           }
           oprot.writeListEnd();
         }
@@ -910,9 +910,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddDynamicPartition
       oprot.writeString(struct.tablename);
       {
         oprot.writeI32(struct.partitionnames.size());
-        for (String _iter776 : struct.partitionnames)
+        for (String _iter784 : struct.partitionnames)
         {
-          oprot.writeString(_iter776);
+          oprot.writeString(_iter784);
         }
       }
       BitSet optionals = new BitSet();
@@ -937,13 +937,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AddDynamicPartitions
       struct.tablename = iprot.readString();
       struct.setTablenameIsSet(true);
       {
-        org.apache.thrift.protocol.TList _list777 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-        struct.partitionnames = new ArrayList(_list777.size);
-        String _elem778;
-        for (int _i779 = 0; _i779 < _list777.size; ++_i779)
+        org.apache.thrift.protocol.TList _list785 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+        struct.partitionnames = new ArrayList(_list785.size);
+        String _elem786;
+        for (int _i787 = 0; _i787 < _list785.size; ++_i787)
         {
-          _elem778 = iprot.readString();
-          struct.partitionnames.add(_elem778);
+          _elem786 = iprot.readString();
+          struct.partitionnames.add(_elem786);
        }
      }
      struct.setPartitionnamesIsSet(true);
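From this point on, the patch is regenerated Thrift code: new elements in the IDL shift the sequence numbers of the generated temporaries (here _list772 becomes _list780, _elem773 becomes _elem781, and so on) with no behavioral change. As a reading aid for these hunks, the list-deserialization pattern every generated reader follows looks like the hand-written sketch below, written against the Thrift runtime rather than taken from any generated class.

```java
import java.util.ArrayList;
import java.util.List;

import org.apache.thrift.TException;
import org.apache.thrift.protocol.TList;
import org.apache.thrift.protocol.TProtocol;

// A minimal sketch of the generated list-read pattern: the numbered
// temporaries in the hunks above (_list780, _elem781, _i782, ...) are the
// compiler's versions of header, value, and i here.
public final class ThriftListRead {
  private ThriftListRead() {}

  static List<String> readStringList(TProtocol iprot) throws TException {
    TList header = iprot.readListBegin();        // element type and count
    List<String> out = new ArrayList<>(header.size);
    for (int i = 0; i < header.size; ++i) {
      out.add(iprot.readString());               // one element per iteration
    }
    iprot.readListEnd();
    return out;
  }
}
```

The tuple-scheme variants in the same hunks read the element count directly with readI32 and skip readListEnd, with optional fields guarded by a BitSet of presence flags, but the loop shape is identical.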
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsRequest.java
index 747dfad577..9e18831203 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsRequest.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsRequest.java
@@ -716,13 +716,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AllocateTableWriteI
           case 3: // TXN_IDS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list698 = iprot.readListBegin();
-                struct.txnIds = new ArrayList(_list698.size);
-                long _elem699;
-                for (int _i700 = 0; _i700 < _list698.size; ++_i700)
+                org.apache.thrift.protocol.TList _list706 = iprot.readListBegin();
+                struct.txnIds = new ArrayList(_list706.size);
+                long _elem707;
+                for (int _i708 = 0; _i708 < _list706.size; ++_i708)
                 {
-                  _elem699 = iprot.readI64();
-                  struct.txnIds.add(_elem699);
+                  _elem707 = iprot.readI64();
+                  struct.txnIds.add(_elem707);
                 }
                 iprot.readListEnd();
               }
@@ -742,14 +742,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AllocateTableWriteI
           case 5: // SRC_TXN_TO_WRITE_ID_LIST
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list701 = iprot.readListBegin();
-                struct.srcTxnToWriteIdList = new ArrayList(_list701.size);
-                TxnToWriteId _elem702;
-                for (int _i703 = 0; _i703 < _list701.size; ++_i703)
+                org.apache.thrift.protocol.TList _list709 = iprot.readListBegin();
+                struct.srcTxnToWriteIdList = new ArrayList(_list709.size);
+                TxnToWriteId _elem710;
+                for (int _i711 = 0; _i711 < _list709.size; ++_i711)
                 {
-                  _elem702 = new TxnToWriteId();
-                  _elem702.read(iprot);
-                  struct.srcTxnToWriteIdList.add(_elem702);
+                  _elem710 = new TxnToWriteId();
+                  _elem710.read(iprot);
+                  struct.srcTxnToWriteIdList.add(_elem710);
                 }
                 iprot.readListEnd();
               }
@@ -786,9 +786,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AllocateTableWrite
         oprot.writeFieldBegin(TXN_IDS_FIELD_DESC);
         {
           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.txnIds.size()));
-          for (long _iter704 : struct.txnIds)
+          for (long _iter712 : struct.txnIds)
           {
-            oprot.writeI64(_iter704);
+            oprot.writeI64(_iter712);
          }
          oprot.writeListEnd();
        }
@@ -807,9 +807,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AllocateTableWrite
         oprot.writeFieldBegin(SRC_TXN_TO_WRITE_ID_LIST_FIELD_DESC);
        {
          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.srcTxnToWriteIdList.size()));
-          for (TxnToWriteId _iter705 : struct.srcTxnToWriteIdList)
+          for (TxnToWriteId _iter713 : struct.srcTxnToWriteIdList)
          {
-            _iter705.write(oprot);
+            _iter713.write(oprot);
          }
          oprot.writeListEnd();
        }
@@ -849,9 +849,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteI
       if (struct.isSetTxnIds()) {
        {
          oprot.writeI32(struct.txnIds.size());
-          for (long _iter706 : struct.txnIds)
+          for (long _iter714 : struct.txnIds)
          {
-            oprot.writeI64(_iter706);
+            oprot.writeI64(_iter714);
          }
        }
      }
@@ -861,9 +861,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteI
       if (struct.isSetSrcTxnToWriteIdList()) {
        {
oprot.writeI32(struct.srcTxnToWriteIdList.size()); - for (TxnToWriteId _iter707 : struct.srcTxnToWriteIdList) + for (TxnToWriteId _iter715 : struct.srcTxnToWriteIdList) { - _iter707.write(oprot); + _iter715.write(oprot); } } } @@ -879,13 +879,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteId BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list708 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.txnIds = new ArrayList(_list708.size); - long _elem709; - for (int _i710 = 0; _i710 < _list708.size; ++_i710) + org.apache.thrift.protocol.TList _list716 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.txnIds = new ArrayList(_list716.size); + long _elem717; + for (int _i718 = 0; _i718 < _list716.size; ++_i718) { - _elem709 = iprot.readI64(); - struct.txnIds.add(_elem709); + _elem717 = iprot.readI64(); + struct.txnIds.add(_elem717); } } struct.setTxnIdsIsSet(true); @@ -896,14 +896,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteId } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list711 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.srcTxnToWriteIdList = new ArrayList(_list711.size); - TxnToWriteId _elem712; - for (int _i713 = 0; _i713 < _list711.size; ++_i713) + org.apache.thrift.protocol.TList _list719 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.srcTxnToWriteIdList = new ArrayList(_list719.size); + TxnToWriteId _elem720; + for (int _i721 = 0; _i721 < _list719.size; ++_i721) { - _elem712 = new TxnToWriteId(); - _elem712.read(iprot); - struct.srcTxnToWriteIdList.add(_elem712); + _elem720 = new TxnToWriteId(); + _elem720.read(iprot); + struct.srcTxnToWriteIdList.add(_elem720); } } struct.setSrcTxnToWriteIdListIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsResponse.java index e5349e82c5..877bcad3e7 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsResponse.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsResponse.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AllocateTableWriteI case 1: // TXN_TO_WRITE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list714 = iprot.readListBegin(); - struct.txnToWriteIds = new ArrayList(_list714.size); - TxnToWriteId _elem715; - for (int _i716 = 0; _i716 < _list714.size; ++_i716) + org.apache.thrift.protocol.TList _list722 = iprot.readListBegin(); + struct.txnToWriteIds = new ArrayList(_list722.size); + TxnToWriteId _elem723; + for (int _i724 = 0; _i724 < _list722.size; ++_i724) { - _elem715 = new TxnToWriteId(); - _elem715.read(iprot); - struct.txnToWriteIds.add(_elem715); + _elem723 = new TxnToWriteId(); + _elem723.read(iprot); + struct.txnToWriteIds.add(_elem723); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AllocateTableWrite 
oprot.writeFieldBegin(TXN_TO_WRITE_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.txnToWriteIds.size())); - for (TxnToWriteId _iter717 : struct.txnToWriteIds) + for (TxnToWriteId _iter725 : struct.txnToWriteIds) { - _iter717.write(oprot); + _iter725.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteI TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.txnToWriteIds.size()); - for (TxnToWriteId _iter718 : struct.txnToWriteIds) + for (TxnToWriteId _iter726 : struct.txnToWriteIds) { - _iter718.write(oprot); + _iter726.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteI public void read(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteIdsResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list719 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.txnToWriteIds = new ArrayList(_list719.size); - TxnToWriteId _elem720; - for (int _i721 = 0; _i721 < _list719.size; ++_i721) + org.apache.thrift.protocol.TList _list727 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.txnToWriteIds = new ArrayList(_list727.size); + TxnToWriteId _elem728; + for (int _i729 = 0; _i729 < _list727.size; ++_i729) { - _elem720 = new TxnToWriteId(); - _elem720.read(iprot); - struct.txnToWriteIds.add(_elem720); + _elem728 = new TxnToWriteId(); + _elem728.read(iprot); + struct.txnToWriteIds.add(_elem728); } } struct.setTxnToWriteIdsIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java index 6453c93d79..d13d87c8bc 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java @@ -877,14 +877,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AlterPartitionsRequ case 4: // PARTITIONS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1104 = iprot.readListBegin(); - struct.partitions = new ArrayList(_list1104.size); - Partition _elem1105; - for (int _i1106 = 0; _i1106 < _list1104.size; ++_i1106) + org.apache.thrift.protocol.TList _list1112 = iprot.readListBegin(); + struct.partitions = new ArrayList(_list1112.size); + Partition _elem1113; + for (int _i1114 = 0; _i1114 < _list1112.size; ++_i1114) { - _elem1105 = new Partition(); - _elem1105.read(iprot); - struct.partitions.add(_elem1105); + _elem1113 = new Partition(); + _elem1113.read(iprot); + struct.partitions.add(_elem1113); } iprot.readListEnd(); } @@ -952,9 +952,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AlterPartitionsReq oprot.writeFieldBegin(PARTITIONS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitions.size())); - for (Partition _iter1107 : struct.partitions) + for (Partition _iter1115 : 
struct.partitions) { - _iter1107.write(oprot); + _iter1115.write(oprot); } oprot.writeListEnd(); } @@ -1000,9 +1000,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AlterPartitionsRequ oprot.writeString(struct.tableName); { oprot.writeI32(struct.partitions.size()); - for (Partition _iter1108 : struct.partitions) + for (Partition _iter1116 : struct.partitions) { - _iter1108.write(oprot); + _iter1116.write(oprot); } } BitSet optionals = new BitSet(); @@ -1041,14 +1041,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AlterPartitionsReque struct.tableName = iprot.readString(); struct.setTableNameIsSet(true); { - org.apache.thrift.protocol.TList _list1109 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.partitions = new ArrayList(_list1109.size); - Partition _elem1110; - for (int _i1111 = 0; _i1111 < _list1109.size; ++_i1111) + org.apache.thrift.protocol.TList _list1117 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.partitions = new ArrayList(_list1117.size); + Partition _elem1118; + for (int _i1119 = 0; _i1119 < _list1117.size; ++_i1119) { - _elem1110 = new Partition(); - _elem1110.read(iprot); - struct.partitions.add(_elem1110); + _elem1118 = new Partition(); + _elem1118.read(iprot); + struct.partitions.add(_elem1118); } } struct.setPartitionsIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java index 5feff5ff8d..d5f51ade15 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java @@ -351,13 +351,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ClearFileMetadataRe case 1: // FILE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list888 = iprot.readListBegin(); - struct.fileIds = new ArrayList(_list888.size); - long _elem889; - for (int _i890 = 0; _i890 < _list888.size; ++_i890) + org.apache.thrift.protocol.TList _list896 = iprot.readListBegin(); + struct.fileIds = new ArrayList(_list896.size); + long _elem897; + for (int _i898 = 0; _i898 < _list896.size; ++_i898) { - _elem889 = iprot.readI64(); - struct.fileIds.add(_elem889); + _elem897 = iprot.readI64(); + struct.fileIds.add(_elem897); } iprot.readListEnd(); } @@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ClearFileMetadataR oprot.writeFieldBegin(FILE_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.fileIds.size())); - for (long _iter891 : struct.fileIds) + for (long _iter899 : struct.fileIds) { - oprot.writeI64(_iter891); + oprot.writeI64(_iter899); } oprot.writeListEnd(); } @@ -410,9 +410,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ClearFileMetadataRe TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.fileIds.size()); - for (long _iter892 : struct.fileIds) + for (long _iter900 : struct.fileIds) { - oprot.writeI64(_iter892); + oprot.writeI64(_iter900); } } } @@ -421,13 +421,13 @@ public void 
write(org.apache.thrift.protocol.TProtocol prot, ClearFileMetadataRe public void read(org.apache.thrift.protocol.TProtocol prot, ClearFileMetadataRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list893 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.fileIds = new ArrayList(_list893.size); - long _elem894; - for (int _i895 = 0; _i895 < _list893.size; ++_i895) + org.apache.thrift.protocol.TList _list901 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.fileIds = new ArrayList(_list901.size); + long _elem902; + for (int _i903 = 0; _i903 < _list901.size; ++_i903) { - _elem894 = iprot.readI64(); - struct.fileIds.add(_elem894); + _elem902 = iprot.readI64(); + struct.fileIds.add(_elem902); } } struct.setFileIdsIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapabilities.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapabilities.java index a4fb786c3c..86855b3c9a 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapabilities.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapabilities.java @@ -354,13 +354,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ClientCapabilities case 1: // VALUES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list904 = iprot.readListBegin(); - struct.values = new ArrayList(_list904.size); - ClientCapability _elem905; - for (int _i906 = 0; _i906 < _list904.size; ++_i906) + org.apache.thrift.protocol.TList _list912 = iprot.readListBegin(); + struct.values = new ArrayList(_list912.size); + ClientCapability _elem913; + for (int _i914 = 0; _i914 < _list912.size; ++_i914) { - _elem905 = org.apache.hadoop.hive.metastore.api.ClientCapability.findByValue(iprot.readI32()); - struct.values.add(_elem905); + _elem913 = org.apache.hadoop.hive.metastore.api.ClientCapability.findByValue(iprot.readI32()); + struct.values.add(_elem913); } iprot.readListEnd(); } @@ -386,9 +386,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ClientCapabilities oprot.writeFieldBegin(VALUES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, struct.values.size())); - for (ClientCapability _iter907 : struct.values) + for (ClientCapability _iter915 : struct.values) { - oprot.writeI32(_iter907.getValue()); + oprot.writeI32(_iter915.getValue()); } oprot.writeListEnd(); } @@ -413,9 +413,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ClientCapabilities TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.values.size()); - for (ClientCapability _iter908 : struct.values) + for (ClientCapability _iter916 : struct.values) { - oprot.writeI32(_iter908.getValue()); + oprot.writeI32(_iter916.getValue()); } } } @@ -424,13 +424,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ClientCapabilities public void read(org.apache.thrift.protocol.TProtocol prot, ClientCapabilities struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list909 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, iprot.readI32()); - struct.values = new ArrayList(_list909.size); - ClientCapability _elem910; - for (int _i911 = 0; _i911 < _list909.size; ++_i911) + org.apache.thrift.protocol.TList _list917 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, iprot.readI32()); + struct.values = new ArrayList(_list917.size); + ClientCapability _elem918; + for (int _i919 = 0; _i919 < _list917.size; ++_i919) { - _elem910 = org.apache.hadoop.hive.metastore.api.ClientCapability.findByValue(iprot.readI32()); - struct.values.add(_elem910); + _elem918 = org.apache.hadoop.hive.metastore.api.ClientCapability.findByValue(iprot.readI32()); + struct.values.add(_elem918); } } struct.setValuesIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java index 133004e5e5..0e60d279f3 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java @@ -814,15 +814,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, CompactionRequest s case 6: // PROPERTIES if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map754 = iprot.readMapBegin(); - struct.properties = new HashMap(2*_map754.size); - String _key755; - String _val756; - for (int _i757 = 0; _i757 < _map754.size; ++_i757) + org.apache.thrift.protocol.TMap _map762 = iprot.readMapBegin(); + struct.properties = new HashMap(2*_map762.size); + String _key763; + String _val764; + for (int _i765 = 0; _i765 < _map762.size; ++_i765) { - _key755 = iprot.readString(); - _val756 = iprot.readString(); - struct.properties.put(_key755, _val756); + _key763 = iprot.readString(); + _val764 = iprot.readString(); + struct.properties.put(_key763, _val764); } iprot.readMapEnd(); } @@ -878,10 +878,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, CompactionRequest oprot.writeFieldBegin(PROPERTIES_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.properties.size())); - for (Map.Entry _iter758 : struct.properties.entrySet()) + for (Map.Entry _iter766 : struct.properties.entrySet()) { - oprot.writeString(_iter758.getKey()); - oprot.writeString(_iter758.getValue()); + oprot.writeString(_iter766.getKey()); + oprot.writeString(_iter766.getValue()); } oprot.writeMapEnd(); } @@ -928,10 +928,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, CompactionRequest s if (struct.isSetProperties()) { { oprot.writeI32(struct.properties.size()); - for (Map.Entry _iter759 : struct.properties.entrySet()) + for (Map.Entry _iter767 : struct.properties.entrySet()) { - oprot.writeString(_iter759.getKey()); - oprot.writeString(_iter759.getValue()); + oprot.writeString(_iter767.getKey()); + oprot.writeString(_iter767.getValue()); } } } @@ -957,15 +957,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, CompactionRequest st } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map760 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, 
org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.properties = new HashMap(2*_map760.size); - String _key761; - String _val762; - for (int _i763 = 0; _i763 < _map760.size; ++_i763) + org.apache.thrift.protocol.TMap _map768 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.properties = new HashMap(2*_map768.size); + String _key769; + String _val770; + for (int _i771 = 0; _i771 < _map768.size; ++_i771) { - _key761 = iprot.readString(); - _val762 = iprot.readString(); - struct.properties.put(_key761, _val762); + _key769 = iprot.readString(); + _val770 = iprot.readString(); + struct.properties.put(_key769, _val770); } } struct.setPropertiesIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CreateTableRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CreateTableRequest.java index 5d42a80373..8fa1086f18 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CreateTableRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CreateTableRequest.java @@ -1225,14 +1225,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, CreateTableRequest case 3: // PRIMARY_KEYS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1048 = iprot.readListBegin(); - struct.primaryKeys = new ArrayList(_list1048.size); - SQLPrimaryKey _elem1049; - for (int _i1050 = 0; _i1050 < _list1048.size; ++_i1050) + org.apache.thrift.protocol.TList _list1056 = iprot.readListBegin(); + struct.primaryKeys = new ArrayList(_list1056.size); + SQLPrimaryKey _elem1057; + for (int _i1058 = 0; _i1058 < _list1056.size; ++_i1058) { - _elem1049 = new SQLPrimaryKey(); - _elem1049.read(iprot); - struct.primaryKeys.add(_elem1049); + _elem1057 = new SQLPrimaryKey(); + _elem1057.read(iprot); + struct.primaryKeys.add(_elem1057); } iprot.readListEnd(); } @@ -1244,14 +1244,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, CreateTableRequest case 4: // FOREIGN_KEYS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1051 = iprot.readListBegin(); - struct.foreignKeys = new ArrayList(_list1051.size); - SQLForeignKey _elem1052; - for (int _i1053 = 0; _i1053 < _list1051.size; ++_i1053) + org.apache.thrift.protocol.TList _list1059 = iprot.readListBegin(); + struct.foreignKeys = new ArrayList(_list1059.size); + SQLForeignKey _elem1060; + for (int _i1061 = 0; _i1061 < _list1059.size; ++_i1061) { - _elem1052 = new SQLForeignKey(); - _elem1052.read(iprot); - struct.foreignKeys.add(_elem1052); + _elem1060 = new SQLForeignKey(); + _elem1060.read(iprot); + struct.foreignKeys.add(_elem1060); } iprot.readListEnd(); } @@ -1263,14 +1263,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, CreateTableRequest case 5: // UNIQUE_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1054 = iprot.readListBegin(); - struct.uniqueConstraints = new ArrayList(_list1054.size); - SQLUniqueConstraint _elem1055; - for (int _i1056 = 0; _i1056 < _list1054.size; ++_i1056) + org.apache.thrift.protocol.TList _list1062 = iprot.readListBegin(); + struct.uniqueConstraints = new 
ArrayList(_list1062.size); + SQLUniqueConstraint _elem1063; + for (int _i1064 = 0; _i1064 < _list1062.size; ++_i1064) { - _elem1055 = new SQLUniqueConstraint(); - _elem1055.read(iprot); - struct.uniqueConstraints.add(_elem1055); + _elem1063 = new SQLUniqueConstraint(); + _elem1063.read(iprot); + struct.uniqueConstraints.add(_elem1063); } iprot.readListEnd(); } @@ -1282,14 +1282,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, CreateTableRequest case 6: // NOT_NULL_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1057 = iprot.readListBegin(); - struct.notNullConstraints = new ArrayList(_list1057.size); - SQLNotNullConstraint _elem1058; - for (int _i1059 = 0; _i1059 < _list1057.size; ++_i1059) + org.apache.thrift.protocol.TList _list1065 = iprot.readListBegin(); + struct.notNullConstraints = new ArrayList(_list1065.size); + SQLNotNullConstraint _elem1066; + for (int _i1067 = 0; _i1067 < _list1065.size; ++_i1067) { - _elem1058 = new SQLNotNullConstraint(); - _elem1058.read(iprot); - struct.notNullConstraints.add(_elem1058); + _elem1066 = new SQLNotNullConstraint(); + _elem1066.read(iprot); + struct.notNullConstraints.add(_elem1066); } iprot.readListEnd(); } @@ -1301,14 +1301,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, CreateTableRequest case 7: // DEFAULT_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1060 = iprot.readListBegin(); - struct.defaultConstraints = new ArrayList(_list1060.size); - SQLDefaultConstraint _elem1061; - for (int _i1062 = 0; _i1062 < _list1060.size; ++_i1062) + org.apache.thrift.protocol.TList _list1068 = iprot.readListBegin(); + struct.defaultConstraints = new ArrayList(_list1068.size); + SQLDefaultConstraint _elem1069; + for (int _i1070 = 0; _i1070 < _list1068.size; ++_i1070) { - _elem1061 = new SQLDefaultConstraint(); - _elem1061.read(iprot); - struct.defaultConstraints.add(_elem1061); + _elem1069 = new SQLDefaultConstraint(); + _elem1069.read(iprot); + struct.defaultConstraints.add(_elem1069); } iprot.readListEnd(); } @@ -1320,14 +1320,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, CreateTableRequest case 8: // CHECK_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1063 = iprot.readListBegin(); - struct.checkConstraints = new ArrayList(_list1063.size); - SQLCheckConstraint _elem1064; - for (int _i1065 = 0; _i1065 < _list1063.size; ++_i1065) + org.apache.thrift.protocol.TList _list1071 = iprot.readListBegin(); + struct.checkConstraints = new ArrayList(_list1071.size); + SQLCheckConstraint _elem1072; + for (int _i1073 = 0; _i1073 < _list1071.size; ++_i1073) { - _elem1064 = new SQLCheckConstraint(); - _elem1064.read(iprot); - struct.checkConstraints.add(_elem1064); + _elem1072 = new SQLCheckConstraint(); + _elem1072.read(iprot); + struct.checkConstraints.add(_elem1072); } iprot.readListEnd(); } @@ -1339,13 +1339,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, CreateTableRequest case 9: // PROCESSOR_CAPABILITIES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1066 = iprot.readListBegin(); - struct.processorCapabilities = new ArrayList(_list1066.size); - String _elem1067; - for (int _i1068 = 0; _i1068 < _list1066.size; ++_i1068) + org.apache.thrift.protocol.TList _list1074 = iprot.readListBegin(); + struct.processorCapabilities 
= new ArrayList(_list1074.size); + String _elem1075; + for (int _i1076 = 0; _i1076 < _list1074.size; ++_i1076) { - _elem1067 = iprot.readString(); - struct.processorCapabilities.add(_elem1067); + _elem1075 = iprot.readString(); + struct.processorCapabilities.add(_elem1075); } iprot.readListEnd(); } @@ -1392,9 +1392,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, CreateTableRequest oprot.writeFieldBegin(PRIMARY_KEYS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.primaryKeys.size())); - for (SQLPrimaryKey _iter1069 : struct.primaryKeys) + for (SQLPrimaryKey _iter1077 : struct.primaryKeys) { - _iter1069.write(oprot); + _iter1077.write(oprot); } oprot.writeListEnd(); } @@ -1406,9 +1406,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, CreateTableRequest oprot.writeFieldBegin(FOREIGN_KEYS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.foreignKeys.size())); - for (SQLForeignKey _iter1070 : struct.foreignKeys) + for (SQLForeignKey _iter1078 : struct.foreignKeys) { - _iter1070.write(oprot); + _iter1078.write(oprot); } oprot.writeListEnd(); } @@ -1420,9 +1420,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, CreateTableRequest oprot.writeFieldBegin(UNIQUE_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.uniqueConstraints.size())); - for (SQLUniqueConstraint _iter1071 : struct.uniqueConstraints) + for (SQLUniqueConstraint _iter1079 : struct.uniqueConstraints) { - _iter1071.write(oprot); + _iter1079.write(oprot); } oprot.writeListEnd(); } @@ -1434,9 +1434,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, CreateTableRequest oprot.writeFieldBegin(NOT_NULL_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.notNullConstraints.size())); - for (SQLNotNullConstraint _iter1072 : struct.notNullConstraints) + for (SQLNotNullConstraint _iter1080 : struct.notNullConstraints) { - _iter1072.write(oprot); + _iter1080.write(oprot); } oprot.writeListEnd(); } @@ -1448,9 +1448,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, CreateTableRequest oprot.writeFieldBegin(DEFAULT_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.defaultConstraints.size())); - for (SQLDefaultConstraint _iter1073 : struct.defaultConstraints) + for (SQLDefaultConstraint _iter1081 : struct.defaultConstraints) { - _iter1073.write(oprot); + _iter1081.write(oprot); } oprot.writeListEnd(); } @@ -1462,9 +1462,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, CreateTableRequest oprot.writeFieldBegin(CHECK_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.checkConstraints.size())); - for (SQLCheckConstraint _iter1074 : struct.checkConstraints) + for (SQLCheckConstraint _iter1082 : struct.checkConstraints) { - _iter1074.write(oprot); + _iter1082.write(oprot); } oprot.writeListEnd(); } @@ -1476,9 +1476,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, CreateTableRequest oprot.writeFieldBegin(PROCESSOR_CAPABILITIES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.processorCapabilities.size())); 
- for (String _iter1075 : struct.processorCapabilities) + for (String _iter1083 : struct.processorCapabilities) { - oprot.writeString(_iter1075); + oprot.writeString(_iter1083); } oprot.writeListEnd(); } @@ -1545,63 +1545,63 @@ public void write(org.apache.thrift.protocol.TProtocol prot, CreateTableRequest if (struct.isSetPrimaryKeys()) { { oprot.writeI32(struct.primaryKeys.size()); - for (SQLPrimaryKey _iter1076 : struct.primaryKeys) + for (SQLPrimaryKey _iter1084 : struct.primaryKeys) { - _iter1076.write(oprot); + _iter1084.write(oprot); } } } if (struct.isSetForeignKeys()) { { oprot.writeI32(struct.foreignKeys.size()); - for (SQLForeignKey _iter1077 : struct.foreignKeys) + for (SQLForeignKey _iter1085 : struct.foreignKeys) { - _iter1077.write(oprot); + _iter1085.write(oprot); } } } if (struct.isSetUniqueConstraints()) { { oprot.writeI32(struct.uniqueConstraints.size()); - for (SQLUniqueConstraint _iter1078 : struct.uniqueConstraints) + for (SQLUniqueConstraint _iter1086 : struct.uniqueConstraints) { - _iter1078.write(oprot); + _iter1086.write(oprot); } } } if (struct.isSetNotNullConstraints()) { { oprot.writeI32(struct.notNullConstraints.size()); - for (SQLNotNullConstraint _iter1079 : struct.notNullConstraints) + for (SQLNotNullConstraint _iter1087 : struct.notNullConstraints) { - _iter1079.write(oprot); + _iter1087.write(oprot); } } } if (struct.isSetDefaultConstraints()) { { oprot.writeI32(struct.defaultConstraints.size()); - for (SQLDefaultConstraint _iter1080 : struct.defaultConstraints) + for (SQLDefaultConstraint _iter1088 : struct.defaultConstraints) { - _iter1080.write(oprot); + _iter1088.write(oprot); } } } if (struct.isSetCheckConstraints()) { { oprot.writeI32(struct.checkConstraints.size()); - for (SQLCheckConstraint _iter1081 : struct.checkConstraints) + for (SQLCheckConstraint _iter1089 : struct.checkConstraints) { - _iter1081.write(oprot); + _iter1089.write(oprot); } } } if (struct.isSetProcessorCapabilities()) { { oprot.writeI32(struct.processorCapabilities.size()); - for (String _iter1082 : struct.processorCapabilities) + for (String _iter1090 : struct.processorCapabilities) { - oprot.writeString(_iter1082); + oprot.writeString(_iter1090); } } } @@ -1624,97 +1624,97 @@ public void read(org.apache.thrift.protocol.TProtocol prot, CreateTableRequest s } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list1083 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.primaryKeys = new ArrayList(_list1083.size); - SQLPrimaryKey _elem1084; - for (int _i1085 = 0; _i1085 < _list1083.size; ++_i1085) + org.apache.thrift.protocol.TList _list1091 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.primaryKeys = new ArrayList(_list1091.size); + SQLPrimaryKey _elem1092; + for (int _i1093 = 0; _i1093 < _list1091.size; ++_i1093) { - _elem1084 = new SQLPrimaryKey(); - _elem1084.read(iprot); - struct.primaryKeys.add(_elem1084); + _elem1092 = new SQLPrimaryKey(); + _elem1092.read(iprot); + struct.primaryKeys.add(_elem1092); } } struct.setPrimaryKeysIsSet(true); } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1086 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.foreignKeys = new ArrayList(_list1086.size); - SQLForeignKey _elem1087; - for (int _i1088 = 0; _i1088 < _list1086.size; ++_i1088) + org.apache.thrift.protocol.TList _list1094 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.foreignKeys = new ArrayList(_list1094.size); + SQLForeignKey _elem1095; + for (int _i1096 = 0; _i1096 < _list1094.size; ++_i1096) { - _elem1087 = new SQLForeignKey(); - _elem1087.read(iprot); - struct.foreignKeys.add(_elem1087); + _elem1095 = new SQLForeignKey(); + _elem1095.read(iprot); + struct.foreignKeys.add(_elem1095); } } struct.setForeignKeysIsSet(true); } if (incoming.get(3)) { { - org.apache.thrift.protocol.TList _list1089 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.uniqueConstraints = new ArrayList(_list1089.size); - SQLUniqueConstraint _elem1090; - for (int _i1091 = 0; _i1091 < _list1089.size; ++_i1091) + org.apache.thrift.protocol.TList _list1097 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.uniqueConstraints = new ArrayList(_list1097.size); + SQLUniqueConstraint _elem1098; + for (int _i1099 = 0; _i1099 < _list1097.size; ++_i1099) { - _elem1090 = new SQLUniqueConstraint(); - _elem1090.read(iprot); - struct.uniqueConstraints.add(_elem1090); + _elem1098 = new SQLUniqueConstraint(); + _elem1098.read(iprot); + struct.uniqueConstraints.add(_elem1098); } } struct.setUniqueConstraintsIsSet(true); } if (incoming.get(4)) { { - org.apache.thrift.protocol.TList _list1092 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.notNullConstraints = new ArrayList(_list1092.size); - SQLNotNullConstraint _elem1093; - for (int _i1094 = 0; _i1094 < _list1092.size; ++_i1094) + org.apache.thrift.protocol.TList _list1100 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.notNullConstraints = new ArrayList(_list1100.size); + SQLNotNullConstraint _elem1101; + for (int _i1102 = 0; _i1102 < _list1100.size; ++_i1102) { - _elem1093 = new SQLNotNullConstraint(); - _elem1093.read(iprot); - struct.notNullConstraints.add(_elem1093); + _elem1101 = new SQLNotNullConstraint(); + _elem1101.read(iprot); + struct.notNullConstraints.add(_elem1101); } } struct.setNotNullConstraintsIsSet(true); } if (incoming.get(5)) { { - org.apache.thrift.protocol.TList _list1095 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.defaultConstraints = new ArrayList(_list1095.size); - SQLDefaultConstraint _elem1096; - for (int _i1097 = 0; _i1097 < _list1095.size; ++_i1097) + org.apache.thrift.protocol.TList _list1103 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.defaultConstraints = new ArrayList(_list1103.size); + SQLDefaultConstraint _elem1104; + for (int _i1105 = 0; _i1105 < _list1103.size; ++_i1105) { - _elem1096 = new SQLDefaultConstraint(); - _elem1096.read(iprot); - struct.defaultConstraints.add(_elem1096); + _elem1104 = new SQLDefaultConstraint(); + _elem1104.read(iprot); + struct.defaultConstraints.add(_elem1104); } } struct.setDefaultConstraintsIsSet(true); } if (incoming.get(6)) { { - org.apache.thrift.protocol.TList _list1098 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.checkConstraints = new ArrayList(_list1098.size); - SQLCheckConstraint _elem1099; - for (int _i1100 = 0; _i1100 < _list1098.size; ++_i1100) + org.apache.thrift.protocol.TList _list1106 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.checkConstraints = new ArrayList(_list1106.size); + SQLCheckConstraint _elem1107; + for (int _i1108 = 0; _i1108 < _list1106.size; ++_i1108) { - _elem1099 = new SQLCheckConstraint(); - _elem1099.read(iprot); - struct.checkConstraints.add(_elem1099); + _elem1107 = new SQLCheckConstraint(); + _elem1107.read(iprot); + struct.checkConstraints.add(_elem1107); } } struct.setCheckConstraintsIsSet(true); } if (incoming.get(7)) { { - org.apache.thrift.protocol.TList _list1101 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.processorCapabilities = new ArrayList(_list1101.size); - String _elem1102; - for (int _i1103 = 0; _i1103 < _list1101.size; ++_i1103) + org.apache.thrift.protocol.TList _list1109 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.processorCapabilities = new ArrayList(_list1109.size); + String _elem1110; + for (int _i1111 = 0; _i1111 < _list1109.size; ++_i1111) { - _elem1102 = iprot.readString(); - struct.processorCapabilities.add(_elem1102); + _elem1110 = iprot.readString(); + struct.processorCapabilities.add(_elem1110); } } struct.setProcessorCapabilitiesIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ExtendedTableInfo.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ExtendedTableInfo.java index c37b0d311a..ae7e549585 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ExtendedTableInfo.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ExtendedTableInfo.java @@ -627,13 +627,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ExtendedTableInfo s case 3: // REQUIRED_READ_CAPABILITIES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list952 = iprot.readListBegin(); - struct.requiredReadCapabilities = new ArrayList(_list952.size); - String _elem953; - for (int _i954 = 0; _i954 < _list952.size; ++_i954) + org.apache.thrift.protocol.TList _list960 = iprot.readListBegin(); + struct.requiredReadCapabilities = new ArrayList(_list960.size); + String _elem961; + for (int _i962 = 0; _i962 < _list960.size; ++_i962) { - _elem953 = iprot.readString(); - struct.requiredReadCapabilities.add(_elem953); + _elem961 = iprot.readString(); + struct.requiredReadCapabilities.add(_elem961); } iprot.readListEnd(); } @@ -645,13 +645,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ExtendedTableInfo s case 4: // REQUIRED_WRITE_CAPABILITIES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list955 = iprot.readListBegin(); - struct.requiredWriteCapabilities = new ArrayList(_list955.size); - String _elem956; - for (int _i957 = 0; _i957 < _list955.size; ++_i957) + org.apache.thrift.protocol.TList _list963 = iprot.readListBegin(); + struct.requiredWriteCapabilities = new ArrayList(_list963.size); + String _elem964; + for (int _i965 = 0; _i965 < _list963.size; ++_i965) { - _elem956 = iprot.readString(); - struct.requiredWriteCapabilities.add(_elem956); + _elem964 = iprot.readString(); + struct.requiredWriteCapabilities.add(_elem964); } iprot.readListEnd(); } @@ -688,9 +688,9 @@ public void 
write(org.apache.thrift.protocol.TProtocol oprot, ExtendedTableInfo oprot.writeFieldBegin(REQUIRED_READ_CAPABILITIES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.requiredReadCapabilities.size())); - for (String _iter958 : struct.requiredReadCapabilities) + for (String _iter966 : struct.requiredReadCapabilities) { - oprot.writeString(_iter958); + oprot.writeString(_iter966); } oprot.writeListEnd(); } @@ -702,9 +702,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ExtendedTableInfo oprot.writeFieldBegin(REQUIRED_WRITE_CAPABILITIES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.requiredWriteCapabilities.size())); - for (String _iter959 : struct.requiredWriteCapabilities) + for (String _iter967 : struct.requiredWriteCapabilities) { - oprot.writeString(_iter959); + oprot.writeString(_iter967); } oprot.writeListEnd(); } @@ -746,18 +746,18 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ExtendedTableInfo s if (struct.isSetRequiredReadCapabilities()) { { oprot.writeI32(struct.requiredReadCapabilities.size()); - for (String _iter960 : struct.requiredReadCapabilities) + for (String _iter968 : struct.requiredReadCapabilities) { - oprot.writeString(_iter960); + oprot.writeString(_iter968); } } } if (struct.isSetRequiredWriteCapabilities()) { { oprot.writeI32(struct.requiredWriteCapabilities.size()); - for (String _iter961 : struct.requiredWriteCapabilities) + for (String _iter969 : struct.requiredWriteCapabilities) { - oprot.writeString(_iter961); + oprot.writeString(_iter969); } } } @@ -775,26 +775,26 @@ public void read(org.apache.thrift.protocol.TProtocol prot, ExtendedTableInfo st } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list962 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.requiredReadCapabilities = new ArrayList(_list962.size); - String _elem963; - for (int _i964 = 0; _i964 < _list962.size; ++_i964) + org.apache.thrift.protocol.TList _list970 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.requiredReadCapabilities = new ArrayList(_list970.size); + String _elem971; + for (int _i972 = 0; _i972 < _list970.size; ++_i972) { - _elem963 = iprot.readString(); - struct.requiredReadCapabilities.add(_elem963); + _elem971 = iprot.readString(); + struct.requiredReadCapabilities.add(_elem971); } } struct.setRequiredReadCapabilitiesIsSet(true); } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list965 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.requiredWriteCapabilities = new ArrayList(_list965.size); - String _elem966; - for (int _i967 = 0; _i967 < _list965.size; ++_i967) + org.apache.thrift.protocol.TList _list973 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.requiredWriteCapabilities = new ArrayList(_list973.size); + String _elem974; + for (int _i975 = 0; _i975 < _list973.size; ++_i975) { - _elem966 = iprot.readString(); - struct.requiredWriteCapabilities.add(_elem966); + _elem974 = iprot.readString(); + struct.requiredWriteCapabilities.add(_elem974); } } struct.setRequiredWriteCapabilitiesIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FindSchemasByColsResp.java 
b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FindSchemasByColsResp.java index 4024751ed3..d716e2f5db 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FindSchemasByColsResp.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FindSchemasByColsResp.java @@ -350,14 +350,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, FindSchemasByColsRe case 1: // SCHEMA_VERSIONS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1040 = iprot.readListBegin(); - struct.schemaVersions = new ArrayList(_list1040.size); - SchemaVersionDescriptor _elem1041; - for (int _i1042 = 0; _i1042 < _list1040.size; ++_i1042) + org.apache.thrift.protocol.TList _list1048 = iprot.readListBegin(); + struct.schemaVersions = new ArrayList(_list1048.size); + SchemaVersionDescriptor _elem1049; + for (int _i1050 = 0; _i1050 < _list1048.size; ++_i1050) { - _elem1041 = new SchemaVersionDescriptor(); - _elem1041.read(iprot); - struct.schemaVersions.add(_elem1041); + _elem1049 = new SchemaVersionDescriptor(); + _elem1049.read(iprot); + struct.schemaVersions.add(_elem1049); } iprot.readListEnd(); } @@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, FindSchemasByColsR oprot.writeFieldBegin(SCHEMA_VERSIONS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.schemaVersions.size())); - for (SchemaVersionDescriptor _iter1043 : struct.schemaVersions) + for (SchemaVersionDescriptor _iter1051 : struct.schemaVersions) { - _iter1043.write(oprot); + _iter1051.write(oprot); } oprot.writeListEnd(); } @@ -416,9 +416,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, FindSchemasByColsRe if (struct.isSetSchemaVersions()) { { oprot.writeI32(struct.schemaVersions.size()); - for (SchemaVersionDescriptor _iter1044 : struct.schemaVersions) + for (SchemaVersionDescriptor _iter1052 : struct.schemaVersions) { - _iter1044.write(oprot); + _iter1052.write(oprot); } } } @@ -430,14 +430,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, FindSchemasByColsRes BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1045 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.schemaVersions = new ArrayList(_list1045.size); - SchemaVersionDescriptor _elem1046; - for (int _i1047 = 0; _i1047 < _list1045.size; ++_i1047) + org.apache.thrift.protocol.TList _list1053 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.schemaVersions = new ArrayList(_list1053.size); + SchemaVersionDescriptor _elem1054; + for (int _i1055 = 0; _i1055 < _list1053.size; ++_i1055) { - _elem1046 = new SchemaVersionDescriptor(); - _elem1046.read(iprot); - struct.schemaVersions.add(_elem1046); + _elem1054 = new SchemaVersionDescriptor(); + _elem1054.read(iprot); + struct.schemaVersions.add(_elem1054); } } struct.setSchemaVersionsIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java index 07eb291e3d..960dbd70df 100644 --- 
a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java @@ -794,13 +794,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, FireEventRequest st case 5: // PARTITION_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list820 = iprot.readListBegin(); - struct.partitionVals = new ArrayList(_list820.size); - String _elem821; - for (int _i822 = 0; _i822 < _list820.size; ++_i822) + org.apache.thrift.protocol.TList _list828 = iprot.readListBegin(); + struct.partitionVals = new ArrayList(_list828.size); + String _elem829; + for (int _i830 = 0; _i830 < _list828.size; ++_i830) { - _elem821 = iprot.readString(); - struct.partitionVals.add(_elem821); + _elem829 = iprot.readString(); + struct.partitionVals.add(_elem829); } iprot.readListEnd(); } @@ -857,9 +857,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, FireEventRequest s oprot.writeFieldBegin(PARTITION_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partitionVals.size())); - for (String _iter823 : struct.partitionVals) + for (String _iter831 : struct.partitionVals) { - oprot.writeString(_iter823); + oprot.writeString(_iter831); } oprot.writeListEnd(); } @@ -915,9 +915,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, FireEventRequest st if (struct.isSetPartitionVals()) { { oprot.writeI32(struct.partitionVals.size()); - for (String _iter824 : struct.partitionVals) + for (String _iter832 : struct.partitionVals) { - oprot.writeString(_iter824); + oprot.writeString(_iter832); } } } @@ -945,13 +945,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, FireEventRequest str } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list825 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionVals = new ArrayList(_list825.size); - String _elem826; - for (int _i827 = 0; _i827 < _list825.size; ++_i827) + org.apache.thrift.protocol.TList _list833 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionVals = new ArrayList(_list833.size); + String _elem834; + for (int _i835 = 0; _i835 < _list833.size; ++_i835) { - _elem826 = iprot.readString(); - struct.partitionVals.add(_elem826); + _elem834 = iprot.readString(); + struct.partitionVals.add(_elem834); } } struct.setPartitionValsIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java index e22563ff6a..60ebe96295 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java @@ -346,14 +346,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetAllFunctionsResp case 1: // FUNCTIONS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list896 = iprot.readListBegin(); - struct.functions 
= new ArrayList(_list896.size); - Function _elem897; - for (int _i898 = 0; _i898 < _list896.size; ++_i898) + org.apache.thrift.protocol.TList _list904 = iprot.readListBegin(); + struct.functions = new ArrayList(_list904.size); + Function _elem905; + for (int _i906 = 0; _i906 < _list904.size; ++_i906) { - _elem897 = new Function(); - _elem897.read(iprot); - struct.functions.add(_elem897); + _elem905 = new Function(); + _elem905.read(iprot); + struct.functions.add(_elem905); } iprot.readListEnd(); } @@ -380,9 +380,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetAllFunctionsRes oprot.writeFieldBegin(FUNCTIONS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.functions.size())); - for (Function _iter899 : struct.functions) + for (Function _iter907 : struct.functions) { - _iter899.write(oprot); + _iter907.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetAllFunctionsResp if (struct.isSetFunctions()) { { oprot.writeI32(struct.functions.size()); - for (Function _iter900 : struct.functions) + for (Function _iter908 : struct.functions) { - _iter900.write(oprot); + _iter908.write(oprot); } } } @@ -428,14 +428,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetAllFunctionsRespo BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list901 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.functions = new ArrayList(_list901.size); - Function _elem902; - for (int _i903 = 0; _i903 < _list901.size; ++_i903) + org.apache.thrift.protocol.TList _list909 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.functions = new ArrayList(_list909.size); + Function _elem910; + for (int _i911 = 0; _i911 < _list909.size; ++_i911) { - _elem902 = new Function(); - _elem902.read(iprot); - struct.functions.add(_elem902); + _elem910 = new Function(); + _elem910.read(iprot); + struct.functions.add(_elem910); } } struct.setFunctionsIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprRequest.java index 9521a8478d..abc67fc157 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprRequest.java @@ -619,13 +619,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetFileMetadataByEx case 1: // FILE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list846 = iprot.readListBegin(); - struct.fileIds = new ArrayList(_list846.size); - long _elem847; - for (int _i848 = 0; _i848 < _list846.size; ++_i848) + org.apache.thrift.protocol.TList _list854 = iprot.readListBegin(); + struct.fileIds = new ArrayList(_list854.size); + long _elem855; + for (int _i856 = 0; _i856 < _list854.size; ++_i856) { - _elem847 = iprot.readI64(); - struct.fileIds.add(_elem847); + _elem855 = iprot.readI64(); + struct.fileIds.add(_elem855); } iprot.readListEnd(); } @@ -675,9 +675,9 @@ public void 
write(org.apache.thrift.protocol.TProtocol oprot, GetFileMetadataByE oprot.writeFieldBegin(FILE_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.fileIds.size())); - for (long _iter849 : struct.fileIds) + for (long _iter857 : struct.fileIds) { - oprot.writeI64(_iter849); + oprot.writeI64(_iter857); } oprot.writeListEnd(); } @@ -719,9 +719,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByEx TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.fileIds.size()); - for (long _iter850 : struct.fileIds) + for (long _iter858 : struct.fileIds) { - oprot.writeI64(_iter850); + oprot.writeI64(_iter858); } } oprot.writeBinary(struct.expr); @@ -745,13 +745,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByEx public void read(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByExprRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list851 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.fileIds = new ArrayList(_list851.size); - long _elem852; - for (int _i853 = 0; _i853 < _list851.size; ++_i853) + org.apache.thrift.protocol.TList _list859 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.fileIds = new ArrayList(_list859.size); + long _elem860; + for (int _i861 = 0; _i861 < _list859.size; ++_i861) { - _elem852 = iprot.readI64(); - struct.fileIds.add(_elem852); + _elem860 = iprot.readI64(); + struct.fileIds.add(_elem860); } } struct.setFileIdsIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprResult.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprResult.java index c192c0d1a5..478bf3f370 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprResult.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprResult.java @@ -444,16 +444,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetFileMetadataByEx case 1: // METADATA if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map836 = iprot.readMapBegin(); - struct.metadata = new HashMap(2*_map836.size); - long _key837; - MetadataPpdResult _val838; - for (int _i839 = 0; _i839 < _map836.size; ++_i839) + org.apache.thrift.protocol.TMap _map844 = iprot.readMapBegin(); + struct.metadata = new HashMap(2*_map844.size); + long _key845; + MetadataPpdResult _val846; + for (int _i847 = 0; _i847 < _map844.size; ++_i847) { - _key837 = iprot.readI64(); - _val838 = new MetadataPpdResult(); - _val838.read(iprot); - struct.metadata.put(_key837, _val838); + _key845 = iprot.readI64(); + _val846 = new MetadataPpdResult(); + _val846.read(iprot); + struct.metadata.put(_key845, _val846); } iprot.readMapEnd(); } @@ -487,10 +487,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetFileMetadataByE oprot.writeFieldBegin(METADATA_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRUCT, struct.metadata.size())); - for 
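/*
 * Map-valued fields get the same renumber-only treatment. The generated reader
 * presizes the HashMap at twice the serialized entry count, presumably to stay under
 * the default load factor; sketched with illustrative names:
 *
 *   org.apache.thrift.protocol.TMap tmap = iprot.readMapBegin();
 *   struct.metadata = new HashMap<Long, MetadataPpdResult>(2 * tmap.size);
 *   for (int i = 0; i < tmap.size; ++i) {
 *     long key = iprot.readI64();
 *     MetadataPpdResult val = new MetadataPpdResult();
 *     val.read(iprot);
 *     struct.metadata.put(key, val);
 *   }
 *   iprot.readMapEnd();
 */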
(Map.Entry _iter840 : struct.metadata.entrySet()) + for (Map.Entry _iter848 : struct.metadata.entrySet()) { - oprot.writeI64(_iter840.getKey()); - _iter840.getValue().write(oprot); + oprot.writeI64(_iter848.getKey()); + _iter848.getValue().write(oprot); } oprot.writeMapEnd(); } @@ -518,10 +518,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByEx TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.metadata.size()); - for (Map.Entry _iter841 : struct.metadata.entrySet()) + for (Map.Entry _iter849 : struct.metadata.entrySet()) { - oprot.writeI64(_iter841.getKey()); - _iter841.getValue().write(oprot); + oprot.writeI64(_iter849.getKey()); + _iter849.getValue().write(oprot); } } oprot.writeBool(struct.isSupported); @@ -531,16 +531,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByEx public void read(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByExprResult struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TMap _map842 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.metadata = new HashMap(2*_map842.size); - long _key843; - MetadataPpdResult _val844; - for (int _i845 = 0; _i845 < _map842.size; ++_i845) + org.apache.thrift.protocol.TMap _map850 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.metadata = new HashMap(2*_map850.size); + long _key851; + MetadataPpdResult _val852; + for (int _i853 = 0; _i853 < _map850.size; ++_i853) { - _key843 = iprot.readI64(); - _val844 = new MetadataPpdResult(); - _val844.read(iprot); - struct.metadata.put(_key843, _val844); + _key851 = iprot.readI64(); + _val852 = new MetadataPpdResult(); + _val852.read(iprot); + struct.metadata.put(_key851, _val852); } } struct.setMetadataIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataRequest.java index 8ccce25e19..cc6376b03c 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataRequest.java @@ -351,13 +351,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetFileMetadataRequ case 1: // FILE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list864 = iprot.readListBegin(); - struct.fileIds = new ArrayList(_list864.size); - long _elem865; - for (int _i866 = 0; _i866 < _list864.size; ++_i866) + org.apache.thrift.protocol.TList _list872 = iprot.readListBegin(); + struct.fileIds = new ArrayList(_list872.size); + long _elem873; + for (int _i874 = 0; _i874 < _list872.size; ++_i874) { - _elem865 = iprot.readI64(); - struct.fileIds.add(_elem865); + _elem873 = iprot.readI64(); + struct.fileIds.add(_elem873); } iprot.readListEnd(); } @@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetFileMetadataReq oprot.writeFieldBegin(FILE_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, 
struct.fileIds.size())); - for (long _iter867 : struct.fileIds) + for (long _iter875 : struct.fileIds) { - oprot.writeI64(_iter867); + oprot.writeI64(_iter875); } oprot.writeListEnd(); } @@ -410,9 +410,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataRequ TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.fileIds.size()); - for (long _iter868 : struct.fileIds) + for (long _iter876 : struct.fileIds) { - oprot.writeI64(_iter868); + oprot.writeI64(_iter876); } } } @@ -421,13 +421,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataRequ public void read(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list869 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.fileIds = new ArrayList(_list869.size); - long _elem870; - for (int _i871 = 0; _i871 < _list869.size; ++_i871) + org.apache.thrift.protocol.TList _list877 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.fileIds = new ArrayList(_list877.size); + long _elem878; + for (int _i879 = 0; _i879 < _list877.size; ++_i879) { - _elem870 = iprot.readI64(); - struct.fileIds.add(_elem870); + _elem878 = iprot.readI64(); + struct.fileIds.add(_elem878); } } struct.setFileIdsIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataResult.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataResult.java index a6da5c51f4..186bea13ff 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataResult.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataResult.java @@ -433,15 +433,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetFileMetadataResu case 1: // METADATA if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map854 = iprot.readMapBegin(); - struct.metadata = new HashMap(2*_map854.size); - long _key855; - ByteBuffer _val856; - for (int _i857 = 0; _i857 < _map854.size; ++_i857) + org.apache.thrift.protocol.TMap _map862 = iprot.readMapBegin(); + struct.metadata = new HashMap(2*_map862.size); + long _key863; + ByteBuffer _val864; + for (int _i865 = 0; _i865 < _map862.size; ++_i865) { - _key855 = iprot.readI64(); - _val856 = iprot.readBinary(); - struct.metadata.put(_key855, _val856); + _key863 = iprot.readI64(); + _val864 = iprot.readBinary(); + struct.metadata.put(_key863, _val864); } iprot.readMapEnd(); } @@ -475,10 +475,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetFileMetadataRes oprot.writeFieldBegin(METADATA_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRING, struct.metadata.size())); - for (Map.Entry _iter858 : struct.metadata.entrySet()) + for (Map.Entry _iter866 : struct.metadata.entrySet()) { - oprot.writeI64(_iter858.getKey()); - oprot.writeBinary(_iter858.getValue()); + oprot.writeI64(_iter866.getKey()); + oprot.writeBinary(_iter866.getValue()); } oprot.writeMapEnd(); } @@ -506,10 +506,10 @@ public void 
write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataResu TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.metadata.size()); - for (Map.Entry<Long, ByteBuffer> _iter859 : struct.metadata.entrySet()) + for (Map.Entry<Long, ByteBuffer> _iter867 : struct.metadata.entrySet()) { - oprot.writeI64(_iter859.getKey()); - oprot.writeBinary(_iter859.getValue()); + oprot.writeI64(_iter867.getKey()); + oprot.writeBinary(_iter867.getValue()); } } oprot.writeBool(struct.isSupported); @@ -519,15 +519,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataResu public void read(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataResult struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TMap _map860 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.metadata = new HashMap<Long,ByteBuffer>(2*_map860.size); - long _key861; - ByteBuffer _val862; - for (int _i863 = 0; _i863 < _map860.size; ++_i863) + org.apache.thrift.protocol.TMap _map868 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.metadata = new HashMap<Long,ByteBuffer>(2*_map868.size); + long _key869; + ByteBuffer _val870; + for (int _i871 = 0; _i871 < _map868.size; ++_i871) { - _key861 = iprot.readI64(); - _val862 = iprot.readBinary(); - struct.metadata.put(_key861, _val862); + _key869 = iprot.readI64(); + _val870 = iprot.readBinary(); + struct.metadata.put(_key869, _val870); } } struct.setMetadataIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsByNamesRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsByNamesRequest.java index 613b94b5b8..b9623ffad5 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsByNamesRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsByNamesRequest.java @@ -44,6 +44,7 @@ private static final org.apache.thrift.protocol.TField GET_COL_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("get_col_stats", org.apache.thrift.protocol.TType.BOOL, (short)4); private static final org.apache.thrift.protocol.TField PROCESSOR_CAPABILITIES_FIELD_DESC = new org.apache.thrift.protocol.TField("processorCapabilities", org.apache.thrift.protocol.TType.LIST, (short)5); private static final org.apache.thrift.protocol.TField PROCESSOR_IDENTIFIER_FIELD_DESC = new org.apache.thrift.protocol.TField("processorIdentifier", org.apache.thrift.protocol.TType.STRING, (short)6); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)7); private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>(); static { @@ -57,6 +58,7 @@ private boolean get_col_stats; // optional private List<String> processorCapabilities; // optional private String processorIdentifier; // optional + private String validWriteIdList; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them.
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -65,7 +67,8 @@ NAMES((short)3, "names"), GET_COL_STATS((short)4, "get_col_stats"), PROCESSOR_CAPABILITIES((short)5, "processorCapabilities"), - PROCESSOR_IDENTIFIER((short)6, "processorIdentifier"); + PROCESSOR_IDENTIFIER((short)6, "processorIdentifier"), + VALID_WRITE_ID_LIST((short)7, "validWriteIdList"); private static final Map byName = new HashMap(); @@ -92,6 +95,8 @@ public static _Fields findByThriftId(int fieldId) { return PROCESSOR_CAPABILITIES; case 6: // PROCESSOR_IDENTIFIER return PROCESSOR_IDENTIFIER; + case 7: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; default: return null; } @@ -134,7 +139,7 @@ public String getFieldName() { // isset id assignments private static final int __GET_COL_STATS_ISSET_ID = 0; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.NAMES,_Fields.GET_COL_STATS,_Fields.PROCESSOR_CAPABILITIES,_Fields.PROCESSOR_IDENTIFIER}; + private static final _Fields optionals[] = {_Fields.NAMES,_Fields.GET_COL_STATS,_Fields.PROCESSOR_CAPABILITIES,_Fields.PROCESSOR_IDENTIFIER,_Fields.VALID_WRITE_ID_LIST}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -152,6 +157,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); tmpMap.put(_Fields.PROCESSOR_IDENTIFIER, new org.apache.thrift.meta_data.FieldMetaData("processorIdentifier", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetPartitionsByNamesRequest.class, metaDataMap); } @@ -191,6 +198,9 @@ public GetPartitionsByNamesRequest(GetPartitionsByNamesRequest other) { if (other.isSetProcessorIdentifier()) { this.processorIdentifier = other.processorIdentifier; } + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } } public GetPartitionsByNamesRequest deepCopy() { @@ -206,6 +216,7 @@ public void clear() { this.get_col_stats = false; this.processorCapabilities = null; this.processorIdentifier = null; + this.validWriteIdList = null; } public String getDb_name() { @@ -375,6 +386,29 @@ public void setProcessorIdentifierIsSet(boolean value) { } } + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -425,6 +459,14 @@ public void setFieldValue(_Fields 
field, Object value) { } break; + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + } } @@ -448,6 +490,9 @@ public Object getFieldValue(_Fields field) { case PROCESSOR_IDENTIFIER: return getProcessorIdentifier(); + case VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + } throw new IllegalStateException(); } @@ -471,6 +516,8 @@ public boolean isSet(_Fields field) { return isSetProcessorCapabilities(); case PROCESSOR_IDENTIFIER: return isSetProcessorIdentifier(); + case VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); } throw new IllegalStateException(); } @@ -542,6 +589,15 @@ public boolean equals(GetPartitionsByNamesRequest that) { return false; } + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if (!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + return true; } @@ -579,6 +635,11 @@ public int hashCode() { if (present_processorIdentifier) list.add(processorIdentifier); + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + return list.hashCode(); } @@ -650,6 +711,16 @@ public int compareTo(GetPartitionsByNamesRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -721,6 +792,16 @@ public String toString() { } first = false; } + if (isSetValidWriteIdList()) { + if (!first) sb.append(", "); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -842,6 +923,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetPartitionsByName org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 7: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -905,6 +994,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetPartitionsByNam oprot.writeFieldEnd(); } } + if (struct.validWriteIdList != null) { + if (struct.isSetValidWriteIdList()) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -937,7 +1033,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetPartitionsByName if (struct.isSetProcessorIdentifier()) { optionals.set(3); } - oprot.writeBitSet(optionals, 4); + if (struct.isSetValidWriteIdList()) { + optionals.set(4); + } + oprot.writeBitSet(optionals, 5); if 
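/*
 * GetPartitionsByNamesRequest is one of the two structs in this section that actually
 * change: validWriteIdList takes the next free field id (7) and follows the standard
 * Thrift optional-field recipe -- a TField descriptor, an _Fields entry, isSet/unset
 * accessors, equals/hashCode/compareTo/toString clauses, and one more slot in the
 * TTupleProtocol optionals BitSet (4 -> 5). Because the field-based protocols skip
 * unknown field ids, pre-upgrade readers stay compatible there. A hypothetical caller
 * sketch (the generated required-fields constructor and
 * ValidWriteIdList.writeToString() are assumed, not shown in this diff):
 *
 *   GetPartitionsByNamesRequest req = new GetPartitionsByNamesRequest("db", "tbl");
 *   req.setNames(Arrays.asList("ds=2019-08-03"));
 *   req.setValidWriteIdList(validWriteIds.writeToString());
 */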
(struct.isSetNames()) { { oprot.writeI32(struct.names.size()); @@ -962,6 +1061,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetPartitionsByName if (struct.isSetProcessorIdentifier()) { oprot.writeString(struct.processorIdentifier); } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); + } } @Override @@ -971,7 +1073,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetPartitionsByNames struct.setDb_nameIsSet(true); struct.tbl_name = iprot.readString(); struct.setTbl_nameIsSet(true); - BitSet incoming = iprot.readBitSet(4); + BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { org.apache.thrift.protocol.TList _list588 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); @@ -1006,6 +1108,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetPartitionsByNames struct.processorIdentifier = iprot.readString(); struct.setProcessorIdentifierIsSet(true); } + if (incoming.get(4)) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } } } diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsFilterSpec.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsFilterSpec.java index fcba6ebb4d..0441c85dab 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsFilterSpec.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsFilterSpec.java @@ -444,13 +444,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetPartitionsFilter case 8: // FILTERS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1128 = iprot.readListBegin(); - struct.filters = new ArrayList(_list1128.size); - String _elem1129; - for (int _i1130 = 0; _i1130 < _list1128.size; ++_i1130) + org.apache.thrift.protocol.TList _list1136 = iprot.readListBegin(); + struct.filters = new ArrayList(_list1136.size); + String _elem1137; + for (int _i1138 = 0; _i1138 < _list1136.size; ++_i1138) { - _elem1129 = iprot.readString(); - struct.filters.add(_elem1129); + _elem1137 = iprot.readString(); + struct.filters.add(_elem1137); } iprot.readListEnd(); } @@ -484,9 +484,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetPartitionsFilte oprot.writeFieldBegin(FILTERS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.filters.size())); - for (String _iter1131 : struct.filters) + for (String _iter1139 : struct.filters) { - oprot.writeString(_iter1131); + oprot.writeString(_iter1139); } oprot.writeListEnd(); } @@ -524,9 +524,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetPartitionsFilter if (struct.isSetFilters()) { { oprot.writeI32(struct.filters.size()); - for (String _iter1132 : struct.filters) + for (String _iter1140 : struct.filters) { - oprot.writeString(_iter1132); + oprot.writeString(_iter1140); } } } @@ -542,13 +542,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetPartitionsFilterS } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list1133 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.filters = new 
ArrayList(_list1133.size); - String _elem1134; - for (int _i1135 = 0; _i1135 < _list1133.size; ++_i1135) + org.apache.thrift.protocol.TList _list1141 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.filters = new ArrayList(_list1141.size); + String _elem1142; + for (int _i1143 = 0; _i1143 < _list1141.size; ++_i1143) { - _elem1134 = iprot.readString(); - struct.filters.add(_elem1134); + _elem1142 = iprot.readString(); + struct.filters.add(_elem1142); } } struct.setFiltersIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsProjectionSpec.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsProjectionSpec.java index d94cbb1bcc..733a2857eb 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsProjectionSpec.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsProjectionSpec.java @@ -509,13 +509,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetPartitionsProjec case 1: // FIELD_LIST if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1120 = iprot.readListBegin(); - struct.fieldList = new ArrayList(_list1120.size); - String _elem1121; - for (int _i1122 = 0; _i1122 < _list1120.size; ++_i1122) + org.apache.thrift.protocol.TList _list1128 = iprot.readListBegin(); + struct.fieldList = new ArrayList(_list1128.size); + String _elem1129; + for (int _i1130 = 0; _i1130 < _list1128.size; ++_i1130) { - _elem1121 = iprot.readString(); - struct.fieldList.add(_elem1121); + _elem1129 = iprot.readString(); + struct.fieldList.add(_elem1129); } iprot.readListEnd(); } @@ -557,9 +557,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetPartitionsProje oprot.writeFieldBegin(FIELD_LIST_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.fieldList.size())); - for (String _iter1123 : struct.fieldList) + for (String _iter1131 : struct.fieldList) { - oprot.writeString(_iter1123); + oprot.writeString(_iter1131); } oprot.writeListEnd(); } @@ -606,9 +606,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetPartitionsProjec if (struct.isSetFieldList()) { { oprot.writeI32(struct.fieldList.size()); - for (String _iter1124 : struct.fieldList) + for (String _iter1132 : struct.fieldList) { - oprot.writeString(_iter1124); + oprot.writeString(_iter1132); } } } @@ -626,13 +626,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetPartitionsProject BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1125 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.fieldList = new ArrayList(_list1125.size); - String _elem1126; - for (int _i1127 = 0; _i1127 < _list1125.size; ++_i1127) + org.apache.thrift.protocol.TList _list1133 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.fieldList = new ArrayList(_list1133.size); + String _elem1134; + for (int _i1135 = 0; _i1135 < _list1133.size; ++_i1135) { - _elem1126 = iprot.readString(); - struct.fieldList.add(_elem1126); + _elem1134 = iprot.readString(); + struct.fieldList.add(_elem1134); } 
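/*
 * GetPartitionsFilterSpec and GetPartitionsProjectionSpec are renumber-only hunks:
 * the wire format cannot change, since the _listNNN/_iterNNN names are compile-time
 * artifacts. The tuple-scheme writer emits the same bytes whatever the locals are
 * called, e.g. (illustrative name):
 *
 *   oprot.writeI32(struct.filters.size());
 *   for (String filter : struct.filters) {
 *     oprot.writeString(filter);
 *   }
 */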
} struct.setFieldListIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsRequest.java index dd4bf8339a..1f024ac86e 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsRequest.java @@ -48,6 +48,7 @@ private static final org.apache.thrift.protocol.TField FILTER_SPEC_FIELD_DESC = new org.apache.thrift.protocol.TField("filterSpec", org.apache.thrift.protocol.TType.STRUCT, (short)8); private static final org.apache.thrift.protocol.TField PROCESSOR_CAPABILITIES_FIELD_DESC = new org.apache.thrift.protocol.TField("processorCapabilities", org.apache.thrift.protocol.TType.LIST, (short)9); private static final org.apache.thrift.protocol.TField PROCESSOR_IDENTIFIER_FIELD_DESC = new org.apache.thrift.protocol.TField("processorIdentifier", org.apache.thrift.protocol.TType.STRING, (short)10); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)11); private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>(); static { @@ -65,6 +66,7 @@ private GetPartitionsFilterSpec filterSpec; // required private List<String> processorCapabilities; // optional private String processorIdentifier; // optional + private String validWriteIdList; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them.
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -77,7 +79,8 @@ PROJECTION_SPEC((short)7, "projectionSpec"), FILTER_SPEC((short)8, "filterSpec"), PROCESSOR_CAPABILITIES((short)9, "processorCapabilities"), - PROCESSOR_IDENTIFIER((short)10, "processorIdentifier"); + PROCESSOR_IDENTIFIER((short)10, "processorIdentifier"), + VALID_WRITE_ID_LIST((short)11, "validWriteIdList"); private static final Map byName = new HashMap(); @@ -112,6 +115,8 @@ public static _Fields findByThriftId(int fieldId) { return PROCESSOR_CAPABILITIES; case 10: // PROCESSOR_IDENTIFIER return PROCESSOR_IDENTIFIER; + case 11: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; default: return null; } @@ -154,7 +159,7 @@ public String getFieldName() { // isset id assignments private static final int __WITHAUTH_ISSET_ID = 0; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.CAT_NAME,_Fields.WITH_AUTH,_Fields.USER,_Fields.GROUP_NAMES,_Fields.PROCESSOR_CAPABILITIES,_Fields.PROCESSOR_IDENTIFIER}; + private static final _Fields optionals[] = {_Fields.CAT_NAME,_Fields.WITH_AUTH,_Fields.USER,_Fields.GROUP_NAMES,_Fields.PROCESSOR_CAPABILITIES,_Fields.PROCESSOR_IDENTIFIER,_Fields.VALID_WRITE_ID_LIST}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -180,6 +185,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); tmpMap.put(_Fields.PROCESSOR_IDENTIFIER, new org.apache.thrift.meta_data.FieldMetaData("processorIdentifier", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetPartitionsRequest.class, metaDataMap); } @@ -235,6 +242,9 @@ public GetPartitionsRequest(GetPartitionsRequest other) { if (other.isSetProcessorIdentifier()) { this.processorIdentifier = other.processorIdentifier; } + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } } public GetPartitionsRequest deepCopy() { @@ -254,6 +264,7 @@ public void clear() { this.filterSpec = null; this.processorCapabilities = null; this.processorIdentifier = null; + this.validWriteIdList = null; } public String getCatName() { @@ -515,6 +526,29 @@ public void setProcessorIdentifierIsSet(boolean value) { } } + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case CAT_NAME: @@ 
-597,6 +631,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + } } @@ -632,6 +674,9 @@ public Object getFieldValue(_Fields field) { case PROCESSOR_IDENTIFIER: return getProcessorIdentifier(); + case VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + } throw new IllegalStateException(); } @@ -663,6 +708,8 @@ public boolean isSet(_Fields field) { return isSetProcessorCapabilities(); case PROCESSOR_IDENTIFIER: return isSetProcessorIdentifier(); + case VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); } throw new IllegalStateException(); } @@ -770,6 +817,15 @@ public boolean equals(GetPartitionsRequest that) { return false; } + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if (!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + return true; } @@ -827,6 +883,11 @@ public int hashCode() { if (present_processorIdentifier) list.add(processorIdentifier); + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + return list.hashCode(); } @@ -938,6 +999,16 @@ public int compareTo(GetPartitionsRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -1045,6 +1116,16 @@ public String toString() { } first = false; } + if (isSetValidWriteIdList()) { + if (!first) sb.append(", "); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -1139,13 +1220,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetPartitionsReques case 6: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1144 = iprot.readListBegin(); - struct.groupNames = new ArrayList(_list1144.size); - String _elem1145; - for (int _i1146 = 0; _i1146 < _list1144.size; ++_i1146) + org.apache.thrift.protocol.TList _list1152 = iprot.readListBegin(); + struct.groupNames = new ArrayList(_list1152.size); + String _elem1153; + for (int _i1154 = 0; _i1154 < _list1152.size; ++_i1154) { - _elem1145 = iprot.readString(); - struct.groupNames.add(_elem1145); + _elem1153 = iprot.readString(); + struct.groupNames.add(_elem1153); } iprot.readListEnd(); } @@ -1175,13 +1256,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetPartitionsReques case 9: // PROCESSOR_CAPABILITIES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1147 = iprot.readListBegin(); - struct.processorCapabilities = new ArrayList(_list1147.size); - String _elem1148; - for (int _i1149 = 0; _i1149 < _list1147.size; ++_i1149) + 
org.apache.thrift.protocol.TList _list1155 = iprot.readListBegin(); + struct.processorCapabilities = new ArrayList(_list1155.size); + String _elem1156; + for (int _i1157 = 0; _i1157 < _list1155.size; ++_i1157) { - _elem1148 = iprot.readString(); - struct.processorCapabilities.add(_elem1148); + _elem1156 = iprot.readString(); + struct.processorCapabilities.add(_elem1156); } iprot.readListEnd(); } @@ -1198,6 +1279,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetPartitionsReques org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 11: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -1245,9 +1334,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetPartitionsReque oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.groupNames.size())); - for (String _iter1150 : struct.groupNames) + for (String _iter1158 : struct.groupNames) { - oprot.writeString(_iter1150); + oprot.writeString(_iter1158); } oprot.writeListEnd(); } @@ -1269,9 +1358,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetPartitionsReque oprot.writeFieldBegin(PROCESSOR_CAPABILITIES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.processorCapabilities.size())); - for (String _iter1151 : struct.processorCapabilities) + for (String _iter1159 : struct.processorCapabilities) { - oprot.writeString(_iter1151); + oprot.writeString(_iter1159); } oprot.writeListEnd(); } @@ -1285,6 +1374,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetPartitionsReque oprot.writeFieldEnd(); } } + if (struct.validWriteIdList != null) { + if (struct.isSetValidWriteIdList()) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -1333,7 +1429,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetPartitionsReques if (struct.isSetProcessorIdentifier()) { optionals.set(9); } - oprot.writeBitSet(optionals, 10); + if (struct.isSetValidWriteIdList()) { + optionals.set(10); + } + oprot.writeBitSet(optionals, 11); if (struct.isSetCatName()) { oprot.writeString(struct.catName); } @@ -1352,9 +1451,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetPartitionsReques if (struct.isSetGroupNames()) { { oprot.writeI32(struct.groupNames.size()); - for (String _iter1152 : struct.groupNames) + for (String _iter1160 : struct.groupNames) { - oprot.writeString(_iter1152); + oprot.writeString(_iter1160); } } } @@ -1367,21 +1466,24 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetPartitionsReques if (struct.isSetProcessorCapabilities()) { { oprot.writeI32(struct.processorCapabilities.size()); - for (String _iter1153 : struct.processorCapabilities) + for (String _iter1161 : struct.processorCapabilities) { - oprot.writeString(_iter1153); + oprot.writeString(_iter1161); } } } if (struct.isSetProcessorIdentifier()) { oprot.writeString(struct.processorIdentifier); } + if (struct.isSetValidWriteIdList()) { + 
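/*
 * GetPartitionsRequest gets the identical optional-field upgrade at field id 11,
 * with the optionals BitSet growing 10 -> 11. A hypothetical sketch of the receiving
 * side (ValidReaderWriteIdList and its String constructor are assumed from
 * org.apache.hadoop.hive.common, not part of this diff):
 *
 *   if (req.isSetValidWriteIdList()) {
 *     ValidWriteIdList writeIds = new ValidReaderWriteIdList(req.getValidWriteIdList());
 *     // answer the request against the caller's transactional snapshot
 *   }
 */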
oprot.writeString(struct.validWriteIdList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, GetPartitionsRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(10); + BitSet incoming = iprot.readBitSet(11); if (incoming.get(0)) { struct.catName = iprot.readString(); struct.setCatNameIsSet(true); @@ -1404,13 +1506,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetPartitionsRequest } if (incoming.get(5)) { { - org.apache.thrift.protocol.TList _list1154 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.groupNames = new ArrayList(_list1154.size); - String _elem1155; - for (int _i1156 = 0; _i1156 < _list1154.size; ++_i1156) + org.apache.thrift.protocol.TList _list1162 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.groupNames = new ArrayList(_list1162.size); + String _elem1163; + for (int _i1164 = 0; _i1164 < _list1162.size; ++_i1164) { - _elem1155 = iprot.readString(); - struct.groupNames.add(_elem1155); + _elem1163 = iprot.readString(); + struct.groupNames.add(_elem1163); } } struct.setGroupNamesIsSet(true); @@ -1427,13 +1529,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetPartitionsRequest } if (incoming.get(8)) { { - org.apache.thrift.protocol.TList _list1157 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.processorCapabilities = new ArrayList(_list1157.size); - String _elem1158; - for (int _i1159 = 0; _i1159 < _list1157.size; ++_i1159) + org.apache.thrift.protocol.TList _list1165 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.processorCapabilities = new ArrayList(_list1165.size); + String _elem1166; + for (int _i1167 = 0; _i1167 < _list1165.size; ++_i1167) { - _elem1158 = iprot.readString(); - struct.processorCapabilities.add(_elem1158); + _elem1166 = iprot.readString(); + struct.processorCapabilities.add(_elem1166); } } struct.setProcessorCapabilitiesIsSet(true); @@ -1442,6 +1544,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetPartitionsRequest struct.processorIdentifier = iprot.readString(); struct.setProcessorIdentifierIsSet(true); } + if (incoming.get(10)) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } } } diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsResponse.java index ddfa59fb1c..423b827766 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsResponse.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsResponse.java @@ -350,14 +350,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetPartitionsRespon case 1: // PARTITION_SPEC if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1136 = iprot.readListBegin(); - struct.partitionSpec = new ArrayList(_list1136.size); - PartitionSpec _elem1137; - for (int _i1138 = 0; _i1138 < _list1136.size; ++_i1138) + org.apache.thrift.protocol.TList _list1144 = 
iprot.readListBegin(); + struct.partitionSpec = new ArrayList(_list1144.size); + PartitionSpec _elem1145; + for (int _i1146 = 0; _i1146 < _list1144.size; ++_i1146) { - _elem1137 = new PartitionSpec(); - _elem1137.read(iprot); - struct.partitionSpec.add(_elem1137); + _elem1145 = new PartitionSpec(); + _elem1145.read(iprot); + struct.partitionSpec.add(_elem1145); } iprot.readListEnd(); } @@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetPartitionsRespo oprot.writeFieldBegin(PARTITION_SPEC_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitionSpec.size())); - for (PartitionSpec _iter1139 : struct.partitionSpec) + for (PartitionSpec _iter1147 : struct.partitionSpec) { - _iter1139.write(oprot); + _iter1147.write(oprot); } oprot.writeListEnd(); } @@ -416,9 +416,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetPartitionsRespon if (struct.isSetPartitionSpec()) { { oprot.writeI32(struct.partitionSpec.size()); - for (PartitionSpec _iter1140 : struct.partitionSpec) + for (PartitionSpec _iter1148 : struct.partitionSpec) { - _iter1140.write(oprot); + _iter1148.write(oprot); } } } @@ -430,14 +430,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetPartitionsRespons BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1141 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.partitionSpec = new ArrayList(_list1141.size); - PartitionSpec _elem1142; - for (int _i1143 = 0; _i1143 < _list1141.size; ++_i1143) + org.apache.thrift.protocol.TList _list1149 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.partitionSpec = new ArrayList(_list1149.size); + PartitionSpec _elem1150; + for (int _i1151 = 0; _i1151 < _list1149.size; ++_i1151) { - _elem1142 = new PartitionSpec(); - _elem1142.read(iprot); - struct.partitionSpec.add(_elem1142); + _elem1150 = new PartitionSpec(); + _elem1150.read(iprot); + struct.partitionSpec.add(_elem1150); } } struct.setPartitionSpecIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java index efa02f324a..3a84c46871 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java @@ -974,13 +974,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetTableRequest str case 8: // PROCESSOR_CAPABILITIES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list912 = iprot.readListBegin(); - struct.processorCapabilities = new ArrayList(_list912.size); - String _elem913; - for (int _i914 = 0; _i914 < _list912.size; ++_i914) + org.apache.thrift.protocol.TList _list920 = iprot.readListBegin(); + struct.processorCapabilities = new ArrayList(_list920.size); + String _elem921; + for (int _i922 = 0; _i922 < _list920.size; ++_i922) { - _elem913 = iprot.readString(); - struct.processorCapabilities.add(_elem913); + _elem921 = iprot.readString(); + struct.processorCapabilities.add(_elem921); } iprot.readListEnd(); } @@ 
-1051,9 +1051,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetTableRequest st oprot.writeFieldBegin(PROCESSOR_CAPABILITIES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.processorCapabilities.size())); - for (String _iter915 : struct.processorCapabilities) + for (String _iter923 : struct.processorCapabilities) { - oprot.writeString(_iter915); + oprot.writeString(_iter923); } oprot.writeListEnd(); } @@ -1121,9 +1121,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetTableRequest str if (struct.isSetProcessorCapabilities()) { { oprot.writeI32(struct.processorCapabilities.size()); - for (String _iter916 : struct.processorCapabilities) + for (String _iter924 : struct.processorCapabilities) { - oprot.writeString(_iter916); + oprot.writeString(_iter924); } } } @@ -1159,13 +1159,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetTableRequest stru } if (incoming.get(4)) { { - org.apache.thrift.protocol.TList _list917 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.processorCapabilities = new ArrayList(_list917.size); - String _elem918; - for (int _i919 = 0; _i919 < _list917.size; ++_i919) + org.apache.thrift.protocol.TList _list925 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.processorCapabilities = new ArrayList(_list925.size); + String _elem926; + for (int _i927 = 0; _i927 < _list925.size; ++_i927) { - _elem918 = iprot.readString(); - struct.processorCapabilities.add(_elem918); + _elem926 = iprot.readString(); + struct.processorCapabilities.add(_elem926); } } struct.setProcessorCapabilitiesIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesExtRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesExtRequest.java index 88ff2202b9..c350aba5be 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesExtRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesExtRequest.java @@ -885,13 +885,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetTablesExtRequest case 6: // PROCESSOR_CAPABILITIES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list944 = iprot.readListBegin(); - struct.processorCapabilities = new ArrayList(_list944.size); - String _elem945; - for (int _i946 = 0; _i946 < _list944.size; ++_i946) + org.apache.thrift.protocol.TList _list952 = iprot.readListBegin(); + struct.processorCapabilities = new ArrayList(_list952.size); + String _elem953; + for (int _i954 = 0; _i954 < _list952.size; ++_i954) { - _elem945 = iprot.readString(); - struct.processorCapabilities.add(_elem945); + _elem953 = iprot.readString(); + struct.processorCapabilities.add(_elem953); } iprot.readListEnd(); } @@ -949,9 +949,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetTablesExtReques oprot.writeFieldBegin(PROCESSOR_CAPABILITIES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.processorCapabilities.size())); - for (String _iter947 : struct.processorCapabilities) + for (String _iter955 : 
struct.processorCapabilities) { - oprot.writeString(_iter947); + oprot.writeString(_iter955); } oprot.writeListEnd(); } @@ -1003,9 +1003,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetTablesExtRequest if (struct.isSetProcessorCapabilities()) { { oprot.writeI32(struct.processorCapabilities.size()); - for (String _iter948 : struct.processorCapabilities) + for (String _iter956 : struct.processorCapabilities) { - oprot.writeString(_iter948); + oprot.writeString(_iter956); } } } @@ -1032,13 +1032,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetTablesExtRequest } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list949 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.processorCapabilities = new ArrayList(_list949.size); - String _elem950; - for (int _i951 = 0; _i951 < _list949.size; ++_i951) + org.apache.thrift.protocol.TList _list957 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.processorCapabilities = new ArrayList(_list957.size); + String _elem958; + for (int _i959 = 0; _i959 < _list957.size; ++_i959) { - _elem950 = iprot.readString(); - struct.processorCapabilities.add(_elem950); + _elem958 = iprot.readString(); + struct.processorCapabilities.add(_elem958); } } struct.setProcessorCapabilitiesIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesRequest.java index 414128b873..786197ae2e 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesRequest.java @@ -785,13 +785,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetTablesRequest st case 2: // TBL_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list920 = iprot.readListBegin(); - struct.tblNames = new ArrayList(_list920.size); - String _elem921; - for (int _i922 = 0; _i922 < _list920.size; ++_i922) + org.apache.thrift.protocol.TList _list928 = iprot.readListBegin(); + struct.tblNames = new ArrayList(_list928.size); + String _elem929; + for (int _i930 = 0; _i930 < _list928.size; ++_i930) { - _elem921 = iprot.readString(); - struct.tblNames.add(_elem921); + _elem929 = iprot.readString(); + struct.tblNames.add(_elem929); } iprot.readListEnd(); } @@ -820,13 +820,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetTablesRequest st case 5: // PROCESSOR_CAPABILITIES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list923 = iprot.readListBegin(); - struct.processorCapabilities = new ArrayList(_list923.size); - String _elem924; - for (int _i925 = 0; _i925 < _list923.size; ++_i925) + org.apache.thrift.protocol.TList _list931 = iprot.readListBegin(); + struct.processorCapabilities = new ArrayList(_list931.size); + String _elem932; + for (int _i933 = 0; _i933 < _list931.size; ++_i933) { - _elem924 = iprot.readString(); - struct.processorCapabilities.add(_elem924); + _elem932 = iprot.readString(); + struct.processorCapabilities.add(_elem932); } iprot.readListEnd(); } @@ -866,9 +866,9 @@ public void 
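/*
 * The remaining hunks (GetTableRequest, GetTablesExtRequest, GetTablesRequest,
 * GetTablesResult) are pure renumbering fallout from the same IDL additions; every
 * loop keeps its shape, e.g. (illustrative name):
 *
 *   for (String capability : struct.processorCapabilities) {
 *     oprot.writeString(capability);
 *   }
 */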
write(org.apache.thrift.protocol.TProtocol oprot, GetTablesRequest s oprot.writeFieldBegin(TBL_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tblNames.size())); - for (String _iter926 : struct.tblNames) + for (String _iter934 : struct.tblNames) { - oprot.writeString(_iter926); + oprot.writeString(_iter934); } oprot.writeListEnd(); } @@ -894,9 +894,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetTablesRequest s oprot.writeFieldBegin(PROCESSOR_CAPABILITIES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.processorCapabilities.size())); - for (String _iter927 : struct.processorCapabilities) + for (String _iter935 : struct.processorCapabilities) { - oprot.writeString(_iter927); + oprot.writeString(_iter935); } oprot.writeListEnd(); } @@ -948,9 +948,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetTablesRequest st if (struct.isSetTblNames()) { { oprot.writeI32(struct.tblNames.size()); - for (String _iter928 : struct.tblNames) + for (String _iter936 : struct.tblNames) { - oprot.writeString(_iter928); + oprot.writeString(_iter936); } } } @@ -963,9 +963,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetTablesRequest st if (struct.isSetProcessorCapabilities()) { { oprot.writeI32(struct.processorCapabilities.size()); - for (String _iter929 : struct.processorCapabilities) + for (String _iter937 : struct.processorCapabilities) { - oprot.writeString(_iter929); + oprot.writeString(_iter937); } } } @@ -982,13 +982,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetTablesRequest str BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list930 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tblNames = new ArrayList(_list930.size); - String _elem931; - for (int _i932 = 0; _i932 < _list930.size; ++_i932) + org.apache.thrift.protocol.TList _list938 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tblNames = new ArrayList(_list938.size); + String _elem939; + for (int _i940 = 0; _i940 < _list938.size; ++_i940) { - _elem931 = iprot.readString(); - struct.tblNames.add(_elem931); + _elem939 = iprot.readString(); + struct.tblNames.add(_elem939); } } struct.setTblNamesIsSet(true); @@ -1004,13 +1004,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetTablesRequest str } if (incoming.get(3)) { { - org.apache.thrift.protocol.TList _list933 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.processorCapabilities = new ArrayList(_list933.size); - String _elem934; - for (int _i935 = 0; _i935 < _list933.size; ++_i935) + org.apache.thrift.protocol.TList _list941 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.processorCapabilities = new ArrayList(_list941.size); + String _elem942; + for (int _i943 = 0; _i943 < _list941.size; ++_i943) { - _elem934 = iprot.readString(); - struct.processorCapabilities.add(_elem934); + _elem942 = iprot.readString(); + struct.processorCapabilities.add(_elem942); } } struct.setProcessorCapabilitiesIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesResult.java 
b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesResult.java index 1e95bd3ae4..2b4f82ce5f 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesResult.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesResult.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetTablesResult str case 1: // TABLES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list936 = iprot.readListBegin(); - struct.tables = new ArrayList(_list936.size); - Table _elem937; - for (int _i938 = 0; _i938 < _list936.size; ++_i938) + org.apache.thrift.protocol.TList _list944 = iprot.readListBegin(); + struct.tables = new ArrayList
(_list944.size); + Table _elem945; + for (int _i946 = 0; _i946 < _list944.size; ++_i946) { - _elem937 = new Table(); - _elem937.read(iprot); - struct.tables.add(_elem937); + _elem945 = new Table(); + _elem945.read(iprot); + struct.tables.add(_elem945); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetTablesResult st oprot.writeFieldBegin(TABLES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.tables.size())); - for (Table _iter939 : struct.tables) + for (Table _iter947 : struct.tables) { - _iter939.write(oprot); + _iter947.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetTablesResult str TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.tables.size()); - for (Table _iter940 : struct.tables) + for (Table _iter948 : struct.tables) { - _iter940.write(oprot); + _iter948.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetTablesResult str public void read(org.apache.thrift.protocol.TProtocol prot, GetTablesResult struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list941 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.tables = new ArrayList
(_list941.size); - Table _elem942; - for (int _i943 = 0; _i943 < _list941.size; ++_i943) + org.apache.thrift.protocol.TList _list949 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.tables = new ArrayList
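For struct-valued lists such as GetTablesResult's List<Table>, the generated reader presizes from the count in the list header and lets each element deserialize itself. A sketch of that pattern, relying only on the Table.read(TProtocol) contract visible in the hunk above:

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.thrift.TException;
import org.apache.thrift.protocol.TList;
import org.apache.thrift.protocol.TProtocol;

final class ThriftStructListReadSketch {
  // Mirrors the generated reader: read the header (_listNNN), presize the
  // ArrayList from header.size, then delegate to each element's read().
  static List<Table> readTableList(TProtocol iprot) throws TException {
    TList header = iprot.readListBegin();
    List<Table> tables = new ArrayList<>(header.size);
    for (int i = 0; i < header.size; ++i) {
      Table element = new Table(); // _elemNNN in the generated code
      element.read(iprot);
      tables.add(element);
    }
    iprot.readListEnd();
    return tables;
  }
}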
(_list949.size); + Table _elem950; + for (int _i951 = 0; _i951 < _list949.size; ++_i951) { - _elem942 = new Table(); - _elem942.read(iprot); - struct.tables.add(_elem942); + _elem950 = new Table(); + _elem950.read(iprot); + struct.tables.add(_elem950); } } struct.setTablesIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTxnTableWriteIdsResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTxnTableWriteIdsResponse.java new file mode 100644 index 0000000000..3b61311044 --- /dev/null +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTxnTableWriteIdsResponse.java @@ -0,0 +1,443 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class GetTxnTableWriteIdsResponse implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetTxnTableWriteIdsResponse"); + + private static final org.apache.thrift.protocol.TField TABLE_WRITE_IDS_FIELD_DESC = new org.apache.thrift.protocol.TField("tableWriteIds", org.apache.thrift.protocol.TType.LIST, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new GetTxnTableWriteIdsResponseStandardSchemeFactory()); + schemes.put(TupleScheme.class, new GetTxnTableWriteIdsResponseTupleSchemeFactory()); + } + + private List tableWriteIds; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + TABLE_WRITE_IDS((short)1, "tableWriteIds"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. 
+ */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // TABLE_WRITE_IDS + return TABLE_WRITE_IDS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.TABLE_WRITE_IDS, new org.apache.thrift.meta_data.FieldMetaData("tableWriteIds", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TableWriteId.class)))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetTxnTableWriteIdsResponse.class, metaDataMap); + } + + public GetTxnTableWriteIdsResponse() { + } + + public GetTxnTableWriteIdsResponse( + List tableWriteIds) + { + this(); + this.tableWriteIds = tableWriteIds; + } + + /** + * Performs a deep copy on other. + */ + public GetTxnTableWriteIdsResponse(GetTxnTableWriteIdsResponse other) { + if (other.isSetTableWriteIds()) { + List __this__tableWriteIds = new ArrayList(other.tableWriteIds.size()); + for (TableWriteId other_element : other.tableWriteIds) { + __this__tableWriteIds.add(new TableWriteId(other_element)); + } + this.tableWriteIds = __this__tableWriteIds; + } + } + + public GetTxnTableWriteIdsResponse deepCopy() { + return new GetTxnTableWriteIdsResponse(this); + } + + @Override + public void clear() { + this.tableWriteIds = null; + } + + public int getTableWriteIdsSize() { + return (this.tableWriteIds == null) ? 0 : this.tableWriteIds.size(); + } + + public java.util.Iterator getTableWriteIdsIterator() { + return (this.tableWriteIds == null) ? 
null : this.tableWriteIds.iterator(); + } + + public void addToTableWriteIds(TableWriteId elem) { + if (this.tableWriteIds == null) { + this.tableWriteIds = new ArrayList(); + } + this.tableWriteIds.add(elem); + } + + public List getTableWriteIds() { + return this.tableWriteIds; + } + + public void setTableWriteIds(List tableWriteIds) { + this.tableWriteIds = tableWriteIds; + } + + public void unsetTableWriteIds() { + this.tableWriteIds = null; + } + + /** Returns true if field tableWriteIds is set (has been assigned a value) and false otherwise */ + public boolean isSetTableWriteIds() { + return this.tableWriteIds != null; + } + + public void setTableWriteIdsIsSet(boolean value) { + if (!value) { + this.tableWriteIds = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case TABLE_WRITE_IDS: + if (value == null) { + unsetTableWriteIds(); + } else { + setTableWriteIds((List)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case TABLE_WRITE_IDS: + return getTableWriteIds(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case TABLE_WRITE_IDS: + return isSetTableWriteIds(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof GetTxnTableWriteIdsResponse) + return this.equals((GetTxnTableWriteIdsResponse)that); + return false; + } + + public boolean equals(GetTxnTableWriteIdsResponse that) { + if (that == null) + return false; + + boolean this_present_tableWriteIds = true && this.isSetTableWriteIds(); + boolean that_present_tableWriteIds = true && that.isSetTableWriteIds(); + if (this_present_tableWriteIds || that_present_tableWriteIds) { + if (!(this_present_tableWriteIds && that_present_tableWriteIds)) + return false; + if (!this.tableWriteIds.equals(that.tableWriteIds)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_tableWriteIds = true && (isSetTableWriteIds()); + list.add(present_tableWriteIds); + if (present_tableWriteIds) + list.add(tableWriteIds); + + return list.hashCode(); + } + + @Override + public int compareTo(GetTxnTableWriteIdsResponse other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetTableWriteIds()).compareTo(other.isSetTableWriteIds()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTableWriteIds()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tableWriteIds, other.tableWriteIds); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new 
StringBuilder("GetTxnTableWriteIdsResponse("); + boolean first = true; + + sb.append("tableWriteIds:"); + if (this.tableWriteIds == null) { + sb.append("null"); + } else { + sb.append(this.tableWriteIds); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetTableWriteIds()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'tableWriteIds' is unset! Struct:" + toString()); + } + + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class GetTxnTableWriteIdsResponseStandardSchemeFactory implements SchemeFactory { + public GetTxnTableWriteIdsResponseStandardScheme getScheme() { + return new GetTxnTableWriteIdsResponseStandardScheme(); + } + } + + private static class GetTxnTableWriteIdsResponseStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, GetTxnTableWriteIdsResponse struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // TABLE_WRITE_IDS + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list690 = iprot.readListBegin(); + struct.tableWriteIds = new ArrayList(_list690.size); + TableWriteId _elem691; + for (int _i692 = 0; _i692 < _list690.size; ++_i692) + { + _elem691 = new TableWriteId(); + _elem691.read(iprot); + struct.tableWriteIds.add(_elem691); + } + iprot.readListEnd(); + } + struct.setTableWriteIdsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, GetTxnTableWriteIdsResponse struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.tableWriteIds != null) { + oprot.writeFieldBegin(TABLE_WRITE_IDS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.tableWriteIds.size())); + for (TableWriteId _iter693 : struct.tableWriteIds) + { + _iter693.write(oprot); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class GetTxnTableWriteIdsResponseTupleSchemeFactory implements SchemeFactory { + public GetTxnTableWriteIdsResponseTupleScheme getScheme() { + return new GetTxnTableWriteIdsResponseTupleScheme(); + } + } + + private static class 
GetTxnTableWriteIdsResponseTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, GetTxnTableWriteIdsResponse struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + { + oprot.writeI32(struct.tableWriteIds.size()); + for (TableWriteId _iter694 : struct.tableWriteIds) + { + _iter694.write(oprot); + } + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, GetTxnTableWriteIdsResponse struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + { + org.apache.thrift.protocol.TList _list695 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.tableWriteIds = new ArrayList(_list695.size); + TableWriteId _elem696; + for (int _i697 = 0; _i697 < _list695.size; ++_i697) + { + _elem696 = new TableWriteId(); + _elem696.read(iprot); + struct.tableWriteIds.add(_elem696); + } + } + struct.setTableWriteIdsIsSet(true); + } + } + +} + diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsResponse.java index 4e4a36fa57..7c6120aa13 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsResponse.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsResponse.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetValidWriteIdsRes case 1: // TBL_VALID_WRITE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list690 = iprot.readListBegin(); - struct.tblValidWriteIds = new ArrayList(_list690.size); - TableValidWriteIds _elem691; - for (int _i692 = 0; _i692 < _list690.size; ++_i692) + org.apache.thrift.protocol.TList _list698 = iprot.readListBegin(); + struct.tblValidWriteIds = new ArrayList(_list698.size); + TableValidWriteIds _elem699; + for (int _i700 = 0; _i700 < _list698.size; ++_i700) { - _elem691 = new TableValidWriteIds(); - _elem691.read(iprot); - struct.tblValidWriteIds.add(_elem691); + _elem699 = new TableValidWriteIds(); + _elem699.read(iprot); + struct.tblValidWriteIds.add(_elem699); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetValidWriteIdsRe oprot.writeFieldBegin(TBL_VALID_WRITE_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.tblValidWriteIds.size())); - for (TableValidWriteIds _iter693 : struct.tblValidWriteIds) + for (TableValidWriteIds _iter701 : struct.tblValidWriteIds) { - _iter693.write(oprot); + _iter701.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetValidWriteIdsRes TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.tblValidWriteIds.size()); - for (TableValidWriteIds _iter694 : struct.tblValidWriteIds) + for (TableValidWriteIds _iter702 : struct.tblValidWriteIds) { - _iter694.write(oprot); + _iter702.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetValidWriteIdsRes public void read(org.apache.thrift.protocol.TProtocol prot, 
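Because tableWriteIds is the new GetTxnTableWriteIdsResponse's only field and is declared REQUIRED, validate() (invoked by both schemes above) rejects an instance on which the list was never assigned. A usage sketch for the new class; TableWriteId's own setters are not part of this patch, so only its no-arg constructor (used by the generated reader above) is assumed:

import org.apache.hadoop.hive.metastore.api.GetTxnTableWriteIdsResponse;
import org.apache.hadoop.hive.metastore.api.TableWriteId;
import org.apache.thrift.TException;

final class GetTxnTableWriteIdsResponseUsageSketch {
  static GetTxnTableWriteIdsResponse build() throws TException {
    GetTxnTableWriteIdsResponse response = new GetTxnTableWriteIdsResponse();
    response.addToTableWriteIds(new TableWriteId()); // lazily allocates the backing ArrayList
    response.validate(); // passes now; throws TProtocolException while tableWriteIds is unset
    return response;
  }
}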
GetValidWriteIdsResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list695 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.tblValidWriteIds = new ArrayList(_list695.size); - TableValidWriteIds _elem696; - for (int _i697 = 0; _i697 < _list695.size; ++_i697) + org.apache.thrift.protocol.TList _list703 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.tblValidWriteIds = new ArrayList(_list703.size); + TableValidWriteIds _elem704; + for (int _i705 = 0; _i705 < _list703.size; ++_i705) { - _elem696 = new TableValidWriteIds(); - _elem696.read(iprot); - struct.tblValidWriteIds.add(_elem696); + _elem704 = new TableValidWriteIds(); + _elem704.read(iprot); + struct.tblValidWriteIds.add(_elem704); } } struct.setTblValidWriteIdsIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeResponse.java index c00facefd9..84498992a0 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeResponse.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeResponse.java @@ -453,13 +453,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, HeartbeatTxnRangeRe case 1: // ABORTED if (schemeField.type == org.apache.thrift.protocol.TType.SET) { { - org.apache.thrift.protocol.TSet _set738 = iprot.readSetBegin(); - struct.aborted = new HashSet(2*_set738.size); - long _elem739; - for (int _i740 = 0; _i740 < _set738.size; ++_i740) + org.apache.thrift.protocol.TSet _set746 = iprot.readSetBegin(); + struct.aborted = new HashSet(2*_set746.size); + long _elem747; + for (int _i748 = 0; _i748 < _set746.size; ++_i748) { - _elem739 = iprot.readI64(); - struct.aborted.add(_elem739); + _elem747 = iprot.readI64(); + struct.aborted.add(_elem747); } iprot.readSetEnd(); } @@ -471,13 +471,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, HeartbeatTxnRangeRe case 2: // NOSUCH if (schemeField.type == org.apache.thrift.protocol.TType.SET) { { - org.apache.thrift.protocol.TSet _set741 = iprot.readSetBegin(); - struct.nosuch = new HashSet(2*_set741.size); - long _elem742; - for (int _i743 = 0; _i743 < _set741.size; ++_i743) + org.apache.thrift.protocol.TSet _set749 = iprot.readSetBegin(); + struct.nosuch = new HashSet(2*_set749.size); + long _elem750; + for (int _i751 = 0; _i751 < _set749.size; ++_i751) { - _elem742 = iprot.readI64(); - struct.nosuch.add(_elem742); + _elem750 = iprot.readI64(); + struct.nosuch.add(_elem750); } iprot.readSetEnd(); } @@ -503,9 +503,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, HeartbeatTxnRangeR oprot.writeFieldBegin(ABORTED_FIELD_DESC); { oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, struct.aborted.size())); - for (long _iter744 : struct.aborted) + for (long _iter752 : struct.aborted) { - oprot.writeI64(_iter744); + oprot.writeI64(_iter752); } oprot.writeSetEnd(); } @@ -515,9 +515,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, HeartbeatTxnRangeR oprot.writeFieldBegin(NOSUCH_FIELD_DESC); { oprot.writeSetBegin(new 
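HeartbeatTxnRangeResponse carries its aborted and nosuch transaction IDs as set<i64>; the generated reader sizes each HashSet at twice the element count, presumably to stay under the default 0.75 load factor and avoid rehashing while loading. A sketch of that set-reading pattern:

import java.util.HashSet;
import java.util.Set;
import org.apache.thrift.TException;
import org.apache.thrift.protocol.TProtocol;
import org.apache.thrift.protocol.TSet;

final class ThriftLongSetReadSketch {
  // Mirrors the generated reader: the header (_setNNN) gives the count,
  // and the 2 * size capacity keeps the HashSet from resizing mid-load.
  static Set<Long> readTxnIdSet(TProtocol iprot) throws TException {
    TSet header = iprot.readSetBegin();
    Set<Long> txnIds = new HashSet<>(2 * header.size);
    for (int i = 0; i < header.size; ++i) {
      txnIds.add(iprot.readI64()); // _elemNNN in the generated code
    }
    iprot.readSetEnd();
    return txnIds;
  }
}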
org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, struct.nosuch.size())); - for (long _iter745 : struct.nosuch) + for (long _iter753 : struct.nosuch) { - oprot.writeI64(_iter745); + oprot.writeI64(_iter753); } oprot.writeSetEnd(); } @@ -542,16 +542,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, HeartbeatTxnRangeRe TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.aborted.size()); - for (long _iter746 : struct.aborted) + for (long _iter754 : struct.aborted) { - oprot.writeI64(_iter746); + oprot.writeI64(_iter754); } } { oprot.writeI32(struct.nosuch.size()); - for (long _iter747 : struct.nosuch) + for (long _iter755 : struct.nosuch) { - oprot.writeI64(_iter747); + oprot.writeI64(_iter755); } } } @@ -560,24 +560,24 @@ public void write(org.apache.thrift.protocol.TProtocol prot, HeartbeatTxnRangeRe public void read(org.apache.thrift.protocol.TProtocol prot, HeartbeatTxnRangeResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TSet _set748 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.aborted = new HashSet(2*_set748.size); - long _elem749; - for (int _i750 = 0; _i750 < _set748.size; ++_i750) + org.apache.thrift.protocol.TSet _set756 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.aborted = new HashSet(2*_set756.size); + long _elem757; + for (int _i758 = 0; _i758 < _set756.size; ++_i758) { - _elem749 = iprot.readI64(); - struct.aborted.add(_elem749); + _elem757 = iprot.readI64(); + struct.aborted.add(_elem757); } } struct.setAbortedIsSet(true); { - org.apache.thrift.protocol.TSet _set751 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.nosuch = new HashSet(2*_set751.size); - long _elem752; - for (int _i753 = 0; _i753 < _set751.size; ++_i753) + org.apache.thrift.protocol.TSet _set759 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.nosuch = new HashSet(2*_set759.size); + long _elem760; + for (int _i761 = 0; _i761 < _set759.size; ++_i761) { - _elem752 = iprot.readI64(); - struct.nosuch.add(_elem752); + _elem760 = iprot.readI64(); + struct.nosuch.add(_elem760); } } struct.setNosuchIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java index adbf18cfd2..f1476b6257 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java @@ -636,13 +636,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, InsertEventRequestD case 2: // FILES_ADDED if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list796 = iprot.readListBegin(); - struct.filesAdded = new ArrayList(_list796.size); - String _elem797; - for (int _i798 = 0; _i798 < _list796.size; ++_i798) + org.apache.thrift.protocol.TList _list804 = iprot.readListBegin(); + struct.filesAdded = new ArrayList(_list804.size); + String _elem805; + for (int _i806 = 0; _i806 < _list804.size; ++_i806) { - 
_elem797 = iprot.readString(); - struct.filesAdded.add(_elem797); + _elem805 = iprot.readString(); + struct.filesAdded.add(_elem805); } iprot.readListEnd(); } @@ -654,13 +654,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, InsertEventRequestD case 3: // FILES_ADDED_CHECKSUM if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list799 = iprot.readListBegin(); - struct.filesAddedChecksum = new ArrayList(_list799.size); - String _elem800; - for (int _i801 = 0; _i801 < _list799.size; ++_i801) + org.apache.thrift.protocol.TList _list807 = iprot.readListBegin(); + struct.filesAddedChecksum = new ArrayList(_list807.size); + String _elem808; + for (int _i809 = 0; _i809 < _list807.size; ++_i809) { - _elem800 = iprot.readString(); - struct.filesAddedChecksum.add(_elem800); + _elem808 = iprot.readString(); + struct.filesAddedChecksum.add(_elem808); } iprot.readListEnd(); } @@ -672,13 +672,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, InsertEventRequestD case 4: // SUB_DIRECTORY_LIST if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list802 = iprot.readListBegin(); - struct.subDirectoryList = new ArrayList(_list802.size); - String _elem803; - for (int _i804 = 0; _i804 < _list802.size; ++_i804) + org.apache.thrift.protocol.TList _list810 = iprot.readListBegin(); + struct.subDirectoryList = new ArrayList(_list810.size); + String _elem811; + for (int _i812 = 0; _i812 < _list810.size; ++_i812) { - _elem803 = iprot.readString(); - struct.subDirectoryList.add(_elem803); + _elem811 = iprot.readString(); + struct.subDirectoryList.add(_elem811); } iprot.readListEnd(); } @@ -709,9 +709,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, InsertEventRequest oprot.writeFieldBegin(FILES_ADDED_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.filesAdded.size())); - for (String _iter805 : struct.filesAdded) + for (String _iter813 : struct.filesAdded) { - oprot.writeString(_iter805); + oprot.writeString(_iter813); } oprot.writeListEnd(); } @@ -722,9 +722,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, InsertEventRequest oprot.writeFieldBegin(FILES_ADDED_CHECKSUM_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.filesAddedChecksum.size())); - for (String _iter806 : struct.filesAddedChecksum) + for (String _iter814 : struct.filesAddedChecksum) { - oprot.writeString(_iter806); + oprot.writeString(_iter814); } oprot.writeListEnd(); } @@ -736,9 +736,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, InsertEventRequest oprot.writeFieldBegin(SUB_DIRECTORY_LIST_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.subDirectoryList.size())); - for (String _iter807 : struct.subDirectoryList) + for (String _iter815 : struct.subDirectoryList) { - oprot.writeString(_iter807); + oprot.writeString(_iter815); } oprot.writeListEnd(); } @@ -764,9 +764,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, InsertEventRequestD TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.filesAdded.size()); - for (String _iter808 : struct.filesAdded) + for (String _iter816 : struct.filesAdded) { - oprot.writeString(_iter808); + oprot.writeString(_iter816); } } BitSet optionals = new BitSet(); @@ -786,18 +786,18 @@ public 
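InsertEventRequestData's TupleScheme shows the compact encoding: required fields (filesAdded) are written unconditionally in field order, then a BitSet records which optional fields follow. A sketch reduced to one required and one optional list, so the field names here merely stand in for the real ones:

import java.util.BitSet;
import java.util.List;
import org.apache.thrift.TException;
import org.apache.thrift.protocol.TTupleProtocol;

final class TupleSchemeWriteSketch {
  // checksums may be null (unset); its payload is only written when its
  // presence bit is set, which is how the tuple scheme avoids field headers.
  static void write(TTupleProtocol oprot, List<String> filesAdded,
                    List<String> checksums) throws TException {
    oprot.writeI32(filesAdded.size());
    for (String file : filesAdded) {
      oprot.writeString(file);
    }
    BitSet optionals = new BitSet();
    if (checksums != null) {
      optionals.set(0);
    }
    oprot.writeBitSet(optionals, 1); // width = number of optional fields
    if (checksums != null) {
      oprot.writeI32(checksums.size());
      for (String checksum : checksums) {
        oprot.writeString(checksum);
      }
    }
  }
}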
void write(org.apache.thrift.protocol.TProtocol prot, InsertEventRequestD if (struct.isSetFilesAddedChecksum()) { { oprot.writeI32(struct.filesAddedChecksum.size()); - for (String _iter809 : struct.filesAddedChecksum) + for (String _iter817 : struct.filesAddedChecksum) { - oprot.writeString(_iter809); + oprot.writeString(_iter817); } } } if (struct.isSetSubDirectoryList()) { { oprot.writeI32(struct.subDirectoryList.size()); - for (String _iter810 : struct.subDirectoryList) + for (String _iter818 : struct.subDirectoryList) { - oprot.writeString(_iter810); + oprot.writeString(_iter818); } } } @@ -807,13 +807,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, InsertEventRequestD public void read(org.apache.thrift.protocol.TProtocol prot, InsertEventRequestData struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list811 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.filesAdded = new ArrayList(_list811.size); - String _elem812; - for (int _i813 = 0; _i813 < _list811.size; ++_i813) + org.apache.thrift.protocol.TList _list819 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.filesAdded = new ArrayList(_list819.size); + String _elem820; + for (int _i821 = 0; _i821 < _list819.size; ++_i821) { - _elem812 = iprot.readString(); - struct.filesAdded.add(_elem812); + _elem820 = iprot.readString(); + struct.filesAdded.add(_elem820); } } struct.setFilesAddedIsSet(true); @@ -824,26 +824,26 @@ public void read(org.apache.thrift.protocol.TProtocol prot, InsertEventRequestDa } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list814 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.filesAddedChecksum = new ArrayList(_list814.size); - String _elem815; - for (int _i816 = 0; _i816 < _list814.size; ++_i816) + org.apache.thrift.protocol.TList _list822 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.filesAddedChecksum = new ArrayList(_list822.size); + String _elem823; + for (int _i824 = 0; _i824 < _list822.size; ++_i824) { - _elem815 = iprot.readString(); - struct.filesAddedChecksum.add(_elem815); + _elem823 = iprot.readString(); + struct.filesAddedChecksum.add(_elem823); } } struct.setFilesAddedChecksumIsSet(true); } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list817 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.subDirectoryList = new ArrayList(_list817.size); - String _elem818; - for (int _i819 = 0; _i819 < _list817.size; ++_i819) + org.apache.thrift.protocol.TList _list825 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.subDirectoryList = new ArrayList(_list825.size); + String _elem826; + for (int _i827 = 0; _i827 < _list825.size; ++_i827) { - _elem818 = iprot.readString(); - struct.subDirectoryList.add(_elem818); + _elem826 = iprot.readString(); + struct.subDirectoryList.add(_elem826); } } struct.setSubDirectoryListIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java index 7402fb30eb..ad6cc0482d 100644 --- 
a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java @@ -689,14 +689,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, LockRequest struct) case 1: // COMPONENT if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list722 = iprot.readListBegin(); - struct.component = new ArrayList(_list722.size); - LockComponent _elem723; - for (int _i724 = 0; _i724 < _list722.size; ++_i724) + org.apache.thrift.protocol.TList _list730 = iprot.readListBegin(); + struct.component = new ArrayList(_list730.size); + LockComponent _elem731; + for (int _i732 = 0; _i732 < _list730.size; ++_i732) { - _elem723 = new LockComponent(); - _elem723.read(iprot); - struct.component.add(_elem723); + _elem731 = new LockComponent(); + _elem731.read(iprot); + struct.component.add(_elem731); } iprot.readListEnd(); } @@ -754,9 +754,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, LockRequest struct oprot.writeFieldBegin(COMPONENT_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.component.size())); - for (LockComponent _iter725 : struct.component) + for (LockComponent _iter733 : struct.component) { - _iter725.write(oprot); + _iter733.write(oprot); } oprot.writeListEnd(); } @@ -803,9 +803,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, LockRequest struct) TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.component.size()); - for (LockComponent _iter726 : struct.component) + for (LockComponent _iter734 : struct.component) { - _iter726.write(oprot); + _iter734.write(oprot); } } oprot.writeString(struct.user); @@ -830,14 +830,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, LockRequest struct) public void read(org.apache.thrift.protocol.TProtocol prot, LockRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list727 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.component = new ArrayList(_list727.size); - LockComponent _elem728; - for (int _i729 = 0; _i729 < _list727.size; ++_i729) + org.apache.thrift.protocol.TList _list735 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.component = new ArrayList(_list735.size); + LockComponent _elem736; + for (int _i737 = 0; _i737 < _list735.size; ++_i737) { - _elem728 = new LockComponent(); - _elem728.read(iprot); - struct.component.add(_elem728); + _elem736 = new LockComponent(); + _elem736.read(iprot); + struct.component.add(_elem736); } } struct.setComponentIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventRequest.java index c2207eb654..127b84f9d6 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventRequest.java @@ -525,13 +525,13 @@ public void 
read(org.apache.thrift.protocol.TProtocol iprot, NotificationEventRe case 3: // EVENT_TYPE_SKIP_LIST if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list780 = iprot.readListBegin(); - struct.eventTypeSkipList = new ArrayList(_list780.size); - String _elem781; - for (int _i782 = 0; _i782 < _list780.size; ++_i782) + org.apache.thrift.protocol.TList _list788 = iprot.readListBegin(); + struct.eventTypeSkipList = new ArrayList(_list788.size); + String _elem789; + for (int _i790 = 0; _i790 < _list788.size; ++_i790) { - _elem781 = iprot.readString(); - struct.eventTypeSkipList.add(_elem781); + _elem789 = iprot.readString(); + struct.eventTypeSkipList.add(_elem789); } iprot.readListEnd(); } @@ -566,9 +566,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, NotificationEventR oprot.writeFieldBegin(EVENT_TYPE_SKIP_LIST_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.eventTypeSkipList.size())); - for (String _iter783 : struct.eventTypeSkipList) + for (String _iter791 : struct.eventTypeSkipList) { - oprot.writeString(_iter783); + oprot.writeString(_iter791); } oprot.writeListEnd(); } @@ -607,9 +607,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, NotificationEventRe if (struct.isSetEventTypeSkipList()) { { oprot.writeI32(struct.eventTypeSkipList.size()); - for (String _iter784 : struct.eventTypeSkipList) + for (String _iter792 : struct.eventTypeSkipList) { - oprot.writeString(_iter784); + oprot.writeString(_iter792); } } } @@ -627,13 +627,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, NotificationEventReq } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list785 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.eventTypeSkipList = new ArrayList(_list785.size); - String _elem786; - for (int _i787 = 0; _i787 < _list785.size; ++_i787) + org.apache.thrift.protocol.TList _list793 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.eventTypeSkipList = new ArrayList(_list793.size); + String _elem794; + for (int _i795 = 0; _i795 < _list793.size; ++_i795) { - _elem786 = iprot.readString(); - struct.eventTypeSkipList.add(_elem786); + _elem794 = iprot.readString(); + struct.eventTypeSkipList.add(_elem794); } } struct.setEventTypeSkipListIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java index b1595a7d32..87f6b1fad2 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, NotificationEventRe case 1: // EVENTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list788 = iprot.readListBegin(); - struct.events = new ArrayList(_list788.size); - NotificationEvent _elem789; - for (int _i790 = 0; _i790 < _list788.size; ++_i790) + org.apache.thrift.protocol.TList _list796 = iprot.readListBegin(); + struct.events = new 
ArrayList(_list796.size); + NotificationEvent _elem797; + for (int _i798 = 0; _i798 < _list796.size; ++_i798) { - _elem789 = new NotificationEvent(); - _elem789.read(iprot); - struct.events.add(_elem789); + _elem797 = new NotificationEvent(); + _elem797.read(iprot); + struct.events.add(_elem797); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, NotificationEventR oprot.writeFieldBegin(EVENTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.events.size())); - for (NotificationEvent _iter791 : struct.events) + for (NotificationEvent _iter799 : struct.events) { - _iter791.write(oprot); + _iter799.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, NotificationEventRe TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.events.size()); - for (NotificationEvent _iter792 : struct.events) + for (NotificationEvent _iter800 : struct.events) { - _iter792.write(oprot); + _iter800.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, NotificationEventRe public void read(org.apache.thrift.protocol.TProtocol prot, NotificationEventResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list793 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.events = new ArrayList(_list793.size); - NotificationEvent _elem794; - for (int _i795 = 0; _i795 < _list793.size; ++_i795) + org.apache.thrift.protocol.TList _list801 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.events = new ArrayList(_list801.size); + NotificationEvent _elem802; + for (int _i803 = 0; _i803 < _list801.size; ++_i803) { - _elem794 = new NotificationEvent(); - _elem794.read(iprot); - struct.events.add(_elem794); + _elem802 = new NotificationEvent(); + _elem802.read(iprot); + struct.events.add(_elem802); } } struct.setEventsIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesRequest.java index 92fb2218f2..7500acfa7a 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesRequest.java @@ -47,6 +47,7 @@ private static final org.apache.thrift.protocol.TField ASCENDING_FIELD_DESC = new org.apache.thrift.protocol.TField("ascending", org.apache.thrift.protocol.TType.BOOL, (short)7); private static final org.apache.thrift.protocol.TField MAX_PARTS_FIELD_DESC = new org.apache.thrift.protocol.TField("maxParts", org.apache.thrift.protocol.TType.I64, (short)8); private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)9); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)10); private static final Map, SchemeFactory> 
schemes = new HashMap, SchemeFactory>(); static { @@ -63,6 +64,7 @@ private boolean ascending; // optional private long maxParts; // optional private String catName; // optional + private String validWriteIdList; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -74,7 +76,8 @@ PARTITION_ORDER((short)6, "partitionOrder"), ASCENDING((short)7, "ascending"), MAX_PARTS((short)8, "maxParts"), - CAT_NAME((short)9, "catName"); + CAT_NAME((short)9, "catName"), + VALID_WRITE_ID_LIST((short)10, "validWriteIdList"); private static final Map byName = new HashMap(); @@ -107,6 +110,8 @@ public static _Fields findByThriftId(int fieldId) { return MAX_PARTS; case 9: // CAT_NAME return CAT_NAME; + case 10: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; default: return null; } @@ -151,7 +156,7 @@ public String getFieldName() { private static final int __ASCENDING_ISSET_ID = 1; private static final int __MAXPARTS_ISSET_ID = 2; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.APPLY_DISTINCT,_Fields.FILTER,_Fields.PARTITION_ORDER,_Fields.ASCENDING,_Fields.MAX_PARTS,_Fields.CAT_NAME}; + private static final _Fields optionals[] = {_Fields.APPLY_DISTINCT,_Fields.FILTER,_Fields.PARTITION_ORDER,_Fields.ASCENDING,_Fields.MAX_PARTS,_Fields.CAT_NAME,_Fields.VALID_WRITE_ID_LIST}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -175,6 +180,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PartitionValuesRequest.class, metaDataMap); } @@ -233,6 +240,9 @@ public PartitionValuesRequest(PartitionValuesRequest other) { if (other.isSetCatName()) { this.catName = other.catName; } + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } } public PartitionValuesRequest deepCopy() { @@ -253,6 +263,7 @@ public void clear() { this.maxParts = -1L; this.catName = null; + this.validWriteIdList = null; } public String getDbName() { @@ -489,6 +500,29 @@ public void setCatNameIsSet(boolean value) { } } + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + 
} + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -563,6 +597,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + } } @@ -595,6 +637,9 @@ public Object getFieldValue(_Fields field) { case CAT_NAME: return getCatName(); + case VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + } throw new IllegalStateException(); } @@ -624,6 +669,8 @@ public boolean isSet(_Fields field) { return isSetMaxParts(); case CAT_NAME: return isSetCatName(); + case VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); } throw new IllegalStateException(); } @@ -722,6 +769,15 @@ public boolean equals(PartitionValuesRequest that) { return false; } + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if (!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + return true; } @@ -774,6 +830,11 @@ public int hashCode() { if (present_catName) list.add(catName); + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + return list.hashCode(); } @@ -875,6 +936,16 @@ public int compareTo(PartitionValuesRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -966,6 +1037,16 @@ public String toString() { } first = false; } + if (isSetValidWriteIdList()) { + if (!first) sb.append(", "); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -1117,6 +1198,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionValuesRequ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 10: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -1195,6 +1284,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionValuesReq oprot.writeFieldEnd(); } } + if (struct.validWriteIdList != null) { + if (struct.isSetValidWriteIdList()) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -1240,7 +1336,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionValuesRequ if (struct.isSetCatName()) { optionals.set(5); } - oprot.writeBitSet(optionals, 6); + if (struct.isSetValidWriteIdList()) { + 
optionals.set(6); + } + oprot.writeBitSet(optionals, 7); if (struct.isSetApplyDistinct()) { oprot.writeBool(struct.applyDistinct); } @@ -1265,6 +1364,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionValuesRequ if (struct.isSetCatName()) { oprot.writeString(struct.catName); } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); + } } @Override @@ -1286,7 +1388,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionValuesReque } } struct.setPartitionKeysIsSet(true); - BitSet incoming = iprot.readBitSet(6); + BitSet incoming = iprot.readBitSet(7); if (incoming.get(0)) { struct.applyDistinct = iprot.readBool(); struct.setApplyDistinctIsSet(true); @@ -1321,6 +1423,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionValuesReque struct.catName = iprot.readString(); struct.setCatNameIsSet(true); } + if (incoming.get(6)) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } } } diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprRequest.java index 0e72625e01..0046f6a5ea 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprRequest.java @@ -44,6 +44,7 @@ private static final org.apache.thrift.protocol.TField DEFAULT_PARTITION_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("defaultPartitionName", org.apache.thrift.protocol.TType.STRING, (short)4); private static final org.apache.thrift.protocol.TField MAX_PARTS_FIELD_DESC = new org.apache.thrift.protocol.TField("maxParts", org.apache.thrift.protocol.TType.I16, (short)5); private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)6); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)7); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -57,6 +58,7 @@ private String defaultPartitionName; // optional private short maxParts; // optional private String catName; // optional + private String validWriteIdList; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
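The writer and reader of a tuple-encoded struct must agree on the bitset width, which is why writeBitSet(optionals, 7) and readBitSet(7) change in lockstep once validWriteIdList becomes PartitionValuesRequest's seventh optional field; since the tuple protocol packs the bits into whole bytes, widths 6 and 7 presumably occupy the same single byte on the wire. A reader sketch for just the new slot, assuming the caller has already consumed the six earlier optionals in field order:

import java.util.BitSet;
import org.apache.thrift.TException;
import org.apache.thrift.protocol.TTupleProtocol;

final class ValidWriteIdListReadSketch {
  // incoming is the result of iprot.readBitSet(7); slot 6 is the bit this
  // patch appends, and its string payload exists on the wire only when set.
  static String readValidWriteIdList(TTupleProtocol iprot, BitSet incoming) throws TException {
    return incoming.get(6) ? iprot.readString() : null;
  }
}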
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -65,7 +67,8 @@ EXPR((short)3, "expr"), DEFAULT_PARTITION_NAME((short)4, "defaultPartitionName"), MAX_PARTS((short)5, "maxParts"), - CAT_NAME((short)6, "catName"); + CAT_NAME((short)6, "catName"), + VALID_WRITE_ID_LIST((short)7, "validWriteIdList"); private static final Map byName = new HashMap(); @@ -92,6 +95,8 @@ public static _Fields findByThriftId(int fieldId) { return MAX_PARTS; case 6: // CAT_NAME return CAT_NAME; + case 7: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; default: return null; } @@ -134,7 +139,7 @@ public String getFieldName() { // isset id assignments private static final int __MAXPARTS_ISSET_ID = 0; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.DEFAULT_PARTITION_NAME,_Fields.MAX_PARTS,_Fields.CAT_NAME}; + private static final _Fields optionals[] = {_Fields.DEFAULT_PARTITION_NAME,_Fields.MAX_PARTS,_Fields.CAT_NAME,_Fields.VALID_WRITE_ID_LIST}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -150,6 +155,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I16))); tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PartitionsByExprRequest.class, metaDataMap); } @@ -191,6 +198,9 @@ public PartitionsByExprRequest(PartitionsByExprRequest other) { if (other.isSetCatName()) { this.catName = other.catName; } + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } } public PartitionsByExprRequest deepCopy() { @@ -206,6 +216,7 @@ public void clear() { this.maxParts = (short)-1; this.catName = null; + this.validWriteIdList = null; } public String getDbName() { @@ -354,6 +365,29 @@ public void setCatNameIsSet(boolean value) { } } + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -404,6 +438,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + } } @@ -427,6 +469,9 @@ public Object getFieldValue(_Fields field) { case CAT_NAME: return getCatName(); + case 
VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + } throw new IllegalStateException(); } @@ -450,6 +495,8 @@ public boolean isSet(_Fields field) { return isSetMaxParts(); case CAT_NAME: return isSetCatName(); + case VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); } throw new IllegalStateException(); } @@ -521,6 +568,15 @@ public boolean equals(PartitionsByExprRequest that) { return false; } + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if (!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + return true; } @@ -558,6 +614,11 @@ public int hashCode() { if (present_catName) list.add(catName); + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + return list.hashCode(); } @@ -629,6 +690,16 @@ public int compareTo(PartitionsByExprRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -698,6 +769,16 @@ public String toString() { } first = false; } + if (isSetValidWriteIdList()) { + if (!first) sb.append(", "); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -803,6 +884,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionsByExprReq org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 7: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -850,6 +939,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionsByExprRe oprot.writeFieldEnd(); } } + if (struct.validWriteIdList != null) { + if (struct.isSetValidWriteIdList()) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -880,7 +976,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionsByExprReq if (struct.isSetCatName()) { optionals.set(2); } - oprot.writeBitSet(optionals, 3); + if (struct.isSetValidWriteIdList()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); if (struct.isSetDefaultPartitionName()) { oprot.writeString(struct.defaultPartitionName); } @@ -890,6 +989,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionsByExprReq if (struct.isSetCatName()) { oprot.writeString(struct.catName); } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); + } } @Override @@ -901,7 +1003,7 @@ public void 
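Note: the same optional-field pattern recurs here for PartitionsByExprRequest: isSetValidWriteIdList() is a plain null check, and the field enters equals, hashCode, and compareTo only through its set/unset state plus its value. A hedged usage sketch, assuming the generated constructor that takes the three required fields (dbName, tblName, expr); the empty expression buffer and the write-id string are placeholders, not real values:

    import java.nio.ByteBuffer;
    import org.apache.hadoop.hive.metastore.api.PartitionsByExprRequest;

    public class ExprRequestSketch {
      public static void main(String[] args) {
        // Placeholder for a serialized partition-filter expression.
        ByteBuffer expr = ByteBuffer.allocate(0);
        PartitionsByExprRequest req = new PartitionsByExprRequest("default", "tbl", expr);
        req.setValidWriteIdList("default.tbl:4:9223372036854775807::"); // illustrative encoding
        System.out.println(req.isSetValidWriteIdList()); // true: presence == non-null
      }
    }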
read(org.apache.thrift.protocol.TProtocol prot, PartitionsByExprRequ struct.setTblNameIsSet(true); struct.expr = iprot.readBinary(); struct.setExprIsSet(true); - BitSet incoming = iprot.readBitSet(3); + BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { struct.defaultPartitionName = iprot.readString(); struct.setDefaultPartitionNameIsSet(true); @@ -914,6 +1016,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionsByExprRequ struct.catName = iprot.readString(); struct.setCatNameIsSet(true); } + if (incoming.get(3)) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } } } diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PutFileMetadataRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PutFileMetadataRequest.java index 40dab8c6d1..c78213958d 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PutFileMetadataRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PutFileMetadataRequest.java @@ -547,13 +547,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PutFileMetadataRequ case 1: // FILE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list872 = iprot.readListBegin(); - struct.fileIds = new ArrayList(_list872.size); - long _elem873; - for (int _i874 = 0; _i874 < _list872.size; ++_i874) + org.apache.thrift.protocol.TList _list880 = iprot.readListBegin(); + struct.fileIds = new ArrayList(_list880.size); + long _elem881; + for (int _i882 = 0; _i882 < _list880.size; ++_i882) { - _elem873 = iprot.readI64(); - struct.fileIds.add(_elem873); + _elem881 = iprot.readI64(); + struct.fileIds.add(_elem881); } iprot.readListEnd(); } @@ -565,13 +565,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PutFileMetadataRequ case 2: // METADATA if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list875 = iprot.readListBegin(); - struct.metadata = new ArrayList(_list875.size); - ByteBuffer _elem876; - for (int _i877 = 0; _i877 < _list875.size; ++_i877) + org.apache.thrift.protocol.TList _list883 = iprot.readListBegin(); + struct.metadata = new ArrayList(_list883.size); + ByteBuffer _elem884; + for (int _i885 = 0; _i885 < _list883.size; ++_i885) { - _elem876 = iprot.readBinary(); - struct.metadata.add(_elem876); + _elem884 = iprot.readBinary(); + struct.metadata.add(_elem884); } iprot.readListEnd(); } @@ -605,9 +605,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PutFileMetadataReq oprot.writeFieldBegin(FILE_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.fileIds.size())); - for (long _iter878 : struct.fileIds) + for (long _iter886 : struct.fileIds) { - oprot.writeI64(_iter878); + oprot.writeI64(_iter886); } oprot.writeListEnd(); } @@ -617,9 +617,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PutFileMetadataReq oprot.writeFieldBegin(METADATA_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.metadata.size())); - for (ByteBuffer _iter879 : struct.metadata) + for (ByteBuffer _iter887 : struct.metadata) { - oprot.writeBinary(_iter879); + 
oprot.writeBinary(_iter887); } oprot.writeListEnd(); } @@ -651,16 +651,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PutFileMetadataRequ TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.fileIds.size()); - for (long _iter880 : struct.fileIds) + for (long _iter888 : struct.fileIds) { - oprot.writeI64(_iter880); + oprot.writeI64(_iter888); } } { oprot.writeI32(struct.metadata.size()); - for (ByteBuffer _iter881 : struct.metadata) + for (ByteBuffer _iter889 : struct.metadata) { - oprot.writeBinary(_iter881); + oprot.writeBinary(_iter889); } } BitSet optionals = new BitSet(); @@ -677,24 +677,24 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PutFileMetadataRequ public void read(org.apache.thrift.protocol.TProtocol prot, PutFileMetadataRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list882 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.fileIds = new ArrayList(_list882.size); - long _elem883; - for (int _i884 = 0; _i884 < _list882.size; ++_i884) + org.apache.thrift.protocol.TList _list890 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.fileIds = new ArrayList(_list890.size); + long _elem891; + for (int _i892 = 0; _i892 < _list890.size; ++_i892) { - _elem883 = iprot.readI64(); - struct.fileIds.add(_elem883); + _elem891 = iprot.readI64(); + struct.fileIds.add(_elem891); } } struct.setFileIdsIsSet(true); { - org.apache.thrift.protocol.TList _list885 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.metadata = new ArrayList(_list885.size); - ByteBuffer _elem886; - for (int _i887 = 0; _i887 < _list885.size; ++_i887) + org.apache.thrift.protocol.TList _list893 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.metadata = new ArrayList(_list893.size); + ByteBuffer _elem894; + for (int _i895 = 0; _i895 < _list893.size; ++_i895) { - _elem886 = iprot.readBinary(); - struct.metadata.add(_elem886); + _elem894 = iprot.readBinary(); + struct.metadata.add(_elem894); } } struct.setMetadataIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RenamePartitionRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RenamePartitionRequest.java index de467c298f..75842e14ea 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RenamePartitionRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RenamePartitionRequest.java @@ -796,13 +796,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, RenamePartitionRequ case 4: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1112 = iprot.readListBegin(); - struct.partVals = new ArrayList(_list1112.size); - String _elem1113; - for (int _i1114 = 0; _i1114 < _list1112.size; ++_i1114) + org.apache.thrift.protocol.TList _list1120 = iprot.readListBegin(); + struct.partVals = new ArrayList(_list1120.size); + String _elem1121; + for (int _i1122 = 0; _i1122 < _list1120.size; ++_i1122) { - _elem1113 = iprot.readString(); - struct.partVals.add(_elem1113); + _elem1121 = 
iprot.readString(); + struct.partVals.add(_elem1121); } iprot.readListEnd(); } @@ -862,9 +862,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, RenamePartitionReq oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partVals.size())); - for (String _iter1115 : struct.partVals) + for (String _iter1123 : struct.partVals) { - oprot.writeString(_iter1115); + oprot.writeString(_iter1123); } oprot.writeListEnd(); } @@ -903,9 +903,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, RenamePartitionRequ oprot.writeString(struct.tableName); { oprot.writeI32(struct.partVals.size()); - for (String _iter1116 : struct.partVals) + for (String _iter1124 : struct.partVals) { - oprot.writeString(_iter1116); + oprot.writeString(_iter1124); } } struct.newPart.write(oprot); @@ -933,13 +933,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, RenamePartitionReque struct.tableName = iprot.readString(); struct.setTableNameIsSet(true); { - org.apache.thrift.protocol.TList _list1117 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partVals = new ArrayList(_list1117.size); - String _elem1118; - for (int _i1119 = 0; _i1119 < _list1117.size; ++_i1119) + org.apache.thrift.protocol.TList _list1125 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partVals = new ArrayList(_list1125.size); + String _elem1126; + for (int _i1127 = 0; _i1127 < _list1125.size; ++_i1127) { - _elem1118 = iprot.readString(); - struct.partVals.add(_elem1118); + _elem1126 = iprot.readString(); + struct.partVals.add(_elem1126); } } struct.setPartValsIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SchemaVersion.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SchemaVersion.java index 09fcd476e9..31224876fb 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SchemaVersion.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SchemaVersion.java @@ -1119,14 +1119,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SchemaVersion struc case 4: // COLS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1032 = iprot.readListBegin(); - struct.cols = new ArrayList(_list1032.size); - FieldSchema _elem1033; - for (int _i1034 = 0; _i1034 < _list1032.size; ++_i1034) + org.apache.thrift.protocol.TList _list1040 = iprot.readListBegin(); + struct.cols = new ArrayList(_list1040.size); + FieldSchema _elem1041; + for (int _i1042 = 0; _i1042 < _list1040.size; ++_i1042) { - _elem1033 = new FieldSchema(); - _elem1033.read(iprot); - struct.cols.add(_elem1033); + _elem1041 = new FieldSchema(); + _elem1041.read(iprot); + struct.cols.add(_elem1041); } iprot.readListEnd(); } @@ -1212,9 +1212,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, SchemaVersion stru oprot.writeFieldBegin(COLS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.cols.size())); - for (FieldSchema _iter1035 : struct.cols) + for (FieldSchema _iter1043 : struct.cols) { - _iter1035.write(oprot); + _iter1043.write(oprot); } 
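Note: the _listNNN / _elemNNN / _iterNNN renames in this and the following hunks (for example _list872 to _list880) are behavior-neutral regeneration noise: the Thrift compiler numbers these temporaries with one counter across the whole generated module, so container fields added earlier in the IDL shift every later temporary by a constant offset, +8 in this commit.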
oprot.writeListEnd(); } @@ -1323,9 +1323,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, SchemaVersion struc if (struct.isSetCols()) { { oprot.writeI32(struct.cols.size()); - for (FieldSchema _iter1036 : struct.cols) + for (FieldSchema _iter1044 : struct.cols) { - _iter1036.write(oprot); + _iter1044.write(oprot); } } } @@ -1368,14 +1368,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, SchemaVersion struct } if (incoming.get(3)) { { - org.apache.thrift.protocol.TList _list1037 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.cols = new ArrayList(_list1037.size); - FieldSchema _elem1038; - for (int _i1039 = 0; _i1039 < _list1037.size; ++_i1039) + org.apache.thrift.protocol.TList _list1045 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.cols = new ArrayList(_list1045.size); + FieldSchema _elem1046; + for (int _i1047 = 0; _i1047 < _list1045.size; ++_i1047) { - _elem1038 = new FieldSchema(); - _elem1038.read(iprot); - struct.cols.add(_elem1038); + _elem1046 = new FieldSchema(); + _elem1046.read(iprot); + struct.cols.add(_elem1046); } } struct.setColsIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java index ef76095eda..8fae376808 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ShowCompactResponse case 1: // COMPACTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list764 = iprot.readListBegin(); - struct.compacts = new ArrayList(_list764.size); - ShowCompactResponseElement _elem765; - for (int _i766 = 0; _i766 < _list764.size; ++_i766) + org.apache.thrift.protocol.TList _list772 = iprot.readListBegin(); + struct.compacts = new ArrayList(_list772.size); + ShowCompactResponseElement _elem773; + for (int _i774 = 0; _i774 < _list772.size; ++_i774) { - _elem765 = new ShowCompactResponseElement(); - _elem765.read(iprot); - struct.compacts.add(_elem765); + _elem773 = new ShowCompactResponseElement(); + _elem773.read(iprot); + struct.compacts.add(_elem773); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ShowCompactRespons oprot.writeFieldBegin(COMPACTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.compacts.size())); - for (ShowCompactResponseElement _iter767 : struct.compacts) + for (ShowCompactResponseElement _iter775 : struct.compacts) { - _iter767.write(oprot); + _iter775.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ShowCompactResponse TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.compacts.size()); - for (ShowCompactResponseElement _iter768 : struct.compacts) + for (ShowCompactResponseElement _iter776 : struct.compacts) { - _iter768.write(oprot); + _iter776.write(oprot); } } } @@ -425,14 +425,14 @@ public void 
write(org.apache.thrift.protocol.TProtocol prot, ShowCompactResponse public void read(org.apache.thrift.protocol.TProtocol prot, ShowCompactResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list769 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.compacts = new ArrayList(_list769.size); - ShowCompactResponseElement _elem770; - for (int _i771 = 0; _i771 < _list769.size; ++_i771) + org.apache.thrift.protocol.TList _list777 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.compacts = new ArrayList(_list777.size); + ShowCompactResponseElement _elem778; + for (int _i779 = 0; _i779 < _list777.size; ++_i779) { - _elem770 = new ShowCompactResponseElement(); - _elem770.read(iprot); - struct.compacts.add(_elem770); + _elem778 = new ShowCompactResponseElement(); + _elem778.read(iprot); + struct.compacts.add(_elem778); } } struct.setCompactsIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponse.java index af3afc2087..025219365c 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponse.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponse.java @@ -350,14 +350,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ShowLocksResponse s case 1: // LOCKS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list730 = iprot.readListBegin(); - struct.locks = new ArrayList(_list730.size); - ShowLocksResponseElement _elem731; - for (int _i732 = 0; _i732 < _list730.size; ++_i732) + org.apache.thrift.protocol.TList _list738 = iprot.readListBegin(); + struct.locks = new ArrayList(_list738.size); + ShowLocksResponseElement _elem739; + for (int _i740 = 0; _i740 < _list738.size; ++_i740) { - _elem731 = new ShowLocksResponseElement(); - _elem731.read(iprot); - struct.locks.add(_elem731); + _elem739 = new ShowLocksResponseElement(); + _elem739.read(iprot); + struct.locks.add(_elem739); } iprot.readListEnd(); } @@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ShowLocksResponse oprot.writeFieldBegin(LOCKS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.locks.size())); - for (ShowLocksResponseElement _iter733 : struct.locks) + for (ShowLocksResponseElement _iter741 : struct.locks) { - _iter733.write(oprot); + _iter741.write(oprot); } oprot.writeListEnd(); } @@ -416,9 +416,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ShowLocksResponse s if (struct.isSetLocks()) { { oprot.writeI32(struct.locks.size()); - for (ShowLocksResponseElement _iter734 : struct.locks) + for (ShowLocksResponseElement _iter742 : struct.locks) { - _iter734.write(oprot); + _iter742.write(oprot); } } } @@ -430,14 +430,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, ShowLocksResponse st BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list735 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, 
iprot.readI32()); - struct.locks = new ArrayList(_list735.size); - ShowLocksResponseElement _elem736; - for (int _i737 = 0; _i737 < _list735.size; ++_i737) + org.apache.thrift.protocol.TList _list743 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.locks = new ArrayList(_list743.size); + ShowLocksResponseElement _elem744; + for (int _i745 = 0; _i745 < _list743.size; ++_i745) { - _elem736 = new ShowLocksResponseElement(); - _elem736.read(iprot); - struct.locks.add(_elem736); + _elem744 = new ShowLocksResponseElement(); + _elem744.read(iprot); + struct.locks.add(_elem744); } } struct.setLocksIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableWriteId.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableWriteId.java new file mode 100644 index 0000000000..1517721f50 --- /dev/null +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableWriteId.java @@ -0,0 +1,488 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class TableWriteId implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TableWriteId"); + + private static final org.apache.thrift.protocol.TField FULL_TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("fullTableName", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField WRITE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("writeId", org.apache.thrift.protocol.TType.I64, (short)2); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new TableWriteIdStandardSchemeFactory()); + schemes.put(TupleScheme.class, new TableWriteIdTupleSchemeFactory()); + } + + private String fullTableName; // required + private long writeId; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + FULL_TABLE_NAME((short)1, "fullTableName"), + WRITE_ID((short)2, "writeId"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // FULL_TABLE_NAME + return FULL_TABLE_NAME; + case 2: // WRITE_ID + return WRITE_ID; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __WRITEID_ISSET_ID = 0; + private byte __isset_bitfield = 0; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.FULL_TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("fullTableName", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.WRITE_ID, new org.apache.thrift.meta_data.FieldMetaData("writeId", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TableWriteId.class, metaDataMap); + } + + public TableWriteId() { + } + + public TableWriteId( + String fullTableName, + long writeId) + { + this(); + this.fullTableName = fullTableName; + this.writeId = writeId; + setWriteIdIsSet(true); + } + + /** + * Performs a deep copy on other. 
+ */ + public TableWriteId(TableWriteId other) { + __isset_bitfield = other.__isset_bitfield; + if (other.isSetFullTableName()) { + this.fullTableName = other.fullTableName; + } + this.writeId = other.writeId; + } + + public TableWriteId deepCopy() { + return new TableWriteId(this); + } + + @Override + public void clear() { + this.fullTableName = null; + setWriteIdIsSet(false); + this.writeId = 0; + } + + public String getFullTableName() { + return this.fullTableName; + } + + public void setFullTableName(String fullTableName) { + this.fullTableName = fullTableName; + } + + public void unsetFullTableName() { + this.fullTableName = null; + } + + /** Returns true if field fullTableName is set (has been assigned a value) and false otherwise */ + public boolean isSetFullTableName() { + return this.fullTableName != null; + } + + public void setFullTableNameIsSet(boolean value) { + if (!value) { + this.fullTableName = null; + } + } + + public long getWriteId() { + return this.writeId; + } + + public void setWriteId(long writeId) { + this.writeId = writeId; + setWriteIdIsSet(true); + } + + public void unsetWriteId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __WRITEID_ISSET_ID); + } + + /** Returns true if field writeId is set (has been assigned a value) and false otherwise */ + public boolean isSetWriteId() { + return EncodingUtils.testBit(__isset_bitfield, __WRITEID_ISSET_ID); + } + + public void setWriteIdIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __WRITEID_ISSET_ID, value); + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case FULL_TABLE_NAME: + if (value == null) { + unsetFullTableName(); + } else { + setFullTableName((String)value); + } + break; + + case WRITE_ID: + if (value == null) { + unsetWriteId(); + } else { + setWriteId((Long)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case FULL_TABLE_NAME: + return getFullTableName(); + + case WRITE_ID: + return getWriteId(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case FULL_TABLE_NAME: + return isSetFullTableName(); + case WRITE_ID: + return isSetWriteId(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof TableWriteId) + return this.equals((TableWriteId)that); + return false; + } + + public boolean equals(TableWriteId that) { + if (that == null) + return false; + + boolean this_present_fullTableName = true && this.isSetFullTableName(); + boolean that_present_fullTableName = true && that.isSetFullTableName(); + if (this_present_fullTableName || that_present_fullTableName) { + if (!(this_present_fullTableName && that_present_fullTableName)) + return false; + if (!this.fullTableName.equals(that.fullTableName)) + return false; + } + + boolean this_present_writeId = true; + boolean that_present_writeId = true; + if (this_present_writeId || that_present_writeId) { + if (!(this_present_writeId && that_present_writeId)) + return false; + if (this.writeId != that.writeId) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_fullTableName = true && (isSetFullTableName()); + 
list.add(present_fullTableName); + if (present_fullTableName) + list.add(fullTableName); + + boolean present_writeId = true; + list.add(present_writeId); + if (present_writeId) + list.add(writeId); + + return list.hashCode(); + } + + @Override + public int compareTo(TableWriteId other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetFullTableName()).compareTo(other.isSetFullTableName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetFullTableName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.fullTableName, other.fullTableName); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetWriteId()).compareTo(other.isSetWriteId()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetWriteId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.writeId, other.writeId); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("TableWriteId("); + boolean first = true; + + sb.append("fullTableName:"); + if (this.fullTableName == null) { + sb.append("null"); + } else { + sb.append(this.fullTableName); + } + first = false; + if (!first) sb.append(", "); + sb.append("writeId:"); + sb.append(this.writeId); + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetFullTableName()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'fullTableName' is unset! Struct:" + toString()); + } + + if (!isSetWriteId()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'writeId' is unset! Struct:" + toString()); + } + + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
+ __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class TableWriteIdStandardSchemeFactory implements SchemeFactory { + public TableWriteIdStandardScheme getScheme() { + return new TableWriteIdStandardScheme(); + } + } + + private static class TableWriteIdStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, TableWriteId struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // FULL_TABLE_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.fullTableName = iprot.readString(); + struct.setFullTableNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // WRITE_ID + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.writeId = iprot.readI64(); + struct.setWriteIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, TableWriteId struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.fullTableName != null) { + oprot.writeFieldBegin(FULL_TABLE_NAME_FIELD_DESC); + oprot.writeString(struct.fullTableName); + oprot.writeFieldEnd(); + } + oprot.writeFieldBegin(WRITE_ID_FIELD_DESC); + oprot.writeI64(struct.writeId); + oprot.writeFieldEnd(); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class TableWriteIdTupleSchemeFactory implements SchemeFactory { + public TableWriteIdTupleScheme getScheme() { + return new TableWriteIdTupleScheme(); + } + } + + private static class TableWriteIdTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, TableWriteId struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + oprot.writeString(struct.fullTableName); + oprot.writeI64(struct.writeId); + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, TableWriteId struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + struct.fullTableName = iprot.readString(); + struct.setFullTableNameIsSet(true); + struct.writeId = iprot.readI64(); + struct.setWriteIdIsSet(true); + } + } + +} + diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java index cd9af7eed2..98a650f9ab 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java @@ -76,13 +76,13 @@ public Map 
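Note: TableWriteId, new in this commit, declares both of its fields required, so validate() rejects a half-built struct and the TupleScheme writes both values unconditionally, with no presence BitSet at all. A small sketch against exactly the generated API shown above (the table name and write id are made-up values):

    import org.apache.hadoop.hive.metastore.api.TableWriteId;

    public class TableWriteIdSketch {
      public static void main(String[] args) throws Exception {
        TableWriteId id = new TableWriteId("default.tbl", 42L);
        id.validate(); // passes: fullTableName and writeId are both set
        System.out.println(id); // TableWriteId(fullTableName:default.tbl, writeId:42)
        TableWriteId empty = new TableWriteId();
        // empty.validate() would throw TProtocolException:
        // "Required field 'fullTableName' is unset! ..."
      }
    }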
get_type_all(String name) throws MetaException, org.apache.thrift.TException; - public List<FieldSchema> get_fields(String db_name, String table_name) throws MetaException, UnknownTableException, UnknownDBException, org.apache.thrift.TException; + public List<FieldSchema> get_fields(String db_name, String table_name, String validWriteIdList) throws MetaException, UnknownTableException, UnknownDBException, org.apache.thrift.TException; - public List<FieldSchema> get_fields_with_environment_context(String db_name, String table_name, EnvironmentContext environment_context) throws MetaException, UnknownTableException, UnknownDBException, org.apache.thrift.TException; + public List<FieldSchema> get_fields_with_environment_context(String db_name, String table_name, EnvironmentContext environment_context, String validWriteIdList) throws MetaException, UnknownTableException, UnknownDBException, org.apache.thrift.TException; - public List<FieldSchema> get_schema(String db_name, String table_name) throws MetaException, UnknownTableException, UnknownDBException, org.apache.thrift.TException; + public List<FieldSchema> get_schema(String db_name, String table_name, String validWriteIdList) throws MetaException, UnknownTableException, UnknownDBException, org.apache.thrift.TException; - public List<FieldSchema> get_schema_with_environment_context(String db_name, String table_name, EnvironmentContext environment_context) throws MetaException, UnknownTableException, UnknownDBException, org.apache.thrift.TException; + public List<FieldSchema> get_schema_with_environment_context(String db_name, String table_name, EnvironmentContext environment_context, String validWriteIdList) throws MetaException, UnknownTableException, UnknownDBException, org.apache.thrift.TException; public void create_table(Table tbl) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, org.apache.thrift.TException; @@ -126,7 +126,7 @@ public List<String> get_all_tables(String db_name) throws MetaException, org.apache.thrift.TException; - public Table get_table(String dbname, String tbl_name) throws MetaException, NoSuchObjectException, org.apache.thrift.TException; + public Table get_table(String dbname, String tbl_name, String validWriteIdList) throws MetaException, NoSuchObjectException, org.apache.thrift.TException; public List<Table>
get_table_objects_by_name(String dbname, List tbl_names) throws org.apache.thrift.TException; @@ -178,41 +178,41 @@ public DropPartitionsResult drop_partitions_req(DropPartitionsRequest req) throws NoSuchObjectException, MetaException, org.apache.thrift.TException; - public Partition get_partition(String db_name, String tbl_name, List part_vals) throws MetaException, NoSuchObjectException, org.apache.thrift.TException; + public Partition get_partition(String db_name, String tbl_name, List part_vals, String validTxnList) throws MetaException, NoSuchObjectException, org.apache.thrift.TException; public Partition exchange_partition(Map partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException, org.apache.thrift.TException; public List exchange_partitions(Map partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException, org.apache.thrift.TException; - public Partition get_partition_with_auth(String db_name, String tbl_name, List part_vals, String user_name, List group_names) throws MetaException, NoSuchObjectException, org.apache.thrift.TException; + public Partition get_partition_with_auth(String db_name, String tbl_name, List part_vals, String user_name, List group_names, String validTxnList) throws MetaException, NoSuchObjectException, org.apache.thrift.TException; - public Partition get_partition_by_name(String db_name, String tbl_name, String part_name) throws MetaException, NoSuchObjectException, org.apache.thrift.TException; + public Partition get_partition_by_name(String db_name, String tbl_name, String part_name, String validTxnList) throws MetaException, NoSuchObjectException, org.apache.thrift.TException; - public List get_partitions(String db_name, String tbl_name, short max_parts) throws NoSuchObjectException, MetaException, org.apache.thrift.TException; + public List get_partitions(String db_name, String tbl_name, short max_parts, String validTxnList) throws NoSuchObjectException, MetaException, org.apache.thrift.TException; - public List get_partitions_with_auth(String db_name, String tbl_name, short max_parts, String user_name, List group_names) throws NoSuchObjectException, MetaException, org.apache.thrift.TException; + public List get_partitions_with_auth(String db_name, String tbl_name, short max_parts, String user_name, List group_names, String validTxnList) throws NoSuchObjectException, MetaException, org.apache.thrift.TException; - public List get_partitions_pspec(String db_name, String tbl_name, int max_parts) throws NoSuchObjectException, MetaException, org.apache.thrift.TException; + public List get_partitions_pspec(String db_name, String tbl_name, int max_parts, String validTxnList) throws NoSuchObjectException, MetaException, org.apache.thrift.TException; - public List get_partition_names(String db_name, String tbl_name, short max_parts) throws NoSuchObjectException, MetaException, org.apache.thrift.TException; + public List get_partition_names(String db_name, String tbl_name, short max_parts, String validTxnList) throws NoSuchObjectException, MetaException, org.apache.thrift.TException; public PartitionValuesResponse get_partition_values(PartitionValuesRequest request) throws MetaException, NoSuchObjectException, org.apache.thrift.TException; - public List get_partitions_ps(String db_name, String tbl_name, 
List part_vals, short max_parts) throws MetaException, NoSuchObjectException, org.apache.thrift.TException; + public List get_partitions_ps(String db_name, String tbl_name, List part_vals, short max_parts, String validTxnList) throws MetaException, NoSuchObjectException, org.apache.thrift.TException; - public List get_partitions_ps_with_auth(String db_name, String tbl_name, List part_vals, short max_parts, String user_name, List group_names) throws NoSuchObjectException, MetaException, org.apache.thrift.TException; + public List get_partitions_ps_with_auth(String db_name, String tbl_name, List part_vals, short max_parts, String user_name, List group_names, String validTxnList) throws NoSuchObjectException, MetaException, org.apache.thrift.TException; - public List get_partition_names_ps(String db_name, String tbl_name, List part_vals, short max_parts) throws MetaException, NoSuchObjectException, org.apache.thrift.TException; + public List get_partition_names_ps(String db_name, String tbl_name, List part_vals, short max_parts, String validTxnList) throws MetaException, NoSuchObjectException, org.apache.thrift.TException; - public List get_partitions_by_filter(String db_name, String tbl_name, String filter, short max_parts) throws MetaException, NoSuchObjectException, org.apache.thrift.TException; + public List get_partitions_by_filter(String db_name, String tbl_name, String filter, short max_parts, String validTxnList) throws MetaException, NoSuchObjectException, org.apache.thrift.TException; - public List get_part_specs_by_filter(String db_name, String tbl_name, String filter, int max_parts) throws MetaException, NoSuchObjectException, org.apache.thrift.TException; + public List get_part_specs_by_filter(String db_name, String tbl_name, String filter, int max_parts, String validTxnList) throws MetaException, NoSuchObjectException, org.apache.thrift.TException; public PartitionsByExprResult get_partitions_by_expr(PartitionsByExprRequest req) throws MetaException, NoSuchObjectException, org.apache.thrift.TException; - public int get_num_partitions_by_filter(String db_name, String tbl_name, String filter) throws MetaException, NoSuchObjectException, org.apache.thrift.TException; + public int get_num_partitions_by_filter(String db_name, String tbl_name, String filter, String validTxnList) throws MetaException, NoSuchObjectException, org.apache.thrift.TException; - public List get_partitions_by_names(String db_name, String tbl_name, List names) throws MetaException, NoSuchObjectException, org.apache.thrift.TException; + public List get_partitions_by_names(String db_name, String tbl_name, List names, String validTxnList) throws MetaException, NoSuchObjectException, org.apache.thrift.TException; public GetPartitionsByNamesResult get_partitions_by_names_req(GetPartitionsByNamesRequest req) throws MetaException, NoSuchObjectException, org.apache.thrift.TException; @@ -262,9 +262,9 @@ public SetPartitionsStatsResponse update_partition_column_statistics_req(SetPartitionsStatsRequest req) throws NoSuchObjectException, InvalidObjectException, MetaException, InvalidInputException, org.apache.thrift.TException; - public ColumnStatistics get_table_column_statistics(String db_name, String tbl_name, String col_name) throws NoSuchObjectException, MetaException, InvalidInputException, InvalidObjectException, org.apache.thrift.TException; + public ColumnStatistics get_table_column_statistics(String db_name, String tbl_name, String col_name, String validWriteIdList) throws NoSuchObjectException, MetaException, 
InvalidInputException, InvalidObjectException, org.apache.thrift.TException; - public ColumnStatistics get_partition_column_statistics(String db_name, String tbl_name, String part_name, String col_name) throws NoSuchObjectException, MetaException, InvalidInputException, InvalidObjectException, org.apache.thrift.TException; + public ColumnStatistics get_partition_column_statistics(String db_name, String tbl_name, String part_name, String col_name, String validWriteIdList) throws NoSuchObjectException, MetaException, InvalidInputException, InvalidObjectException, org.apache.thrift.TException; public TableStatsResult get_table_statistics_req(TableStatsRequest request) throws NoSuchObjectException, MetaException, org.apache.thrift.TException; @@ -532,13 +532,13 @@ public void get_type_all(String name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - public void get_fields(String db_name, String table_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void get_fields(String db_name, String table_name, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - public void get_fields_with_environment_context(String db_name, String table_name, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void get_fields_with_environment_context(String db_name, String table_name, EnvironmentContext environment_context, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - public void get_schema(String db_name, String table_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void get_schema(String db_name, String table_name, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - public void get_schema_with_environment_context(String db_name, String table_name, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void get_schema_with_environment_context(String db_name, String table_name, EnvironmentContext environment_context, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; public void create_table(Table tbl, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; @@ -582,7 +582,7 @@ public void get_all_tables(String db_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - public void get_table(String dbname, String tbl_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void get_table(String dbname, String tbl_name, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; public void get_table_objects_by_name(String dbname, List tbl_names, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; @@ -634,41 +634,41 @@ public void drop_partitions_req(DropPartitionsRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - public void get_partition(String db_name, String tbl_name, List part_vals, 
org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void get_partition(String db_name, String tbl_name, List part_vals, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; public void exchange_partition(Map partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; public void exchange_partitions(Map partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - public void get_partition_with_auth(String db_name, String tbl_name, List part_vals, String user_name, List group_names, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void get_partition_with_auth(String db_name, String tbl_name, List part_vals, String user_name, List group_names, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - public void get_partition_by_name(String db_name, String tbl_name, String part_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void get_partition_by_name(String db_name, String tbl_name, String part_name, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - public void get_partitions(String db_name, String tbl_name, short max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void get_partitions(String db_name, String tbl_name, short max_parts, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - public void get_partitions_with_auth(String db_name, String tbl_name, short max_parts, String user_name, List group_names, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void get_partitions_with_auth(String db_name, String tbl_name, short max_parts, String user_name, List group_names, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - public void get_partitions_pspec(String db_name, String tbl_name, int max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void get_partitions_pspec(String db_name, String tbl_name, int max_parts, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - public void get_partition_names(String db_name, String tbl_name, short max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void get_partition_names(String db_name, String tbl_name, short max_parts, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; public void get_partition_values(PartitionValuesRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - public void get_partitions_ps(String db_name, String tbl_name, List part_vals, short max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + 
public void get_partitions_ps(String db_name, String tbl_name, List part_vals, short max_parts, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - public void get_partitions_ps_with_auth(String db_name, String tbl_name, List part_vals, short max_parts, String user_name, List group_names, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void get_partitions_ps_with_auth(String db_name, String tbl_name, List part_vals, short max_parts, String user_name, List group_names, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - public void get_partition_names_ps(String db_name, String tbl_name, List part_vals, short max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void get_partition_names_ps(String db_name, String tbl_name, List part_vals, short max_parts, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - public void get_partitions_by_filter(String db_name, String tbl_name, String filter, short max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void get_partitions_by_filter(String db_name, String tbl_name, String filter, short max_parts, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - public void get_part_specs_by_filter(String db_name, String tbl_name, String filter, int max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void get_part_specs_by_filter(String db_name, String tbl_name, String filter, int max_parts, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; public void get_partitions_by_expr(PartitionsByExprRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - public void get_num_partitions_by_filter(String db_name, String tbl_name, String filter, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void get_num_partitions_by_filter(String db_name, String tbl_name, String filter, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - public void get_partitions_by_names(String db_name, String tbl_name, List names, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void get_partitions_by_names(String db_name, String tbl_name, List names, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; public void get_partitions_by_names_req(GetPartitionsByNamesRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; @@ -718,9 +718,9 @@ public void update_partition_column_statistics_req(SetPartitionsStatsRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - public void get_table_column_statistics(String db_name, String tbl_name, String col_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void get_table_column_statistics(String db_name, String tbl_name, String col_name, 
String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - public void get_partition_column_statistics(String db_name, String tbl_name, String part_name, String col_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void get_partition_column_statistics(String db_name, String tbl_name, String part_name, String col_name, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; public void get_table_statistics_req(TableStatsRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; @@ -1446,17 +1446,18 @@ public void send_get_type_all(String name) throws org.apache.thrift.TException throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_type_all failed: unknown result"); } - public List get_fields(String db_name, String table_name) throws MetaException, UnknownTableException, UnknownDBException, org.apache.thrift.TException + public List get_fields(String db_name, String table_name, String validWriteIdList) throws MetaException, UnknownTableException, UnknownDBException, org.apache.thrift.TException { - send_get_fields(db_name, table_name); + send_get_fields(db_name, table_name, validWriteIdList); return recv_get_fields(); } - public void send_get_fields(String db_name, String table_name) throws org.apache.thrift.TException + public void send_get_fields(String db_name, String table_name, String validWriteIdList) throws org.apache.thrift.TException { get_fields_args args = new get_fields_args(); args.setDb_name(db_name); args.setTable_name(table_name); + args.setValidWriteIdList(validWriteIdList); sendBase("get_fields", args); } @@ -1479,18 +1480,19 @@ public void send_get_fields(String db_name, String table_name) throws org.apache throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_fields failed: unknown result"); } - public List get_fields_with_environment_context(String db_name, String table_name, EnvironmentContext environment_context) throws MetaException, UnknownTableException, UnknownDBException, org.apache.thrift.TException + public List get_fields_with_environment_context(String db_name, String table_name, EnvironmentContext environment_context, String validWriteIdList) throws MetaException, UnknownTableException, UnknownDBException, org.apache.thrift.TException { - send_get_fields_with_environment_context(db_name, table_name, environment_context); + send_get_fields_with_environment_context(db_name, table_name, environment_context, validWriteIdList); return recv_get_fields_with_environment_context(); } - public void send_get_fields_with_environment_context(String db_name, String table_name, EnvironmentContext environment_context) throws org.apache.thrift.TException + public void send_get_fields_with_environment_context(String db_name, String table_name, EnvironmentContext environment_context, String validWriteIdList) throws org.apache.thrift.TException { get_fields_with_environment_context_args args = new get_fields_with_environment_context_args(); args.setDb_name(db_name); args.setTable_name(table_name); args.setEnvironment_context(environment_context); + args.setValidWriteIdList(validWriteIdList); sendBase("get_fields_with_environment_context", args); } @@ -1513,17 +1515,18 @@ public void 
send_get_fields_with_environment_context(String db_name, String tabl throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_fields_with_environment_context failed: unknown result"); } - public List get_schema(String db_name, String table_name) throws MetaException, UnknownTableException, UnknownDBException, org.apache.thrift.TException + public List get_schema(String db_name, String table_name, String validWriteIdList) throws MetaException, UnknownTableException, UnknownDBException, org.apache.thrift.TException { - send_get_schema(db_name, table_name); + send_get_schema(db_name, table_name, validWriteIdList); return recv_get_schema(); } - public void send_get_schema(String db_name, String table_name) throws org.apache.thrift.TException + public void send_get_schema(String db_name, String table_name, String validWriteIdList) throws org.apache.thrift.TException { get_schema_args args = new get_schema_args(); args.setDb_name(db_name); args.setTable_name(table_name); + args.setValidWriteIdList(validWriteIdList); sendBase("get_schema", args); } @@ -1546,18 +1549,19 @@ public void send_get_schema(String db_name, String table_name) throws org.apache throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_schema failed: unknown result"); } - public List get_schema_with_environment_context(String db_name, String table_name, EnvironmentContext environment_context) throws MetaException, UnknownTableException, UnknownDBException, org.apache.thrift.TException + public List get_schema_with_environment_context(String db_name, String table_name, EnvironmentContext environment_context, String validWriteIdList) throws MetaException, UnknownTableException, UnknownDBException, org.apache.thrift.TException { - send_get_schema_with_environment_context(db_name, table_name, environment_context); + send_get_schema_with_environment_context(db_name, table_name, environment_context, validWriteIdList); return recv_get_schema_with_environment_context(); } - public void send_get_schema_with_environment_context(String db_name, String table_name, EnvironmentContext environment_context) throws org.apache.thrift.TException + public void send_get_schema_with_environment_context(String db_name, String table_name, EnvironmentContext environment_context, String validWriteIdList) throws org.apache.thrift.TException { get_schema_with_environment_context_args args = new get_schema_with_environment_context_args(); args.setDb_name(db_name); args.setTable_name(table_name); args.setEnvironment_context(environment_context); + args.setValidWriteIdList(validWriteIdList); sendBase("get_schema_with_environment_context", args); } @@ -2165,17 +2169,18 @@ public void send_get_all_tables(String db_name) throws org.apache.thrift.TExcept throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_all_tables failed: unknown result"); } - public Table get_table(String dbname, String tbl_name) throws MetaException, NoSuchObjectException, org.apache.thrift.TException + public Table get_table(String dbname, String tbl_name, String validWriteIdList) throws MetaException, NoSuchObjectException, org.apache.thrift.TException { - send_get_table(dbname, tbl_name); + send_get_table(dbname, tbl_name, validWriteIdList); return recv_get_table(); } - public void send_get_table(String dbname, String tbl_name) throws org.apache.thrift.TException + public void send_get_table(String dbname, String 
tbl_name, String validWriteIdList) throws org.apache.thrift.TException { get_table_args args = new get_table_args(); args.setDbname(dbname); args.setTbl_name(tbl_name); + args.setValidWriteIdList(validWriteIdList); sendBase("get_table", args); } @@ -2978,18 +2983,19 @@ public DropPartitionsResult recv_drop_partitions_req() throws NoSuchObjectExcept throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "drop_partitions_req failed: unknown result"); } - public Partition get_partition(String db_name, String tbl_name, List part_vals) throws MetaException, NoSuchObjectException, org.apache.thrift.TException + public Partition get_partition(String db_name, String tbl_name, List part_vals, String validTxnList) throws MetaException, NoSuchObjectException, org.apache.thrift.TException { - send_get_partition(db_name, tbl_name, part_vals); + send_get_partition(db_name, tbl_name, part_vals, validTxnList); return recv_get_partition(); } - public void send_get_partition(String db_name, String tbl_name, List part_vals) throws org.apache.thrift.TException + public void send_get_partition(String db_name, String tbl_name, List part_vals, String validTxnList) throws org.apache.thrift.TException { get_partition_args args = new get_partition_args(); args.setDb_name(db_name); args.setTbl_name(tbl_name); args.setPart_vals(part_vals); + args.setValidTxnList(validTxnList); sendBase("get_partition", args); } @@ -3087,13 +3093,13 @@ public void send_exchange_partitions(Map partitionSpecs, String s throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "exchange_partitions failed: unknown result"); } - public Partition get_partition_with_auth(String db_name, String tbl_name, List part_vals, String user_name, List group_names) throws MetaException, NoSuchObjectException, org.apache.thrift.TException + public Partition get_partition_with_auth(String db_name, String tbl_name, List part_vals, String user_name, List group_names, String validTxnList) throws MetaException, NoSuchObjectException, org.apache.thrift.TException { - send_get_partition_with_auth(db_name, tbl_name, part_vals, user_name, group_names); + send_get_partition_with_auth(db_name, tbl_name, part_vals, user_name, group_names, validTxnList); return recv_get_partition_with_auth(); } - public void send_get_partition_with_auth(String db_name, String tbl_name, List part_vals, String user_name, List group_names) throws org.apache.thrift.TException + public void send_get_partition_with_auth(String db_name, String tbl_name, List part_vals, String user_name, List group_names, String validTxnList) throws org.apache.thrift.TException { get_partition_with_auth_args args = new get_partition_with_auth_args(); args.setDb_name(db_name); @@ -3101,6 +3107,7 @@ public void send_get_partition_with_auth(String db_name, String tbl_name, List get_partitions(String db_name, String tbl_name, short max_parts) throws NoSuchObjectException, MetaException, org.apache.thrift.TException + public List get_partitions(String db_name, String tbl_name, short max_parts, String validTxnList) throws NoSuchObjectException, MetaException, org.apache.thrift.TException { - send_get_partitions(db_name, tbl_name, max_parts); + send_get_partitions(db_name, tbl_name, max_parts, validTxnList); return recv_get_partitions(); } - public void send_get_partitions(String db_name, String tbl_name, short max_parts) throws org.apache.thrift.TException + public void send_get_partitions(String db_name, String 
tbl_name, short max_parts, String validTxnList) throws org.apache.thrift.TException { get_partitions_args args = new get_partitions_args(); args.setDb_name(db_name); args.setTbl_name(tbl_name); args.setMax_parts(max_parts); + args.setValidTxnList(validTxnList); sendBase("get_partitions", args); } @@ -3182,13 +3191,13 @@ public void send_get_partitions(String db_name, String tbl_name, short max_parts throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_partitions failed: unknown result"); } - public List get_partitions_with_auth(String db_name, String tbl_name, short max_parts, String user_name, List group_names) throws NoSuchObjectException, MetaException, org.apache.thrift.TException + public List get_partitions_with_auth(String db_name, String tbl_name, short max_parts, String user_name, List group_names, String validTxnList) throws NoSuchObjectException, MetaException, org.apache.thrift.TException { - send_get_partitions_with_auth(db_name, tbl_name, max_parts, user_name, group_names); + send_get_partitions_with_auth(db_name, tbl_name, max_parts, user_name, group_names, validTxnList); return recv_get_partitions_with_auth(); } - public void send_get_partitions_with_auth(String db_name, String tbl_name, short max_parts, String user_name, List group_names) throws org.apache.thrift.TException + public void send_get_partitions_with_auth(String db_name, String tbl_name, short max_parts, String user_name, List group_names, String validTxnList) throws org.apache.thrift.TException { get_partitions_with_auth_args args = new get_partitions_with_auth_args(); args.setDb_name(db_name); @@ -3196,6 +3205,7 @@ public void send_get_partitions_with_auth(String db_name, String tbl_name, short args.setMax_parts(max_parts); args.setUser_name(user_name); args.setGroup_names(group_names); + args.setValidTxnList(validTxnList); sendBase("get_partitions_with_auth", args); } @@ -3215,18 +3225,19 @@ public void send_get_partitions_with_auth(String db_name, String tbl_name, short throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_partitions_with_auth failed: unknown result"); } - public List get_partitions_pspec(String db_name, String tbl_name, int max_parts) throws NoSuchObjectException, MetaException, org.apache.thrift.TException + public List get_partitions_pspec(String db_name, String tbl_name, int max_parts, String validTxnList) throws NoSuchObjectException, MetaException, org.apache.thrift.TException { - send_get_partitions_pspec(db_name, tbl_name, max_parts); + send_get_partitions_pspec(db_name, tbl_name, max_parts, validTxnList); return recv_get_partitions_pspec(); } - public void send_get_partitions_pspec(String db_name, String tbl_name, int max_parts) throws org.apache.thrift.TException + public void send_get_partitions_pspec(String db_name, String tbl_name, int max_parts, String validTxnList) throws org.apache.thrift.TException { get_partitions_pspec_args args = new get_partitions_pspec_args(); args.setDb_name(db_name); args.setTbl_name(tbl_name); args.setMax_parts(max_parts); + args.setValidTxnList(validTxnList); sendBase("get_partitions_pspec", args); } @@ -3246,18 +3257,19 @@ public void send_get_partitions_pspec(String db_name, String tbl_name, int max_p throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_partitions_pspec failed: unknown result"); } - public List get_partition_names(String db_name, String tbl_name, short 
max_parts) throws NoSuchObjectException, MetaException, org.apache.thrift.TException + public List get_partition_names(String db_name, String tbl_name, short max_parts, String validTxnList) throws NoSuchObjectException, MetaException, org.apache.thrift.TException { - send_get_partition_names(db_name, tbl_name, max_parts); + send_get_partition_names(db_name, tbl_name, max_parts, validTxnList); return recv_get_partition_names(); } - public void send_get_partition_names(String db_name, String tbl_name, short max_parts) throws org.apache.thrift.TException + public void send_get_partition_names(String db_name, String tbl_name, short max_parts, String validTxnList) throws org.apache.thrift.TException { get_partition_names_args args = new get_partition_names_args(); args.setDb_name(db_name); args.setTbl_name(tbl_name); args.setMax_parts(max_parts); + args.setValidTxnList(validTxnList); sendBase("get_partition_names", args); } @@ -3306,19 +3318,20 @@ public PartitionValuesResponse recv_get_partition_values() throws MetaException, throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_partition_values failed: unknown result"); } - public List get_partitions_ps(String db_name, String tbl_name, List part_vals, short max_parts) throws MetaException, NoSuchObjectException, org.apache.thrift.TException + public List get_partitions_ps(String db_name, String tbl_name, List part_vals, short max_parts, String validTxnList) throws MetaException, NoSuchObjectException, org.apache.thrift.TException { - send_get_partitions_ps(db_name, tbl_name, part_vals, max_parts); + send_get_partitions_ps(db_name, tbl_name, part_vals, max_parts, validTxnList); return recv_get_partitions_ps(); } - public void send_get_partitions_ps(String db_name, String tbl_name, List part_vals, short max_parts) throws org.apache.thrift.TException + public void send_get_partitions_ps(String db_name, String tbl_name, List part_vals, short max_parts, String validTxnList) throws org.apache.thrift.TException { get_partitions_ps_args args = new get_partitions_ps_args(); args.setDb_name(db_name); args.setTbl_name(tbl_name); args.setPart_vals(part_vals); args.setMax_parts(max_parts); + args.setValidTxnList(validTxnList); sendBase("get_partitions_ps", args); } @@ -3338,13 +3351,13 @@ public void send_get_partitions_ps(String db_name, String tbl_name, List throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_partitions_ps failed: unknown result"); } - public List get_partitions_ps_with_auth(String db_name, String tbl_name, List part_vals, short max_parts, String user_name, List group_names) throws NoSuchObjectException, MetaException, org.apache.thrift.TException + public List get_partitions_ps_with_auth(String db_name, String tbl_name, List part_vals, short max_parts, String user_name, List group_names, String validTxnList) throws NoSuchObjectException, MetaException, org.apache.thrift.TException { - send_get_partitions_ps_with_auth(db_name, tbl_name, part_vals, max_parts, user_name, group_names); + send_get_partitions_ps_with_auth(db_name, tbl_name, part_vals, max_parts, user_name, group_names, validTxnList); return recv_get_partitions_ps_with_auth(); } - public void send_get_partitions_ps_with_auth(String db_name, String tbl_name, List part_vals, short max_parts, String user_name, List group_names) throws org.apache.thrift.TException + public void send_get_partitions_ps_with_auth(String db_name, String tbl_name, List part_vals, short 
max_parts, String user_name, List group_names, String validTxnList) throws org.apache.thrift.TException { get_partitions_ps_with_auth_args args = new get_partitions_ps_with_auth_args(); args.setDb_name(db_name); @@ -3353,6 +3366,7 @@ public void send_get_partitions_ps_with_auth(String db_name, String tbl_name, Li args.setMax_parts(max_parts); args.setUser_name(user_name); args.setGroup_names(group_names); + args.setValidTxnList(validTxnList); sendBase("get_partitions_ps_with_auth", args); } @@ -3372,19 +3386,20 @@ public void send_get_partitions_ps_with_auth(String db_name, String tbl_name, Li throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_partitions_ps_with_auth failed: unknown result"); } - public List get_partition_names_ps(String db_name, String tbl_name, List part_vals, short max_parts) throws MetaException, NoSuchObjectException, org.apache.thrift.TException + public List get_partition_names_ps(String db_name, String tbl_name, List part_vals, short max_parts, String validTxnList) throws MetaException, NoSuchObjectException, org.apache.thrift.TException { - send_get_partition_names_ps(db_name, tbl_name, part_vals, max_parts); + send_get_partition_names_ps(db_name, tbl_name, part_vals, max_parts, validTxnList); return recv_get_partition_names_ps(); } - public void send_get_partition_names_ps(String db_name, String tbl_name, List part_vals, short max_parts) throws org.apache.thrift.TException + public void send_get_partition_names_ps(String db_name, String tbl_name, List part_vals, short max_parts, String validTxnList) throws org.apache.thrift.TException { get_partition_names_ps_args args = new get_partition_names_ps_args(); args.setDb_name(db_name); args.setTbl_name(tbl_name); args.setPart_vals(part_vals); args.setMax_parts(max_parts); + args.setValidTxnList(validTxnList); sendBase("get_partition_names_ps", args); } @@ -3404,19 +3419,20 @@ public void send_get_partition_names_ps(String db_name, String tbl_name, List get_partitions_by_filter(String db_name, String tbl_name, String filter, short max_parts) throws MetaException, NoSuchObjectException, org.apache.thrift.TException + public List get_partitions_by_filter(String db_name, String tbl_name, String filter, short max_parts, String validTxnList) throws MetaException, NoSuchObjectException, org.apache.thrift.TException { - send_get_partitions_by_filter(db_name, tbl_name, filter, max_parts); + send_get_partitions_by_filter(db_name, tbl_name, filter, max_parts, validTxnList); return recv_get_partitions_by_filter(); } - public void send_get_partitions_by_filter(String db_name, String tbl_name, String filter, short max_parts) throws org.apache.thrift.TException + public void send_get_partitions_by_filter(String db_name, String tbl_name, String filter, short max_parts, String validTxnList) throws org.apache.thrift.TException { get_partitions_by_filter_args args = new get_partitions_by_filter_args(); args.setDb_name(db_name); args.setTbl_name(tbl_name); args.setFilter(filter); args.setMax_parts(max_parts); + args.setValidTxnList(validTxnList); sendBase("get_partitions_by_filter", args); } @@ -3436,19 +3452,20 @@ public void send_get_partitions_by_filter(String db_name, String tbl_name, Strin throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_partitions_by_filter failed: unknown result"); } - public List get_part_specs_by_filter(String db_name, String tbl_name, String filter, int max_parts) throws MetaException, 
NoSuchObjectException, org.apache.thrift.TException + public List get_part_specs_by_filter(String db_name, String tbl_name, String filter, int max_parts, String validTxnList) throws MetaException, NoSuchObjectException, org.apache.thrift.TException { - send_get_part_specs_by_filter(db_name, tbl_name, filter, max_parts); + send_get_part_specs_by_filter(db_name, tbl_name, filter, max_parts, validTxnList); return recv_get_part_specs_by_filter(); } - public void send_get_part_specs_by_filter(String db_name, String tbl_name, String filter, int max_parts) throws org.apache.thrift.TException + public void send_get_part_specs_by_filter(String db_name, String tbl_name, String filter, int max_parts, String validTxnList) throws org.apache.thrift.TException { get_part_specs_by_filter_args args = new get_part_specs_by_filter_args(); args.setDb_name(db_name); args.setTbl_name(tbl_name); args.setFilter(filter); args.setMax_parts(max_parts); + args.setValidTxnList(validTxnList); sendBase("get_part_specs_by_filter", args); } @@ -3497,18 +3514,19 @@ public PartitionsByExprResult recv_get_partitions_by_expr() throws MetaException throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_partitions_by_expr failed: unknown result"); } - public int get_num_partitions_by_filter(String db_name, String tbl_name, String filter) throws MetaException, NoSuchObjectException, org.apache.thrift.TException + public int get_num_partitions_by_filter(String db_name, String tbl_name, String filter, String validTxnList) throws MetaException, NoSuchObjectException, org.apache.thrift.TException { - send_get_num_partitions_by_filter(db_name, tbl_name, filter); + send_get_num_partitions_by_filter(db_name, tbl_name, filter, validTxnList); return recv_get_num_partitions_by_filter(); } - public void send_get_num_partitions_by_filter(String db_name, String tbl_name, String filter) throws org.apache.thrift.TException + public void send_get_num_partitions_by_filter(String db_name, String tbl_name, String filter, String validTxnList) throws org.apache.thrift.TException { get_num_partitions_by_filter_args args = new get_num_partitions_by_filter_args(); args.setDb_name(db_name); args.setTbl_name(tbl_name); args.setFilter(filter); + args.setValidTxnList(validTxnList); sendBase("get_num_partitions_by_filter", args); } @@ -3528,18 +3546,19 @@ public int recv_get_num_partitions_by_filter() throws MetaException, NoSuchObjec throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_num_partitions_by_filter failed: unknown result"); } - public List get_partitions_by_names(String db_name, String tbl_name, List names) throws MetaException, NoSuchObjectException, org.apache.thrift.TException + public List get_partitions_by_names(String db_name, String tbl_name, List names, String validTxnList) throws MetaException, NoSuchObjectException, org.apache.thrift.TException { - send_get_partitions_by_names(db_name, tbl_name, names); + send_get_partitions_by_names(db_name, tbl_name, names, validTxnList); return recv_get_partitions_by_names(); } - public void send_get_partitions_by_names(String db_name, String tbl_name, List names) throws org.apache.thrift.TException + public void send_get_partitions_by_names(String db_name, String tbl_name, List names, String validTxnList) throws org.apache.thrift.TException { get_partitions_by_names_args args = new get_partitions_by_names_args(); args.setDb_name(db_name); args.setTbl_name(tbl_name); 
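// Note on the regenerated send_* helpers: each one adds the snapshot string (validTxnList for the partition calls, validWriteIdList for the table, schema and column-stat calls) as one more field on the existing Thrift args struct before handing it to sendBase(); a server that does not know the field can ignore it, the usual Thrift behavior for an optional field.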
args.setNames(names); + args.setValidTxnList(validTxnList); sendBase("get_partitions_by_names", args); } @@ -4294,18 +4313,19 @@ public SetPartitionsStatsResponse recv_update_partition_column_statistics_req() throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "update_partition_column_statistics_req failed: unknown result"); } - public ColumnStatistics get_table_column_statistics(String db_name, String tbl_name, String col_name) throws NoSuchObjectException, MetaException, InvalidInputException, InvalidObjectException, org.apache.thrift.TException + public ColumnStatistics get_table_column_statistics(String db_name, String tbl_name, String col_name, String validWriteIdList) throws NoSuchObjectException, MetaException, InvalidInputException, InvalidObjectException, org.apache.thrift.TException { - send_get_table_column_statistics(db_name, tbl_name, col_name); + send_get_table_column_statistics(db_name, tbl_name, col_name, validWriteIdList); return recv_get_table_column_statistics(); } - public void send_get_table_column_statistics(String db_name, String tbl_name, String col_name) throws org.apache.thrift.TException + public void send_get_table_column_statistics(String db_name, String tbl_name, String col_name, String validWriteIdList) throws org.apache.thrift.TException { get_table_column_statistics_args args = new get_table_column_statistics_args(); args.setDb_name(db_name); args.setTbl_name(tbl_name); args.setCol_name(col_name); + args.setValidWriteIdList(validWriteIdList); sendBase("get_table_column_statistics", args); } @@ -4331,19 +4351,20 @@ public ColumnStatistics recv_get_table_column_statistics() throws NoSuchObjectEx throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_table_column_statistics failed: unknown result"); } - public ColumnStatistics get_partition_column_statistics(String db_name, String tbl_name, String part_name, String col_name) throws NoSuchObjectException, MetaException, InvalidInputException, InvalidObjectException, org.apache.thrift.TException + public ColumnStatistics get_partition_column_statistics(String db_name, String tbl_name, String part_name, String col_name, String validWriteIdList) throws NoSuchObjectException, MetaException, InvalidInputException, InvalidObjectException, org.apache.thrift.TException { - send_get_partition_column_statistics(db_name, tbl_name, part_name, col_name); + send_get_partition_column_statistics(db_name, tbl_name, part_name, col_name, validWriteIdList); return recv_get_partition_column_statistics(); } - public void send_get_partition_column_statistics(String db_name, String tbl_name, String part_name, String col_name) throws org.apache.thrift.TException + public void send_get_partition_column_statistics(String db_name, String tbl_name, String part_name, String col_name, String validWriteIdList) throws org.apache.thrift.TException { get_partition_column_statistics_args args = new get_partition_column_statistics_args(); args.setDb_name(db_name); args.setTbl_name(tbl_name); args.setPart_name(part_name); args.setCol_name(col_name); + args.setValidWriteIdList(validWriteIdList); sendBase("get_partition_column_statistics", args); } @@ -7986,9 +8007,9 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa } } - public void get_fields(String db_name, String table_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void get_fields(String 
db_name, String table_name, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - get_fields_call method_call = new get_fields_call(db_name, table_name, resultHandler, this, ___protocolFactory, ___transport); + get_fields_call method_call = new get_fields_call(db_name, table_name, validWriteIdList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -7996,10 +8017,12 @@ public void get_fields(String db_name, String table_name, org.apache.thrift.asyn @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_fields_call extends org.apache.thrift.async.TAsyncMethodCall { private String db_name; private String table_name; - public get_fields_call(String db_name, String table_name, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String validWriteIdList; + public get_fields_call(String db_name, String table_name, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.db_name = db_name; this.table_name = table_name; + this.validWriteIdList = validWriteIdList; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { @@ -8007,6 +8030,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa get_fields_args args = new get_fields_args(); args.setDb_name(db_name); args.setTable_name(table_name); + args.setValidWriteIdList(validWriteIdList); args.write(prot); prot.writeMessageEnd(); } @@ -8021,9 +8045,9 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa } } - public void get_fields_with_environment_context(String db_name, String table_name, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void get_fields_with_environment_context(String db_name, String table_name, EnvironmentContext environment_context, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - get_fields_with_environment_context_call method_call = new get_fields_with_environment_context_call(db_name, table_name, environment_context, resultHandler, this, ___protocolFactory, ___transport); + get_fields_with_environment_context_call method_call = new get_fields_with_environment_context_call(db_name, table_name, environment_context, validWriteIdList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -8032,11 +8056,13 @@ public void get_fields_with_environment_context(String db_name, String table_nam private String db_name; private String table_name; private EnvironmentContext environment_context; - public get_fields_with_environment_context_call(String db_name, String table_name, EnvironmentContext environment_context, 
org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String validWriteIdList; + public get_fields_with_environment_context_call(String db_name, String table_name, EnvironmentContext environment_context, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.db_name = db_name; this.table_name = table_name; this.environment_context = environment_context; + this.validWriteIdList = validWriteIdList; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { @@ -8045,6 +8071,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa args.setDb_name(db_name); args.setTable_name(table_name); args.setEnvironment_context(environment_context); + args.setValidWriteIdList(validWriteIdList); args.write(prot); prot.writeMessageEnd(); } @@ -8059,9 +8086,9 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa } } - public void get_schema(String db_name, String table_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void get_schema(String db_name, String table_name, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - get_schema_call method_call = new get_schema_call(db_name, table_name, resultHandler, this, ___protocolFactory, ___transport); + get_schema_call method_call = new get_schema_call(db_name, table_name, validWriteIdList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -8069,10 +8096,12 @@ public void get_schema(String db_name, String table_name, org.apache.thrift.asyn @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_schema_call extends org.apache.thrift.async.TAsyncMethodCall { private String db_name; private String table_name; - public get_schema_call(String db_name, String table_name, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String validWriteIdList; + public get_schema_call(String db_name, String table_name, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.db_name = db_name; this.table_name = table_name; + this.validWriteIdList = validWriteIdList; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { @@ -8080,6 +8109,7 @@ public void 
write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa get_schema_args args = new get_schema_args(); args.setDb_name(db_name); args.setTable_name(table_name); + args.setValidWriteIdList(validWriteIdList); args.write(prot); prot.writeMessageEnd(); } @@ -8094,9 +8124,9 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa } } - public void get_schema_with_environment_context(String db_name, String table_name, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void get_schema_with_environment_context(String db_name, String table_name, EnvironmentContext environment_context, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - get_schema_with_environment_context_call method_call = new get_schema_with_environment_context_call(db_name, table_name, environment_context, resultHandler, this, ___protocolFactory, ___transport); + get_schema_with_environment_context_call method_call = new get_schema_with_environment_context_call(db_name, table_name, environment_context, validWriteIdList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -8105,11 +8135,13 @@ public void get_schema_with_environment_context(String db_name, String table_nam private String db_name; private String table_name; private EnvironmentContext environment_context; - public get_schema_with_environment_context_call(String db_name, String table_name, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String validWriteIdList; + public get_schema_with_environment_context_call(String db_name, String table_name, EnvironmentContext environment_context, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.db_name = db_name; this.table_name = table_name; this.environment_context = environment_context; + this.validWriteIdList = validWriteIdList; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { @@ -8118,6 +8150,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa args.setDb_name(db_name); args.setTable_name(table_name); args.setEnvironment_context(environment_context); + args.setValidWriteIdList(validWriteIdList); args.write(prot); prot.writeMessageEnd(); } @@ -8858,9 +8891,9 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa } } - public void get_table(String dbname, String tbl_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void get_table(String dbname, String tbl_name, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - get_table_call method_call = new get_table_call(dbname, tbl_name, resultHandler, this, 
___protocolFactory, ___transport); + get_table_call method_call = new get_table_call(dbname, tbl_name, validWriteIdList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -8868,10 +8901,12 @@ public void get_table(String dbname, String tbl_name, org.apache.thrift.async.As @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_table_call extends org.apache.thrift.async.TAsyncMethodCall { private String dbname; private String tbl_name; - public get_table_call(String dbname, String tbl_name, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String validWriteIdList; + public get_table_call(String dbname, String tbl_name, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.dbname = dbname; this.tbl_name = tbl_name; + this.validWriteIdList = validWriteIdList; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { @@ -8879,6 +8914,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa get_table_args args = new get_table_args(); args.setDbname(dbname); args.setTbl_name(tbl_name); + args.setValidWriteIdList(validWriteIdList); args.write(prot); prot.writeMessageEnd(); } @@ -9813,9 +9849,9 @@ public DropPartitionsResult getResult() throws NoSuchObjectException, MetaExcept } } - public void get_partition(String db_name, String tbl_name, List part_vals, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void get_partition(String db_name, String tbl_name, List part_vals, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - get_partition_call method_call = new get_partition_call(db_name, tbl_name, part_vals, resultHandler, this, ___protocolFactory, ___transport); + get_partition_call method_call = new get_partition_call(db_name, tbl_name, part_vals, validTxnList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -9824,11 +9860,13 @@ public void get_partition(String db_name, String tbl_name, List part_val private String db_name; private String tbl_name; private List part_vals; - public get_partition_call(String db_name, String tbl_name, List part_vals, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String validTxnList; + public get_partition_call(String db_name, String tbl_name, List part_vals, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, 
org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.db_name = db_name; this.tbl_name = tbl_name; this.part_vals = part_vals; + this.validTxnList = validTxnList; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { @@ -9837,6 +9875,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa args.setDb_name(db_name); args.setTbl_name(tbl_name); args.setPart_vals(part_vals); + args.setValidTxnList(validTxnList); args.write(prot); prot.writeMessageEnd(); } @@ -9939,9 +9978,9 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa } } - public void get_partition_with_auth(String db_name, String tbl_name, List part_vals, String user_name, List group_names, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void get_partition_with_auth(String db_name, String tbl_name, List part_vals, String user_name, List group_names, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - get_partition_with_auth_call method_call = new get_partition_with_auth_call(db_name, tbl_name, part_vals, user_name, group_names, resultHandler, this, ___protocolFactory, ___transport); + get_partition_with_auth_call method_call = new get_partition_with_auth_call(db_name, tbl_name, part_vals, user_name, group_names, validTxnList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -9952,13 +9991,15 @@ public void get_partition_with_auth(String db_name, String tbl_name, List part_vals; private String user_name; private List group_names; - public get_partition_with_auth_call(String db_name, String tbl_name, List part_vals, String user_name, List group_names, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String validTxnList; + public get_partition_with_auth_call(String db_name, String tbl_name, List part_vals, String user_name, List group_names, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.db_name = db_name; this.tbl_name = tbl_name; this.part_vals = part_vals; this.user_name = user_name; this.group_names = group_names; + this.validTxnList = validTxnList; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { @@ -9969,6 +10010,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa args.setPart_vals(part_vals); args.setUser_name(user_name); args.setGroup_names(group_names); + args.setValidTxnList(validTxnList); args.write(prot); prot.writeMessageEnd(); } @@ -9983,9 +10025,9 @@ public Partition getResult() throws MetaException, NoSuchObjectException, org.ap } } - public void get_partition_by_name(String db_name, String tbl_name, String part_name, org.apache.thrift.async.AsyncMethodCallback 
resultHandler) throws org.apache.thrift.TException { + public void get_partition_by_name(String db_name, String tbl_name, String part_name, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - get_partition_by_name_call method_call = new get_partition_by_name_call(db_name, tbl_name, part_name, resultHandler, this, ___protocolFactory, ___transport); + get_partition_by_name_call method_call = new get_partition_by_name_call(db_name, tbl_name, part_name, validTxnList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -9994,11 +10036,13 @@ public void get_partition_by_name(String db_name, String tbl_name, String part_n private String db_name; private String tbl_name; private String part_name; - public get_partition_by_name_call(String db_name, String tbl_name, String part_name, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String validTxnList; + public get_partition_by_name_call(String db_name, String tbl_name, String part_name, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.db_name = db_name; this.tbl_name = tbl_name; this.part_name = part_name; + this.validTxnList = validTxnList; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { @@ -10007,6 +10051,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa args.setDb_name(db_name); args.setTbl_name(tbl_name); args.setPart_name(part_name); + args.setValidTxnList(validTxnList); args.write(prot); prot.writeMessageEnd(); } @@ -10021,9 +10066,9 @@ public Partition getResult() throws MetaException, NoSuchObjectException, org.ap } } - public void get_partitions(String db_name, String tbl_name, short max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void get_partitions(String db_name, String tbl_name, short max_parts, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - get_partitions_call method_call = new get_partitions_call(db_name, tbl_name, max_parts, resultHandler, this, ___protocolFactory, ___transport); + get_partitions_call method_call = new get_partitions_call(db_name, tbl_name, max_parts, validTxnList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -10032,11 +10077,13 @@ public void get_partitions(String db_name, String tbl_name, short max_parts, org private String db_name; private String tbl_name; private short max_parts; - public get_partitions_call(String db_name, String tbl_name, short max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws 
org.apache.thrift.TException { + private String validTxnList; + public get_partitions_call(String db_name, String tbl_name, short max_parts, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.db_name = db_name; this.tbl_name = tbl_name; this.max_parts = max_parts; + this.validTxnList = validTxnList; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { @@ -10045,6 +10092,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa args.setDb_name(db_name); args.setTbl_name(tbl_name); args.setMax_parts(max_parts); + args.setValidTxnList(validTxnList); args.write(prot); prot.writeMessageEnd(); } @@ -10059,9 +10107,9 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa } } - public void get_partitions_with_auth(String db_name, String tbl_name, short max_parts, String user_name, List group_names, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void get_partitions_with_auth(String db_name, String tbl_name, short max_parts, String user_name, List group_names, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - get_partitions_with_auth_call method_call = new get_partitions_with_auth_call(db_name, tbl_name, max_parts, user_name, group_names, resultHandler, this, ___protocolFactory, ___transport); + get_partitions_with_auth_call method_call = new get_partitions_with_auth_call(db_name, tbl_name, max_parts, user_name, group_names, validTxnList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -10072,13 +10120,15 @@ public void get_partitions_with_auth(String db_name, String tbl_name, short max_ private short max_parts; private String user_name; private List group_names; - public get_partitions_with_auth_call(String db_name, String tbl_name, short max_parts, String user_name, List group_names, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String validTxnList; + public get_partitions_with_auth_call(String db_name, String tbl_name, short max_parts, String user_name, List group_names, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.db_name = db_name; this.tbl_name = tbl_name; this.max_parts = max_parts; this.user_name = user_name; this.group_names = group_names; + this.validTxnList = validTxnList; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { @@ -10089,6 +10139,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa args.setMax_parts(max_parts); 
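// The async *_call wrappers mirror the blocking client: the constructor captures validTxnList as a plain field, and write_args() serializes it after the original arguments, so the wire format stays identical to the synchronous path.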
args.setUser_name(user_name); args.setGroup_names(group_names); + args.setValidTxnList(validTxnList); args.write(prot); prot.writeMessageEnd(); } @@ -10103,9 +10154,9 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa } } - public void get_partitions_pspec(String db_name, String tbl_name, int max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void get_partitions_pspec(String db_name, String tbl_name, int max_parts, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - get_partitions_pspec_call method_call = new get_partitions_pspec_call(db_name, tbl_name, max_parts, resultHandler, this, ___protocolFactory, ___transport); + get_partitions_pspec_call method_call = new get_partitions_pspec_call(db_name, tbl_name, max_parts, validTxnList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -10114,11 +10165,13 @@ public void get_partitions_pspec(String db_name, String tbl_name, int max_parts, private String db_name; private String tbl_name; private int max_parts; - public get_partitions_pspec_call(String db_name, String tbl_name, int max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String validTxnList; + public get_partitions_pspec_call(String db_name, String tbl_name, int max_parts, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.db_name = db_name; this.tbl_name = tbl_name; this.max_parts = max_parts; + this.validTxnList = validTxnList; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { @@ -10127,6 +10180,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa args.setDb_name(db_name); args.setTbl_name(tbl_name); args.setMax_parts(max_parts); + args.setValidTxnList(validTxnList); args.write(prot); prot.writeMessageEnd(); } @@ -10141,9 +10195,9 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa } } - public void get_partition_names(String db_name, String tbl_name, short max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void get_partition_names(String db_name, String tbl_name, short max_parts, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - get_partition_names_call method_call = new get_partition_names_call(db_name, tbl_name, max_parts, resultHandler, this, ___protocolFactory, ___transport); + get_partition_names_call method_call = new get_partition_names_call(db_name, tbl_name, max_parts, validTxnList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -10152,11 +10206,13 @@ public void get_partition_names(String db_name, String tbl_name, short max_parts private String 
db_name; private String tbl_name; private short max_parts; - public get_partition_names_call(String db_name, String tbl_name, short max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String validTxnList; + public get_partition_names_call(String db_name, String tbl_name, short max_parts, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.db_name = db_name; this.tbl_name = tbl_name; this.max_parts = max_parts; + this.validTxnList = validTxnList; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { @@ -10165,6 +10221,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa args.setDb_name(db_name); args.setTbl_name(tbl_name); args.setMax_parts(max_parts); + args.setValidTxnList(validTxnList); args.write(prot); prot.writeMessageEnd(); } @@ -10211,9 +10268,9 @@ public PartitionValuesResponse getResult() throws MetaException, NoSuchObjectExc } } - public void get_partitions_ps(String db_name, String tbl_name, List part_vals, short max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void get_partitions_ps(String db_name, String tbl_name, List part_vals, short max_parts, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - get_partitions_ps_call method_call = new get_partitions_ps_call(db_name, tbl_name, part_vals, max_parts, resultHandler, this, ___protocolFactory, ___transport); + get_partitions_ps_call method_call = new get_partitions_ps_call(db_name, tbl_name, part_vals, max_parts, validTxnList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -10223,12 +10280,14 @@ public void get_partitions_ps(String db_name, String tbl_name, List part private String tbl_name; private List part_vals; private short max_parts; - public get_partitions_ps_call(String db_name, String tbl_name, List part_vals, short max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String validTxnList; + public get_partitions_ps_call(String db_name, String tbl_name, List part_vals, short max_parts, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.db_name = db_name; this.tbl_name = tbl_name; this.part_vals = part_vals; this.max_parts = max_parts; + this.validTxnList = validTxnList; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws 
org.apache.thrift.TException { @@ -10238,6 +10297,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa args.setTbl_name(tbl_name); args.setPart_vals(part_vals); args.setMax_parts(max_parts); + args.setValidTxnList(validTxnList); args.write(prot); prot.writeMessageEnd(); } @@ -10252,9 +10312,9 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa } } - public void get_partitions_ps_with_auth(String db_name, String tbl_name, List part_vals, short max_parts, String user_name, List group_names, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void get_partitions_ps_with_auth(String db_name, String tbl_name, List part_vals, short max_parts, String user_name, List group_names, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - get_partitions_ps_with_auth_call method_call = new get_partitions_ps_with_auth_call(db_name, tbl_name, part_vals, max_parts, user_name, group_names, resultHandler, this, ___protocolFactory, ___transport); + get_partitions_ps_with_auth_call method_call = new get_partitions_ps_with_auth_call(db_name, tbl_name, part_vals, max_parts, user_name, group_names, validTxnList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -10266,7 +10326,8 @@ public void get_partitions_ps_with_auth(String db_name, String tbl_name, List group_names; - public get_partitions_ps_with_auth_call(String db_name, String tbl_name, List part_vals, short max_parts, String user_name, List group_names, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String validTxnList; + public get_partitions_ps_with_auth_call(String db_name, String tbl_name, List part_vals, short max_parts, String user_name, List group_names, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.db_name = db_name; this.tbl_name = tbl_name; @@ -10274,6 +10335,7 @@ public get_partitions_ps_with_auth_call(String db_name, String tbl_name, List part_vals, short max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void get_partition_names_ps(String db_name, String tbl_name, List part_vals, short max_parts, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - get_partition_names_ps_call method_call = new get_partition_names_ps_call(db_name, tbl_name, part_vals, max_parts, resultHandler, this, ___protocolFactory, ___transport); + get_partition_names_ps_call method_call = new get_partition_names_ps_call(db_name, tbl_name, part_vals, max_parts, validTxnList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -10311,12 +10374,14 @@ public void get_partition_names_ps(String db_name, String tbl_name, List private String tbl_name; private List 
part_vals; private short max_parts; - public get_partition_names_ps_call(String db_name, String tbl_name, List part_vals, short max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String validTxnList; + public get_partition_names_ps_call(String db_name, String tbl_name, List part_vals, short max_parts, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.db_name = db_name; this.tbl_name = tbl_name; this.part_vals = part_vals; this.max_parts = max_parts; + this.validTxnList = validTxnList; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { @@ -10326,6 +10391,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa args.setTbl_name(tbl_name); args.setPart_vals(part_vals); args.setMax_parts(max_parts); + args.setValidTxnList(validTxnList); args.write(prot); prot.writeMessageEnd(); } @@ -10340,9 +10406,9 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa } } - public void get_partitions_by_filter(String db_name, String tbl_name, String filter, short max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void get_partitions_by_filter(String db_name, String tbl_name, String filter, short max_parts, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - get_partitions_by_filter_call method_call = new get_partitions_by_filter_call(db_name, tbl_name, filter, max_parts, resultHandler, this, ___protocolFactory, ___transport); + get_partitions_by_filter_call method_call = new get_partitions_by_filter_call(db_name, tbl_name, filter, max_parts, validTxnList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -10352,12 +10418,14 @@ public void get_partitions_by_filter(String db_name, String tbl_name, String fil private String tbl_name; private String filter; private short max_parts; - public get_partitions_by_filter_call(String db_name, String tbl_name, String filter, short max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String validTxnList; + public get_partitions_by_filter_call(String db_name, String tbl_name, String filter, short max_parts, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.db_name = db_name; this.tbl_name = tbl_name; this.filter = filter; this.max_parts = max_parts; + this.validTxnList = 
validTxnList; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { @@ -10367,6 +10435,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa args.setTbl_name(tbl_name); args.setFilter(filter); args.setMax_parts(max_parts); + args.setValidTxnList(validTxnList); args.write(prot); prot.writeMessageEnd(); } @@ -10381,9 +10450,9 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa } } - public void get_part_specs_by_filter(String db_name, String tbl_name, String filter, int max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void get_part_specs_by_filter(String db_name, String tbl_name, String filter, int max_parts, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - get_part_specs_by_filter_call method_call = new get_part_specs_by_filter_call(db_name, tbl_name, filter, max_parts, resultHandler, this, ___protocolFactory, ___transport); + get_part_specs_by_filter_call method_call = new get_part_specs_by_filter_call(db_name, tbl_name, filter, max_parts, validTxnList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -10393,12 +10462,14 @@ public void get_part_specs_by_filter(String db_name, String tbl_name, String fil private String tbl_name; private String filter; private int max_parts; - public get_part_specs_by_filter_call(String db_name, String tbl_name, String filter, int max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String validTxnList; + public get_part_specs_by_filter_call(String db_name, String tbl_name, String filter, int max_parts, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.db_name = db_name; this.tbl_name = tbl_name; this.filter = filter; this.max_parts = max_parts; + this.validTxnList = validTxnList; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { @@ -10408,6 +10479,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa args.setTbl_name(tbl_name); args.setFilter(filter); args.setMax_parts(max_parts); + args.setValidTxnList(validTxnList); args.write(prot); prot.writeMessageEnd(); } @@ -10454,9 +10526,9 @@ public PartitionsByExprResult getResult() throws MetaException, NoSuchObjectExce } } - public void get_num_partitions_by_filter(String db_name, String tbl_name, String filter, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void get_num_partitions_by_filter(String db_name, String tbl_name, String filter, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - get_num_partitions_by_filter_call method_call = new get_num_partitions_by_filter_call(db_name, tbl_name, filter, resultHandler, 
this, ___protocolFactory, ___transport); + get_num_partitions_by_filter_call method_call = new get_num_partitions_by_filter_call(db_name, tbl_name, filter, validTxnList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -10465,11 +10537,13 @@ public void get_num_partitions_by_filter(String db_name, String tbl_name, String private String db_name; private String tbl_name; private String filter; - public get_num_partitions_by_filter_call(String db_name, String tbl_name, String filter, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String validTxnList; + public get_num_partitions_by_filter_call(String db_name, String tbl_name, String filter, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.db_name = db_name; this.tbl_name = tbl_name; this.filter = filter; + this.validTxnList = validTxnList; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { @@ -10478,6 +10552,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa args.setDb_name(db_name); args.setTbl_name(tbl_name); args.setFilter(filter); + args.setValidTxnList(validTxnList); args.write(prot); prot.writeMessageEnd(); } @@ -10492,9 +10567,9 @@ public int getResult() throws MetaException, NoSuchObjectException, org.apache.t } } - public void get_partitions_by_names(String db_name, String tbl_name, List names, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void get_partitions_by_names(String db_name, String tbl_name, List names, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - get_partitions_by_names_call method_call = new get_partitions_by_names_call(db_name, tbl_name, names, resultHandler, this, ___protocolFactory, ___transport); + get_partitions_by_names_call method_call = new get_partitions_by_names_call(db_name, tbl_name, names, validTxnList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -10503,11 +10578,13 @@ public void get_partitions_by_names(String db_name, String tbl_name, List names; - public get_partitions_by_names_call(String db_name, String tbl_name, List names, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String validTxnList; + public get_partitions_by_names_call(String db_name, String tbl_name, List names, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, 
protocolFactory, transport, resultHandler, false); this.db_name = db_name; this.tbl_name = tbl_name; this.names = names; + this.validTxnList = validTxnList; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { @@ -10516,6 +10593,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa args.setDb_name(db_name); args.setTbl_name(tbl_name); args.setNames(names); + args.setValidTxnList(validTxnList); args.write(prot); prot.writeMessageEnd(); } @@ -11361,9 +11439,9 @@ public SetPartitionsStatsResponse getResult() throws NoSuchObjectException, Inva } } - public void get_table_column_statistics(String db_name, String tbl_name, String col_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void get_table_column_statistics(String db_name, String tbl_name, String col_name, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - get_table_column_statistics_call method_call = new get_table_column_statistics_call(db_name, tbl_name, col_name, resultHandler, this, ___protocolFactory, ___transport); + get_table_column_statistics_call method_call = new get_table_column_statistics_call(db_name, tbl_name, col_name, validWriteIdList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -11372,11 +11450,13 @@ public void get_table_column_statistics(String db_name, String tbl_name, String private String db_name; private String tbl_name; private String col_name; - public get_table_column_statistics_call(String db_name, String tbl_name, String col_name, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String validWriteIdList; + public get_table_column_statistics_call(String db_name, String tbl_name, String col_name, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.db_name = db_name; this.tbl_name = tbl_name; this.col_name = col_name; + this.validWriteIdList = validWriteIdList; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { @@ -11385,6 +11465,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa args.setDb_name(db_name); args.setTbl_name(tbl_name); args.setCol_name(col_name); + args.setValidWriteIdList(validWriteIdList); args.write(prot); prot.writeMessageEnd(); } @@ -11399,9 +11480,9 @@ public ColumnStatistics getResult() throws NoSuchObjectException, MetaException, } } - public void get_partition_column_statistics(String db_name, String tbl_name, String part_name, String col_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void get_partition_column_statistics(String db_name, String tbl_name, String part_name, String col_name, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException 
{ checkReady(); - get_partition_column_statistics_call method_call = new get_partition_column_statistics_call(db_name, tbl_name, part_name, col_name, resultHandler, this, ___protocolFactory, ___transport); + get_partition_column_statistics_call method_call = new get_partition_column_statistics_call(db_name, tbl_name, part_name, col_name, validWriteIdList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -11411,12 +11492,14 @@ public void get_partition_column_statistics(String db_name, String tbl_name, Str private String tbl_name; private String part_name; private String col_name; - public get_partition_column_statistics_call(String db_name, String tbl_name, String part_name, String col_name, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String validWriteIdList; + public get_partition_column_statistics_call(String db_name, String tbl_name, String part_name, String col_name, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.db_name = db_name; this.tbl_name = tbl_name; this.part_name = part_name; this.col_name = col_name; + this.validWriteIdList = validWriteIdList; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { @@ -11426,6 +11509,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa args.setTbl_name(tbl_name); args.setPart_name(part_name); args.setCol_name(col_name); + args.setValidWriteIdList(validWriteIdList); args.write(prot); prot.writeMessageEnd(); } @@ -15865,7 +15949,7 @@ protected boolean isOneway() { public get_fields_result getResult(I iface, get_fields_args args) throws org.apache.thrift.TException { get_fields_result result = new get_fields_result(); try { - result.success = iface.get_fields(args.db_name, args.table_name); + result.success = iface.get_fields(args.db_name, args.table_name, args.validWriteIdList); } catch (MetaException o1) { result.o1 = o1; } catch (UnknownTableException o2) { @@ -15893,7 +15977,7 @@ protected boolean isOneway() { public get_fields_with_environment_context_result getResult(I iface, get_fields_with_environment_context_args args) throws org.apache.thrift.TException { get_fields_with_environment_context_result result = new get_fields_with_environment_context_result(); try { - result.success = iface.get_fields_with_environment_context(args.db_name, args.table_name, args.environment_context); + result.success = iface.get_fields_with_environment_context(args.db_name, args.table_name, args.environment_context, args.validWriteIdList); } catch (MetaException o1) { result.o1 = o1; } catch (UnknownTableException o2) { @@ -15921,7 +16005,7 @@ protected boolean isOneway() { public get_schema_result getResult(I iface, get_schema_args args) throws org.apache.thrift.TException { get_schema_result result = new get_schema_result(); try { - result.success = iface.get_schema(args.db_name, args.table_name); + result.success = iface.get_schema(args.db_name, args.table_name, 
args.validWriteIdList); } catch (MetaException o1) { result.o1 = o1; } catch (UnknownTableException o2) { @@ -15949,7 +16033,7 @@ protected boolean isOneway() { public get_schema_with_environment_context_result getResult(I iface, get_schema_with_environment_context_args args) throws org.apache.thrift.TException { get_schema_with_environment_context_result result = new get_schema_with_environment_context_result(); try { - result.success = iface.get_schema_with_environment_context(args.db_name, args.table_name, args.environment_context); + result.success = iface.get_schema_with_environment_context(args.db_name, args.table_name, args.environment_context, args.validWriteIdList); } catch (MetaException o1) { result.o1 = o1; } catch (UnknownTableException o2) { @@ -16523,7 +16607,7 @@ protected boolean isOneway() { public get_table_result getResult(I iface, get_table_args args) throws org.apache.thrift.TException { get_table_result result = new get_table_result(); try { - result.success = iface.get_table(args.dbname, args.tbl_name); + result.success = iface.get_table(args.dbname, args.tbl_name, args.validWriteIdList); } catch (MetaException o1) { result.o1 = o1; } catch (NoSuchObjectException o2) { @@ -17223,7 +17307,7 @@ protected boolean isOneway() { public get_partition_result getResult(I iface, get_partition_args args) throws org.apache.thrift.TException { get_partition_result result = new get_partition_result(); try { - result.success = iface.get_partition(args.db_name, args.tbl_name, args.part_vals); + result.success = iface.get_partition(args.db_name, args.tbl_name, args.part_vals, args.validTxnList); } catch (MetaException o1) { result.o1 = o1; } catch (NoSuchObjectException o2) { @@ -17309,7 +17393,7 @@ protected boolean isOneway() { public get_partition_with_auth_result getResult(I iface, get_partition_with_auth_args args) throws org.apache.thrift.TException { get_partition_with_auth_result result = new get_partition_with_auth_result(); try { - result.success = iface.get_partition_with_auth(args.db_name, args.tbl_name, args.part_vals, args.user_name, args.group_names); + result.success = iface.get_partition_with_auth(args.db_name, args.tbl_name, args.part_vals, args.user_name, args.group_names, args.validTxnList); } catch (MetaException o1) { result.o1 = o1; } catch (NoSuchObjectException o2) { @@ -17335,7 +17419,7 @@ protected boolean isOneway() { public get_partition_by_name_result getResult(I iface, get_partition_by_name_args args) throws org.apache.thrift.TException { get_partition_by_name_result result = new get_partition_by_name_result(); try { - result.success = iface.get_partition_by_name(args.db_name, args.tbl_name, args.part_name); + result.success = iface.get_partition_by_name(args.db_name, args.tbl_name, args.part_name, args.validTxnList); } catch (MetaException o1) { result.o1 = o1; } catch (NoSuchObjectException o2) { @@ -17361,7 +17445,7 @@ protected boolean isOneway() { public get_partitions_result getResult(I iface, get_partitions_args args) throws org.apache.thrift.TException { get_partitions_result result = new get_partitions_result(); try { - result.success = iface.get_partitions(args.db_name, args.tbl_name, args.max_parts); + result.success = iface.get_partitions(args.db_name, args.tbl_name, args.max_parts, args.validTxnList); } catch (NoSuchObjectException o1) { result.o1 = o1; } catch (MetaException o2) { @@ -17387,7 +17471,7 @@ protected boolean isOneway() { public get_partitions_with_auth_result getResult(I iface, get_partitions_with_auth_args args) throws 
org.apache.thrift.TException { get_partitions_with_auth_result result = new get_partitions_with_auth_result(); try { - result.success = iface.get_partitions_with_auth(args.db_name, args.tbl_name, args.max_parts, args.user_name, args.group_names); + result.success = iface.get_partitions_with_auth(args.db_name, args.tbl_name, args.max_parts, args.user_name, args.group_names, args.validTxnList); } catch (NoSuchObjectException o1) { result.o1 = o1; } catch (MetaException o2) { @@ -17413,7 +17497,7 @@ protected boolean isOneway() { public get_partitions_pspec_result getResult(I iface, get_partitions_pspec_args args) throws org.apache.thrift.TException { get_partitions_pspec_result result = new get_partitions_pspec_result(); try { - result.success = iface.get_partitions_pspec(args.db_name, args.tbl_name, args.max_parts); + result.success = iface.get_partitions_pspec(args.db_name, args.tbl_name, args.max_parts, args.validTxnList); } catch (NoSuchObjectException o1) { result.o1 = o1; } catch (MetaException o2) { @@ -17439,7 +17523,7 @@ protected boolean isOneway() { public get_partition_names_result getResult(I iface, get_partition_names_args args) throws org.apache.thrift.TException { get_partition_names_result result = new get_partition_names_result(); try { - result.success = iface.get_partition_names(args.db_name, args.tbl_name, args.max_parts); + result.success = iface.get_partition_names(args.db_name, args.tbl_name, args.max_parts, args.validTxnList); } catch (NoSuchObjectException o1) { result.o1 = o1; } catch (MetaException o2) { @@ -17491,7 +17575,7 @@ protected boolean isOneway() { public get_partitions_ps_result getResult(I iface, get_partitions_ps_args args) throws org.apache.thrift.TException { get_partitions_ps_result result = new get_partitions_ps_result(); try { - result.success = iface.get_partitions_ps(args.db_name, args.tbl_name, args.part_vals, args.max_parts); + result.success = iface.get_partitions_ps(args.db_name, args.tbl_name, args.part_vals, args.max_parts, args.validTxnList); } catch (MetaException o1) { result.o1 = o1; } catch (NoSuchObjectException o2) { @@ -17517,7 +17601,7 @@ protected boolean isOneway() { public get_partitions_ps_with_auth_result getResult(I iface, get_partitions_ps_with_auth_args args) throws org.apache.thrift.TException { get_partitions_ps_with_auth_result result = new get_partitions_ps_with_auth_result(); try { - result.success = iface.get_partitions_ps_with_auth(args.db_name, args.tbl_name, args.part_vals, args.max_parts, args.user_name, args.group_names); + result.success = iface.get_partitions_ps_with_auth(args.db_name, args.tbl_name, args.part_vals, args.max_parts, args.user_name, args.group_names, args.validTxnList); } catch (NoSuchObjectException o1) { result.o1 = o1; } catch (MetaException o2) { @@ -17543,7 +17627,7 @@ protected boolean isOneway() { public get_partition_names_ps_result getResult(I iface, get_partition_names_ps_args args) throws org.apache.thrift.TException { get_partition_names_ps_result result = new get_partition_names_ps_result(); try { - result.success = iface.get_partition_names_ps(args.db_name, args.tbl_name, args.part_vals, args.max_parts); + result.success = iface.get_partition_names_ps(args.db_name, args.tbl_name, args.part_vals, args.max_parts, args.validTxnList); } catch (MetaException o1) { result.o1 = o1; } catch (NoSuchObjectException o2) { @@ -17569,7 +17653,7 @@ protected boolean isOneway() { public get_partitions_by_filter_result getResult(I iface, get_partitions_by_filter_args args) throws 
org.apache.thrift.TException { get_partitions_by_filter_result result = new get_partitions_by_filter_result(); try { - result.success = iface.get_partitions_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts); + result.success = iface.get_partitions_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts, args.validTxnList); } catch (MetaException o1) { result.o1 = o1; } catch (NoSuchObjectException o2) { @@ -17595,7 +17679,7 @@ protected boolean isOneway() { public get_part_specs_by_filter_result getResult(I iface, get_part_specs_by_filter_args args) throws org.apache.thrift.TException { get_part_specs_by_filter_result result = new get_part_specs_by_filter_result(); try { - result.success = iface.get_part_specs_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts); + result.success = iface.get_part_specs_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts, args.validTxnList); } catch (MetaException o1) { result.o1 = o1; } catch (NoSuchObjectException o2) { @@ -17647,7 +17731,7 @@ protected boolean isOneway() { public get_num_partitions_by_filter_result getResult(I iface, get_num_partitions_by_filter_args args) throws org.apache.thrift.TException { get_num_partitions_by_filter_result result = new get_num_partitions_by_filter_result(); try { - result.success = iface.get_num_partitions_by_filter(args.db_name, args.tbl_name, args.filter); + result.success = iface.get_num_partitions_by_filter(args.db_name, args.tbl_name, args.filter, args.validTxnList); result.setSuccessIsSet(true); } catch (MetaException o1) { result.o1 = o1; @@ -17674,7 +17758,7 @@ protected boolean isOneway() { public get_partitions_by_names_result getResult(I iface, get_partitions_by_names_args args) throws org.apache.thrift.TException { get_partitions_by_names_result result = new get_partitions_by_names_result(); try { - result.success = iface.get_partitions_by_names(args.db_name, args.tbl_name, args.names); + result.success = iface.get_partitions_by_names(args.db_name, args.tbl_name, args.names, args.validTxnList); } catch (MetaException o1) { result.o1 = o1; } catch (NoSuchObjectException o2) { @@ -18352,7 +18436,7 @@ protected boolean isOneway() { public get_table_column_statistics_result getResult(I iface, get_table_column_statistics_args args) throws org.apache.thrift.TException { get_table_column_statistics_result result = new get_table_column_statistics_result(); try { - result.success = iface.get_table_column_statistics(args.db_name, args.tbl_name, args.col_name); + result.success = iface.get_table_column_statistics(args.db_name, args.tbl_name, args.col_name, args.validWriteIdList); } catch (NoSuchObjectException o1) { result.o1 = o1; } catch (MetaException o2) { @@ -18382,7 +18466,7 @@ protected boolean isOneway() { public get_partition_column_statistics_result getResult(I iface, get_partition_column_statistics_args args) throws org.apache.thrift.TException { get_partition_column_statistics_result result = new get_partition_column_statistics_result(); try { - result.success = iface.get_partition_column_statistics(args.db_name, args.tbl_name, args.part_name, args.col_name); + result.success = iface.get_partition_column_statistics(args.db_name, args.tbl_name, args.part_name, args.col_name, args.validWriteIdList); } catch (NoSuchObjectException o1) { result.o1 = o1; } catch (MetaException o2) { @@ -22535,7 +22619,7 @@ protected boolean isOneway() { } public void start(I iface, get_fields_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) 
throws TException { - iface.get_fields(args.db_name, args.table_name,resultHandler); + iface.get_fields(args.db_name, args.table_name, args.validWriteIdList,resultHandler); } } @@ -22602,7 +22686,7 @@ protected boolean isOneway() { } public void start(I iface, get_fields_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { - iface.get_fields_with_environment_context(args.db_name, args.table_name, args.environment_context,resultHandler); + iface.get_fields_with_environment_context(args.db_name, args.table_name, args.environment_context, args.validWriteIdList,resultHandler); } } @@ -22669,7 +22753,7 @@ protected boolean isOneway() { } public void start(I iface, get_schema_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { - iface.get_schema(args.db_name, args.table_name,resultHandler); + iface.get_schema(args.db_name, args.table_name, args.validWriteIdList,resultHandler); } } @@ -22736,7 +22820,7 @@ protected boolean isOneway() { } public void start(I iface, get_schema_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { - iface.get_schema_with_environment_context(args.db_name, args.table_name, args.environment_context,resultHandler); + iface.get_schema_with_environment_context(args.db_name, args.table_name, args.environment_context, args.validWriteIdList,resultHandler); } } @@ -24086,7 +24170,7 @@ protected boolean isOneway() { } public void start(I iface, get_table_args args, org.apache.thrift.async.AsyncMethodCallback
resultHandler) throws TException { - iface.get_table(args.dbname, args.tbl_name,resultHandler); + iface.get_table(args.dbname, args.tbl_name, args.validWriteIdList,resultHandler); } } @@ -25749,7 +25833,7 @@ protected boolean isOneway() { } public void start(I iface, get_partition_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.get_partition(args.db_name, args.tbl_name, args.part_vals,resultHandler); + iface.get_partition(args.db_name, args.tbl_name, args.part_vals, args.validTxnList,resultHandler); } } @@ -25955,7 +26039,7 @@ protected boolean isOneway() { } public void start(I iface, get_partition_with_auth_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.get_partition_with_auth(args.db_name, args.tbl_name, args.part_vals, args.user_name, args.group_names,resultHandler); + iface.get_partition_with_auth(args.db_name, args.tbl_name, args.part_vals, args.user_name, args.group_names, args.validTxnList,resultHandler); } } @@ -26017,7 +26101,7 @@ protected boolean isOneway() { } public void start(I iface, get_partition_by_name_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.get_partition_by_name(args.db_name, args.tbl_name, args.part_name,resultHandler); + iface.get_partition_by_name(args.db_name, args.tbl_name, args.part_name, args.validTxnList,resultHandler); } } @@ -26079,7 +26163,7 @@ protected boolean isOneway() { } public void start(I iface, get_partitions_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { - iface.get_partitions(args.db_name, args.tbl_name, args.max_parts,resultHandler); + iface.get_partitions(args.db_name, args.tbl_name, args.max_parts, args.validTxnList,resultHandler); } } @@ -26141,7 +26225,7 @@ protected boolean isOneway() { } public void start(I iface, get_partitions_with_auth_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { - iface.get_partitions_with_auth(args.db_name, args.tbl_name, args.max_parts, args.user_name, args.group_names,resultHandler); + iface.get_partitions_with_auth(args.db_name, args.tbl_name, args.max_parts, args.user_name, args.group_names, args.validTxnList,resultHandler); } } @@ -26203,7 +26287,7 @@ protected boolean isOneway() { } public void start(I iface, get_partitions_pspec_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { - iface.get_partitions_pspec(args.db_name, args.tbl_name, args.max_parts,resultHandler); + iface.get_partitions_pspec(args.db_name, args.tbl_name, args.max_parts, args.validTxnList,resultHandler); } } @@ -26265,7 +26349,7 @@ protected boolean isOneway() { } public void start(I iface, get_partition_names_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { - iface.get_partition_names(args.db_name, args.tbl_name, args.max_parts,resultHandler); + iface.get_partition_names(args.db_name, args.tbl_name, args.max_parts, args.validTxnList,resultHandler); } } @@ -26389,7 +26473,7 @@ protected boolean isOneway() { } public void start(I iface, get_partitions_ps_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { - iface.get_partitions_ps(args.db_name, args.tbl_name, args.part_vals, args.max_parts,resultHandler); + iface.get_partitions_ps(args.db_name, args.tbl_name, args.part_vals, args.max_parts, args.validTxnList,resultHandler); } } @@ -26451,7 +26535,7 @@ protected boolean isOneway() { } 
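
The server side is equally uniform: the synchronous Processor getResult() methods and the AsyncProcessor start() methods each forward one extra deserialized field (args.validTxnList, or args.validWriteIdList for the schema and column-statistics calls) to the handler interface. A hedged sketch of the async dispatch, again with simplified stand-in types in place of the generated args class and org.apache.thrift.async.AsyncMethodCallback:

// Stand-ins for one generated AsyncProcessor fragment.
interface PartitionNamesIfaceSketch {
  void getPartitionNames(String dbName, String tblName, short maxParts,
                         String validTxnList,
                         java.util.function.Consumer<java.util.List<String>> resultHandler);
}

final class PartitionNamesArgsSketch {
  String db_name;
  String tbl_name;
  short max_parts;
  String validTxnList; // may be null when an older client omits the field
}

final class PartitionNamesStartSketch {
  // Mirrors start(): a one-line positional forward of every args field,
  // with the new field slotted in just before the result handler.
  void start(PartitionNamesIfaceSketch iface, PartitionNamesArgsSketch args,
             java.util.function.Consumer<java.util.List<String>> resultHandler) {
    iface.getPartitionNames(args.db_name, args.tbl_name, args.max_parts,
                            args.validTxnList, resultHandler);
  }
}
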
public void start(I iface, get_partitions_ps_with_auth_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { - iface.get_partitions_ps_with_auth(args.db_name, args.tbl_name, args.part_vals, args.max_parts, args.user_name, args.group_names,resultHandler); + iface.get_partitions_ps_with_auth(args.db_name, args.tbl_name, args.part_vals, args.max_parts, args.user_name, args.group_names, args.validTxnList,resultHandler); } } @@ -26513,7 +26597,7 @@ protected boolean isOneway() { } public void start(I iface, get_partition_names_ps_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { - iface.get_partition_names_ps(args.db_name, args.tbl_name, args.part_vals, args.max_parts,resultHandler); + iface.get_partition_names_ps(args.db_name, args.tbl_name, args.part_vals, args.max_parts, args.validTxnList,resultHandler); } } @@ -26575,7 +26659,7 @@ protected boolean isOneway() { } public void start(I iface, get_partitions_by_filter_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { - iface.get_partitions_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts,resultHandler); + iface.get_partitions_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts, args.validTxnList,resultHandler); } } @@ -26637,7 +26721,7 @@ protected boolean isOneway() { } public void start(I iface, get_part_specs_by_filter_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { - iface.get_part_specs_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts,resultHandler); + iface.get_part_specs_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts, args.validTxnList,resultHandler); } } @@ -26762,7 +26846,7 @@ protected boolean isOneway() { } public void start(I iface, get_num_partitions_by_filter_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.get_num_partitions_by_filter(args.db_name, args.tbl_name, args.filter,resultHandler); + iface.get_num_partitions_by_filter(args.db_name, args.tbl_name, args.filter, args.validTxnList,resultHandler); } } @@ -26824,7 +26908,7 @@ protected boolean isOneway() { } public void start(I iface, get_partitions_by_names_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { - iface.get_partitions_by_names(args.db_name, args.tbl_name, args.names,resultHandler); + iface.get_partitions_by_names(args.db_name, args.tbl_name, args.names, args.validTxnList,resultHandler); } } @@ -28442,7 +28526,7 @@ protected boolean isOneway() { } public void start(I iface, get_table_column_statistics_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.get_table_column_statistics(args.db_name, args.tbl_name, args.col_name,resultHandler); + iface.get_table_column_statistics(args.db_name, args.tbl_name, args.col_name, args.validWriteIdList,resultHandler); } } @@ -28514,7 +28598,7 @@ protected boolean isOneway() { } public void start(I iface, get_partition_column_statistics_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.get_partition_column_statistics(args.db_name, args.tbl_name, args.part_name, args.col_name,resultHandler); + iface.get_partition_column_statistics(args.db_name, args.tbl_name, args.part_name, args.col_name, args.validWriteIdList,resultHandler); } } @@ -45065,13 +45149,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, 
get_databases_resul case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1160 = iprot.readListBegin(); - struct.success = new ArrayList(_list1160.size); - String _elem1161; - for (int _i1162 = 0; _i1162 < _list1160.size; ++_i1162) + org.apache.thrift.protocol.TList _list1168 = iprot.readListBegin(); + struct.success = new ArrayList(_list1168.size); + String _elem1169; + for (int _i1170 = 0; _i1170 < _list1168.size; ++_i1170) { - _elem1161 = iprot.readString(); - struct.success.add(_elem1161); + _elem1169 = iprot.readString(); + struct.success.add(_elem1169); } iprot.readListEnd(); } @@ -45106,9 +45190,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_databases_resu oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1163 : struct.success) + for (String _iter1171 : struct.success) { - oprot.writeString(_iter1163); + oprot.writeString(_iter1171); } oprot.writeListEnd(); } @@ -45147,9 +45231,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_databases_resul if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1164 : struct.success) + for (String _iter1172 : struct.success) { - oprot.writeString(_iter1164); + oprot.writeString(_iter1172); } } } @@ -45164,13 +45248,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_databases_result BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1165 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1165.size); - String _elem1166; - for (int _i1167 = 0; _i1167 < _list1165.size; ++_i1167) + org.apache.thrift.protocol.TList _list1173 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1173.size); + String _elem1174; + for (int _i1175 = 0; _i1175 < _list1173.size; ++_i1175) { - _elem1166 = iprot.readString(); - struct.success.add(_elem1166); + _elem1174 = iprot.readString(); + struct.success.add(_elem1174); } } struct.setSuccessIsSet(true); @@ -45824,13 +45908,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_databases_r case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1168 = iprot.readListBegin(); - struct.success = new ArrayList(_list1168.size); - String _elem1169; - for (int _i1170 = 0; _i1170 < _list1168.size; ++_i1170) + org.apache.thrift.protocol.TList _list1176 = iprot.readListBegin(); + struct.success = new ArrayList(_list1176.size); + String _elem1177; + for (int _i1178 = 0; _i1178 < _list1176.size; ++_i1178) { - _elem1169 = iprot.readString(); - struct.success.add(_elem1169); + _elem1177 = iprot.readString(); + struct.success.add(_elem1177); } iprot.readListEnd(); } @@ -45865,9 +45949,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_databases_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1171 : struct.success) + for (String _iter1179 : struct.success) { - oprot.writeString(_iter1171); + oprot.writeString(_iter1179); } oprot.writeListEnd(); } @@ -45906,9 +45990,9 @@ 
public void write(org.apache.thrift.protocol.TProtocol prot, get_all_databases_r if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1172 : struct.success) + for (String _iter1180 : struct.success) { - oprot.writeString(_iter1172); + oprot.writeString(_iter1180); } } } @@ -45923,13 +46007,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_databases_re BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1173 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1173.size); - String _elem1174; - for (int _i1175 = 0; _i1175 < _list1173.size; ++_i1175) + org.apache.thrift.protocol.TList _list1181 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1181.size); + String _elem1182; + for (int _i1183 = 0; _i1183 < _list1181.size; ++_i1183) { - _elem1174 = iprot.readString(); - struct.success.add(_elem1174); + _elem1182 = iprot.readString(); + struct.success.add(_elem1182); } } struct.setSuccessIsSet(true); @@ -50536,16 +50620,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_type_all_result case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1176 = iprot.readMapBegin(); - struct.success = new HashMap(2*_map1176.size); - String _key1177; - Type _val1178; - for (int _i1179 = 0; _i1179 < _map1176.size; ++_i1179) + org.apache.thrift.protocol.TMap _map1184 = iprot.readMapBegin(); + struct.success = new HashMap(2*_map1184.size); + String _key1185; + Type _val1186; + for (int _i1187 = 0; _i1187 < _map1184.size; ++_i1187) { - _key1177 = iprot.readString(); - _val1178 = new Type(); - _val1178.read(iprot); - struct.success.put(_key1177, _val1178); + _key1185 = iprot.readString(); + _val1186 = new Type(); + _val1186.read(iprot); + struct.success.put(_key1185, _val1186); } iprot.readMapEnd(); } @@ -50580,10 +50664,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_type_all_resul oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Map.Entry _iter1180 : struct.success.entrySet()) + for (Map.Entry _iter1188 : struct.success.entrySet()) { - oprot.writeString(_iter1180.getKey()); - _iter1180.getValue().write(oprot); + oprot.writeString(_iter1188.getKey()); + _iter1188.getValue().write(oprot); } oprot.writeMapEnd(); } @@ -50622,10 +50706,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_type_all_result if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Map.Entry _iter1181 : struct.success.entrySet()) + for (Map.Entry _iter1189 : struct.success.entrySet()) { - oprot.writeString(_iter1181.getKey()); - _iter1181.getValue().write(oprot); + oprot.writeString(_iter1189.getKey()); + _iter1189.getValue().write(oprot); } } } @@ -50640,16 +50724,16 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_type_all_result BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map1182 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new HashMap(2*_map1182.size); - String 
_key1183; - Type _val1184; - for (int _i1185 = 0; _i1185 < _map1182.size; ++_i1185) + org.apache.thrift.protocol.TMap _map1190 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new HashMap(2*_map1190.size); + String _key1191; + Type _val1192; + for (int _i1193 = 0; _i1193 < _map1190.size; ++_i1193) { - _key1183 = iprot.readString(); - _val1184 = new Type(); - _val1184.read(iprot); - struct.success.put(_key1183, _val1184); + _key1191 = iprot.readString(); + _val1192 = new Type(); + _val1192.read(iprot); + struct.success.put(_key1191, _val1192); } } struct.setSuccessIsSet(true); @@ -50669,6 +50753,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_type_all_result private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("table_name", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)3); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -50678,11 +50763,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_type_all_result private String db_name; // required private String table_name; // required + private String validWriteIdList; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "db_name"), - TABLE_NAME((short)2, "table_name"); + TABLE_NAME((short)2, "table_name"), + VALID_WRITE_ID_LIST((short)3, "validWriteIdList"); private static final Map byName = new HashMap(); @@ -50701,6 +50788,8 @@ public static _Fields findByThriftId(int fieldId) { return DB_NAME; case 2: // TABLE_NAME return TABLE_NAME; + case 3: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; default: return null; } @@ -50748,6 +50837,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("table_name", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_fields_args.class, metaDataMap); } @@ -50757,11 +50848,13 @@ public get_fields_args() { public get_fields_args( String db_name, - String table_name) + String table_name, + String validWriteIdList) { this(); this.db_name = db_name; this.table_name = table_name; + this.validWriteIdList = validWriteIdList; } /** @@ -50774,6 +50867,9 @@ public get_fields_args(get_fields_args other) { if (other.isSetTable_name()) { this.table_name = other.table_name; } + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } } public get_fields_args deepCopy() { @@ -50784,6 +50880,7 @@ public get_fields_args deepCopy() { public void clear() { this.db_name = null; this.table_name = null; + this.validWriteIdList = null; } public String getDb_name() { @@ -50832,6 +50929,29 @@ public void setTable_nameIsSet(boolean value) { } } + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -50850,6 +50970,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + } } @@ -50861,6 +50989,9 @@ public Object getFieldValue(_Fields field) { case TABLE_NAME: return getTable_name(); + case VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + } throw new IllegalStateException(); } @@ -50876,6 +51007,8 @@ public boolean isSet(_Fields field) { return isSetDb_name(); case TABLE_NAME: return isSetTable_name(); + case VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); } throw new IllegalStateException(); } @@ -50911,6 +51044,15 @@ public boolean equals(get_fields_args that) { return false; } + boolean 
this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if (!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + return true; } @@ -50928,6 +51070,11 @@ public int hashCode() { if (present_table_name) list.add(table_name); + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + return list.hashCode(); } @@ -50959,6 +51106,16 @@ public int compareTo(get_fields_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -50994,6 +51151,14 @@ public String toString() { sb.append(this.table_name); } first = false; + if (!first) sb.append(", "); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -51053,6 +51218,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_fields_args str org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 3: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -51076,6 +51249,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_fields_args st oprot.writeString(struct.table_name); oprot.writeFieldEnd(); } + if (struct.validWriteIdList != null) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -51100,19 +51278,25 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_fields_args str if (struct.isSetTable_name()) { optionals.set(1); } - oprot.writeBitSet(optionals, 2); + if (struct.isSetValidWriteIdList()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); if (struct.isSetDb_name()) { oprot.writeString(struct.db_name); } if (struct.isSetTable_name()) { oprot.writeString(struct.table_name); } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, get_fields_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(2); + BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); @@ -51121,6 +51305,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_fields_args stru struct.table_name = iprot.readString(); struct.setTable_nameIsSet(true); } + if (incoming.get(2)) { + 
struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } } } @@ -51684,14 +51872,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_fields_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1186 = iprot.readListBegin(); - struct.success = new ArrayList(_list1186.size); - FieldSchema _elem1187; - for (int _i1188 = 0; _i1188 < _list1186.size; ++_i1188) + org.apache.thrift.protocol.TList _list1194 = iprot.readListBegin(); + struct.success = new ArrayList(_list1194.size); + FieldSchema _elem1195; + for (int _i1196 = 0; _i1196 < _list1194.size; ++_i1196) { - _elem1187 = new FieldSchema(); - _elem1187.read(iprot); - struct.success.add(_elem1187); + _elem1195 = new FieldSchema(); + _elem1195.read(iprot); + struct.success.add(_elem1195); } iprot.readListEnd(); } @@ -51744,9 +51932,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_fields_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter1189 : struct.success) + for (FieldSchema _iter1197 : struct.success) { - _iter1189.write(oprot); + _iter1197.write(oprot); } oprot.writeListEnd(); } @@ -51801,9 +51989,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_fields_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter1190 : struct.success) + for (FieldSchema _iter1198 : struct.success) { - _iter1190.write(oprot); + _iter1198.write(oprot); } } } @@ -51824,14 +52012,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_fields_result st BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1191 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1191.size); - FieldSchema _elem1192; - for (int _i1193 = 0; _i1193 < _list1191.size; ++_i1193) + org.apache.thrift.protocol.TList _list1199 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1199.size); + FieldSchema _elem1200; + for (int _i1201 = 0; _i1201 < _list1199.size; ++_i1201) { - _elem1192 = new FieldSchema(); - _elem1192.read(iprot); - struct.success.add(_elem1192); + _elem1200 = new FieldSchema(); + _elem1200.read(iprot); + struct.success.add(_elem1200); } } struct.setSuccessIsSet(true); @@ -51862,6 +52050,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_fields_result st private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("table_name", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField ENVIRONMENT_CONTEXT_FIELD_DESC = new org.apache.thrift.protocol.TField("environment_context", org.apache.thrift.protocol.TType.STRUCT, (short)3); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)4); private static final Map, SchemeFactory> schemes = new HashMap, 
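/*
 * Reviewer note: the _list1186/_elem1187/_i1188 -> _list1194/_elem1195/_i1196 renames in
 * this hunk (and the matching shifts through the rest of the file) are a mechanical side
 * effect of regeneration: the Thrift compiler numbers its decode temporaries sequentially
 * across the whole generated file, so new fields earlier in the file shift every later
 * index. The behavior is unchanged; with the type parameters written out, each such loop
 * is the canonical generated list decode:
 *
 *   org.apache.thrift.protocol.TList _list = iprot.readListBegin();
 *   struct.success = new java.util.ArrayList<FieldSchema>(_list.size);
 *   for (int _i = 0; _i < _list.size; ++_i) {
 *     FieldSchema _elem = new FieldSchema();
 *     _elem.read(iprot);   // each element is a full Thrift struct
 *     struct.success.add(_elem);
 *   }
 *   iprot.readListEnd();
 */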
SchemeFactory>(); static { @@ -51872,12 +52061,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_fields_result st private String db_name; // required private String table_name; // required private EnvironmentContext environment_context; // required + private String validWriteIdList; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "db_name"), TABLE_NAME((short)2, "table_name"), - ENVIRONMENT_CONTEXT((short)3, "environment_context"); + ENVIRONMENT_CONTEXT((short)3, "environment_context"), + VALID_WRITE_ID_LIST((short)4, "validWriteIdList"); private static final Map byName = new HashMap(); @@ -51898,6 +52089,8 @@ public static _Fields findByThriftId(int fieldId) { return TABLE_NAME; case 3: // ENVIRONMENT_CONTEXT return ENVIRONMENT_CONTEXT; + case 4: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; default: return null; } @@ -51947,6 +52140,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.ENVIRONMENT_CONTEXT, new org.apache.thrift.meta_data.FieldMetaData("environment_context", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, EnvironmentContext.class))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_fields_with_environment_context_args.class, metaDataMap); } @@ -51957,12 +52152,14 @@ public get_fields_with_environment_context_args() { public get_fields_with_environment_context_args( String db_name, String table_name, - EnvironmentContext environment_context) + EnvironmentContext environment_context, + String validWriteIdList) { this(); this.db_name = db_name; this.table_name = table_name; this.environment_context = environment_context; + this.validWriteIdList = validWriteIdList; } /** @@ -51978,6 +52175,9 @@ public get_fields_with_environment_context_args(get_fields_with_environment_cont if (other.isSetEnvironment_context()) { this.environment_context = new EnvironmentContext(other.environment_context); } + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } } public get_fields_with_environment_context_args deepCopy() { @@ -51989,6 +52189,7 @@ public void clear() { this.db_name = null; this.table_name = null; this.environment_context = null; + this.validWriteIdList = null; } public String getDb_name() { @@ -52060,6 +52261,29 @@ public void setEnvironment_contextIsSet(boolean value) { } } + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + public void 
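/*
 * Reviewer note: in get_fields_args and get_schema_args the new field takes Thrift id 3,
 * but in get_fields_with_environment_context_args it takes id 4 because id 3 is already
 * occupied by environment_context. Thrift field ids must stay unique and stable within a
 * struct, so new fields are appended with the next free id. Note also that the generated
 * all-arguments constructor gains a trailing parameter, which is a source-incompatible
 * change for any code that constructed these args directly (db and table names below are
 * illustrative):
 *
 *   // before this patch (no longer compiles against the regenerated class):
 *   //   new get_fields_args("db1", "tbl1");
 *   // after this patch (passing null keeps the field unset):
 *   get_fields_args a = new get_fields_args("db1", "tbl1", null);
 */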
setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -52086,6 +52310,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + } } @@ -52100,6 +52332,9 @@ public Object getFieldValue(_Fields field) { case ENVIRONMENT_CONTEXT: return getEnvironment_context(); + case VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + } throw new IllegalStateException(); } @@ -52117,6 +52352,8 @@ public boolean isSet(_Fields field) { return isSetTable_name(); case ENVIRONMENT_CONTEXT: return isSetEnvironment_context(); + case VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); } throw new IllegalStateException(); } @@ -52161,6 +52398,15 @@ public boolean equals(get_fields_with_environment_context_args that) { return false; } + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if (!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + return true; } @@ -52183,6 +52429,11 @@ public int hashCode() { if (present_environment_context) list.add(environment_context); + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + return list.hashCode(); } @@ -52224,6 +52475,16 @@ public int compareTo(get_fields_with_environment_context_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -52267,6 +52528,14 @@ public String toString() { sb.append(this.environment_context); } first = false; + if (!first) sb.append(", "); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -52338,6 +52607,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_fields_with_env org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 4: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -52366,6 +52643,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_fields_with_en struct.environment_context.write(oprot); oprot.writeFieldEnd(); } + if (struct.validWriteIdList != null) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -52393,7 +52675,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_fields_with_env if 
(struct.isSetEnvironment_context()) { optionals.set(2); } - oprot.writeBitSet(optionals, 3); + if (struct.isSetValidWriteIdList()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); if (struct.isSetDb_name()) { oprot.writeString(struct.db_name); } @@ -52403,12 +52688,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_fields_with_env if (struct.isSetEnvironment_context()) { struct.environment_context.write(oprot); } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, get_fields_with_environment_context_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(3); + BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); @@ -52422,6 +52710,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_fields_with_envi struct.environment_context.read(iprot); struct.setEnvironment_contextIsSet(true); } + if (incoming.get(3)) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } } } @@ -52985,14 +53277,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_fields_with_env case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1194 = iprot.readListBegin(); - struct.success = new ArrayList(_list1194.size); - FieldSchema _elem1195; - for (int _i1196 = 0; _i1196 < _list1194.size; ++_i1196) + org.apache.thrift.protocol.TList _list1202 = iprot.readListBegin(); + struct.success = new ArrayList(_list1202.size); + FieldSchema _elem1203; + for (int _i1204 = 0; _i1204 < _list1202.size; ++_i1204) { - _elem1195 = new FieldSchema(); - _elem1195.read(iprot); - struct.success.add(_elem1195); + _elem1203 = new FieldSchema(); + _elem1203.read(iprot); + struct.success.add(_elem1203); } iprot.readListEnd(); } @@ -53045,9 +53337,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_fields_with_en oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter1197 : struct.success) + for (FieldSchema _iter1205 : struct.success) { - _iter1197.write(oprot); + _iter1205.write(oprot); } oprot.writeListEnd(); } @@ -53102,9 +53394,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_fields_with_env if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter1198 : struct.success) + for (FieldSchema _iter1206 : struct.success) { - _iter1198.write(oprot); + _iter1206.write(oprot); } } } @@ -53125,14 +53417,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_fields_with_envi BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1199 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1199.size); - FieldSchema _elem1200; - for (int _i1201 = 0; _i1201 < _list1199.size; ++_i1201) + org.apache.thrift.protocol.TList _list1207 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1207.size); + FieldSchema _elem1208; + for (int _i1209 = 0; _i1209 < _list1207.size; ++_i1209) { - _elem1200 = 
new FieldSchema(); - _elem1200.read(iprot); - struct.success.add(_elem1200); + _elem1208 = new FieldSchema(); + _elem1208.read(iprot); + struct.success.add(_elem1208); } } struct.setSuccessIsSet(true); @@ -53162,6 +53454,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_fields_with_envi private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("table_name", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)3); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -53171,11 +53464,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_fields_with_envi private String db_name; // required private String table_name; // required + private String validWriteIdList; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "db_name"), - TABLE_NAME((short)2, "table_name"); + TABLE_NAME((short)2, "table_name"), + VALID_WRITE_ID_LIST((short)3, "validWriteIdList"); private static final Map byName = new HashMap(); @@ -53194,6 +53489,8 @@ public static _Fields findByThriftId(int fieldId) { return DB_NAME; case 2: // TABLE_NAME return TABLE_NAME; + case 3: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; default: return null; } @@ -53241,6 +53538,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("table_name", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_schema_args.class, metaDataMap); } @@ -53250,11 +53549,13 @@ public get_schema_args() { public get_schema_args( String db_name, - String table_name) + String table_name, + String validWriteIdList) { this(); this.db_name = db_name; this.table_name = table_name; + this.validWriteIdList = validWriteIdList; } /** @@ -53267,6 +53568,9 @@ public get_schema_args(get_schema_args other) { if (other.isSetTable_name()) { this.table_name = other.table_name; } + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } } public get_schema_args deepCopy() { @@ -53277,6 +53581,7 @@ public get_schema_args deepCopy() { public void clear() { this.db_name = null; this.table_name = null; + this.validWriteIdList = null; } public String getDb_name() { @@ -53325,6 +53630,29 @@ public void setTable_nameIsSet(boolean value) { } } + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + 
this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -53343,6 +53671,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + } } @@ -53354,6 +53690,9 @@ public Object getFieldValue(_Fields field) { case TABLE_NAME: return getTable_name(); + case VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + } throw new IllegalStateException(); } @@ -53369,6 +53708,8 @@ public boolean isSet(_Fields field) { return isSetDb_name(); case TABLE_NAME: return isSetTable_name(); + case VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); } throw new IllegalStateException(); } @@ -53404,6 +53745,15 @@ public boolean equals(get_schema_args that) { return false; } + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if (!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + return true; } @@ -53421,6 +53771,11 @@ public int hashCode() { if (present_table_name) list.add(table_name); + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + return list.hashCode(); } @@ -53452,6 +53807,16 @@ public int compareTo(get_schema_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -53487,6 +53852,14 @@ public String toString() { sb.append(this.table_name); } first = false; + if (!first) sb.append(", "); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -53546,6 +53919,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_schema_args str org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 3: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -53569,6 +53950,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_schema_args st oprot.writeString(struct.table_name); oprot.writeFieldEnd(); } + if 
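/*
 * Reviewer note: on the field-tagged StandardScheme, every field is written with its id and
 * type (writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC)), and the reader's switch falls
 * through to TProtocolUtil.skip(...) for ids it does not know. That default case, visible
 * in the read() hunks above, is what makes appending a trailing field safe for old readers
 * on the binary protocol:
 *
 *   default:  // unknown field id from a newer peer
 *     org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
 *
 * The null guard on the write side plays the same role in reverse: an unset
 * validWriteIdList is never put on the wire, so older servers see the exact pre-patch
 * encoding.
 */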
(struct.validWriteIdList != null) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -53593,19 +53979,25 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_schema_args str if (struct.isSetTable_name()) { optionals.set(1); } - oprot.writeBitSet(optionals, 2); + if (struct.isSetValidWriteIdList()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); if (struct.isSetDb_name()) { oprot.writeString(struct.db_name); } if (struct.isSetTable_name()) { oprot.writeString(struct.table_name); } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(2); + BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); @@ -53614,6 +54006,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_args stru struct.table_name = iprot.readString(); struct.setTable_nameIsSet(true); } + if (incoming.get(2)) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } } } @@ -54177,14 +54573,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_schema_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1202 = iprot.readListBegin(); - struct.success = new ArrayList(_list1202.size); - FieldSchema _elem1203; - for (int _i1204 = 0; _i1204 < _list1202.size; ++_i1204) + org.apache.thrift.protocol.TList _list1210 = iprot.readListBegin(); + struct.success = new ArrayList(_list1210.size); + FieldSchema _elem1211; + for (int _i1212 = 0; _i1212 < _list1210.size; ++_i1212) { - _elem1203 = new FieldSchema(); - _elem1203.read(iprot); - struct.success.add(_elem1203); + _elem1211 = new FieldSchema(); + _elem1211.read(iprot); + struct.success.add(_elem1211); } iprot.readListEnd(); } @@ -54237,9 +54633,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_schema_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter1205 : struct.success) + for (FieldSchema _iter1213 : struct.success) { - _iter1205.write(oprot); + _iter1213.write(oprot); } oprot.writeListEnd(); } @@ -54294,9 +54690,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_schema_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter1206 : struct.success) + for (FieldSchema _iter1214 : struct.success) { - _iter1206.write(oprot); + _iter1214.write(oprot); } } } @@ -54317,14 +54713,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_result st BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1207 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1207.size); - FieldSchema _elem1208; - for (int _i1209 = 0; _i1209 < _list1207.size; ++_i1209) + org.apache.thrift.protocol.TList _list1215 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, 
iprot.readI32()); + struct.success = new ArrayList(_list1215.size); + FieldSchema _elem1216; + for (int _i1217 = 0; _i1217 < _list1215.size; ++_i1217) { - _elem1208 = new FieldSchema(); - _elem1208.read(iprot); - struct.success.add(_elem1208); + _elem1216 = new FieldSchema(); + _elem1216.read(iprot); + struct.success.add(_elem1216); } } struct.setSuccessIsSet(true); @@ -54355,6 +54751,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_result st private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("table_name", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField ENVIRONMENT_CONTEXT_FIELD_DESC = new org.apache.thrift.protocol.TField("environment_context", org.apache.thrift.protocol.TType.STRUCT, (short)3); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)4); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -54365,12 +54762,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_result st private String db_name; // required private String table_name; // required private EnvironmentContext environment_context; // required + private String validWriteIdList; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "db_name"), TABLE_NAME((short)2, "table_name"), - ENVIRONMENT_CONTEXT((short)3, "environment_context"); + ENVIRONMENT_CONTEXT((short)3, "environment_context"), + VALID_WRITE_ID_LIST((short)4, "validWriteIdList"); private static final Map byName = new HashMap(); @@ -54391,6 +54790,8 @@ public static _Fields findByThriftId(int fieldId) { return TABLE_NAME; case 3: // ENVIRONMENT_CONTEXT return ENVIRONMENT_CONTEXT; + case 4: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; default: return null; } @@ -54440,6 +54841,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.ENVIRONMENT_CONTEXT, new org.apache.thrift.meta_data.FieldMetaData("environment_context", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, EnvironmentContext.class))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_schema_with_environment_context_args.class, metaDataMap); } @@ -54450,12 +54853,14 @@ public get_schema_with_environment_context_args() { public get_schema_with_environment_context_args( String db_name, String table_name, - EnvironmentContext environment_context) + EnvironmentContext environment_context, + String validWriteIdList) { this(); this.db_name = db_name; this.table_name = table_name; this.environment_context = environment_context; 
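/*
 * Reviewer note: a caller-side sketch of the new parameter, with illustrative names. The
 * string is the serialized form of a ValidWriteIdList (as produced by its writeToString()),
 * which lets the metastore answer schema calls consistently with the caller's transactional
 * snapshot. The layout below is approximately
 * "<db.table>:<highWatermark>:<minOpenWriteId>:<openWriteIds>:<abortedWriteIds>"; treat the
 * literal as an assumption, not a spec. Assumes the usual imports (java.util.List,
 * org.apache.thrift.TException, and the metastore api classes):
 *
 *   static List<FieldSchema> fetchCols(ThriftHiveMetastore.Client client) throws TException {
 *     String writeIds = "db1.tbl1:5:9223372036854775807::";   // no open or aborted writes
 *     return client.get_fields("db1", "tbl1", writeIds);
 *   }
 */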
+ this.validWriteIdList = validWriteIdList; } /** @@ -54471,6 +54876,9 @@ public get_schema_with_environment_context_args(get_schema_with_environment_cont if (other.isSetEnvironment_context()) { this.environment_context = new EnvironmentContext(other.environment_context); } + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } } public get_schema_with_environment_context_args deepCopy() { @@ -54482,6 +54890,7 @@ public void clear() { this.db_name = null; this.table_name = null; this.environment_context = null; + this.validWriteIdList = null; } public String getDb_name() { @@ -54553,6 +54962,29 @@ public void setEnvironment_contextIsSet(boolean value) { } } + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -54579,6 +55011,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + } } @@ -54593,6 +55033,9 @@ public Object getFieldValue(_Fields field) { case ENVIRONMENT_CONTEXT: return getEnvironment_context(); + case VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + } throw new IllegalStateException(); } @@ -54610,6 +55053,8 @@ public boolean isSet(_Fields field) { return isSetTable_name(); case ENVIRONMENT_CONTEXT: return isSetEnvironment_context(); + case VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); } throw new IllegalStateException(); } @@ -54654,6 +55099,15 @@ public boolean equals(get_schema_with_environment_context_args that) { return false; } + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if (!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + return true; } @@ -54676,6 +55130,11 @@ public int hashCode() { if (present_environment_context) list.add(environment_context); + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + return list.hashCode(); } @@ -54717,6 +55176,16 @@ public int compareTo(get_schema_with_environment_context_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -54760,6 +55229,14 @@ public String toString() { sb.append(this.environment_context); } first = false; + if (!first) sb.append(", 
"); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -54831,6 +55308,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_schema_with_env org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 4: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -54859,6 +55344,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_schema_with_en struct.environment_context.write(oprot); oprot.writeFieldEnd(); } + if (struct.validWriteIdList != null) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -54886,7 +55376,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_schema_with_env if (struct.isSetEnvironment_context()) { optionals.set(2); } - oprot.writeBitSet(optionals, 3); + if (struct.isSetValidWriteIdList()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); if (struct.isSetDb_name()) { oprot.writeString(struct.db_name); } @@ -54896,12 +55389,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_schema_with_env if (struct.isSetEnvironment_context()) { struct.environment_context.write(oprot); } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_with_environment_context_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(3); + BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); @@ -54915,6 +55411,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_with_envi struct.environment_context.read(iprot); struct.setEnvironment_contextIsSet(true); } + if (incoming.get(3)) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } } } @@ -55478,14 +55978,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_schema_with_env case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1210 = iprot.readListBegin(); - struct.success = new ArrayList(_list1210.size); - FieldSchema _elem1211; - for (int _i1212 = 0; _i1212 < _list1210.size; ++_i1212) + org.apache.thrift.protocol.TList _list1218 = iprot.readListBegin(); + struct.success = new ArrayList(_list1218.size); + FieldSchema _elem1219; + for (int _i1220 = 0; _i1220 < _list1218.size; ++_i1220) { - _elem1211 = new FieldSchema(); - _elem1211.read(iprot); - struct.success.add(_elem1211); + _elem1219 = new FieldSchema(); + _elem1219.read(iprot); + struct.success.add(_elem1219); } iprot.readListEnd(); } @@ -55538,9 +56038,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_schema_with_en oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, 
struct.success.size())); - for (FieldSchema _iter1213 : struct.success) + for (FieldSchema _iter1221 : struct.success) { - _iter1213.write(oprot); + _iter1221.write(oprot); } oprot.writeListEnd(); } @@ -55595,9 +56095,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_schema_with_env if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter1214 : struct.success) + for (FieldSchema _iter1222 : struct.success) { - _iter1214.write(oprot); + _iter1222.write(oprot); } } } @@ -55618,14 +56118,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_with_envi BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1215 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1215.size); - FieldSchema _elem1216; - for (int _i1217 = 0; _i1217 < _list1215.size; ++_i1217) + org.apache.thrift.protocol.TList _list1223 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1223.size); + FieldSchema _elem1224; + for (int _i1225 = 0; _i1225 < _list1223.size; ++_i1225) { - _elem1216 = new FieldSchema(); - _elem1216.read(iprot); - struct.success.add(_elem1216); + _elem1224 = new FieldSchema(); + _elem1224.read(iprot); + struct.success.add(_elem1224); } } struct.setSuccessIsSet(true); @@ -58754,14 +59254,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 2: // PRIMARY_KEYS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1218 = iprot.readListBegin(); - struct.primaryKeys = new ArrayList(_list1218.size); - SQLPrimaryKey _elem1219; - for (int _i1220 = 0; _i1220 < _list1218.size; ++_i1220) + org.apache.thrift.protocol.TList _list1226 = iprot.readListBegin(); + struct.primaryKeys = new ArrayList(_list1226.size); + SQLPrimaryKey _elem1227; + for (int _i1228 = 0; _i1228 < _list1226.size; ++_i1228) { - _elem1219 = new SQLPrimaryKey(); - _elem1219.read(iprot); - struct.primaryKeys.add(_elem1219); + _elem1227 = new SQLPrimaryKey(); + _elem1227.read(iprot); + struct.primaryKeys.add(_elem1227); } iprot.readListEnd(); } @@ -58773,14 +59273,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 3: // FOREIGN_KEYS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1221 = iprot.readListBegin(); - struct.foreignKeys = new ArrayList(_list1221.size); - SQLForeignKey _elem1222; - for (int _i1223 = 0; _i1223 < _list1221.size; ++_i1223) + org.apache.thrift.protocol.TList _list1229 = iprot.readListBegin(); + struct.foreignKeys = new ArrayList(_list1229.size); + SQLForeignKey _elem1230; + for (int _i1231 = 0; _i1231 < _list1229.size; ++_i1231) { - _elem1222 = new SQLForeignKey(); - _elem1222.read(iprot); - struct.foreignKeys.add(_elem1222); + _elem1230 = new SQLForeignKey(); + _elem1230.read(iprot); + struct.foreignKeys.add(_elem1230); } iprot.readListEnd(); } @@ -58792,14 +59292,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 4: // UNIQUE_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1224 = iprot.readListBegin(); - struct.uniqueConstraints = new ArrayList(_list1224.size); - SQLUniqueConstraint _elem1225; - for (int _i1226 = 0; _i1226 < 
_list1224.size; ++_i1226) + org.apache.thrift.protocol.TList _list1232 = iprot.readListBegin(); + struct.uniqueConstraints = new ArrayList(_list1232.size); + SQLUniqueConstraint _elem1233; + for (int _i1234 = 0; _i1234 < _list1232.size; ++_i1234) { - _elem1225 = new SQLUniqueConstraint(); - _elem1225.read(iprot); - struct.uniqueConstraints.add(_elem1225); + _elem1233 = new SQLUniqueConstraint(); + _elem1233.read(iprot); + struct.uniqueConstraints.add(_elem1233); } iprot.readListEnd(); } @@ -58811,14 +59311,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 5: // NOT_NULL_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1227 = iprot.readListBegin(); - struct.notNullConstraints = new ArrayList(_list1227.size); - SQLNotNullConstraint _elem1228; - for (int _i1229 = 0; _i1229 < _list1227.size; ++_i1229) + org.apache.thrift.protocol.TList _list1235 = iprot.readListBegin(); + struct.notNullConstraints = new ArrayList(_list1235.size); + SQLNotNullConstraint _elem1236; + for (int _i1237 = 0; _i1237 < _list1235.size; ++_i1237) { - _elem1228 = new SQLNotNullConstraint(); - _elem1228.read(iprot); - struct.notNullConstraints.add(_elem1228); + _elem1236 = new SQLNotNullConstraint(); + _elem1236.read(iprot); + struct.notNullConstraints.add(_elem1236); } iprot.readListEnd(); } @@ -58830,14 +59330,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 6: // DEFAULT_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1230 = iprot.readListBegin(); - struct.defaultConstraints = new ArrayList(_list1230.size); - SQLDefaultConstraint _elem1231; - for (int _i1232 = 0; _i1232 < _list1230.size; ++_i1232) + org.apache.thrift.protocol.TList _list1238 = iprot.readListBegin(); + struct.defaultConstraints = new ArrayList(_list1238.size); + SQLDefaultConstraint _elem1239; + for (int _i1240 = 0; _i1240 < _list1238.size; ++_i1240) { - _elem1231 = new SQLDefaultConstraint(); - _elem1231.read(iprot); - struct.defaultConstraints.add(_elem1231); + _elem1239 = new SQLDefaultConstraint(); + _elem1239.read(iprot); + struct.defaultConstraints.add(_elem1239); } iprot.readListEnd(); } @@ -58849,14 +59349,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 7: // CHECK_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1233 = iprot.readListBegin(); - struct.checkConstraints = new ArrayList(_list1233.size); - SQLCheckConstraint _elem1234; - for (int _i1235 = 0; _i1235 < _list1233.size; ++_i1235) + org.apache.thrift.protocol.TList _list1241 = iprot.readListBegin(); + struct.checkConstraints = new ArrayList(_list1241.size); + SQLCheckConstraint _elem1242; + for (int _i1243 = 0; _i1243 < _list1241.size; ++_i1243) { - _elem1234 = new SQLCheckConstraint(); - _elem1234.read(iprot); - struct.checkConstraints.add(_elem1234); + _elem1242 = new SQLCheckConstraint(); + _elem1242.read(iprot); + struct.checkConstraints.add(_elem1242); } iprot.readListEnd(); } @@ -58887,9 +59387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(PRIMARY_KEYS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.primaryKeys.size())); - for (SQLPrimaryKey _iter1236 : struct.primaryKeys) + for (SQLPrimaryKey _iter1244 : 
struct.primaryKeys) { - _iter1236.write(oprot); + _iter1244.write(oprot); } oprot.writeListEnd(); } @@ -58899,9 +59399,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(FOREIGN_KEYS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.foreignKeys.size())); - for (SQLForeignKey _iter1237 : struct.foreignKeys) + for (SQLForeignKey _iter1245 : struct.foreignKeys) { - _iter1237.write(oprot); + _iter1245.write(oprot); } oprot.writeListEnd(); } @@ -58911,9 +59411,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(UNIQUE_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.uniqueConstraints.size())); - for (SQLUniqueConstraint _iter1238 : struct.uniqueConstraints) + for (SQLUniqueConstraint _iter1246 : struct.uniqueConstraints) { - _iter1238.write(oprot); + _iter1246.write(oprot); } oprot.writeListEnd(); } @@ -58923,9 +59423,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(NOT_NULL_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.notNullConstraints.size())); - for (SQLNotNullConstraint _iter1239 : struct.notNullConstraints) + for (SQLNotNullConstraint _iter1247 : struct.notNullConstraints) { - _iter1239.write(oprot); + _iter1247.write(oprot); } oprot.writeListEnd(); } @@ -58935,9 +59435,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(DEFAULT_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.defaultConstraints.size())); - for (SQLDefaultConstraint _iter1240 : struct.defaultConstraints) + for (SQLDefaultConstraint _iter1248 : struct.defaultConstraints) { - _iter1240.write(oprot); + _iter1248.write(oprot); } oprot.writeListEnd(); } @@ -58947,9 +59447,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(CHECK_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.checkConstraints.size())); - for (SQLCheckConstraint _iter1241 : struct.checkConstraints) + for (SQLCheckConstraint _iter1249 : struct.checkConstraints) { - _iter1241.write(oprot); + _iter1249.write(oprot); } oprot.writeListEnd(); } @@ -59001,54 +59501,54 @@ public void write(org.apache.thrift.protocol.TProtocol prot, create_table_with_c if (struct.isSetPrimaryKeys()) { { oprot.writeI32(struct.primaryKeys.size()); - for (SQLPrimaryKey _iter1242 : struct.primaryKeys) + for (SQLPrimaryKey _iter1250 : struct.primaryKeys) { - _iter1242.write(oprot); + _iter1250.write(oprot); } } } if (struct.isSetForeignKeys()) { { oprot.writeI32(struct.foreignKeys.size()); - for (SQLForeignKey _iter1243 : struct.foreignKeys) + for (SQLForeignKey _iter1251 : struct.foreignKeys) { - _iter1243.write(oprot); + _iter1251.write(oprot); } } } if (struct.isSetUniqueConstraints()) { { oprot.writeI32(struct.uniqueConstraints.size()); - for (SQLUniqueConstraint _iter1244 : struct.uniqueConstraints) + for (SQLUniqueConstraint _iter1252 : struct.uniqueConstraints) { - _iter1244.write(oprot); + _iter1252.write(oprot); } } } if (struct.isSetNotNullConstraints()) { { 
oprot.writeI32(struct.notNullConstraints.size()); - for (SQLNotNullConstraint _iter1245 : struct.notNullConstraints) + for (SQLNotNullConstraint _iter1253 : struct.notNullConstraints) { - _iter1245.write(oprot); + _iter1253.write(oprot); } } } if (struct.isSetDefaultConstraints()) { { oprot.writeI32(struct.defaultConstraints.size()); - for (SQLDefaultConstraint _iter1246 : struct.defaultConstraints) + for (SQLDefaultConstraint _iter1254 : struct.defaultConstraints) { - _iter1246.write(oprot); + _iter1254.write(oprot); } } } if (struct.isSetCheckConstraints()) { { oprot.writeI32(struct.checkConstraints.size()); - for (SQLCheckConstraint _iter1247 : struct.checkConstraints) + for (SQLCheckConstraint _iter1255 : struct.checkConstraints) { - _iter1247.write(oprot); + _iter1255.write(oprot); } } } @@ -59065,84 +59565,84 @@ public void read(org.apache.thrift.protocol.TProtocol prot, create_table_with_co } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list1248 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.primaryKeys = new ArrayList(_list1248.size); - SQLPrimaryKey _elem1249; - for (int _i1250 = 0; _i1250 < _list1248.size; ++_i1250) + org.apache.thrift.protocol.TList _list1256 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.primaryKeys = new ArrayList(_list1256.size); + SQLPrimaryKey _elem1257; + for (int _i1258 = 0; _i1258 < _list1256.size; ++_i1258) { - _elem1249 = new SQLPrimaryKey(); - _elem1249.read(iprot); - struct.primaryKeys.add(_elem1249); + _elem1257 = new SQLPrimaryKey(); + _elem1257.read(iprot); + struct.primaryKeys.add(_elem1257); } } struct.setPrimaryKeysIsSet(true); } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1251 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.foreignKeys = new ArrayList(_list1251.size); - SQLForeignKey _elem1252; - for (int _i1253 = 0; _i1253 < _list1251.size; ++_i1253) + org.apache.thrift.protocol.TList _list1259 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.foreignKeys = new ArrayList(_list1259.size); + SQLForeignKey _elem1260; + for (int _i1261 = 0; _i1261 < _list1259.size; ++_i1261) { - _elem1252 = new SQLForeignKey(); - _elem1252.read(iprot); - struct.foreignKeys.add(_elem1252); + _elem1260 = new SQLForeignKey(); + _elem1260.read(iprot); + struct.foreignKeys.add(_elem1260); } } struct.setForeignKeysIsSet(true); } if (incoming.get(3)) { { - org.apache.thrift.protocol.TList _list1254 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.uniqueConstraints = new ArrayList(_list1254.size); - SQLUniqueConstraint _elem1255; - for (int _i1256 = 0; _i1256 < _list1254.size; ++_i1256) + org.apache.thrift.protocol.TList _list1262 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.uniqueConstraints = new ArrayList(_list1262.size); + SQLUniqueConstraint _elem1263; + for (int _i1264 = 0; _i1264 < _list1262.size; ++_i1264) { - _elem1255 = new SQLUniqueConstraint(); - _elem1255.read(iprot); - struct.uniqueConstraints.add(_elem1255); + _elem1263 = new SQLUniqueConstraint(); + _elem1263.read(iprot); + struct.uniqueConstraints.add(_elem1263); } } struct.setUniqueConstraintsIsSet(true); } if (incoming.get(4)) { { - org.apache.thrift.protocol.TList _list1257 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.notNullConstraints = new ArrayList(_list1257.size); - SQLNotNullConstraint _elem1258; - for (int _i1259 = 0; _i1259 < _list1257.size; ++_i1259) + org.apache.thrift.protocol.TList _list1265 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.notNullConstraints = new ArrayList(_list1265.size); + SQLNotNullConstraint _elem1266; + for (int _i1267 = 0; _i1267 < _list1265.size; ++_i1267) { - _elem1258 = new SQLNotNullConstraint(); - _elem1258.read(iprot); - struct.notNullConstraints.add(_elem1258); + _elem1266 = new SQLNotNullConstraint(); + _elem1266.read(iprot); + struct.notNullConstraints.add(_elem1266); } } struct.setNotNullConstraintsIsSet(true); } if (incoming.get(5)) { { - org.apache.thrift.protocol.TList _list1260 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.defaultConstraints = new ArrayList(_list1260.size); - SQLDefaultConstraint _elem1261; - for (int _i1262 = 0; _i1262 < _list1260.size; ++_i1262) + org.apache.thrift.protocol.TList _list1268 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.defaultConstraints = new ArrayList(_list1268.size); + SQLDefaultConstraint _elem1269; + for (int _i1270 = 0; _i1270 < _list1268.size; ++_i1270) { - _elem1261 = new SQLDefaultConstraint(); - _elem1261.read(iprot); - struct.defaultConstraints.add(_elem1261); + _elem1269 = new SQLDefaultConstraint(); + _elem1269.read(iprot); + struct.defaultConstraints.add(_elem1269); } } struct.setDefaultConstraintsIsSet(true); } if (incoming.get(6)) { { - org.apache.thrift.protocol.TList _list1263 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.checkConstraints = new ArrayList(_list1263.size); - SQLCheckConstraint _elem1264; - for (int _i1265 = 0; _i1265 < _list1263.size; ++_i1265) + org.apache.thrift.protocol.TList _list1271 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.checkConstraints = new ArrayList(_list1271.size); + SQLCheckConstraint _elem1272; + for (int _i1273 = 0; _i1273 < _list1271.size; ++_i1273) { - _elem1264 = new SQLCheckConstraint(); - _elem1264.read(iprot); - struct.checkConstraints.add(_elem1264); + _elem1272 = new SQLCheckConstraint(); + _elem1272.read(iprot); + struct.checkConstraints.add(_elem1272); } } struct.setCheckConstraintsIsSet(true); @@ -69333,13 +69833,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, truncate_table_args case 3: // PART_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1266 = iprot.readListBegin(); - struct.partNames = new ArrayList(_list1266.size); - String _elem1267; - for (int _i1268 = 0; _i1268 < _list1266.size; ++_i1268) + org.apache.thrift.protocol.TList _list1274 = iprot.readListBegin(); + struct.partNames = new ArrayList(_list1274.size); + String _elem1275; + for (int _i1276 = 0; _i1276 < _list1274.size; ++_i1276) { - _elem1267 = iprot.readString(); - struct.partNames.add(_elem1267); + _elem1275 = iprot.readString(); + struct.partNames.add(_elem1275); } iprot.readListEnd(); } @@ -69375,9 +69875,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, truncate_table_arg oprot.writeFieldBegin(PART_NAMES_FIELD_DESC); { oprot.writeListBegin(new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partNames.size())); - for (String _iter1269 : struct.partNames) + for (String _iter1277 : struct.partNames) { - oprot.writeString(_iter1269); + oprot.writeString(_iter1277); } oprot.writeListEnd(); } @@ -69420,9 +69920,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, truncate_table_args if (struct.isSetPartNames()) { { oprot.writeI32(struct.partNames.size()); - for (String _iter1270 : struct.partNames) + for (String _iter1278 : struct.partNames) { - oprot.writeString(_iter1270); + oprot.writeString(_iter1278); } } } @@ -69442,13 +69942,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, truncate_table_args } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1271 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partNames = new ArrayList(_list1271.size); - String _elem1272; - for (int _i1273 = 0; _i1273 < _list1271.size; ++_i1273) + org.apache.thrift.protocol.TList _list1279 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partNames = new ArrayList(_list1279.size); + String _elem1280; + for (int _i1281 = 0; _i1281 < _list1279.size; ++_i1281) { - _elem1272 = iprot.readString(); - struct.partNames.add(_elem1272); + _elem1280 = iprot.readString(); + struct.partNames.add(_elem1280); } } struct.setPartNamesIsSet(true); @@ -71505,13 +72005,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1274 = iprot.readListBegin(); - struct.success = new ArrayList(_list1274.size); - String _elem1275; - for (int _i1276 = 0; _i1276 < _list1274.size; ++_i1276) + org.apache.thrift.protocol.TList _list1282 = iprot.readListBegin(); + struct.success = new ArrayList(_list1282.size); + String _elem1283; + for (int _i1284 = 0; _i1284 < _list1282.size; ++_i1284) { - _elem1275 = iprot.readString(); - struct.success.add(_elem1275); + _elem1283 = iprot.readString(); + struct.success.add(_elem1283); } iprot.readListEnd(); } @@ -71546,9 +72046,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_tables_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1277 : struct.success) + for (String _iter1285 : struct.success) { - oprot.writeString(_iter1277); + oprot.writeString(_iter1285); } oprot.writeListEnd(); } @@ -71587,9 +72087,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_tables_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1278 : struct.success) + for (String _iter1286 : struct.success) { - oprot.writeString(_iter1278); + oprot.writeString(_iter1286); } } } @@ -71604,13 +72104,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_tables_result st BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1279 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1279.size); - String _elem1280; - for (int _i1281 = 0; _i1281 < _list1279.size; ++_i1281) + org.apache.thrift.protocol.TList _list1287 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1287.size); + String _elem1288; + for (int _i1289 = 0; _i1289 < _list1287.size; ++_i1289) { - _elem1280 = iprot.readString(); - struct.success.add(_elem1280); + _elem1288 = iprot.readString(); + struct.success.add(_elem1288); } } struct.setSuccessIsSet(true); @@ -72584,13 +73084,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_by_type_ case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1282 = iprot.readListBegin(); - struct.success = new ArrayList(_list1282.size); - String _elem1283; - for (int _i1284 = 0; _i1284 < _list1282.size; ++_i1284) + org.apache.thrift.protocol.TList _list1290 = iprot.readListBegin(); + struct.success = new ArrayList(_list1290.size); + String _elem1291; + for (int _i1292 = 0; _i1292 < _list1290.size; ++_i1292) { - _elem1283 = iprot.readString(); - struct.success.add(_elem1283); + _elem1291 = iprot.readString(); + struct.success.add(_elem1291); } iprot.readListEnd(); } @@ -72625,9 +73125,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_tables_by_type oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1285 : struct.success) + for (String _iter1293 : struct.success) { - oprot.writeString(_iter1285); + oprot.writeString(_iter1293); } oprot.writeListEnd(); } @@ -72666,9 +73166,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_tables_by_type_ if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1286 : struct.success) + for (String _iter1294 : struct.success) { - oprot.writeString(_iter1286); + oprot.writeString(_iter1294); } } } @@ -72683,13 +73183,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_tables_by_type_r BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1287 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1287.size); - String _elem1288; - for (int _i1289 = 0; _i1289 < _list1287.size; ++_i1289) + org.apache.thrift.protocol.TList _list1295 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1295.size); + String _elem1296; + for (int _i1297 = 0; _i1297 < _list1295.size; ++_i1297) { - _elem1288 = iprot.readString(); - struct.success.add(_elem1288); + _elem1296 = iprot.readString(); + struct.success.add(_elem1296); } } struct.setSuccessIsSet(true); @@ -73346,14 +73846,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_materialize case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1290 = iprot.readListBegin(); - struct.success = new ArrayList
(_list1290.size); - Table _elem1291; - for (int _i1292 = 0; _i1292 < _list1290.size; ++_i1292) + org.apache.thrift.protocol.TList _list1298 = iprot.readListBegin(); + struct.success = new ArrayList
(_list1298.size); + Table _elem1299; + for (int _i1300 = 0; _i1300 < _list1298.size; ++_i1300) { - _elem1291 = new Table(); - _elem1291.read(iprot); - struct.success.add(_elem1291); + _elem1299 = new Table(); + _elem1299.read(iprot); + struct.success.add(_elem1299); } iprot.readListEnd(); } @@ -73388,9 +73888,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_materializ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Table _iter1293 : struct.success) + for (Table _iter1301 : struct.success) { - _iter1293.write(oprot); + _iter1301.write(oprot); } oprot.writeListEnd(); } @@ -73429,9 +73929,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_materialize if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Table _iter1294 : struct.success) + for (Table _iter1302 : struct.success) { - _iter1294.write(oprot); + _iter1302.write(oprot); } } } @@ -73446,14 +73946,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_materialized BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1295 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList
(_list1295.size); - Table _elem1296; - for (int _i1297 = 0; _i1297 < _list1295.size; ++_i1297) + org.apache.thrift.protocol.TList _list1303 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList
(_list1303.size); + Table _elem1304; + for (int _i1305 = 0; _i1305 < _list1303.size; ++_i1305) { - _elem1296 = new Table(); - _elem1296.read(iprot); - struct.success.add(_elem1296); + _elem1304 = new Table(); + _elem1304.read(iprot); + struct.success.add(_elem1304); } } struct.setSuccessIsSet(true); @@ -74219,13 +74719,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_materialized_vi case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1298 = iprot.readListBegin(); - struct.success = new ArrayList(_list1298.size); - String _elem1299; - for (int _i1300 = 0; _i1300 < _list1298.size; ++_i1300) + org.apache.thrift.protocol.TList _list1306 = iprot.readListBegin(); + struct.success = new ArrayList(_list1306.size); + String _elem1307; + for (int _i1308 = 0; _i1308 < _list1306.size; ++_i1308) { - _elem1299 = iprot.readString(); - struct.success.add(_elem1299); + _elem1307 = iprot.readString(); + struct.success.add(_elem1307); } iprot.readListEnd(); } @@ -74260,9 +74760,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_materialized_v oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1301 : struct.success) + for (String _iter1309 : struct.success) { - oprot.writeString(_iter1301); + oprot.writeString(_iter1309); } oprot.writeListEnd(); } @@ -74301,9 +74801,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_materialized_vi if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1302 : struct.success) + for (String _iter1310 : struct.success) { - oprot.writeString(_iter1302); + oprot.writeString(_iter1310); } } } @@ -74318,13 +74818,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_materialized_vie BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1303 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1303.size); - String _elem1304; - for (int _i1305 = 0; _i1305 < _list1303.size; ++_i1305) + org.apache.thrift.protocol.TList _list1311 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1311.size); + String _elem1312; + for (int _i1313 = 0; _i1313 < _list1311.size; ++_i1313) { - _elem1304 = iprot.readString(); - struct.success.add(_elem1304); + _elem1312 = iprot.readString(); + struct.success.add(_elem1312); } } struct.setSuccessIsSet(true); @@ -74829,13 +75329,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_meta_args case 3: // TBL_TYPES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1306 = iprot.readListBegin(); - struct.tbl_types = new ArrayList(_list1306.size); - String _elem1307; - for (int _i1308 = 0; _i1308 < _list1306.size; ++_i1308) + org.apache.thrift.protocol.TList _list1314 = iprot.readListBegin(); + struct.tbl_types = new ArrayList(_list1314.size); + String _elem1315; + for (int _i1316 = 0; _i1316 < _list1314.size; ++_i1316) { - _elem1307 = iprot.readString(); - struct.tbl_types.add(_elem1307); + _elem1315 = iprot.readString(); + struct.tbl_types.add(_elem1315); } iprot.readListEnd(); } @@ -74871,9 +75371,9 @@ public void write(org.apache.thrift.protocol.TProtocol 
oprot, get_table_meta_arg oprot.writeFieldBegin(TBL_TYPES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_types.size())); - for (String _iter1309 : struct.tbl_types) + for (String _iter1317 : struct.tbl_types) { - oprot.writeString(_iter1309); + oprot.writeString(_iter1317); } oprot.writeListEnd(); } @@ -74916,9 +75416,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_meta_args if (struct.isSetTbl_types()) { { oprot.writeI32(struct.tbl_types.size()); - for (String _iter1310 : struct.tbl_types) + for (String _iter1318 : struct.tbl_types) { - oprot.writeString(_iter1310); + oprot.writeString(_iter1318); } } } @@ -74938,13 +75438,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_meta_args } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1311 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tbl_types = new ArrayList(_list1311.size); - String _elem1312; - for (int _i1313 = 0; _i1313 < _list1311.size; ++_i1313) + org.apache.thrift.protocol.TList _list1319 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tbl_types = new ArrayList(_list1319.size); + String _elem1320; + for (int _i1321 = 0; _i1321 < _list1319.size; ++_i1321) { - _elem1312 = iprot.readString(); - struct.tbl_types.add(_elem1312); + _elem1320 = iprot.readString(); + struct.tbl_types.add(_elem1320); } } struct.setTbl_typesIsSet(true); @@ -75350,14 +75850,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_meta_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1314 = iprot.readListBegin(); - struct.success = new ArrayList(_list1314.size); - TableMeta _elem1315; - for (int _i1316 = 0; _i1316 < _list1314.size; ++_i1316) + org.apache.thrift.protocol.TList _list1322 = iprot.readListBegin(); + struct.success = new ArrayList(_list1322.size); + TableMeta _elem1323; + for (int _i1324 = 0; _i1324 < _list1322.size; ++_i1324) { - _elem1315 = new TableMeta(); - _elem1315.read(iprot); - struct.success.add(_elem1315); + _elem1323 = new TableMeta(); + _elem1323.read(iprot); + struct.success.add(_elem1323); } iprot.readListEnd(); } @@ -75392,9 +75892,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_meta_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (TableMeta _iter1317 : struct.success) + for (TableMeta _iter1325 : struct.success) { - _iter1317.write(oprot); + _iter1325.write(oprot); } oprot.writeListEnd(); } @@ -75433,9 +75933,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_meta_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (TableMeta _iter1318 : struct.success) + for (TableMeta _iter1326 : struct.success) { - _iter1318.write(oprot); + _iter1326.write(oprot); } } } @@ -75450,14 +75950,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_meta_resul BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1319 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1319.size); - TableMeta _elem1320; - for (int _i1321 = 0; 
_i1321 < _list1319.size; ++_i1321) + org.apache.thrift.protocol.TList _list1327 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1327.size); + TableMeta _elem1328; + for (int _i1329 = 0; _i1329 < _list1327.size; ++_i1329) { - _elem1320 = new TableMeta(); - _elem1320.read(iprot); - struct.success.add(_elem1320); + _elem1328 = new TableMeta(); + _elem1328.read(iprot); + struct.success.add(_elem1328); } } struct.setSuccessIsSet(true); @@ -76223,13 +76723,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_tables_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1322 = iprot.readListBegin(); - struct.success = new ArrayList(_list1322.size); - String _elem1323; - for (int _i1324 = 0; _i1324 < _list1322.size; ++_i1324) + org.apache.thrift.protocol.TList _list1330 = iprot.readListBegin(); + struct.success = new ArrayList(_list1330.size); + String _elem1331; + for (int _i1332 = 0; _i1332 < _list1330.size; ++_i1332) { - _elem1323 = iprot.readString(); - struct.success.add(_elem1323); + _elem1331 = iprot.readString(); + struct.success.add(_elem1331); } iprot.readListEnd(); } @@ -76264,9 +76764,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_tables_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1325 : struct.success) + for (String _iter1333 : struct.success) { - oprot.writeString(_iter1325); + oprot.writeString(_iter1333); } oprot.writeListEnd(); } @@ -76305,9 +76805,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_tables_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1326 : struct.success) + for (String _iter1334 : struct.success) { - oprot.writeString(_iter1326); + oprot.writeString(_iter1334); } } } @@ -76322,13 +76822,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_tables_resul BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1327 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1327.size); - String _elem1328; - for (int _i1329 = 0; _i1329 < _list1327.size; ++_i1329) + org.apache.thrift.protocol.TList _list1335 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1335.size); + String _elem1336; + for (int _i1337 = 0; _i1337 < _list1335.size; ++_i1337) { - _elem1328 = iprot.readString(); - struct.success.add(_elem1328); + _elem1336 = iprot.readString(); + struct.success.add(_elem1336); } } struct.setSuccessIsSet(true); @@ -76348,6 +76848,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_tables_resul private static final org.apache.thrift.protocol.TField DBNAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbname", org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", 
org.apache.thrift.protocol.TType.STRING, (short)3);
 
   private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>();
   static {
@@ -76357,11 +76858,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_tables_resul
   private String dbname; // required
   private String tbl_name; // required
+  private String validWriteIdList; // required
 
   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
     DBNAME((short)1, "dbname"),
-    TBL_NAME((short)2, "tbl_name");
+    TBL_NAME((short)2, "tbl_name"),
+    VALID_WRITE_ID_LIST((short)3, "validWriteIdList");
 
     private static final Map byName = new HashMap();
@@ -76380,6 +76883,8 @@ public static _Fields findByThriftId(int fieldId) {
           return DBNAME;
         case 2: // TBL_NAME
           return TBL_NAME;
+        case 3: // VALID_WRITE_ID_LIST
+          return VALID_WRITE_ID_LIST;
         default:
           return null;
       }
@@ -76427,6 +76932,8 @@ public String getFieldName() {
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
     tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tbl_name", org.apache.thrift.TFieldRequirementType.DEFAULT,
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.DEFAULT,
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
     metaDataMap = Collections.unmodifiableMap(tmpMap);
     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_table_args.class, metaDataMap);
   }
@@ -76436,11 +76943,13 @@ public get_table_args() {
   public get_table_args(
     String dbname,
-    String tbl_name)
+    String tbl_name,
+    String validWriteIdList)
   {
     this();
     this.dbname = dbname;
     this.tbl_name = tbl_name;
+    this.validWriteIdList = validWriteIdList;
   }
 
   /**
@@ -76453,6 +76962,9 @@ public get_table_args(get_table_args other) {
     if (other.isSetTbl_name()) {
       this.tbl_name = other.tbl_name;
     }
+    if (other.isSetValidWriteIdList()) {
+      this.validWriteIdList = other.validWriteIdList;
+    }
   }
 
   public get_table_args deepCopy() {
@@ -76463,6 +76975,7 @@ public get_table_args deepCopy() {
   public void clear() {
     this.dbname = null;
     this.tbl_name = null;
+    this.validWriteIdList = null;
   }
 
   public String getDbname() {
@@ -76511,6 +77024,29 @@ public void setTbl_nameIsSet(boolean value) {
     }
   }
 
+  public String getValidWriteIdList() {
+    return this.validWriteIdList;
+  }
+
+  public void setValidWriteIdList(String validWriteIdList) {
+    this.validWriteIdList = validWriteIdList;
+  }
+
+  public void unsetValidWriteIdList() {
+    this.validWriteIdList = null;
+  }
+
+  /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */
+  public boolean isSetValidWriteIdList() {
+    return this.validWriteIdList != null;
+  }
+
+  public void setValidWriteIdListIsSet(boolean value) {
+    if (!value) {
+      this.validWriteIdList = null;
+    }
+  }
+
   public void setFieldValue(_Fields field, Object value) {
     switch (field) {
     case DBNAME:
@@ -76529,6 +77065,14 @@ public void setFieldValue(_Fields field, Object value) {
       }
       break;
 
+    case VALID_WRITE_ID_LIST:
+      if (value == null) {
+        unsetValidWriteIdList();
+      } else {
+        setValidWriteIdList((String)value);
+      }
+      break;
+
     }
   }
 
@@ -76540,6 +77084,9 @@ public Object getFieldValue(_Fields field) {
     case TBL_NAME:
       return getTbl_name();
 
+    case 
VALID_WRITE_ID_LIST:
+      return getValidWriteIdList();
+
     }
     throw new IllegalStateException();
   }
@@ -76555,6 +77102,8 @@ public boolean isSet(_Fields field) {
       return isSetDbname();
     case TBL_NAME:
       return isSetTbl_name();
+    case VALID_WRITE_ID_LIST:
+      return isSetValidWriteIdList();
     }
     throw new IllegalStateException();
   }
@@ -76590,6 +77139,15 @@ public boolean equals(get_table_args that) {
         return false;
     }
 
+    boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList();
+    boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList();
+    if (this_present_validWriteIdList || that_present_validWriteIdList) {
+      if (!(this_present_validWriteIdList && that_present_validWriteIdList))
+        return false;
+      if (!this.validWriteIdList.equals(that.validWriteIdList))
+        return false;
+    }
+
     return true;
   }
@@ -76607,6 +77165,11 @@ public int hashCode() {
     if (present_tbl_name)
       list.add(tbl_name);
 
+    boolean present_validWriteIdList = true && (isSetValidWriteIdList());
+    list.add(present_validWriteIdList);
+    if (present_validWriteIdList)
+      list.add(validWriteIdList);
+
     return list.hashCode();
   }
@@ -76638,6 +77201,16 @@ public int compareTo(get_table_args other) {
         return lastComparison;
       }
     }
+    lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetValidWriteIdList()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
     return 0;
   }
@@ -76673,6 +77246,14 @@ public String toString() {
       sb.append(this.tbl_name);
     }
     first = false;
+    if (!first) sb.append(", ");
+    sb.append("validWriteIdList:");
+    if (this.validWriteIdList == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.validWriteIdList);
+    }
+    first = false;
     sb.append(")");
     return sb.toString();
   }
@@ -76732,6 +77313,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_args stru
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
+          case 3: // VALID_WRITE_ID_LIST
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.validWriteIdList = iprot.readString();
+              struct.setValidWriteIdListIsSet(true);
+            } else {
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
           default:
             org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
         }
@@ -76755,6 +77344,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_args str
         oprot.writeString(struct.tbl_name);
         oprot.writeFieldEnd();
       }
+      if (struct.validWriteIdList != null) {
+        oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC);
+        oprot.writeString(struct.validWriteIdList);
+        oprot.writeFieldEnd();
+      }
       oprot.writeFieldStop();
       oprot.writeStructEnd();
     }
@@ -76779,19 +77373,25 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_args stru
       if (struct.isSetTbl_name()) {
         optionals.set(1);
       }
-      oprot.writeBitSet(optionals, 2);
+      if (struct.isSetValidWriteIdList()) {
+        optionals.set(2);
+      }
+      oprot.writeBitSet(optionals, 3);
       if (struct.isSetDbname()) {
         oprot.writeString(struct.dbname);
       }
       if (struct.isSetTbl_name()) {
         oprot.writeString(struct.tbl_name);
       }
+      if (struct.isSetValidWriteIdList()) {
+        oprot.writeString(struct.validWriteIdList);
+      }
     }
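
The hunks above complete the generated plumbing for the new validWriteIdList field on get_table_args: equality, hashing, ordering, toString, a case 3 branch in the standard-scheme reader, and a null-guarded field write. On the standard schemes (binary/compact) the change stays backward compatible, because a pre-patch reader that sees unknown field id 3 falls through to the default branch and TProtocolUtil.skip()s it. A minimal round-trip sketch, not part of the patch: the struct and accessor names come from the generated code above, the import path for the generated class is assumed, the TMemoryBuffer/TBinaryProtocol scaffolding is stock libthrift, and the write-id-list string is only an assumed example value.

    import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.get_table_args;
    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.transport.TMemoryBuffer;

    public class GetTableArgsRoundTrip {
      public static void main(String[] args) throws Exception {
        // Third constructor argument is the new validWriteIdList (example value only).
        get_table_args out = new get_table_args("default", "t1", "default.t1:5:9223372036854775807::");

        TMemoryBuffer buf = new TMemoryBuffer(256);   // growable in-memory transport
        out.write(new TBinaryProtocol(buf));          // standard scheme: field id 3 is tagged on the wire

        get_table_args in = new get_table_args();
        in.read(new TBinaryProtocol(buf));
        System.out.println(in.isSetValidWriteIdList() + " " + in.getValidWriteIdList());
      }
    }
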
     @Override
     public void read(org.apache.thrift.protocol.TProtocol prot, get_table_args struct) throws org.apache.thrift.TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
-      BitSet incoming = iprot.readBitSet(2);
+      BitSet incoming = iprot.readBitSet(3);
       if (incoming.get(0)) {
         struct.dbname = iprot.readString();
         struct.setDbnameIsSet(true);
@@ -76800,6 +77400,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_args struc
         struct.tbl_name = iprot.readString();
         struct.setTbl_nameIsSet(true);
       }
+      if (incoming.get(2)) {
+        struct.validWriteIdList = iprot.readString();
+        struct.setValidWriteIdListIsSet(true);
+      }
     }
   }
 
@@ -77781,13 +78385,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_objects_b case 2: // TBL_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1330 = iprot.readListBegin(); - struct.tbl_names = new ArrayList(_list1330.size); - String _elem1331; - for (int _i1332 = 0; _i1332 < _list1330.size; ++_i1332) + org.apache.thrift.protocol.TList _list1338 = iprot.readListBegin(); + struct.tbl_names = new ArrayList(_list1338.size); + String _elem1339; + for (int _i1340 = 0; _i1340 < _list1338.size; ++_i1340) { - _elem1331 = iprot.readString(); - struct.tbl_names.add(_elem1331); + _elem1339 = iprot.readString(); + struct.tbl_names.add(_elem1339); } iprot.readListEnd(); } @@ -77818,9 +78422,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_objects_ oprot.writeFieldBegin(TBL_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_names.size())); - for (String _iter1333 : struct.tbl_names) + for (String _iter1341 : struct.tbl_names) { - oprot.writeString(_iter1333); + oprot.writeString(_iter1341); } oprot.writeListEnd(); } @@ -77857,9 +78461,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_objects_b if (struct.isSetTbl_names()) { { oprot.writeI32(struct.tbl_names.size()); - for (String _iter1334 : struct.tbl_names) + for (String _iter1342 : struct.tbl_names) { - oprot.writeString(_iter1334); + oprot.writeString(_iter1342); } } } @@ -77875,13 +78479,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_objects_by } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list1335 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tbl_names = new ArrayList(_list1335.size); - String _elem1336; - for (int _i1337 = 0; _i1337 < _list1335.size; ++_i1337) + org.apache.thrift.protocol.TList _list1343 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tbl_names = new ArrayList(_list1343.size); + String _elem1344; + for (int _i1345 = 0; _i1345 < _list1343.size; ++_i1345) { - _elem1336 = iprot.readString(); - struct.tbl_names.add(_elem1336); + _elem1344 = iprot.readString(); + struct.tbl_names.add(_elem1344); } } struct.setTbl_namesIsSet(true); @@ -78206,14 +78810,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_objects_b case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1338 = iprot.readListBegin(); - struct.success = new ArrayList
(_list1338.size); - Table _elem1339; - for (int _i1340 = 0; _i1340 < _list1338.size; ++_i1340) + org.apache.thrift.protocol.TList _list1346 = iprot.readListBegin(); + struct.success = new ArrayList
(_list1346.size); + Table _elem1347; + for (int _i1348 = 0; _i1348 < _list1346.size; ++_i1348) { - _elem1339 = new Table(); - _elem1339.read(iprot); - struct.success.add(_elem1339); + _elem1347 = new Table(); + _elem1347.read(iprot); + struct.success.add(_elem1347); } iprot.readListEnd(); } @@ -78239,9 +78843,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_objects_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Table _iter1341 : struct.success) + for (Table _iter1349 : struct.success) { - _iter1341.write(oprot); + _iter1349.write(oprot); } oprot.writeListEnd(); } @@ -78272,9 +78876,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_objects_b if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Table _iter1342 : struct.success) + for (Table _iter1350 : struct.success) { - _iter1342.write(oprot); + _iter1350.write(oprot); } } } @@ -78286,14 +78890,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_objects_by BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1343 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList
(_list1343.size); - Table _elem1344; - for (int _i1345 = 0; _i1345 < _list1343.size; ++_i1345) + org.apache.thrift.protocol.TList _list1351 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList
(_list1351.size); + Table _elem1352; + for (int _i1353 = 0; _i1353 < _list1351.size; ++_i1353) { - _elem1344 = new Table(); - _elem1344.read(iprot); - struct.success.add(_elem1344); + _elem1352 = new Table(); + _elem1352.read(iprot); + struct.success.add(_elem1352); } } struct.setSuccessIsSet(true); @@ -79062,14 +79666,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_ext_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1346 = iprot.readListBegin(); - struct.success = new ArrayList(_list1346.size); - ExtendedTableInfo _elem1347; - for (int _i1348 = 0; _i1348 < _list1346.size; ++_i1348) + org.apache.thrift.protocol.TList _list1354 = iprot.readListBegin(); + struct.success = new ArrayList(_list1354.size); + ExtendedTableInfo _elem1355; + for (int _i1356 = 0; _i1356 < _list1354.size; ++_i1356) { - _elem1347 = new ExtendedTableInfo(); - _elem1347.read(iprot); - struct.success.add(_elem1347); + _elem1355 = new ExtendedTableInfo(); + _elem1355.read(iprot); + struct.success.add(_elem1355); } iprot.readListEnd(); } @@ -79104,9 +79708,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_tables_ext_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (ExtendedTableInfo _iter1349 : struct.success) + for (ExtendedTableInfo _iter1357 : struct.success) { - _iter1349.write(oprot); + _iter1357.write(oprot); } oprot.writeListEnd(); } @@ -79145,9 +79749,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_tables_ext_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (ExtendedTableInfo _iter1350 : struct.success) + for (ExtendedTableInfo _iter1358 : struct.success) { - _iter1350.write(oprot); + _iter1358.write(oprot); } } } @@ -79162,14 +79766,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_tables_ext_resul BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1351 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1351.size); - ExtendedTableInfo _elem1352; - for (int _i1353 = 0; _i1353 < _list1351.size; ++_i1353) + org.apache.thrift.protocol.TList _list1359 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1359.size); + ExtendedTableInfo _elem1360; + for (int _i1361 = 0; _i1361 < _list1359.size; ++_i1361) { - _elem1352 = new ExtendedTableInfo(); - _elem1352.read(iprot); - struct.success.add(_elem1352); + _elem1360 = new ExtendedTableInfo(); + _elem1360.read(iprot); + struct.success.add(_elem1360); } } struct.setSuccessIsSet(true); @@ -84682,13 +85286,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_names_by_ case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1354 = iprot.readListBegin(); - struct.success = new ArrayList(_list1354.size); - String _elem1355; - for (int _i1356 = 0; _i1356 < _list1354.size; ++_i1356) + org.apache.thrift.protocol.TList _list1362 = iprot.readListBegin(); + struct.success = new ArrayList(_list1362.size); + String _elem1363; + for (int _i1364 = 0; _i1364 < _list1362.size; ++_i1364) { - _elem1355 = iprot.readString(); - 
struct.success.add(_elem1355); + _elem1363 = iprot.readString(); + struct.success.add(_elem1363); } iprot.readListEnd(); } @@ -84741,9 +85345,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_names_by oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1357 : struct.success) + for (String _iter1365 : struct.success) { - oprot.writeString(_iter1357); + oprot.writeString(_iter1365); } oprot.writeListEnd(); } @@ -84798,9 +85402,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_names_by_ if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1358 : struct.success) + for (String _iter1366 : struct.success) { - oprot.writeString(_iter1358); + oprot.writeString(_iter1366); } } } @@ -84821,13 +85425,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_names_by_f BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1359 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1359.size); - String _elem1360; - for (int _i1361 = 0; _i1361 < _list1359.size; ++_i1361) + org.apache.thrift.protocol.TList _list1367 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1367.size); + String _elem1368; + for (int _i1369 = 0; _i1369 < _list1367.size; ++_i1369) { - _elem1360 = iprot.readString(); - struct.success.add(_elem1360); + _elem1368 = iprot.readString(); + struct.success.add(_elem1368); } } struct.setSuccessIsSet(true); @@ -91624,14 +92228,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_partitions_args case 1: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1362 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list1362.size); - Partition _elem1363; - for (int _i1364 = 0; _i1364 < _list1362.size; ++_i1364) + org.apache.thrift.protocol.TList _list1370 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list1370.size); + Partition _elem1371; + for (int _i1372 = 0; _i1372 < _list1370.size; ++_i1372) { - _elem1363 = new Partition(); - _elem1363.read(iprot); - struct.new_parts.add(_elem1363); + _elem1371 = new Partition(); + _elem1371.read(iprot); + struct.new_parts.add(_elem1371); } iprot.readListEnd(); } @@ -91657,9 +92261,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, add_partitions_arg oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter1365 : struct.new_parts) + for (Partition _iter1373 : struct.new_parts) { - _iter1365.write(oprot); + _iter1373.write(oprot); } oprot.writeListEnd(); } @@ -91690,9 +92294,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, add_partitions_args if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter1366 : struct.new_parts) + for (Partition _iter1374 : struct.new_parts) { - _iter1366.write(oprot); + _iter1374.write(oprot); } } } @@ -91704,14 +92308,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_partitions_args BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - 
org.apache.thrift.protocol.TList _list1367 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list1367.size); - Partition _elem1368; - for (int _i1369 = 0; _i1369 < _list1367.size; ++_i1369) + org.apache.thrift.protocol.TList _list1375 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list1375.size); + Partition _elem1376; + for (int _i1377 = 0; _i1377 < _list1375.size; ++_i1377) { - _elem1368 = new Partition(); - _elem1368.read(iprot); - struct.new_parts.add(_elem1368); + _elem1376 = new Partition(); + _elem1376.read(iprot); + struct.new_parts.add(_elem1376); } } struct.setNew_partsIsSet(true); @@ -92712,14 +93316,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_partitions_pspe case 1: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1370 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list1370.size); - PartitionSpec _elem1371; - for (int _i1372 = 0; _i1372 < _list1370.size; ++_i1372) + org.apache.thrift.protocol.TList _list1378 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list1378.size); + PartitionSpec _elem1379; + for (int _i1380 = 0; _i1380 < _list1378.size; ++_i1380) { - _elem1371 = new PartitionSpec(); - _elem1371.read(iprot); - struct.new_parts.add(_elem1371); + _elem1379 = new PartitionSpec(); + _elem1379.read(iprot); + struct.new_parts.add(_elem1379); } iprot.readListEnd(); } @@ -92745,9 +93349,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, add_partitions_psp oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (PartitionSpec _iter1373 : struct.new_parts) + for (PartitionSpec _iter1381 : struct.new_parts) { - _iter1373.write(oprot); + _iter1381.write(oprot); } oprot.writeListEnd(); } @@ -92778,9 +93382,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, add_partitions_pspe if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (PartitionSpec _iter1374 : struct.new_parts) + for (PartitionSpec _iter1382 : struct.new_parts) { - _iter1374.write(oprot); + _iter1382.write(oprot); } } } @@ -92792,14 +93396,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_partitions_pspec BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1375 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list1375.size); - PartitionSpec _elem1376; - for (int _i1377 = 0; _i1377 < _list1375.size; ++_i1377) + org.apache.thrift.protocol.TList _list1383 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list1383.size); + PartitionSpec _elem1384; + for (int _i1385 = 0; _i1385 < _list1383.size; ++_i1385) { - _elem1376 = new PartitionSpec(); - _elem1376.read(iprot); - struct.new_parts.add(_elem1376); + _elem1384 = new PartitionSpec(); + _elem1384.read(iprot); + struct.new_parts.add(_elem1384); } } struct.setNew_partsIsSet(true); @@ -93975,13 +94579,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, append_partition_ar case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - 
org.apache.thrift.protocol.TList _list1378 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1378.size); - String _elem1379; - for (int _i1380 = 0; _i1380 < _list1378.size; ++_i1380) + org.apache.thrift.protocol.TList _list1386 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1386.size); + String _elem1387; + for (int _i1388 = 0; _i1388 < _list1386.size; ++_i1388) { - _elem1379 = iprot.readString(); - struct.part_vals.add(_elem1379); + _elem1387 = iprot.readString(); + struct.part_vals.add(_elem1387); } iprot.readListEnd(); } @@ -94017,9 +94621,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, append_partition_a oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1381 : struct.part_vals) + for (String _iter1389 : struct.part_vals) { - oprot.writeString(_iter1381); + oprot.writeString(_iter1389); } oprot.writeListEnd(); } @@ -94062,9 +94666,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, append_partition_ar if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1382 : struct.part_vals) + for (String _iter1390 : struct.part_vals) { - oprot.writeString(_iter1382); + oprot.writeString(_iter1390); } } } @@ -94084,13 +94688,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, append_partition_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1383 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1383.size); - String _elem1384; - for (int _i1385 = 0; _i1385 < _list1383.size; ++_i1385) + org.apache.thrift.protocol.TList _list1391 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1391.size); + String _elem1392; + for (int _i1393 = 0; _i1393 < _list1391.size; ++_i1393) { - _elem1384 = iprot.readString(); - struct.part_vals.add(_elem1384); + _elem1392 = iprot.readString(); + struct.part_vals.add(_elem1392); } } struct.setPart_valsIsSet(true); @@ -96399,13 +97003,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, append_partition_wi case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1386 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1386.size); - String _elem1387; - for (int _i1388 = 0; _i1388 < _list1386.size; ++_i1388) + org.apache.thrift.protocol.TList _list1394 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1394.size); + String _elem1395; + for (int _i1396 = 0; _i1396 < _list1394.size; ++_i1396) { - _elem1387 = iprot.readString(); - struct.part_vals.add(_elem1387); + _elem1395 = iprot.readString(); + struct.part_vals.add(_elem1395); } iprot.readListEnd(); } @@ -96450,9 +97054,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, append_partition_w oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1389 : struct.part_vals) + for (String _iter1397 : struct.part_vals) { - oprot.writeString(_iter1389); + oprot.writeString(_iter1397); } oprot.writeListEnd(); } @@ -96503,9 +97107,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, append_partition_wi if 
(struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1390 : struct.part_vals) + for (String _iter1398 : struct.part_vals) { - oprot.writeString(_iter1390); + oprot.writeString(_iter1398); } } } @@ -96528,13 +97132,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, append_partition_wit } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1391 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1391.size); - String _elem1392; - for (int _i1393 = 0; _i1393 < _list1391.size; ++_i1393) + org.apache.thrift.protocol.TList _list1399 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1399.size); + String _elem1400; + for (int _i1401 = 0; _i1401 < _list1399.size; ++_i1401) { - _elem1392 = iprot.readString(); - struct.part_vals.add(_elem1392); + _elem1400 = iprot.readString(); + struct.part_vals.add(_elem1400); } } struct.setPart_valsIsSet(true); @@ -100404,13 +101008,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_partition_args case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1394 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1394.size); - String _elem1395; - for (int _i1396 = 0; _i1396 < _list1394.size; ++_i1396) + org.apache.thrift.protocol.TList _list1402 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1402.size); + String _elem1403; + for (int _i1404 = 0; _i1404 < _list1402.size; ++_i1404) { - _elem1395 = iprot.readString(); - struct.part_vals.add(_elem1395); + _elem1403 = iprot.readString(); + struct.part_vals.add(_elem1403); } iprot.readListEnd(); } @@ -100454,9 +101058,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, drop_partition_arg oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1397 : struct.part_vals) + for (String _iter1405 : struct.part_vals) { - oprot.writeString(_iter1397); + oprot.writeString(_iter1405); } oprot.writeListEnd(); } @@ -100505,9 +101109,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, drop_partition_args if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1398 : struct.part_vals) + for (String _iter1406 : struct.part_vals) { - oprot.writeString(_iter1398); + oprot.writeString(_iter1406); } } } @@ -100530,13 +101134,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, drop_partition_args } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1399 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1399.size); - String _elem1400; - for (int _i1401 = 0; _i1401 < _list1399.size; ++_i1401) + org.apache.thrift.protocol.TList _list1407 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1407.size); + String _elem1408; + for (int _i1409 = 0; _i1409 < _list1407.size; ++_i1409) { - _elem1400 = iprot.readString(); - struct.part_vals.add(_elem1400); + _elem1408 = iprot.readString(); + struct.part_vals.add(_elem1408); } } struct.setPart_valsIsSet(true); @@ -101775,13 +102379,13 @@ public void 
read(org.apache.thrift.protocol.TProtocol iprot, drop_partition_with case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1402 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1402.size); - String _elem1403; - for (int _i1404 = 0; _i1404 < _list1402.size; ++_i1404) + org.apache.thrift.protocol.TList _list1410 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1410.size); + String _elem1411; + for (int _i1412 = 0; _i1412 < _list1410.size; ++_i1412) { - _elem1403 = iprot.readString(); - struct.part_vals.add(_elem1403); + _elem1411 = iprot.readString(); + struct.part_vals.add(_elem1411); } iprot.readListEnd(); } @@ -101834,9 +102438,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, drop_partition_wit oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1405 : struct.part_vals) + for (String _iter1413 : struct.part_vals) { - oprot.writeString(_iter1405); + oprot.writeString(_iter1413); } oprot.writeListEnd(); } @@ -101893,9 +102497,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, drop_partition_with if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1406 : struct.part_vals) + for (String _iter1414 : struct.part_vals) { - oprot.writeString(_iter1406); + oprot.writeString(_iter1414); } } } @@ -101921,13 +102525,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, drop_partition_with_ } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1407 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1407.size); - String _elem1408; - for (int _i1409 = 0; _i1409 < _list1407.size; ++_i1409) + org.apache.thrift.protocol.TList _list1415 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1415.size); + String _elem1416; + for (int _i1417 = 0; _i1417 < _list1415.size; ++_i1417) { - _elem1408 = iprot.readString(); - struct.part_vals.add(_elem1408); + _elem1416 = iprot.readString(); + struct.part_vals.add(_elem1416); } } struct.setPart_valsIsSet(true); @@ -106045,6 +106649,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, drop_partitions_req_ private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField PART_VALS_FIELD_DESC = new org.apache.thrift.protocol.TField("part_vals", org.apache.thrift.protocol.TType.LIST, (short)3); + private static final org.apache.thrift.protocol.TField VALID_TXN_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validTxnList", org.apache.thrift.protocol.TType.STRING, (short)4); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -106055,12 +106660,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, drop_partitions_req_ private String db_name; // required private String tbl_name; // required private List part_vals; // required + private String validTxnList; 
// required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "db_name"), TBL_NAME((short)2, "tbl_name"), - PART_VALS((short)3, "part_vals"); + PART_VALS((short)3, "part_vals"), + VALID_TXN_LIST((short)4, "validTxnList"); private static final Map byName = new HashMap(); @@ -106081,6 +106688,8 @@ public static _Fields findByThriftId(int fieldId) { return TBL_NAME; case 3: // PART_VALS return PART_VALS; + case 4: // VALID_TXN_LIST + return VALID_TXN_LIST; default: return null; } @@ -106131,6 +106740,8 @@ public String getFieldName() { tmpMap.put(_Fields.PART_VALS, new org.apache.thrift.meta_data.FieldMetaData("part_vals", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); + tmpMap.put(_Fields.VALID_TXN_LIST, new org.apache.thrift.meta_data.FieldMetaData("validTxnList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_partition_args.class, metaDataMap); } @@ -106141,12 +106752,14 @@ public get_partition_args() { public get_partition_args( String db_name, String tbl_name, - List part_vals) + List part_vals, + String validTxnList) { this(); this.db_name = db_name; this.tbl_name = tbl_name; this.part_vals = part_vals; + this.validTxnList = validTxnList; } /** @@ -106163,6 +106776,9 @@ public get_partition_args(get_partition_args other) { List __this__part_vals = new ArrayList(other.part_vals); this.part_vals = __this__part_vals; } + if (other.isSetValidTxnList()) { + this.validTxnList = other.validTxnList; + } } public get_partition_args deepCopy() { @@ -106174,6 +106790,7 @@ public void clear() { this.db_name = null; this.tbl_name = null; this.part_vals = null; + this.validTxnList = null; } public String getDb_name() { @@ -106260,6 +106877,29 @@ public void setPart_valsIsSet(boolean value) { } } + public String getValidTxnList() { + return this.validTxnList; + } + + public void setValidTxnList(String validTxnList) { + this.validTxnList = validTxnList; + } + + public void unsetValidTxnList() { + this.validTxnList = null; + } + + /** Returns true if field validTxnList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidTxnList() { + return this.validTxnList != null; + } + + public void setValidTxnListIsSet(boolean value) { + if (!value) { + this.validTxnList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -106286,6 +106926,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_TXN_LIST: + if (value == null) { + unsetValidTxnList(); + } else { + setValidTxnList((String)value); + } + break; + } } @@ -106300,6 +106948,9 @@ public Object getFieldValue(_Fields field) { case PART_VALS: return getPart_vals(); + case VALID_TXN_LIST: + return getValidTxnList(); + } throw new IllegalStateException(); } @@ -106317,6 +106968,8 @@ public boolean isSet(_Fields field) { return isSetTbl_name(); case PART_VALS: return isSetPart_vals(); + case VALID_TXN_LIST: + return isSetValidTxnList(); } throw new IllegalStateException(); } @@ -106361,6 +107014,15 @@ public 
boolean equals(get_partition_args that) {
         return false;
     }
 
+    boolean this_present_validTxnList = true && this.isSetValidTxnList();
+    boolean that_present_validTxnList = true && that.isSetValidTxnList();
+    if (this_present_validTxnList || that_present_validTxnList) {
+      if (!(this_present_validTxnList && that_present_validTxnList))
+        return false;
+      if (!this.validTxnList.equals(that.validTxnList))
+        return false;
+    }
+
     return true;
   }
@@ -106383,6 +107045,11 @@ public int hashCode() {
     if (present_part_vals)
       list.add(part_vals);
 
+    boolean present_validTxnList = true && (isSetValidTxnList());
+    list.add(present_validTxnList);
+    if (present_validTxnList)
+      list.add(validTxnList);
+
     return list.hashCode();
   }
@@ -106424,6 +107091,16 @@ public int compareTo(get_partition_args other) {
        return lastComparison;
      }
    }
+    lastComparison = Boolean.valueOf(isSetValidTxnList()).compareTo(other.isSetValidTxnList());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetValidTxnList()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validTxnList, other.validTxnList);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
     return 0;
   }
@@ -106467,6 +107144,14 @@ public String toString() {
       sb.append(this.part_vals);
     }
     first = false;
+    if (!first) sb.append(", ");
+    sb.append("validTxnList:");
+    if (this.validTxnList == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.validTxnList);
+    }
+    first = false;
     sb.append(")");
     return sb.toString();
   }
@@ -106529,13 +107214,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_args
           case 3: // PART_VALS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list1410 = iprot.readListBegin();
-                struct.part_vals = new ArrayList(_list1410.size);
-                String _elem1411;
-                for (int _i1412 = 0; _i1412 < _list1410.size; ++_i1412)
+                org.apache.thrift.protocol.TList _list1418 = iprot.readListBegin();
+                struct.part_vals = new ArrayList(_list1418.size);
+                String _elem1419;
+                for (int _i1420 = 0; _i1420 < _list1418.size; ++_i1420)
                 {
-                  _elem1411 = iprot.readString();
-                  struct.part_vals.add(_elem1411);
+                  _elem1419 = iprot.readString();
+                  struct.part_vals.add(_elem1419);
                 }
                 iprot.readListEnd();
               }
@@ -106544,6 +107229,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_args
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
+          case 4: // VALID_TXN_LIST
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.validTxnList = iprot.readString();
+              struct.setValidTxnListIsSet(true);
+            } else {
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
           default:
             org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
         }
@@ -106571,14 +107264,19 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_args
         oprot.writeFieldBegin(PART_VALS_FIELD_DESC);
         {
           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size()));
-          for (String _iter1413 : struct.part_vals)
+          for (String _iter1421 : struct.part_vals)
           {
-            oprot.writeString(_iter1413);
+            oprot.writeString(_iter1421);
           }
           oprot.writeListEnd();
         }
         oprot.writeFieldEnd();
       }
+      if (struct.validTxnList != null) {
+        oprot.writeFieldBegin(VALID_TXN_LIST_FIELD_DESC);
+        oprot.writeString(struct.validTxnList);
+        oprot.writeFieldEnd();
+      }
       oprot.writeFieldStop();
       oprot.writeStructEnd();
     }
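
As with get_table_args, the standard-scheme writer above emits field 4 only when validTxnList is non-null, so an existing caller that never sets it produces the same bytes as before the patch. The tuple-scheme hunks that follow are stricter: TTupleProtocol carries no field ids, only the optionals bitset (grown here from 3 to 4 bits), and by contract it is meant for endpoints built from the same generated code, so mixing regenerated and pre-patch tuple-scheme peers is not supported. A small sketch of the unset case, not part of the patch; the struct comes from the generated code above, its import path is assumed, and the partition value is an invented example.

    import java.util.Arrays;
    import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.get_partition_args;
    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.transport.TMemoryBuffer;

    public class GetPartitionArgsCompat {
      public static void main(String[] args) throws Exception {
        // Fourth constructor argument is the new validTxnList; passing null means the
        // null-guarded field-4 write above is skipped, so the serialized frame keeps
        // its pre-patch layout on the standard (binary/compact) schemes.
        get_partition_args legacy = new get_partition_args(
            "default", "t1", Arrays.asList("2019-08-03"), null);

        TMemoryBuffer buf = new TMemoryBuffer(512);
        legacy.write(new TBinaryProtocol(buf));
        System.out.println("frame bytes without validTxnList: " + buf.length());
      }
    }
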
@@ -106606,7 +107304,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_args
       if (struct.isSetPart_vals()) {
         optionals.set(2);
       }
-      oprot.writeBitSet(optionals, 3);
+      if (struct.isSetValidTxnList()) {
+        optionals.set(3);
+      }
+      oprot.writeBitSet(optionals, 4);
       if (struct.isSetDb_name()) {
         oprot.writeString(struct.db_name);
       }
@@ -106616,18 +107317,21 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_args
       if (struct.isSetPart_vals()) {
         {
           oprot.writeI32(struct.part_vals.size());
-          for (String _iter1414 : struct.part_vals)
+          for (String _iter1422 : struct.part_vals)
           {
-            oprot.writeString(_iter1414);
+            oprot.writeString(_iter1422);
           }
         }
       }
+      if (struct.isSetValidTxnList()) {
+        oprot.writeString(struct.validTxnList);
+      }
     }
 
     @Override
     public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_args struct) throws org.apache.thrift.TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
-      BitSet incoming = iprot.readBitSet(3);
+      BitSet incoming = iprot.readBitSet(4);
       if (incoming.get(0)) {
         struct.db_name = iprot.readString();
         struct.setDb_nameIsSet(true);
@@ -106638,17 +107342,21 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_args s
       }
       if (incoming.get(2)) {
         {
-          org.apache.thrift.protocol.TList _list1415 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-          struct.part_vals = new ArrayList(_list1415.size);
-          String _elem1416;
-          for (int _i1417 = 0; _i1417 < _list1415.size; ++_i1417)
+          org.apache.thrift.protocol.TList _list1423 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+          struct.part_vals = new ArrayList(_list1423.size);
+          String _elem1424;
+          for (int _i1425 = 0; _i1425 < _list1423.size; ++_i1425)
           {
-            _elem1416 = iprot.readString();
-            struct.part_vals.add(_elem1416);
+            _elem1424 = iprot.readString();
+            struct.part_vals.add(_elem1424);
           }
         }
         struct.setPart_valsIsSet(true);
       }
+      if (incoming.get(3)) {
+        struct.validTxnList = iprot.readString();
+        struct.setValidTxnListIsSet(true);
+      }
     }
   }
 
@@ -107862,15 +108570,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partition_ case 1: // PARTITION_SPECS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1418 = iprot.readMapBegin(); - struct.partitionSpecs = new HashMap(2*_map1418.size); - String _key1419; - String _val1420; - for (int _i1421 = 0; _i1421 < _map1418.size; ++_i1421) + org.apache.thrift.protocol.TMap _map1426 = iprot.readMapBegin(); + struct.partitionSpecs = new HashMap(2*_map1426.size); + String _key1427; + String _val1428; + for (int _i1429 = 0; _i1429 < _map1426.size; ++_i1429) { - _key1419 = iprot.readString(); - _val1420 = iprot.readString(); - struct.partitionSpecs.put(_key1419, _val1420); + _key1427 = iprot.readString(); + _val1428 = iprot.readString(); + struct.partitionSpecs.put(_key1427, _val1428); } iprot.readMapEnd(); } @@ -107928,10 +108636,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition oprot.writeFieldBegin(PARTITION_SPECS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.partitionSpecs.size())); - for (Map.Entry _iter1422 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter1430 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter1422.getKey()); - oprot.writeString(_iter1422.getValue()); + oprot.writeString(_iter1430.getKey()); +
oprot.writeString(_iter1430.getValue()); } oprot.writeMapEnd(); } @@ -107994,10 +108702,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partition_ if (struct.isSetPartitionSpecs()) { { oprot.writeI32(struct.partitionSpecs.size()); - for (Map.Entry _iter1423 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter1431 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter1423.getKey()); - oprot.writeString(_iter1423.getValue()); + oprot.writeString(_iter1431.getKey()); + oprot.writeString(_iter1431.getValue()); } } } @@ -108021,15 +108729,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partition_a BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map1424 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionSpecs = new HashMap(2*_map1424.size); - String _key1425; - String _val1426; - for (int _i1427 = 0; _i1427 < _map1424.size; ++_i1427) + org.apache.thrift.protocol.TMap _map1432 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionSpecs = new HashMap(2*_map1432.size); + String _key1433; + String _val1434; + for (int _i1435 = 0; _i1435 < _map1432.size; ++_i1435) { - _key1425 = iprot.readString(); - _val1426 = iprot.readString(); - struct.partitionSpecs.put(_key1425, _val1426); + _key1433 = iprot.readString(); + _val1434 = iprot.readString(); + struct.partitionSpecs.put(_key1433, _val1434); } } struct.setPartitionSpecsIsSet(true); @@ -109475,15 +110183,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partitions case 1: // PARTITION_SPECS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1428 = iprot.readMapBegin(); - struct.partitionSpecs = new HashMap(2*_map1428.size); - String _key1429; - String _val1430; - for (int _i1431 = 0; _i1431 < _map1428.size; ++_i1431) + org.apache.thrift.protocol.TMap _map1436 = iprot.readMapBegin(); + struct.partitionSpecs = new HashMap(2*_map1436.size); + String _key1437; + String _val1438; + for (int _i1439 = 0; _i1439 < _map1436.size; ++_i1439) { - _key1429 = iprot.readString(); - _val1430 = iprot.readString(); - struct.partitionSpecs.put(_key1429, _val1430); + _key1437 = iprot.readString(); + _val1438 = iprot.readString(); + struct.partitionSpecs.put(_key1437, _val1438); } iprot.readMapEnd(); } @@ -109541,10 +110249,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition oprot.writeFieldBegin(PARTITION_SPECS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.partitionSpecs.size())); - for (Map.Entry _iter1432 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter1440 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter1432.getKey()); - oprot.writeString(_iter1432.getValue()); + oprot.writeString(_iter1440.getKey()); + oprot.writeString(_iter1440.getValue()); } oprot.writeMapEnd(); } @@ -109607,10 +110315,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partitions if (struct.isSetPartitionSpecs()) { { oprot.writeI32(struct.partitionSpecs.size()); - for (Map.Entry _iter1433 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter1441 : struct.partitionSpecs.entrySet()) { - 
oprot.writeString(_iter1433.getKey()); - oprot.writeString(_iter1433.getValue()); + oprot.writeString(_iter1441.getKey()); + oprot.writeString(_iter1441.getValue()); } } } @@ -109634,15 +110342,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partitions_ BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map1434 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionSpecs = new HashMap(2*_map1434.size); - String _key1435; - String _val1436; - for (int _i1437 = 0; _i1437 < _map1434.size; ++_i1437) + org.apache.thrift.protocol.TMap _map1442 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionSpecs = new HashMap(2*_map1442.size); + String _key1443; + String _val1444; + for (int _i1445 = 0; _i1445 < _map1442.size; ++_i1445) { - _key1435 = iprot.readString(); - _val1436 = iprot.readString(); - struct.partitionSpecs.put(_key1435, _val1436); + _key1443 = iprot.readString(); + _val1444 = iprot.readString(); + struct.partitionSpecs.put(_key1443, _val1444); } } struct.setPartitionSpecsIsSet(true); @@ -110307,14 +111015,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partitions case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1438 = iprot.readListBegin(); - struct.success = new ArrayList(_list1438.size); - Partition _elem1439; - for (int _i1440 = 0; _i1440 < _list1438.size; ++_i1440) + org.apache.thrift.protocol.TList _list1446 = iprot.readListBegin(); + struct.success = new ArrayList(_list1446.size); + Partition _elem1447; + for (int _i1448 = 0; _i1448 < _list1446.size; ++_i1448) { - _elem1439 = new Partition(); - _elem1439.read(iprot); - struct.success.add(_elem1439); + _elem1447 = new Partition(); + _elem1447.read(iprot); + struct.success.add(_elem1447); } iprot.readListEnd(); } @@ -110376,9 +111084,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1441 : struct.success) + for (Partition _iter1449 : struct.success) { - _iter1441.write(oprot); + _iter1449.write(oprot); } oprot.writeListEnd(); } @@ -110441,9 +111149,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partitions if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1442 : struct.success) + for (Partition _iter1450 : struct.success) { - _iter1442.write(oprot); + _iter1450.write(oprot); } } } @@ -110467,14 +111175,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partitions_ BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1443 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1443.size); - Partition _elem1444; - for (int _i1445 = 0; _i1445 < _list1443.size; ++_i1445) + org.apache.thrift.protocol.TList _list1451 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1451.size); + Partition _elem1452; + for (int _i1453 = 0; _i1453 < 
_list1451.size; ++_i1453) { - _elem1444 = new Partition(); - _elem1444.read(iprot); - struct.success.add(_elem1444); + _elem1452 = new Partition(); + _elem1452.read(iprot); + struct.success.add(_elem1452); } } struct.setSuccessIsSet(true); @@ -110512,6 +111220,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partitions_ private static final org.apache.thrift.protocol.TField PART_VALS_FIELD_DESC = new org.apache.thrift.protocol.TField("part_vals", org.apache.thrift.protocol.TType.LIST, (short)3); private static final org.apache.thrift.protocol.TField USER_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("user_name", org.apache.thrift.protocol.TType.STRING, (short)4); private static final org.apache.thrift.protocol.TField GROUP_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("group_names", org.apache.thrift.protocol.TType.LIST, (short)5); + private static final org.apache.thrift.protocol.TField VALID_TXN_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validTxnList", org.apache.thrift.protocol.TType.STRING, (short)6); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -110524,6 +111233,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partitions_ private List part_vals; // required private String user_name; // required private List group_names; // required + private String validTxnList; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -110531,7 +111241,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partitions_ TBL_NAME((short)2, "tbl_name"), PART_VALS((short)3, "part_vals"), USER_NAME((short)4, "user_name"), - GROUP_NAMES((short)5, "group_names"); + GROUP_NAMES((short)5, "group_names"), + VALID_TXN_LIST((short)6, "validTxnList"); private static final Map byName = new HashMap(); @@ -110556,6 +111267,8 @@ public static _Fields findByThriftId(int fieldId) { return USER_NAME; case 5: // GROUP_NAMES return GROUP_NAMES; + case 6: // VALID_TXN_LIST + return VALID_TXN_LIST; default: return null; } @@ -110611,6 +111324,8 @@ public String getFieldName() { tmpMap.put(_Fields.GROUP_NAMES, new org.apache.thrift.meta_data.FieldMetaData("group_names", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); + tmpMap.put(_Fields.VALID_TXN_LIST, new org.apache.thrift.meta_data.FieldMetaData("validTxnList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_partition_with_auth_args.class, metaDataMap); } @@ -110623,7 +111338,8 @@ public get_partition_with_auth_args( String tbl_name, List part_vals, String user_name, - List group_names) + List group_names, + String validTxnList) { this(); this.db_name = db_name; @@ -110631,6 +111347,7 @@ public get_partition_with_auth_args( this.part_vals = part_vals; this.user_name = user_name; this.group_names = group_names; + this.validTxnList = validTxnList; } /** @@ -110654,6 +111371,9 @@ public get_partition_with_auth_args(get_partition_with_auth_args other) { List __this__group_names = new 
ArrayList(other.group_names); this.group_names = __this__group_names; } + if (other.isSetValidTxnList()) { + this.validTxnList = other.validTxnList; + } } public get_partition_with_auth_args deepCopy() { @@ -110667,6 +111387,7 @@ public void clear() { this.part_vals = null; this.user_name = null; this.group_names = null; + this.validTxnList = null; } public String getDb_name() { @@ -110814,6 +111535,29 @@ public void setGroup_namesIsSet(boolean value) { } } + public String getValidTxnList() { + return this.validTxnList; + } + + public void setValidTxnList(String validTxnList) { + this.validTxnList = validTxnList; + } + + public void unsetValidTxnList() { + this.validTxnList = null; + } + + /** Returns true if field validTxnList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidTxnList() { + return this.validTxnList != null; + } + + public void setValidTxnListIsSet(boolean value) { + if (!value) { + this.validTxnList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -110856,6 +111600,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_TXN_LIST: + if (value == null) { + unsetValidTxnList(); + } else { + setValidTxnList((String)value); + } + break; + } } @@ -110876,6 +111628,9 @@ public Object getFieldValue(_Fields field) { case GROUP_NAMES: return getGroup_names(); + case VALID_TXN_LIST: + return getValidTxnList(); + } throw new IllegalStateException(); } @@ -110897,6 +111652,8 @@ public boolean isSet(_Fields field) { return isSetUser_name(); case GROUP_NAMES: return isSetGroup_names(); + case VALID_TXN_LIST: + return isSetValidTxnList(); } throw new IllegalStateException(); } @@ -110959,6 +111716,15 @@ public boolean equals(get_partition_with_auth_args that) { return false; } + boolean this_present_validTxnList = true && this.isSetValidTxnList(); + boolean that_present_validTxnList = true && that.isSetValidTxnList(); + if (this_present_validTxnList || that_present_validTxnList) { + if (!(this_present_validTxnList && that_present_validTxnList)) + return false; + if (!this.validTxnList.equals(that.validTxnList)) + return false; + } + return true; } @@ -110991,6 +111757,11 @@ public int hashCode() { if (present_group_names) list.add(group_names); + boolean present_validTxnList = true && (isSetValidTxnList()); + list.add(present_validTxnList); + if (present_validTxnList) + list.add(validTxnList); + return list.hashCode(); } @@ -111052,6 +111823,16 @@ public int compareTo(get_partition_with_auth_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidTxnList()).compareTo(other.isSetValidTxnList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidTxnList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validTxnList, other.validTxnList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -111111,6 +111892,14 @@ public String toString() { sb.append(this.group_names); } first = false; + if (!first) sb.append(", "); + sb.append("validTxnList:"); + if (this.validTxnList == null) { + sb.append("null"); + } else { + sb.append(this.validTxnList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -111173,13 +111962,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_with_ case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1446 = iprot.readListBegin(); - 
struct.part_vals = new ArrayList(_list1446.size); - String _elem1447; - for (int _i1448 = 0; _i1448 < _list1446.size; ++_i1448) + org.apache.thrift.protocol.TList _list1454 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1454.size); + String _elem1455; + for (int _i1456 = 0; _i1456 < _list1454.size; ++_i1456) { - _elem1447 = iprot.readString(); - struct.part_vals.add(_elem1447); + _elem1455 = iprot.readString(); + struct.part_vals.add(_elem1455); } iprot.readListEnd(); } @@ -111199,13 +111988,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_with_ case 5: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1449 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1449.size); - String _elem1450; - for (int _i1451 = 0; _i1451 < _list1449.size; ++_i1451) + org.apache.thrift.protocol.TList _list1457 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1457.size); + String _elem1458; + for (int _i1459 = 0; _i1459 < _list1457.size; ++_i1459) { - _elem1450 = iprot.readString(); - struct.group_names.add(_elem1450); + _elem1458 = iprot.readString(); + struct.group_names.add(_elem1458); } iprot.readListEnd(); } @@ -111214,6 +112003,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_with_ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 6: // VALID_TXN_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validTxnList = iprot.readString(); + struct.setValidTxnListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -111241,9 +112038,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_with oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1452 : struct.part_vals) + for (String _iter1460 : struct.part_vals) { - oprot.writeString(_iter1452); + oprot.writeString(_iter1460); } oprot.writeListEnd(); } @@ -111258,14 +112055,19 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_with oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1453 : struct.group_names) + for (String _iter1461 : struct.group_names) { - oprot.writeString(_iter1453); + oprot.writeString(_iter1461); } oprot.writeListEnd(); } oprot.writeFieldEnd(); } + if (struct.validTxnList != null) { + oprot.writeFieldBegin(VALID_TXN_LIST_FIELD_DESC); + oprot.writeString(struct.validTxnList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -111299,7 +112101,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_with_ if (struct.isSetGroup_names()) { optionals.set(4); } - oprot.writeBitSet(optionals, 5); + if (struct.isSetValidTxnList()) { + optionals.set(5); + } + oprot.writeBitSet(optionals, 6); if (struct.isSetDb_name()) { oprot.writeString(struct.db_name); } @@ -111309,9 +112114,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_with_ if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1454 : 
struct.part_vals)
+          for (String _iter1462 : struct.part_vals)
           {
-            oprot.writeString(_iter1454);
+            oprot.writeString(_iter1462);
           }
         }
       }
@@ -111321,18 +112126,21 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_with_
       if (struct.isSetGroup_names()) {
         {
           oprot.writeI32(struct.group_names.size());
-          for (String _iter1455 : struct.group_names)
+          for (String _iter1463 : struct.group_names)
           {
-            oprot.writeString(_iter1455);
+            oprot.writeString(_iter1463);
           }
         }
       }
+      if (struct.isSetValidTxnList()) {
+        oprot.writeString(struct.validTxnList);
+      }
     }

     @Override
     public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_with_auth_args struct) throws org.apache.thrift.TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
-      BitSet incoming = iprot.readBitSet(5);
+      BitSet incoming = iprot.readBitSet(6);
       if (incoming.get(0)) {
         struct.db_name = iprot.readString();
         struct.setDb_nameIsSet(true);
@@ -111343,13 +112151,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_with_a
       }
       if (incoming.get(2)) {
         {
-          org.apache.thrift.protocol.TList _list1456 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-          struct.part_vals = new ArrayList<String>(_list1456.size);
-          String _elem1457;
-          for (int _i1458 = 0; _i1458 < _list1456.size; ++_i1458)
+          org.apache.thrift.protocol.TList _list1464 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+          struct.part_vals = new ArrayList<String>(_list1464.size);
+          String _elem1465;
+          for (int _i1466 = 0; _i1466 < _list1464.size; ++_i1466)
           {
-            _elem1457 = iprot.readString();
-            struct.part_vals.add(_elem1457);
+            _elem1465 = iprot.readString();
+            struct.part_vals.add(_elem1465);
           }
         }
         struct.setPart_valsIsSet(true);
@@ -111360,17 +112168,21 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_with_a
       }
       if (incoming.get(4)) {
         {
-          org.apache.thrift.protocol.TList _list1459 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-          struct.group_names = new ArrayList<String>(_list1459.size);
-          String _elem1460;
-          for (int _i1461 = 0; _i1461 < _list1459.size; ++_i1461)
+          org.apache.thrift.protocol.TList _list1467 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+          struct.group_names = new ArrayList<String>(_list1467.size);
+          String _elem1468;
+          for (int _i1469 = 0; _i1469 < _list1467.size; ++_i1469)
           {
-            _elem1460 = iprot.readString();
-            struct.group_names.add(_elem1460);
+            _elem1468 = iprot.readString();
+            struct.group_names.add(_elem1468);
           }
         }
         struct.setGroup_namesIsSet(true);
       }
+      if (incoming.get(5)) {
+        struct.validTxnList = iprot.readString();
+        struct.setValidTxnListIsSet(true);
+      }
     }
   }
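From here on the patch repeats one generator template per metastore call: the hunks below for get_partition_by_name_args (and, further down, get_partitions_args, get_partitions_with_auth_args, get_partitions_pspec_args and get_partition_names_args) all follow from appending a single optional string to the call's argument struct. The regenerated equals/hashCode/compareTo treat the field presence-first, so structs that merely omit validTxnList still compare equal and hash alike. A condensed sketch of that null-safe pattern (hypothetical class mirroring, not copying, the generated code):

    public class PresenceFirstComparison {
      private String validTxnList; // null doubles as "unset"

      public boolean isSetValidTxnList() { return validTxnList != null; }

      // equals(): unset-vs-unset is equal; set-vs-unset is unequal;
      // set-vs-set falls through to String.equals.
      public boolean fieldEquals(PresenceFirstComparison that) {
        boolean thisSet = this.isSetValidTxnList();
        boolean thatSet = that.isSetValidTxnList();
        if (thisSet || thatSet) {
          return thisSet && thatSet
              && this.validTxnList.equals(that.validTxnList);
        }
        return true;
      }

      // compareTo(): presence is compared before value, so unset sorts
      // consistently before set and no NullPointerException is possible.
      public int fieldCompare(PresenceFirstComparison other) {
        int c = Boolean.compare(isSetValidTxnList(), other.isSetValidTxnList());
        if (c != 0 || !isSetValidTxnList()) {
          return c;
        }
        return this.validTxnList.compareTo(other.validTxnList);
      }
    }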
@@ -111957,6 +112769,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_with_a
   private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1);
   private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2);
   private static final org.apache.thrift.protocol.TField PART_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("part_name", org.apache.thrift.protocol.TType.STRING, (short)3);
+  private static final org.apache.thrift.protocol.TField VALID_TXN_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validTxnList", org.apache.thrift.protocol.TType.STRING, (short)4);

   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
   static {
@@ -111967,12 +112780,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_with_a
   private String db_name; // required
   private String tbl_name; // required
   private String part_name; // required
+  private String validTxnList; // required

   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
     DB_NAME((short)1, "db_name"),
     TBL_NAME((short)2, "tbl_name"),
-    PART_NAME((short)3, "part_name");
+    PART_NAME((short)3, "part_name"),
+    VALID_TXN_LIST((short)4, "validTxnList");

     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();

@@ -111993,6 +112808,8 @@ public static _Fields findByThriftId(int fieldId) {
         return TBL_NAME;
       case 3: // PART_NAME
         return PART_NAME;
+      case 4: // VALID_TXN_LIST
+        return VALID_TXN_LIST;
       default:
         return null;
     }
@@ -112042,6 +112859,8 @@ public String getFieldName() {
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
     tmpMap.put(_Fields.PART_NAME, new org.apache.thrift.meta_data.FieldMetaData("part_name", org.apache.thrift.TFieldRequirementType.DEFAULT,
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.VALID_TXN_LIST, new org.apache.thrift.meta_data.FieldMetaData("validTxnList", org.apache.thrift.TFieldRequirementType.DEFAULT,
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
     metaDataMap = Collections.unmodifiableMap(tmpMap);
     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_partition_by_name_args.class, metaDataMap);
   }
@@ -112052,12 +112871,14 @@ public get_partition_by_name_args() {
   public get_partition_by_name_args(
     String db_name,
     String tbl_name,
-    String part_name)
+    String part_name,
+    String validTxnList)
   {
     this();
     this.db_name = db_name;
     this.tbl_name = tbl_name;
     this.part_name = part_name;
+    this.validTxnList = validTxnList;
   }

   /**
@@ -112073,6 +112894,9 @@ public get_partition_by_name_args(get_partition_by_name_args other) {
     if (other.isSetPart_name()) {
       this.part_name = other.part_name;
     }
+    if (other.isSetValidTxnList()) {
+      this.validTxnList = other.validTxnList;
+    }
   }

   public get_partition_by_name_args deepCopy() {
@@ -112084,6 +112908,7 @@ public void clear() {
     this.db_name = null;
     this.tbl_name = null;
     this.part_name = null;
+    this.validTxnList = null;
   }

   public String getDb_name() {
@@ -112155,6 +112980,29 @@ public void setPart_nameIsSet(boolean value) {
     }
   }

+  public String getValidTxnList() {
+    return this.validTxnList;
+  }
+
+  public void setValidTxnList(String validTxnList) {
+    this.validTxnList = validTxnList;
+  }
+
+  public void unsetValidTxnList() {
+    this.validTxnList = null;
+  }
+
+  /** Returns true if field validTxnList is set (has been assigned a value) and false otherwise */
+  public boolean isSetValidTxnList() {
+    return this.validTxnList != null;
+  }
+
+  public void setValidTxnListIsSet(boolean value) {
+    if (!value) {
+      this.validTxnList = null;
+    }
+  }
+
   public void setFieldValue(_Fields field, Object value) {
     switch (field) {
     case DB_NAME:
@@ -112181,6 +113029,14 @@ public void setFieldValue(_Fields field, Object value) {
       }
       break;

+    case VALID_TXN_LIST:
+      if (value == null) {
+        unsetValidTxnList();
+      } else {
+        setValidTxnList((String)value);
+      }
+      break;
+
     }
   }
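setFieldValue above is the reflective mutator used by generic Thrift tooling, and its one contract worth noting is that a null argument must route through unsetValidTxnList(), because null doubles as the "unset" marker for object fields. A tiny model of just that contract (hypothetical class):

    public class FieldMutatorSketch {
      private String validTxnList;

      public void setValidTxnList(String v) { validTxnList = v; }
      public void unsetValidTxnList()       { validTxnList = null; }
      public boolean isSetValidTxnList()    { return validTxnList != null; }

      // Mirrors the generated switch arm: null clears, non-null assigns.
      public void setFieldValue(Object value) {
        if (value == null) {
          unsetValidTxnList();
        } else {
          setValidTxnList((String) value);
        }
      }
    }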
@@ -112195,6 +113051,9 @@ public Object getFieldValue(_Fields field) {
     case PART_NAME:
       return getPart_name();

+    case VALID_TXN_LIST:
+      return getValidTxnList();
+
     }
     throw new IllegalStateException();
   }
@@ -112212,6 +113071,8 @@ public boolean isSet(_Fields field) {
       return isSetTbl_name();
     case PART_NAME:
       return isSetPart_name();
+    case VALID_TXN_LIST:
+      return isSetValidTxnList();
     }
     throw new IllegalStateException();
   }
@@ -112256,6 +113117,15 @@ public boolean equals(get_partition_by_name_args that) {
       return false;
     }

+    boolean this_present_validTxnList = true && this.isSetValidTxnList();
+    boolean that_present_validTxnList = true && that.isSetValidTxnList();
+    if (this_present_validTxnList || that_present_validTxnList) {
+      if (!(this_present_validTxnList && that_present_validTxnList))
+        return false;
+      if (!this.validTxnList.equals(that.validTxnList))
+        return false;
+    }
+
     return true;
   }
@@ -112278,6 +113148,11 @@ public int hashCode() {
     if (present_part_name)
       list.add(part_name);

+    boolean present_validTxnList = true && (isSetValidTxnList());
+    list.add(present_validTxnList);
+    if (present_validTxnList)
+      list.add(validTxnList);
+
     return list.hashCode();
   }
@@ -112319,6 +113194,16 @@ public int compareTo(get_partition_by_name_args other) {
         return lastComparison;
       }
     }
+    lastComparison = Boolean.valueOf(isSetValidTxnList()).compareTo(other.isSetValidTxnList());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetValidTxnList()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validTxnList, other.validTxnList);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
     return 0;
   }
@@ -112362,6 +113247,14 @@ public String toString() {
       sb.append(this.part_name);
     }
     first = false;
+    if (!first) sb.append(", ");
+    sb.append("validTxnList:");
+    if (this.validTxnList == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.validTxnList);
+    }
+    first = false;
     sb.append(")");
     return sb.toString();
   }
@@ -112429,6 +113322,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_by_na
             org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
           }
           break;
+        case 4: // VALID_TXN_LIST
+          if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+            struct.validTxnList = iprot.readString();
+            struct.setValidTxnListIsSet(true);
+          } else {
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+          }
+          break;
         default:
           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
       }
@@ -112457,6 +113358,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_by_n
       oprot.writeString(struct.part_name);
       oprot.writeFieldEnd();
     }
+    if (struct.validTxnList != null) {
+      oprot.writeFieldBegin(VALID_TXN_LIST_FIELD_DESC);
+      oprot.writeString(struct.validTxnList);
+      oprot.writeFieldEnd();
+    }
     oprot.writeFieldStop();
     oprot.writeStructEnd();
   }
@@ -112484,7 +113390,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_by_na
     if (struct.isSetPart_name()) {
       optionals.set(2);
     }
-    oprot.writeBitSet(optionals, 3);
+    if (struct.isSetValidTxnList()) {
+      optionals.set(3);
+    }
+    oprot.writeBitSet(optionals, 4);
     if (struct.isSetDb_name()) {
       oprot.writeString(struct.db_name);
     }
@@ -112494,12 +113403,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_by_na
     if (struct.isSetPart_name()) {
       oprot.writeString(struct.part_name);
     }
+    if (struct.isSetValidTxnList()) {
+      oprot.writeString(struct.validTxnList);
+    }
   }
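The standard-scheme hunks above show why this field can be added without breaking old binaries: on the wire each field travels as an (id, type, value) record, the writer only emits field 4 when validTxnList is non-null, and a reader that does not recognize an id falls through to TProtocolUtil.skip. A pure-JDK model of that read loop (hypothetical classes, not the Thrift runtime):

    import java.util.Arrays;
    import java.util.List;

    public class SkipUnknownFieldsSketch {
      // One (fieldId, value) pair as it would arrive on the wire.
      static class Field {
        final short id;
        final String value;
        Field(short id, String value) { this.id = id; this.value = value; }
      }

      static String dbName, tblName, partName, validTxnList;

      static void read(List<Field> wire) {
        for (Field f : wire) {
          switch (f.id) {
            case 1: dbName = f.value; break;
            case 2: tblName = f.value; break;
            case 3: partName = f.value; break;
            case 4: validTxnList = f.value; break; // new in this patch
            default: break; // unknown field id: skip it, never fail
          }
        }
      }

      public static void main(String[] args) {
        // A pre-patch reader has no "case 4" and hits the default arm,
        // skipping the value; that is the compatibility story here.
        read(Arrays.asList(new Field((short) 1, "default"),
                           new Field((short) 4, "<serialized ValidTxnList>")));
        System.out.println(dbName + " | " + validTxnList);
      }
    }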
    @Override
    public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_by_name_args struct) throws org.apache.thrift.TException {
      TTupleProtocol iprot = (TTupleProtocol) prot;
-     BitSet incoming = iprot.readBitSet(3);
+     BitSet incoming = iprot.readBitSet(4);
      if (incoming.get(0)) {
        struct.db_name = iprot.readString();
        struct.setDb_nameIsSet(true);
@@ -112512,6 +113424,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_by_nam
        struct.part_name = iprot.readString();
        struct.setPart_nameIsSet(true);
      }
+     if (incoming.get(3)) {
+       struct.validTxnList = iprot.readString();
+       struct.setValidTxnListIsSet(true);
+     }
    }
  }

@@ -113098,6 +114014,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_by_nam
   private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1);
   private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2);
   private static final org.apache.thrift.protocol.TField MAX_PARTS_FIELD_DESC = new org.apache.thrift.protocol.TField("max_parts", org.apache.thrift.protocol.TType.I16, (short)3);
+  private static final org.apache.thrift.protocol.TField VALID_TXN_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validTxnList", org.apache.thrift.protocol.TType.STRING, (short)4);

   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
   static {
@@ -113108,12 +114025,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_by_nam
   private String db_name; // required
   private String tbl_name; // required
   private short max_parts; // required
+  private String validTxnList; // required

   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them.
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "db_name"), TBL_NAME((short)2, "tbl_name"), - MAX_PARTS((short)3, "max_parts"); + MAX_PARTS((short)3, "max_parts"), + VALID_TXN_LIST((short)4, "validTxnList"); private static final Map byName = new HashMap(); @@ -113134,6 +114053,8 @@ public static _Fields findByThriftId(int fieldId) { return TBL_NAME; case 3: // MAX_PARTS return MAX_PARTS; + case 4: // VALID_TXN_LIST + return VALID_TXN_LIST; default: return null; } @@ -113185,6 +114106,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.MAX_PARTS, new org.apache.thrift.meta_data.FieldMetaData("max_parts", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I16))); + tmpMap.put(_Fields.VALID_TXN_LIST, new org.apache.thrift.meta_data.FieldMetaData("validTxnList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_partitions_args.class, metaDataMap); } @@ -113197,13 +114120,15 @@ public get_partitions_args() { public get_partitions_args( String db_name, String tbl_name, - short max_parts) + short max_parts, + String validTxnList) { this(); this.db_name = db_name; this.tbl_name = tbl_name; this.max_parts = max_parts; setMax_partsIsSet(true); + this.validTxnList = validTxnList; } /** @@ -113218,6 +114143,9 @@ public get_partitions_args(get_partitions_args other) { this.tbl_name = other.tbl_name; } this.max_parts = other.max_parts; + if (other.isSetValidTxnList()) { + this.validTxnList = other.validTxnList; + } } public get_partitions_args deepCopy() { @@ -113230,6 +114158,7 @@ public void clear() { this.tbl_name = null; this.max_parts = (short)-1; + this.validTxnList = null; } public String getDb_name() { @@ -113300,6 +114229,29 @@ public void setMax_partsIsSet(boolean value) { __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MAX_PARTS_ISSET_ID, value); } + public String getValidTxnList() { + return this.validTxnList; + } + + public void setValidTxnList(String validTxnList) { + this.validTxnList = validTxnList; + } + + public void unsetValidTxnList() { + this.validTxnList = null; + } + + /** Returns true if field validTxnList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidTxnList() { + return this.validTxnList != null; + } + + public void setValidTxnListIsSet(boolean value) { + if (!value) { + this.validTxnList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -113326,6 +114278,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_TXN_LIST: + if (value == null) { + unsetValidTxnList(); + } else { + setValidTxnList((String)value); + } + break; + } } @@ -113340,6 +114300,9 @@ public Object getFieldValue(_Fields field) { case MAX_PARTS: return getMax_parts(); + case VALID_TXN_LIST: + return getValidTxnList(); + } throw new IllegalStateException(); } @@ -113357,6 +114320,8 @@ public boolean isSet(_Fields field) { return isSetTbl_name(); case MAX_PARTS: return isSetMax_parts(); + case VALID_TXN_LIST: + return isSetValidTxnList(); } throw new IllegalStateException(); } @@ -113401,6 +114366,15 @@ public boolean equals(get_partitions_args that) { 
return false; } + boolean this_present_validTxnList = true && this.isSetValidTxnList(); + boolean that_present_validTxnList = true && that.isSetValidTxnList(); + if (this_present_validTxnList || that_present_validTxnList) { + if (!(this_present_validTxnList && that_present_validTxnList)) + return false; + if (!this.validTxnList.equals(that.validTxnList)) + return false; + } + return true; } @@ -113423,6 +114397,11 @@ public int hashCode() { if (present_max_parts) list.add(max_parts); + boolean present_validTxnList = true && (isSetValidTxnList()); + list.add(present_validTxnList); + if (present_validTxnList) + list.add(validTxnList); + return list.hashCode(); } @@ -113464,6 +114443,16 @@ public int compareTo(get_partitions_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidTxnList()).compareTo(other.isSetValidTxnList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidTxnList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validTxnList, other.validTxnList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -113503,6 +114492,14 @@ public String toString() { sb.append("max_parts:"); sb.append(this.max_parts); first = false; + if (!first) sb.append(", "); + sb.append("validTxnList:"); + if (this.validTxnList == null) { + sb.append("null"); + } else { + sb.append(this.validTxnList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -113572,6 +114569,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_args org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 4: // VALID_TXN_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validTxnList = iprot.readString(); + struct.setValidTxnListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -113598,6 +114603,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_arg oprot.writeFieldBegin(MAX_PARTS_FIELD_DESC); oprot.writeI16(struct.max_parts); oprot.writeFieldEnd(); + if (struct.validTxnList != null) { + oprot.writeFieldBegin(VALID_TXN_LIST_FIELD_DESC); + oprot.writeString(struct.validTxnList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -113625,7 +114635,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_args if (struct.isSetMax_parts()) { optionals.set(2); } - oprot.writeBitSet(optionals, 3); + if (struct.isSetValidTxnList()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); if (struct.isSetDb_name()) { oprot.writeString(struct.db_name); } @@ -113635,12 +114648,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_args if (struct.isSetMax_parts()) { oprot.writeI16(struct.max_parts); } + if (struct.isSetValidTxnList()) { + oprot.writeString(struct.validTxnList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(3); + BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); @@ -113653,6 +114669,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_args struct.max_parts = iprot.readI16(); 
struct.setMax_partsIsSet(true); } + if (incoming.get(3)) { + struct.validTxnList = iprot.readString(); + struct.setValidTxnListIsSet(true); + } } } @@ -114135,14 +115155,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1462 = iprot.readListBegin(); - struct.success = new ArrayList(_list1462.size); - Partition _elem1463; - for (int _i1464 = 0; _i1464 < _list1462.size; ++_i1464) + org.apache.thrift.protocol.TList _list1470 = iprot.readListBegin(); + struct.success = new ArrayList(_list1470.size); + Partition _elem1471; + for (int _i1472 = 0; _i1472 < _list1470.size; ++_i1472) { - _elem1463 = new Partition(); - _elem1463.read(iprot); - struct.success.add(_elem1463); + _elem1471 = new Partition(); + _elem1471.read(iprot); + struct.success.add(_elem1471); } iprot.readListEnd(); } @@ -114186,9 +115206,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1465 : struct.success) + for (Partition _iter1473 : struct.success) { - _iter1465.write(oprot); + _iter1473.write(oprot); } oprot.writeListEnd(); } @@ -114235,9 +115255,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1466 : struct.success) + for (Partition _iter1474 : struct.success) { - _iter1466.write(oprot); + _iter1474.write(oprot); } } } @@ -114255,14 +115275,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_resul BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1467 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1467.size); - Partition _elem1468; - for (int _i1469 = 0; _i1469 < _list1467.size; ++_i1469) + org.apache.thrift.protocol.TList _list1475 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1475.size); + Partition _elem1476; + for (int _i1477 = 0; _i1477 < _list1475.size; ++_i1477) { - _elem1468 = new Partition(); - _elem1468.read(iprot); - struct.success.add(_elem1468); + _elem1476 = new Partition(); + _elem1476.read(iprot); + struct.success.add(_elem1476); } } struct.setSuccessIsSet(true); @@ -114290,6 +115310,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_resul private static final org.apache.thrift.protocol.TField MAX_PARTS_FIELD_DESC = new org.apache.thrift.protocol.TField("max_parts", org.apache.thrift.protocol.TType.I16, (short)3); private static final org.apache.thrift.protocol.TField USER_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("user_name", org.apache.thrift.protocol.TType.STRING, (short)4); private static final org.apache.thrift.protocol.TField GROUP_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("group_names", org.apache.thrift.protocol.TType.LIST, (short)5); + private static final org.apache.thrift.protocol.TField VALID_TXN_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validTxnList", org.apache.thrift.protocol.TType.STRING, (short)6); private static final Map, SchemeFactory> schemes = new 
HashMap, SchemeFactory>(); static { @@ -114302,6 +115323,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_resul private short max_parts; // required private String user_name; // required private List group_names; // required + private String validTxnList; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -114309,7 +115331,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_resul TBL_NAME((short)2, "tbl_name"), MAX_PARTS((short)3, "max_parts"), USER_NAME((short)4, "user_name"), - GROUP_NAMES((short)5, "group_names"); + GROUP_NAMES((short)5, "group_names"), + VALID_TXN_LIST((short)6, "validTxnList"); private static final Map byName = new HashMap(); @@ -114334,6 +115357,8 @@ public static _Fields findByThriftId(int fieldId) { return USER_NAME; case 5: // GROUP_NAMES return GROUP_NAMES; + case 6: // VALID_TXN_LIST + return VALID_TXN_LIST; default: return null; } @@ -114390,6 +115415,8 @@ public String getFieldName() { tmpMap.put(_Fields.GROUP_NAMES, new org.apache.thrift.meta_data.FieldMetaData("group_names", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); + tmpMap.put(_Fields.VALID_TXN_LIST, new org.apache.thrift.meta_data.FieldMetaData("validTxnList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_partitions_with_auth_args.class, metaDataMap); } @@ -114404,7 +115431,8 @@ public get_partitions_with_auth_args( String tbl_name, short max_parts, String user_name, - List group_names) + List group_names, + String validTxnList) { this(); this.db_name = db_name; @@ -114413,6 +115441,7 @@ public get_partitions_with_auth_args( setMax_partsIsSet(true); this.user_name = user_name; this.group_names = group_names; + this.validTxnList = validTxnList; } /** @@ -114434,6 +115463,9 @@ public get_partitions_with_auth_args(get_partitions_with_auth_args other) { List __this__group_names = new ArrayList(other.group_names); this.group_names = __this__group_names; } + if (other.isSetValidTxnList()) { + this.validTxnList = other.validTxnList; + } } public get_partitions_with_auth_args deepCopy() { @@ -114448,6 +115480,7 @@ public void clear() { this.user_name = null; this.group_names = null; + this.validTxnList = null; } public String getDb_name() { @@ -114579,6 +115612,29 @@ public void setGroup_namesIsSet(boolean value) { } } + public String getValidTxnList() { + return this.validTxnList; + } + + public void setValidTxnList(String validTxnList) { + this.validTxnList = validTxnList; + } + + public void unsetValidTxnList() { + this.validTxnList = null; + } + + /** Returns true if field validTxnList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidTxnList() { + return this.validTxnList != null; + } + + public void setValidTxnListIsSet(boolean value) { + if (!value) { + this.validTxnList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -114621,6 +115677,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case 
VALID_TXN_LIST: + if (value == null) { + unsetValidTxnList(); + } else { + setValidTxnList((String)value); + } + break; + } } @@ -114641,6 +115705,9 @@ public Object getFieldValue(_Fields field) { case GROUP_NAMES: return getGroup_names(); + case VALID_TXN_LIST: + return getValidTxnList(); + } throw new IllegalStateException(); } @@ -114662,6 +115729,8 @@ public boolean isSet(_Fields field) { return isSetUser_name(); case GROUP_NAMES: return isSetGroup_names(); + case VALID_TXN_LIST: + return isSetValidTxnList(); } throw new IllegalStateException(); } @@ -114724,6 +115793,15 @@ public boolean equals(get_partitions_with_auth_args that) { return false; } + boolean this_present_validTxnList = true && this.isSetValidTxnList(); + boolean that_present_validTxnList = true && that.isSetValidTxnList(); + if (this_present_validTxnList || that_present_validTxnList) { + if (!(this_present_validTxnList && that_present_validTxnList)) + return false; + if (!this.validTxnList.equals(that.validTxnList)) + return false; + } + return true; } @@ -114756,6 +115834,11 @@ public int hashCode() { if (present_group_names) list.add(group_names); + boolean present_validTxnList = true && (isSetValidTxnList()); + list.add(present_validTxnList); + if (present_validTxnList) + list.add(validTxnList); + return list.hashCode(); } @@ -114817,6 +115900,16 @@ public int compareTo(get_partitions_with_auth_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidTxnList()).compareTo(other.isSetValidTxnList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidTxnList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validTxnList, other.validTxnList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -114872,6 +115965,14 @@ public String toString() { sb.append(this.group_names); } first = false; + if (!first) sb.append(", "); + sb.append("validTxnList:"); + if (this.validTxnList == null) { + sb.append("null"); + } else { + sb.append(this.validTxnList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -114952,13 +116053,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_with case 5: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1470 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1470.size); - String _elem1471; - for (int _i1472 = 0; _i1472 < _list1470.size; ++_i1472) + org.apache.thrift.protocol.TList _list1478 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1478.size); + String _elem1479; + for (int _i1480 = 0; _i1480 < _list1478.size; ++_i1480) { - _elem1471 = iprot.readString(); - struct.group_names.add(_elem1471); + _elem1479 = iprot.readString(); + struct.group_names.add(_elem1479); } iprot.readListEnd(); } @@ -114967,6 +116068,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_with org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 6: // VALID_TXN_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validTxnList = iprot.readString(); + struct.setValidTxnListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -115002,14 +116111,19 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_wit 
oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1473 : struct.group_names) + for (String _iter1481 : struct.group_names) { - oprot.writeString(_iter1473); + oprot.writeString(_iter1481); } oprot.writeListEnd(); } oprot.writeFieldEnd(); } + if (struct.validTxnList != null) { + oprot.writeFieldBegin(VALID_TXN_LIST_FIELD_DESC); + oprot.writeString(struct.validTxnList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -115043,7 +116157,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_with if (struct.isSetGroup_names()) { optionals.set(4); } - oprot.writeBitSet(optionals, 5); + if (struct.isSetValidTxnList()) { + optionals.set(5); + } + oprot.writeBitSet(optionals, 6); if (struct.isSetDb_name()) { oprot.writeString(struct.db_name); } @@ -115059,18 +116176,21 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_with if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1474 : struct.group_names) + for (String _iter1482 : struct.group_names) { - oprot.writeString(_iter1474); + oprot.writeString(_iter1482); } } } + if (struct.isSetValidTxnList()) { + oprot.writeString(struct.validTxnList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_with_auth_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(5); + BitSet incoming = iprot.readBitSet(6); if (incoming.get(0)) { struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); @@ -115089,17 +116209,21 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_with_ } if (incoming.get(4)) { { - org.apache.thrift.protocol.TList _list1475 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1475.size); - String _elem1476; - for (int _i1477 = 0; _i1477 < _list1475.size; ++_i1477) + org.apache.thrift.protocol.TList _list1483 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1483.size); + String _elem1484; + for (int _i1485 = 0; _i1485 < _list1483.size; ++_i1485) { - _elem1476 = iprot.readString(); - struct.group_names.add(_elem1476); + _elem1484 = iprot.readString(); + struct.group_names.add(_elem1484); } } struct.setGroup_namesIsSet(true); } + if (incoming.get(5)) { + struct.validTxnList = iprot.readString(); + struct.setValidTxnListIsSet(true); + } } } @@ -115582,14 +116706,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_with case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1478 = iprot.readListBegin(); - struct.success = new ArrayList(_list1478.size); - Partition _elem1479; - for (int _i1480 = 0; _i1480 < _list1478.size; ++_i1480) + org.apache.thrift.protocol.TList _list1486 = iprot.readListBegin(); + struct.success = new ArrayList(_list1486.size); + Partition _elem1487; + for (int _i1488 = 0; _i1488 < _list1486.size; ++_i1488) { - _elem1479 = new Partition(); - _elem1479.read(iprot); - struct.success.add(_elem1479); + _elem1487 = new Partition(); + _elem1487.read(iprot); + struct.success.add(_elem1487); } iprot.readListEnd(); } @@ -115633,9 
+116757,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_wit oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1481 : struct.success) + for (Partition _iter1489 : struct.success) { - _iter1481.write(oprot); + _iter1489.write(oprot); } oprot.writeListEnd(); } @@ -115682,9 +116806,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_with if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1482 : struct.success) + for (Partition _iter1490 : struct.success) { - _iter1482.write(oprot); + _iter1490.write(oprot); } } } @@ -115702,14 +116826,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_with_ BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1483 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1483.size); - Partition _elem1484; - for (int _i1485 = 0; _i1485 < _list1483.size; ++_i1485) + org.apache.thrift.protocol.TList _list1491 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1491.size); + Partition _elem1492; + for (int _i1493 = 0; _i1493 < _list1491.size; ++_i1493) { - _elem1484 = new Partition(); - _elem1484.read(iprot); - struct.success.add(_elem1484); + _elem1492 = new Partition(); + _elem1492.read(iprot); + struct.success.add(_elem1492); } } struct.setSuccessIsSet(true); @@ -115735,6 +116859,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_with_ private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField MAX_PARTS_FIELD_DESC = new org.apache.thrift.protocol.TField("max_parts", org.apache.thrift.protocol.TType.I32, (short)3); + private static final org.apache.thrift.protocol.TField VALID_TXN_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validTxnList", org.apache.thrift.protocol.TType.STRING, (short)4); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -115745,12 +116870,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_with_ private String db_name; // required private String tbl_name; // required private int max_parts; // required + private String validTxnList; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "db_name"), TBL_NAME((short)2, "tbl_name"), - MAX_PARTS((short)3, "max_parts"); + MAX_PARTS((short)3, "max_parts"), + VALID_TXN_LIST((short)4, "validTxnList"); private static final Map byName = new HashMap(); @@ -115771,6 +116898,8 @@ public static _Fields findByThriftId(int fieldId) { return TBL_NAME; case 3: // MAX_PARTS return MAX_PARTS; + case 4: // VALID_TXN_LIST + return VALID_TXN_LIST; default: return null; } @@ -115822,6 +116951,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.MAX_PARTS, new org.apache.thrift.meta_data.FieldMetaData("max_parts", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); + tmpMap.put(_Fields.VALID_TXN_LIST, new org.apache.thrift.meta_data.FieldMetaData("validTxnList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_partitions_pspec_args.class, metaDataMap); } @@ -115834,13 +116965,15 @@ public get_partitions_pspec_args() { public get_partitions_pspec_args( String db_name, String tbl_name, - int max_parts) + int max_parts, + String validTxnList) { this(); this.db_name = db_name; this.tbl_name = tbl_name; this.max_parts = max_parts; setMax_partsIsSet(true); + this.validTxnList = validTxnList; } /** @@ -115855,6 +116988,9 @@ public get_partitions_pspec_args(get_partitions_pspec_args other) { this.tbl_name = other.tbl_name; } this.max_parts = other.max_parts; + if (other.isSetValidTxnList()) { + this.validTxnList = other.validTxnList; + } } public get_partitions_pspec_args deepCopy() { @@ -115867,6 +117003,7 @@ public void clear() { this.tbl_name = null; this.max_parts = -1; + this.validTxnList = null; } public String getDb_name() { @@ -115937,6 +117074,29 @@ public void setMax_partsIsSet(boolean value) { __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MAX_PARTS_ISSET_ID, value); } + public String getValidTxnList() { + return this.validTxnList; + } + + public void setValidTxnList(String validTxnList) { + this.validTxnList = validTxnList; + } + + public void unsetValidTxnList() { + this.validTxnList = null; + } + + /** Returns true if field validTxnList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidTxnList() { + return this.validTxnList != null; + } + + public void setValidTxnListIsSet(boolean value) { + if (!value) { + this.validTxnList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -115963,6 +117123,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_TXN_LIST: + if (value == null) { + unsetValidTxnList(); + } else { + setValidTxnList((String)value); + } + break; + } } @@ -115977,6 +117145,9 @@ public Object getFieldValue(_Fields field) { case MAX_PARTS: return getMax_parts(); + case VALID_TXN_LIST: + return getValidTxnList(); + } throw new IllegalStateException(); } @@ -115994,6 +117165,8 @@ public boolean isSet(_Fields field) { return isSetTbl_name(); case MAX_PARTS: return isSetMax_parts(); + case VALID_TXN_LIST: + return isSetValidTxnList(); } throw new IllegalStateException(); } @@ -116038,6 +117211,15 @@ public boolean 
equals(get_partitions_pspec_args that) { return false; } + boolean this_present_validTxnList = true && this.isSetValidTxnList(); + boolean that_present_validTxnList = true && that.isSetValidTxnList(); + if (this_present_validTxnList || that_present_validTxnList) { + if (!(this_present_validTxnList && that_present_validTxnList)) + return false; + if (!this.validTxnList.equals(that.validTxnList)) + return false; + } + return true; } @@ -116060,6 +117242,11 @@ public int hashCode() { if (present_max_parts) list.add(max_parts); + boolean present_validTxnList = true && (isSetValidTxnList()); + list.add(present_validTxnList); + if (present_validTxnList) + list.add(validTxnList); + return list.hashCode(); } @@ -116101,6 +117288,16 @@ public int compareTo(get_partitions_pspec_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidTxnList()).compareTo(other.isSetValidTxnList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidTxnList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validTxnList, other.validTxnList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -116140,6 +117337,14 @@ public String toString() { sb.append("max_parts:"); sb.append(this.max_parts); first = false; + if (!first) sb.append(", "); + sb.append("validTxnList:"); + if (this.validTxnList == null) { + sb.append("null"); + } else { + sb.append(this.validTxnList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -116209,6 +117414,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_pspe org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 4: // VALID_TXN_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validTxnList = iprot.readString(); + struct.setValidTxnListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -116235,6 +117448,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_psp oprot.writeFieldBegin(MAX_PARTS_FIELD_DESC); oprot.writeI32(struct.max_parts); oprot.writeFieldEnd(); + if (struct.validTxnList != null) { + oprot.writeFieldBegin(VALID_TXN_LIST_FIELD_DESC); + oprot.writeString(struct.validTxnList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -116262,7 +117480,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_pspe if (struct.isSetMax_parts()) { optionals.set(2); } - oprot.writeBitSet(optionals, 3); + if (struct.isSetValidTxnList()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); if (struct.isSetDb_name()) { oprot.writeString(struct.db_name); } @@ -116272,12 +117493,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_pspe if (struct.isSetMax_parts()) { oprot.writeI32(struct.max_parts); } + if (struct.isSetValidTxnList()) { + oprot.writeString(struct.validTxnList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_pspec_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(3); + BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); @@ -116290,6 +117514,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, 
get_partitions_pspec struct.max_parts = iprot.readI32(); struct.setMax_partsIsSet(true); } + if (incoming.get(3)) { + struct.validTxnList = iprot.readString(); + struct.setValidTxnListIsSet(true); + } } } @@ -116772,14 +118000,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_pspe case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1486 = iprot.readListBegin(); - struct.success = new ArrayList(_list1486.size); - PartitionSpec _elem1487; - for (int _i1488 = 0; _i1488 < _list1486.size; ++_i1488) + org.apache.thrift.protocol.TList _list1494 = iprot.readListBegin(); + struct.success = new ArrayList(_list1494.size); + PartitionSpec _elem1495; + for (int _i1496 = 0; _i1496 < _list1494.size; ++_i1496) { - _elem1487 = new PartitionSpec(); - _elem1487.read(iprot); - struct.success.add(_elem1487); + _elem1495 = new PartitionSpec(); + _elem1495.read(iprot); + struct.success.add(_elem1495); } iprot.readListEnd(); } @@ -116823,9 +118051,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_psp oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (PartitionSpec _iter1489 : struct.success) + for (PartitionSpec _iter1497 : struct.success) { - _iter1489.write(oprot); + _iter1497.write(oprot); } oprot.writeListEnd(); } @@ -116872,9 +118100,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_pspe if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (PartitionSpec _iter1490 : struct.success) + for (PartitionSpec _iter1498 : struct.success) { - _iter1490.write(oprot); + _iter1498.write(oprot); } } } @@ -116892,14 +118120,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_pspec BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1491 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1491.size); - PartitionSpec _elem1492; - for (int _i1493 = 0; _i1493 < _list1491.size; ++_i1493) + org.apache.thrift.protocol.TList _list1499 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1499.size); + PartitionSpec _elem1500; + for (int _i1501 = 0; _i1501 < _list1499.size; ++_i1501) { - _elem1492 = new PartitionSpec(); - _elem1492.read(iprot); - struct.success.add(_elem1492); + _elem1500 = new PartitionSpec(); + _elem1500.read(iprot); + struct.success.add(_elem1500); } } struct.setSuccessIsSet(true); @@ -116925,6 +118153,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_pspec private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField MAX_PARTS_FIELD_DESC = new org.apache.thrift.protocol.TField("max_parts", org.apache.thrift.protocol.TType.I16, (short)3); + private static final org.apache.thrift.protocol.TField VALID_TXN_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validTxnList", 
org.apache.thrift.protocol.TType.STRING, (short)4); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -116935,12 +118164,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_pspec private String db_name; // required private String tbl_name; // required private short max_parts; // required + private String validTxnList; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "db_name"), TBL_NAME((short)2, "tbl_name"), - MAX_PARTS((short)3, "max_parts"); + MAX_PARTS((short)3, "max_parts"), + VALID_TXN_LIST((short)4, "validTxnList"); private static final Map byName = new HashMap(); @@ -116961,6 +118192,8 @@ public static _Fields findByThriftId(int fieldId) { return TBL_NAME; case 3: // MAX_PARTS return MAX_PARTS; + case 4: // VALID_TXN_LIST + return VALID_TXN_LIST; default: return null; } @@ -117012,6 +118245,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.MAX_PARTS, new org.apache.thrift.meta_data.FieldMetaData("max_parts", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I16))); + tmpMap.put(_Fields.VALID_TXN_LIST, new org.apache.thrift.meta_data.FieldMetaData("validTxnList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_partition_names_args.class, metaDataMap); } @@ -117024,13 +118259,15 @@ public get_partition_names_args() { public get_partition_names_args( String db_name, String tbl_name, - short max_parts) + short max_parts, + String validTxnList) { this(); this.db_name = db_name; this.tbl_name = tbl_name; this.max_parts = max_parts; setMax_partsIsSet(true); + this.validTxnList = validTxnList; } /** @@ -117045,6 +118282,9 @@ public get_partition_names_args(get_partition_names_args other) { this.tbl_name = other.tbl_name; } this.max_parts = other.max_parts; + if (other.isSetValidTxnList()) { + this.validTxnList = other.validTxnList; + } } public get_partition_names_args deepCopy() { @@ -117057,6 +118297,7 @@ public void clear() { this.tbl_name = null; this.max_parts = (short)-1; + this.validTxnList = null; } public String getDb_name() { @@ -117127,6 +118368,29 @@ public void setMax_partsIsSet(boolean value) { __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MAX_PARTS_ISSET_ID, value); } + public String getValidTxnList() { + return this.validTxnList; + } + + public void setValidTxnList(String validTxnList) { + this.validTxnList = validTxnList; + } + + public void unsetValidTxnList() { + this.validTxnList = null; + } + + /** Returns true if field validTxnList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidTxnList() { + return this.validTxnList != null; + } + + public void setValidTxnListIsSet(boolean value) { + if (!value) { + this.validTxnList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -117153,6 +118417,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_TXN_LIST: + if (value == null) { + unsetValidTxnList(); + } else { + 
setValidTxnList((String)value); + } + break; + } } @@ -117167,6 +118439,9 @@ public Object getFieldValue(_Fields field) { case MAX_PARTS: return getMax_parts(); + case VALID_TXN_LIST: + return getValidTxnList(); + } throw new IllegalStateException(); } @@ -117184,6 +118459,8 @@ public boolean isSet(_Fields field) { return isSetTbl_name(); case MAX_PARTS: return isSetMax_parts(); + case VALID_TXN_LIST: + return isSetValidTxnList(); } throw new IllegalStateException(); } @@ -117228,6 +118505,15 @@ public boolean equals(get_partition_names_args that) { return false; } + boolean this_present_validTxnList = true && this.isSetValidTxnList(); + boolean that_present_validTxnList = true && that.isSetValidTxnList(); + if (this_present_validTxnList || that_present_validTxnList) { + if (!(this_present_validTxnList && that_present_validTxnList)) + return false; + if (!this.validTxnList.equals(that.validTxnList)) + return false; + } + return true; } @@ -117250,6 +118536,11 @@ public int hashCode() { if (present_max_parts) list.add(max_parts); + boolean present_validTxnList = true && (isSetValidTxnList()); + list.add(present_validTxnList); + if (present_validTxnList) + list.add(validTxnList); + return list.hashCode(); } @@ -117291,6 +118582,16 @@ public int compareTo(get_partition_names_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidTxnList()).compareTo(other.isSetValidTxnList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidTxnList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validTxnList, other.validTxnList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -117330,6 +118631,14 @@ public String toString() { sb.append("max_parts:"); sb.append(this.max_parts); first = false; + if (!first) sb.append(", "); + sb.append("validTxnList:"); + if (this.validTxnList == null) { + sb.append("null"); + } else { + sb.append(this.validTxnList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -117399,6 +118708,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 4: // VALID_TXN_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validTxnList = iprot.readString(); + struct.setValidTxnListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -117425,6 +118742,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(MAX_PARTS_FIELD_DESC); oprot.writeI16(struct.max_parts); oprot.writeFieldEnd(); + if (struct.validTxnList != null) { + oprot.writeFieldBegin(VALID_TXN_LIST_FIELD_DESC); + oprot.writeString(struct.validTxnList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -117452,7 +118774,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetMax_parts()) { optionals.set(2); } - oprot.writeBitSet(optionals, 3); + if (struct.isSetValidTxnList()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); if (struct.isSetDb_name()) { oprot.writeString(struct.db_name); } @@ -117462,12 +118787,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetMax_parts()) { oprot.writeI16(struct.max_parts); } + if 
(struct.isSetValidTxnList()) { + oprot.writeString(struct.validTxnList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(3); + BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); @@ -117480,6 +118808,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ struct.max_parts = iprot.readI16(); struct.setMax_partsIsSet(true); } + if (incoming.get(3)) { + struct.validTxnList = iprot.readString(); + struct.setValidTxnListIsSet(true); + } } } @@ -117959,13 +119291,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1494 = iprot.readListBegin(); - struct.success = new ArrayList(_list1494.size); - String _elem1495; - for (int _i1496 = 0; _i1496 < _list1494.size; ++_i1496) + org.apache.thrift.protocol.TList _list1502 = iprot.readListBegin(); + struct.success = new ArrayList(_list1502.size); + String _elem1503; + for (int _i1504 = 0; _i1504 < _list1502.size; ++_i1504) { - _elem1495 = iprot.readString(); - struct.success.add(_elem1495); + _elem1503 = iprot.readString(); + struct.success.add(_elem1503); } iprot.readListEnd(); } @@ -118009,9 +119341,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1497 : struct.success) + for (String _iter1505 : struct.success) { - oprot.writeString(_iter1497); + oprot.writeString(_iter1505); } oprot.writeListEnd(); } @@ -118058,9 +119390,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1498 : struct.success) + for (String _iter1506 : struct.success) { - oprot.writeString(_iter1498); + oprot.writeString(_iter1506); } } } @@ -118078,13 +119410,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1499 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1499.size); - String _elem1500; - for (int _i1501 = 0; _i1501 < _list1499.size; ++_i1501) + org.apache.thrift.protocol.TList _list1507 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1507.size); + String _elem1508; + for (int _i1509 = 0; _i1509 < _list1507.size; ++_i1509) { - _elem1500 = iprot.readString(); - struct.success.add(_elem1500); + _elem1508 = iprot.readString(); + struct.success.add(_elem1508); } } struct.setSuccessIsSet(true); @@ -119049,6 +120381,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_values private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField PART_VALS_FIELD_DESC = new 
org.apache.thrift.protocol.TField("part_vals", org.apache.thrift.protocol.TType.LIST, (short)3); private static final org.apache.thrift.protocol.TField MAX_PARTS_FIELD_DESC = new org.apache.thrift.protocol.TField("max_parts", org.apache.thrift.protocol.TType.I16, (short)4); + private static final org.apache.thrift.protocol.TField VALID_TXN_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validTxnList", org.apache.thrift.protocol.TType.STRING, (short)5); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -119060,13 +120393,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_values private String tbl_name; // required private List part_vals; // required private short max_parts; // required + private String validTxnList; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "db_name"), TBL_NAME((short)2, "tbl_name"), PART_VALS((short)3, "part_vals"), - MAX_PARTS((short)4, "max_parts"); + MAX_PARTS((short)4, "max_parts"), + VALID_TXN_LIST((short)5, "validTxnList"); private static final Map byName = new HashMap(); @@ -119089,6 +120424,8 @@ public static _Fields findByThriftId(int fieldId) { return PART_VALS; case 4: // MAX_PARTS return MAX_PARTS; + case 5: // VALID_TXN_LIST + return VALID_TXN_LIST; default: return null; } @@ -119143,6 +120480,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); tmpMap.put(_Fields.MAX_PARTS, new org.apache.thrift.meta_data.FieldMetaData("max_parts", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I16))); + tmpMap.put(_Fields.VALID_TXN_LIST, new org.apache.thrift.meta_data.FieldMetaData("validTxnList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_partitions_ps_args.class, metaDataMap); } @@ -119156,7 +120495,8 @@ public get_partitions_ps_args( String db_name, String tbl_name, List part_vals, - short max_parts) + short max_parts, + String validTxnList) { this(); this.db_name = db_name; @@ -119164,6 +120504,7 @@ public get_partitions_ps_args( this.part_vals = part_vals; this.max_parts = max_parts; setMax_partsIsSet(true); + this.validTxnList = validTxnList; } /** @@ -119182,6 +120523,9 @@ public get_partitions_ps_args(get_partitions_ps_args other) { this.part_vals = __this__part_vals; } this.max_parts = other.max_parts; + if (other.isSetValidTxnList()) { + this.validTxnList = other.validTxnList; + } } public get_partitions_ps_args deepCopy() { @@ -119195,6 +120539,7 @@ public void clear() { this.part_vals = null; this.max_parts = (short)-1; + this.validTxnList = null; } public String getDb_name() { @@ -119303,6 +120648,29 @@ public void setMax_partsIsSet(boolean value) { __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MAX_PARTS_ISSET_ID, value); } + public String getValidTxnList() { + return this.validTxnList; + } + + public void setValidTxnList(String validTxnList) { + this.validTxnList = validTxnList; + } + + public void unsetValidTxnList() { + this.validTxnList = null; + } + + /** Returns true if field validTxnList is set (has been assigned a 
value) and false otherwise */ + public boolean isSetValidTxnList() { + return this.validTxnList != null; + } + + public void setValidTxnListIsSet(boolean value) { + if (!value) { + this.validTxnList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -119337,6 +120705,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_TXN_LIST: + if (value == null) { + unsetValidTxnList(); + } else { + setValidTxnList((String)value); + } + break; + } } @@ -119354,6 +120730,9 @@ public Object getFieldValue(_Fields field) { case MAX_PARTS: return getMax_parts(); + case VALID_TXN_LIST: + return getValidTxnList(); + } throw new IllegalStateException(); } @@ -119373,6 +120752,8 @@ public boolean isSet(_Fields field) { return isSetPart_vals(); case MAX_PARTS: return isSetMax_parts(); + case VALID_TXN_LIST: + return isSetValidTxnList(); } throw new IllegalStateException(); } @@ -119426,6 +120807,15 @@ public boolean equals(get_partitions_ps_args that) { return false; } + boolean this_present_validTxnList = true && this.isSetValidTxnList(); + boolean that_present_validTxnList = true && that.isSetValidTxnList(); + if (this_present_validTxnList || that_present_validTxnList) { + if (!(this_present_validTxnList && that_present_validTxnList)) + return false; + if (!this.validTxnList.equals(that.validTxnList)) + return false; + } + return true; } @@ -119453,6 +120843,11 @@ public int hashCode() { if (present_max_parts) list.add(max_parts); + boolean present_validTxnList = true && (isSetValidTxnList()); + list.add(present_validTxnList); + if (present_validTxnList) + list.add(validTxnList); + return list.hashCode(); } @@ -119504,6 +120899,16 @@ public int compareTo(get_partitions_ps_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidTxnList()).compareTo(other.isSetValidTxnList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidTxnList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validTxnList, other.validTxnList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -119551,6 +120956,14 @@ public String toString() { sb.append("max_parts:"); sb.append(this.max_parts); first = false; + if (!first) sb.append(", "); + sb.append("validTxnList:"); + if (this.validTxnList == null) { + sb.append("null"); + } else { + sb.append(this.validTxnList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -119615,13 +121028,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_a case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1502 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1502.size); - String _elem1503; - for (int _i1504 = 0; _i1504 < _list1502.size; ++_i1504) + org.apache.thrift.protocol.TList _list1510 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1510.size); + String _elem1511; + for (int _i1512 = 0; _i1512 < _list1510.size; ++_i1512) { - _elem1503 = iprot.readString(); - struct.part_vals.add(_elem1503); + _elem1511 = iprot.readString(); + struct.part_vals.add(_elem1511); } iprot.readListEnd(); } @@ -119638,6 +121051,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_a org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 5: // VALID_TXN_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { 
+ struct.validTxnList = iprot.readString(); + struct.setValidTxnListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -119665,9 +121086,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1505 : struct.part_vals) + for (String _iter1513 : struct.part_vals) { - oprot.writeString(_iter1505); + oprot.writeString(_iter1513); } oprot.writeListEnd(); } @@ -119676,6 +121097,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(MAX_PARTS_FIELD_DESC); oprot.writeI16(struct.max_parts); oprot.writeFieldEnd(); + if (struct.validTxnList != null) { + oprot.writeFieldBegin(VALID_TXN_LIST_FIELD_DESC); + oprot.writeString(struct.validTxnList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -119706,7 +121132,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_a if (struct.isSetMax_parts()) { optionals.set(3); } - oprot.writeBitSet(optionals, 4); + if (struct.isSetValidTxnList()) { + optionals.set(4); + } + oprot.writeBitSet(optionals, 5); if (struct.isSetDb_name()) { oprot.writeString(struct.db_name); } @@ -119716,21 +121145,24 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_a if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1506 : struct.part_vals) + for (String _iter1514 : struct.part_vals) { - oprot.writeString(_iter1506); + oprot.writeString(_iter1514); } } } if (struct.isSetMax_parts()) { oprot.writeI16(struct.max_parts); } + if (struct.isSetValidTxnList()) { + oprot.writeString(struct.validTxnList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(4); + BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); @@ -119741,13 +121173,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_ar } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1507 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1507.size); - String _elem1508; - for (int _i1509 = 0; _i1509 < _list1507.size; ++_i1509) + org.apache.thrift.protocol.TList _list1515 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1515.size); + String _elem1516; + for (int _i1517 = 0; _i1517 < _list1515.size; ++_i1517) { - _elem1508 = iprot.readString(); - struct.part_vals.add(_elem1508); + _elem1516 = iprot.readString(); + struct.part_vals.add(_elem1516); } } struct.setPart_valsIsSet(true); @@ -119756,6 +121188,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_ar struct.max_parts = iprot.readI16(); struct.setMax_partsIsSet(true); } + if (incoming.get(4)) { + struct.validTxnList = iprot.readString(); + struct.setValidTxnListIsSet(true); + } } } @@ -120238,14 +121674,14 @@ public void 
read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_r case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1510 = iprot.readListBegin(); - struct.success = new ArrayList(_list1510.size); - Partition _elem1511; - for (int _i1512 = 0; _i1512 < _list1510.size; ++_i1512) + org.apache.thrift.protocol.TList _list1518 = iprot.readListBegin(); + struct.success = new ArrayList(_list1518.size); + Partition _elem1519; + for (int _i1520 = 0; _i1520 < _list1518.size; ++_i1520) { - _elem1511 = new Partition(); - _elem1511.read(iprot); - struct.success.add(_elem1511); + _elem1519 = new Partition(); + _elem1519.read(iprot); + struct.success.add(_elem1519); } iprot.readListEnd(); } @@ -120289,9 +121725,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1513 : struct.success) + for (Partition _iter1521 : struct.success) { - _iter1513.write(oprot); + _iter1521.write(oprot); } oprot.writeListEnd(); } @@ -120338,9 +121774,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_r if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1514 : struct.success) + for (Partition _iter1522 : struct.success) { - _iter1514.write(oprot); + _iter1522.write(oprot); } } } @@ -120358,14 +121794,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_re BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1515 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1515.size); - Partition _elem1516; - for (int _i1517 = 0; _i1517 < _list1515.size; ++_i1517) + org.apache.thrift.protocol.TList _list1523 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1523.size); + Partition _elem1524; + for (int _i1525 = 0; _i1525 < _list1523.size; ++_i1525) { - _elem1516 = new Partition(); - _elem1516.read(iprot); - struct.success.add(_elem1516); + _elem1524 = new Partition(); + _elem1524.read(iprot); + struct.success.add(_elem1524); } } struct.setSuccessIsSet(true); @@ -120394,6 +121830,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_re private static final org.apache.thrift.protocol.TField MAX_PARTS_FIELD_DESC = new org.apache.thrift.protocol.TField("max_parts", org.apache.thrift.protocol.TType.I16, (short)4); private static final org.apache.thrift.protocol.TField USER_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("user_name", org.apache.thrift.protocol.TType.STRING, (short)5); private static final org.apache.thrift.protocol.TField GROUP_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("group_names", org.apache.thrift.protocol.TType.LIST, (short)6); + private static final org.apache.thrift.protocol.TField VALID_TXN_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validTxnList", org.apache.thrift.protocol.TType.STRING, (short)7); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -120407,6 +121844,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_re private short max_parts; // required private 
String user_name; // required private List group_names; // required + private String validTxnList; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -120415,7 +121853,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_re PART_VALS((short)3, "part_vals"), MAX_PARTS((short)4, "max_parts"), USER_NAME((short)5, "user_name"), - GROUP_NAMES((short)6, "group_names"); + GROUP_NAMES((short)6, "group_names"), + VALID_TXN_LIST((short)7, "validTxnList"); private static final Map byName = new HashMap(); @@ -120442,6 +121881,8 @@ public static _Fields findByThriftId(int fieldId) { return USER_NAME; case 6: // GROUP_NAMES return GROUP_NAMES; + case 7: // VALID_TXN_LIST + return VALID_TXN_LIST; default: return null; } @@ -120501,6 +121942,8 @@ public String getFieldName() { tmpMap.put(_Fields.GROUP_NAMES, new org.apache.thrift.meta_data.FieldMetaData("group_names", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); + tmpMap.put(_Fields.VALID_TXN_LIST, new org.apache.thrift.meta_data.FieldMetaData("validTxnList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_partitions_ps_with_auth_args.class, metaDataMap); } @@ -120516,7 +121959,8 @@ public get_partitions_ps_with_auth_args( List part_vals, short max_parts, String user_name, - List group_names) + List group_names, + String validTxnList) { this(); this.db_name = db_name; @@ -120526,6 +121970,7 @@ public get_partitions_ps_with_auth_args( setMax_partsIsSet(true); this.user_name = user_name; this.group_names = group_names; + this.validTxnList = validTxnList; } /** @@ -120551,6 +121996,9 @@ public get_partitions_ps_with_auth_args(get_partitions_ps_with_auth_args other) List __this__group_names = new ArrayList(other.group_names); this.group_names = __this__group_names; } + if (other.isSetValidTxnList()) { + this.validTxnList = other.validTxnList; + } } public get_partitions_ps_with_auth_args deepCopy() { @@ -120566,6 +122014,7 @@ public void clear() { this.user_name = null; this.group_names = null; + this.validTxnList = null; } public String getDb_name() { @@ -120735,6 +122184,29 @@ public void setGroup_namesIsSet(boolean value) { } } + public String getValidTxnList() { + return this.validTxnList; + } + + public void setValidTxnList(String validTxnList) { + this.validTxnList = validTxnList; + } + + public void unsetValidTxnList() { + this.validTxnList = null; + } + + /** Returns true if field validTxnList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidTxnList() { + return this.validTxnList != null; + } + + public void setValidTxnListIsSet(boolean value) { + if (!value) { + this.validTxnList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -120785,6 +122257,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_TXN_LIST: + if (value == null) { + unsetValidTxnList(); + } else { + setValidTxnList((String)value); + } + break; + } } @@ -120808,6 +122288,9 @@ public Object 
getFieldValue(_Fields field) { case GROUP_NAMES: return getGroup_names(); + case VALID_TXN_LIST: + return getValidTxnList(); + } throw new IllegalStateException(); } @@ -120831,6 +122314,8 @@ public boolean isSet(_Fields field) { return isSetUser_name(); case GROUP_NAMES: return isSetGroup_names(); + case VALID_TXN_LIST: + return isSetValidTxnList(); } throw new IllegalStateException(); } @@ -120902,6 +122387,15 @@ public boolean equals(get_partitions_ps_with_auth_args that) { return false; } + boolean this_present_validTxnList = true && this.isSetValidTxnList(); + boolean that_present_validTxnList = true && that.isSetValidTxnList(); + if (this_present_validTxnList || that_present_validTxnList) { + if (!(this_present_validTxnList && that_present_validTxnList)) + return false; + if (!this.validTxnList.equals(that.validTxnList)) + return false; + } + return true; } @@ -120939,6 +122433,11 @@ public int hashCode() { if (present_group_names) list.add(group_names); + boolean present_validTxnList = true && (isSetValidTxnList()); + list.add(present_validTxnList); + if (present_validTxnList) + list.add(validTxnList); + return list.hashCode(); } @@ -121010,6 +122509,16 @@ public int compareTo(get_partitions_ps_with_auth_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidTxnList()).compareTo(other.isSetValidTxnList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidTxnList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validTxnList, other.validTxnList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -121073,6 +122582,14 @@ public String toString() { sb.append(this.group_names); } first = false; + if (!first) sb.append(", "); + sb.append("validTxnList:"); + if (this.validTxnList == null) { + sb.append("null"); + } else { + sb.append(this.validTxnList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -121137,13 +122654,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1518 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1518.size); - String _elem1519; - for (int _i1520 = 0; _i1520 < _list1518.size; ++_i1520) + org.apache.thrift.protocol.TList _list1526 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1526.size); + String _elem1527; + for (int _i1528 = 0; _i1528 < _list1526.size; ++_i1528) { - _elem1519 = iprot.readString(); - struct.part_vals.add(_elem1519); + _elem1527 = iprot.readString(); + struct.part_vals.add(_elem1527); } iprot.readListEnd(); } @@ -121171,13 +122688,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w case 6: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1521 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1521.size); - String _elem1522; - for (int _i1523 = 0; _i1523 < _list1521.size; ++_i1523) + org.apache.thrift.protocol.TList _list1529 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1529.size); + String _elem1530; + for (int _i1531 = 0; _i1531 < _list1529.size; ++_i1531) { - _elem1522 = iprot.readString(); - struct.group_names.add(_elem1522); + _elem1530 = iprot.readString(); + struct.group_names.add(_elem1530); } iprot.readListEnd(); } @@ -121186,6 +122703,14 @@ public void 
read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 7: // VALID_TXN_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validTxnList = iprot.readString(); + struct.setValidTxnListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -121213,9 +122738,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1524 : struct.part_vals) + for (String _iter1532 : struct.part_vals) { - oprot.writeString(_iter1524); + oprot.writeString(_iter1532); } oprot.writeListEnd(); } @@ -121233,14 +122758,19 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1525 : struct.group_names) + for (String _iter1533 : struct.group_names) { - oprot.writeString(_iter1525); + oprot.writeString(_iter1533); } oprot.writeListEnd(); } oprot.writeFieldEnd(); } + if (struct.validTxnList != null) { + oprot.writeFieldBegin(VALID_TXN_LIST_FIELD_DESC); + oprot.writeString(struct.validTxnList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -121277,7 +122807,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w if (struct.isSetGroup_names()) { optionals.set(5); } - oprot.writeBitSet(optionals, 6); + if (struct.isSetValidTxnList()) { + optionals.set(6); + } + oprot.writeBitSet(optionals, 7); if (struct.isSetDb_name()) { oprot.writeString(struct.db_name); } @@ -121287,9 +122820,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1526 : struct.part_vals) + for (String _iter1534 : struct.part_vals) { - oprot.writeString(_iter1526); + oprot.writeString(_iter1534); } } } @@ -121302,18 +122835,21 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1527 : struct.group_names) + for (String _iter1535 : struct.group_names) { - oprot.writeString(_iter1527); + oprot.writeString(_iter1535); } } } + if (struct.isSetValidTxnList()) { + oprot.writeString(struct.validTxnList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_with_auth_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(6); + BitSet incoming = iprot.readBitSet(7); if (incoming.get(0)) { struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); @@ -121324,13 +122860,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1528 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1528.size); - String _elem1529; - for (int _i1530 = 0; _i1530 
< _list1528.size; ++_i1530) + org.apache.thrift.protocol.TList _list1536 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1536.size); + String _elem1537; + for (int _i1538 = 0; _i1538 < _list1536.size; ++_i1538) { - _elem1529 = iprot.readString(); - struct.part_vals.add(_elem1529); + _elem1537 = iprot.readString(); + struct.part_vals.add(_elem1537); } } struct.setPart_valsIsSet(true); @@ -121345,17 +122881,21 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi } if (incoming.get(5)) { { - org.apache.thrift.protocol.TList _list1531 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1531.size); - String _elem1532; - for (int _i1533 = 0; _i1533 < _list1531.size; ++_i1533) + org.apache.thrift.protocol.TList _list1539 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1539.size); + String _elem1540; + for (int _i1541 = 0; _i1541 < _list1539.size; ++_i1541) { - _elem1532 = iprot.readString(); - struct.group_names.add(_elem1532); + _elem1540 = iprot.readString(); + struct.group_names.add(_elem1540); } } struct.setGroup_namesIsSet(true); } + if (incoming.get(6)) { + struct.validTxnList = iprot.readString(); + struct.setValidTxnListIsSet(true); + } } } @@ -121838,14 +123378,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1534 = iprot.readListBegin(); - struct.success = new ArrayList(_list1534.size); - Partition _elem1535; - for (int _i1536 = 0; _i1536 < _list1534.size; ++_i1536) + org.apache.thrift.protocol.TList _list1542 = iprot.readListBegin(); + struct.success = new ArrayList(_list1542.size); + Partition _elem1543; + for (int _i1544 = 0; _i1544 < _list1542.size; ++_i1544) { - _elem1535 = new Partition(); - _elem1535.read(iprot); - struct.success.add(_elem1535); + _elem1543 = new Partition(); + _elem1543.read(iprot); + struct.success.add(_elem1543); } iprot.readListEnd(); } @@ -121889,9 +123429,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1537 : struct.success) + for (Partition _iter1545 : struct.success) { - _iter1537.write(oprot); + _iter1545.write(oprot); } oprot.writeListEnd(); } @@ -121938,9 +123478,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1538 : struct.success) + for (Partition _iter1546 : struct.success) { - _iter1538.write(oprot); + _iter1546.write(oprot); } } } @@ -121958,14 +123498,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1539 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1539.size); - Partition _elem1540; - for (int _i1541 = 0; _i1541 < _list1539.size; ++_i1541) + org.apache.thrift.protocol.TList _list1547 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1547.size); + Partition _elem1548; + for (int _i1549 = 0; _i1549 < _list1547.size; ++_i1549) { - _elem1540 = new Partition(); - _elem1540.read(iprot); - struct.success.add(_elem1540); + _elem1548 = new Partition(); + _elem1548.read(iprot); + struct.success.add(_elem1548); } } struct.setSuccessIsSet(true); @@ -121992,6 +123532,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField PART_VALS_FIELD_DESC = new org.apache.thrift.protocol.TField("part_vals", org.apache.thrift.protocol.TType.LIST, (short)3); private static final org.apache.thrift.protocol.TField MAX_PARTS_FIELD_DESC = new org.apache.thrift.protocol.TField("max_parts", org.apache.thrift.protocol.TType.I16, (short)4); + private static final org.apache.thrift.protocol.TField VALID_TXN_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validTxnList", org.apache.thrift.protocol.TType.STRING, (short)5); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -122003,13 +123544,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi private String tbl_name; // required private List part_vals; // required private short max_parts; // required + private String validTxnList; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "db_name"), TBL_NAME((short)2, "tbl_name"), PART_VALS((short)3, "part_vals"), - MAX_PARTS((short)4, "max_parts"); + MAX_PARTS((short)4, "max_parts"), + VALID_TXN_LIST((short)5, "validTxnList"); private static final Map byName = new HashMap(); @@ -122032,6 +123575,8 @@ public static _Fields findByThriftId(int fieldId) { return PART_VALS; case 4: // MAX_PARTS return MAX_PARTS; + case 5: // VALID_TXN_LIST + return VALID_TXN_LIST; default: return null; } @@ -122086,6 +123631,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); tmpMap.put(_Fields.MAX_PARTS, new org.apache.thrift.meta_data.FieldMetaData("max_parts", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I16))); + tmpMap.put(_Fields.VALID_TXN_LIST, new org.apache.thrift.meta_data.FieldMetaData("validTxnList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_partition_names_ps_args.class, metaDataMap); } @@ -122099,7 +123646,8 @@ public get_partition_names_ps_args( String db_name, String tbl_name, List part_vals, - short max_parts) + short max_parts, + String validTxnList) { this(); this.db_name = db_name; @@ -122107,6 +123655,7 @@ public get_partition_names_ps_args( this.part_vals = part_vals; this.max_parts = max_parts; setMax_partsIsSet(true); + this.validTxnList = validTxnList; } /** @@ -122125,6 +123674,9 @@ public get_partition_names_ps_args(get_partition_names_ps_args other) 
{ this.part_vals = __this__part_vals; } this.max_parts = other.max_parts; + if (other.isSetValidTxnList()) { + this.validTxnList = other.validTxnList; + } } public get_partition_names_ps_args deepCopy() { @@ -122138,6 +123690,7 @@ public void clear() { this.part_vals = null; this.max_parts = (short)-1; + this.validTxnList = null; } public String getDb_name() { @@ -122246,6 +123799,29 @@ public void setMax_partsIsSet(boolean value) { __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MAX_PARTS_ISSET_ID, value); } + public String getValidTxnList() { + return this.validTxnList; + } + + public void setValidTxnList(String validTxnList) { + this.validTxnList = validTxnList; + } + + public void unsetValidTxnList() { + this.validTxnList = null; + } + + /** Returns true if field validTxnList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidTxnList() { + return this.validTxnList != null; + } + + public void setValidTxnListIsSet(boolean value) { + if (!value) { + this.validTxnList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -122280,6 +123856,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_TXN_LIST: + if (value == null) { + unsetValidTxnList(); + } else { + setValidTxnList((String)value); + } + break; + } } @@ -122297,6 +123881,9 @@ public Object getFieldValue(_Fields field) { case MAX_PARTS: return getMax_parts(); + case VALID_TXN_LIST: + return getValidTxnList(); + } throw new IllegalStateException(); } @@ -122316,6 +123903,8 @@ public boolean isSet(_Fields field) { return isSetPart_vals(); case MAX_PARTS: return isSetMax_parts(); + case VALID_TXN_LIST: + return isSetValidTxnList(); } throw new IllegalStateException(); } @@ -122369,6 +123958,15 @@ public boolean equals(get_partition_names_ps_args that) { return false; } + boolean this_present_validTxnList = true && this.isSetValidTxnList(); + boolean that_present_validTxnList = true && that.isSetValidTxnList(); + if (this_present_validTxnList || that_present_validTxnList) { + if (!(this_present_validTxnList && that_present_validTxnList)) + return false; + if (!this.validTxnList.equals(that.validTxnList)) + return false; + } + return true; } @@ -122396,6 +123994,11 @@ public int hashCode() { if (present_max_parts) list.add(max_parts); + boolean present_validTxnList = true && (isSetValidTxnList()); + list.add(present_validTxnList); + if (present_validTxnList) + list.add(validTxnList); + return list.hashCode(); } @@ -122447,6 +124050,16 @@ public int compareTo(get_partition_names_ps_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidTxnList()).compareTo(other.isSetValidTxnList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidTxnList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validTxnList, other.validTxnList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -122494,6 +124107,14 @@ public String toString() { sb.append("max_parts:"); sb.append(this.max_parts); first = false; + if (!first) sb.append(", "); + sb.append("validTxnList:"); + if (this.validTxnList == null) { + sb.append("null"); + } else { + sb.append(this.validTxnList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -122558,13 +124179,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - 
org.apache.thrift.protocol.TList _list1542 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1542.size); - String _elem1543; - for (int _i1544 = 0; _i1544 < _list1542.size; ++_i1544) + org.apache.thrift.protocol.TList _list1550 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1550.size); + String _elem1551; + for (int _i1552 = 0; _i1552 < _list1550.size; ++_i1552) { - _elem1543 = iprot.readString(); - struct.part_vals.add(_elem1543); + _elem1551 = iprot.readString(); + struct.part_vals.add(_elem1551); } iprot.readListEnd(); } @@ -122581,6 +124202,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 5: // VALID_TXN_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validTxnList = iprot.readString(); + struct.setValidTxnListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -122608,9 +124237,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1545 : struct.part_vals) + for (String _iter1553 : struct.part_vals) { - oprot.writeString(_iter1545); + oprot.writeString(_iter1553); } oprot.writeListEnd(); } @@ -122619,6 +124248,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(MAX_PARTS_FIELD_DESC); oprot.writeI16(struct.max_parts); oprot.writeFieldEnd(); + if (struct.validTxnList != null) { + oprot.writeFieldBegin(VALID_TXN_LIST_FIELD_DESC); + oprot.writeString(struct.validTxnList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -122649,7 +124283,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetMax_parts()) { optionals.set(3); } - oprot.writeBitSet(optionals, 4); + if (struct.isSetValidTxnList()) { + optionals.set(4); + } + oprot.writeBitSet(optionals, 5); if (struct.isSetDb_name()) { oprot.writeString(struct.db_name); } @@ -122659,21 +124296,24 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1546 : struct.part_vals) + for (String _iter1554 : struct.part_vals) { - oprot.writeString(_iter1546); + oprot.writeString(_iter1554); } } } if (struct.isSetMax_parts()) { oprot.writeI16(struct.max_parts); } + if (struct.isSetValidTxnList()) { + oprot.writeString(struct.validTxnList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ps_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(4); + BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); @@ -122684,13 +124324,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1547 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1547.size); - String _elem1548; - 
for (int _i1549 = 0; _i1549 < _list1547.size; ++_i1549) + org.apache.thrift.protocol.TList _list1555 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1555.size); + String _elem1556; + for (int _i1557 = 0; _i1557 < _list1555.size; ++_i1557) { - _elem1548 = iprot.readString(); - struct.part_vals.add(_elem1548); + _elem1556 = iprot.readString(); + struct.part_vals.add(_elem1556); } } struct.setPart_valsIsSet(true); @@ -122699,6 +124339,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ struct.max_parts = iprot.readI16(); struct.setMax_partsIsSet(true); } + if (incoming.get(4)) { + struct.validTxnList = iprot.readString(); + struct.setValidTxnListIsSet(true); + } } } @@ -123178,13 +124822,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1550 = iprot.readListBegin(); - struct.success = new ArrayList(_list1550.size); - String _elem1551; - for (int _i1552 = 0; _i1552 < _list1550.size; ++_i1552) + org.apache.thrift.protocol.TList _list1558 = iprot.readListBegin(); + struct.success = new ArrayList(_list1558.size); + String _elem1559; + for (int _i1560 = 0; _i1560 < _list1558.size; ++_i1560) { - _elem1551 = iprot.readString(); - struct.success.add(_elem1551); + _elem1559 = iprot.readString(); + struct.success.add(_elem1559); } iprot.readListEnd(); } @@ -123228,9 +124872,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1553 : struct.success) + for (String _iter1561 : struct.success) { - oprot.writeString(_iter1553); + oprot.writeString(_iter1561); } oprot.writeListEnd(); } @@ -123277,9 +124921,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1554 : struct.success) + for (String _iter1562 : struct.success) { - oprot.writeString(_iter1554); + oprot.writeString(_iter1562); } } } @@ -123297,13 +124941,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1555 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1555.size); - String _elem1556; - for (int _i1557 = 0; _i1557 < _list1555.size; ++_i1557) + org.apache.thrift.protocol.TList _list1563 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1563.size); + String _elem1564; + for (int _i1565 = 0; _i1565 < _list1563.size; ++_i1565) { - _elem1556 = iprot.readString(); - struct.success.add(_elem1556); + _elem1564 = iprot.readString(); + struct.success.add(_elem1564); } } struct.setSuccessIsSet(true); @@ -123330,6 +124974,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); private static final 
org.apache.thrift.protocol.TField FILTER_FIELD_DESC = new org.apache.thrift.protocol.TField("filter", org.apache.thrift.protocol.TType.STRING, (short)3); private static final org.apache.thrift.protocol.TField MAX_PARTS_FIELD_DESC = new org.apache.thrift.protocol.TField("max_parts", org.apache.thrift.protocol.TType.I16, (short)4); + private static final org.apache.thrift.protocol.TField VALID_TXN_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validTxnList", org.apache.thrift.protocol.TType.STRING, (short)5); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -123341,13 +124986,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ private String tbl_name; // required private String filter; // required private short max_parts; // required + private String validTxnList; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "db_name"), TBL_NAME((short)2, "tbl_name"), FILTER((short)3, "filter"), - MAX_PARTS((short)4, "max_parts"); + MAX_PARTS((short)4, "max_parts"), + VALID_TXN_LIST((short)5, "validTxnList"); private static final Map byName = new HashMap(); @@ -123370,6 +125017,8 @@ public static _Fields findByThriftId(int fieldId) { return FILTER; case 4: // MAX_PARTS return MAX_PARTS; + case 5: // VALID_TXN_LIST + return VALID_TXN_LIST; default: return null; } @@ -123423,6 +125072,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.MAX_PARTS, new org.apache.thrift.meta_data.FieldMetaData("max_parts", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I16))); + tmpMap.put(_Fields.VALID_TXN_LIST, new org.apache.thrift.meta_data.FieldMetaData("validTxnList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_partitions_by_filter_args.class, metaDataMap); } @@ -123436,7 +125087,8 @@ public get_partitions_by_filter_args( String db_name, String tbl_name, String filter, - short max_parts) + short max_parts, + String validTxnList) { this(); this.db_name = db_name; @@ -123444,6 +125096,7 @@ public get_partitions_by_filter_args( this.filter = filter; this.max_parts = max_parts; setMax_partsIsSet(true); + this.validTxnList = validTxnList; } /** @@ -123461,6 +125114,9 @@ public get_partitions_by_filter_args(get_partitions_by_filter_args other) { this.filter = other.filter; } this.max_parts = other.max_parts; + if (other.isSetValidTxnList()) { + this.validTxnList = other.validTxnList; + } } public get_partitions_by_filter_args deepCopy() { @@ -123474,6 +125130,7 @@ public void clear() { this.filter = null; this.max_parts = (short)-1; + this.validTxnList = null; } public String getDb_name() { @@ -123567,6 +125224,29 @@ public void setMax_partsIsSet(boolean value) { __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MAX_PARTS_ISSET_ID, value); } + public String getValidTxnList() { + return this.validTxnList; + } + + public void setValidTxnList(String validTxnList) { + this.validTxnList = validTxnList; + } + + public void unsetValidTxnList() { + this.validTxnList = null; + } + + 
/** Returns true if field validTxnList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidTxnList() { + return this.validTxnList != null; + } + + public void setValidTxnListIsSet(boolean value) { + if (!value) { + this.validTxnList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -123601,6 +125281,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_TXN_LIST: + if (value == null) { + unsetValidTxnList(); + } else { + setValidTxnList((String)value); + } + break; + } } @@ -123618,6 +125306,9 @@ public Object getFieldValue(_Fields field) { case MAX_PARTS: return getMax_parts(); + case VALID_TXN_LIST: + return getValidTxnList(); + } throw new IllegalStateException(); } @@ -123637,6 +125328,8 @@ public boolean isSet(_Fields field) { return isSetFilter(); case MAX_PARTS: return isSetMax_parts(); + case VALID_TXN_LIST: + return isSetValidTxnList(); } throw new IllegalStateException(); } @@ -123690,6 +125383,15 @@ public boolean equals(get_partitions_by_filter_args that) { return false; } + boolean this_present_validTxnList = true && this.isSetValidTxnList(); + boolean that_present_validTxnList = true && that.isSetValidTxnList(); + if (this_present_validTxnList || that_present_validTxnList) { + if (!(this_present_validTxnList && that_present_validTxnList)) + return false; + if (!this.validTxnList.equals(that.validTxnList)) + return false; + } + return true; } @@ -123717,6 +125419,11 @@ public int hashCode() { if (present_max_parts) list.add(max_parts); + boolean present_validTxnList = true && (isSetValidTxnList()); + list.add(present_validTxnList); + if (present_validTxnList) + list.add(validTxnList); + return list.hashCode(); } @@ -123768,6 +125475,16 @@ public int compareTo(get_partitions_by_filter_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidTxnList()).compareTo(other.isSetValidTxnList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidTxnList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validTxnList, other.validTxnList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -123815,6 +125532,14 @@ public String toString() { sb.append("max_parts:"); sb.append(this.max_parts); first = false; + if (!first) sb.append(", "); + sb.append("validTxnList:"); + if (this.validTxnList == null) { + sb.append("null"); + } else { + sb.append(this.validTxnList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -123892,6 +125617,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_f org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 5: // VALID_TXN_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validTxnList = iprot.readString(); + struct.setValidTxnListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -123923,6 +125656,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(MAX_PARTS_FIELD_DESC); oprot.writeI16(struct.max_parts); oprot.writeFieldEnd(); + if (struct.validTxnList != null) { + oprot.writeFieldBegin(VALID_TXN_LIST_FIELD_DESC); + oprot.writeString(struct.validTxnList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } 
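Every *_args struct this commit touches gains the same trailing optional string, and the generated plumbing around it is identical each time. Below is a minimal sketch of that pattern, with illustrative names and plain JDK types rather than the real Thrift scaffolding: presence is tracked by a null sentinel (a String, unlike the primitive max_parts, needs no __isset_bitfield bit), and the guarded write just above means an unset field never reaches the wire at all.

// A minimal sketch, not the generated class; names and the toy wire format
// are illustrative only.
final class OptionalStringField {
  private String validTxnList; // null <=> unset, as in isSetValidTxnList()

  String get() { return validTxnList; }
  void set(String v) { validTxnList = v; }
  void unset() { validTxnList = null; }
  boolean isSet() { return validTxnList != null; }

  // Mirrors the guarded StandardScheme write: when the field is unset it is
  // simply absent from the wire, so an old server never sees field id 5; and
  // when the reader runs the old generated code, its default/skip branch
  // drops the unknown field harmlessly.
  void writeTo(StringBuilder wire) {
    if (validTxnList != null) {
      wire.append("5:").append(validTxnList).append(';');
    }
  }

  // Presence-aware equality, mirroring the generated equals(): the field
  // matches only if it is set on both sides with equal values, or unset on
  // both sides.
  boolean fieldEquals(OptionalStringField that) {
    boolean thisPresent = this.isSet();
    boolean thatPresent = that.isSet();
    if (thisPresent || thatPresent) {
      return thisPresent && thatPresent && this.validTxnList.equals(that.validTxnList);
    }
    return true;
  }
}

hashCode and compareTo follow the same shape in the generated code: a boolean presence flag first, then the value, so structs that differ only in whether validTxnList is set compare unequal and order deterministically.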
@@ -123953,7 +125691,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_f if (struct.isSetMax_parts()) { optionals.set(3); } - oprot.writeBitSet(optionals, 4); + if (struct.isSetValidTxnList()) { + optionals.set(4); + } + oprot.writeBitSet(optionals, 5); if (struct.isSetDb_name()) { oprot.writeString(struct.db_name); } @@ -123966,12 +125707,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_f if (struct.isSetMax_parts()) { oprot.writeI16(struct.max_parts); } + if (struct.isSetValidTxnList()) { + oprot.writeString(struct.validTxnList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_filter_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(4); + BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); @@ -123988,6 +125732,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_fi struct.max_parts = iprot.readI16(); struct.setMax_partsIsSet(true); } + if (incoming.get(4)) { + struct.validTxnList = iprot.readString(); + struct.setValidTxnListIsSet(true); + } } } @@ -124470,14 +126218,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_f case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1558 = iprot.readListBegin(); - struct.success = new ArrayList(_list1558.size); - Partition _elem1559; - for (int _i1560 = 0; _i1560 < _list1558.size; ++_i1560) + org.apache.thrift.protocol.TList _list1566 = iprot.readListBegin(); + struct.success = new ArrayList(_list1566.size); + Partition _elem1567; + for (int _i1568 = 0; _i1568 < _list1566.size; ++_i1568) { - _elem1559 = new Partition(); - _elem1559.read(iprot); - struct.success.add(_elem1559); + _elem1567 = new Partition(); + _elem1567.read(iprot); + struct.success.add(_elem1567); } iprot.readListEnd(); } @@ -124521,9 +126269,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1561 : struct.success) + for (Partition _iter1569 : struct.success) { - _iter1561.write(oprot); + _iter1569.write(oprot); } oprot.writeListEnd(); } @@ -124570,9 +126318,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_f if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1562 : struct.success) + for (Partition _iter1570 : struct.success) { - _iter1562.write(oprot); + _iter1570.write(oprot); } } } @@ -124590,14 +126338,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_fi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1563 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1563.size); - Partition _elem1564; - for (int _i1565 = 0; _i1565 < _list1563.size; ++_i1565) + org.apache.thrift.protocol.TList _list1571 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1571.size); + Partition _elem1572; + for (int _i1573 = 0; _i1573 < 
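/*
 * The TupleScheme is the fragile half: unlike the StandardScheme above, the
 * tuple encoding carries no field ids, just a leading BitSet saying which
 * optionals follow, so both peers must agree on the field list. That is why
 * writeBitSet(optionals, 4) and readBitSet(4) widen to 5 in lockstep, and why
 * validTxnList is appended strictly after the existing fields rather than
 * inserted among them.
 */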
_list1571.size; ++_i1573) { - _elem1564 = new Partition(); - _elem1564.read(iprot); - struct.success.add(_elem1564); + _elem1572 = new Partition(); + _elem1572.read(iprot); + struct.success.add(_elem1572); } } struct.setSuccessIsSet(true); @@ -124624,6 +126372,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_fi private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField FILTER_FIELD_DESC = new org.apache.thrift.protocol.TField("filter", org.apache.thrift.protocol.TType.STRING, (short)3); private static final org.apache.thrift.protocol.TField MAX_PARTS_FIELD_DESC = new org.apache.thrift.protocol.TField("max_parts", org.apache.thrift.protocol.TType.I32, (short)4); + private static final org.apache.thrift.protocol.TField VALID_TXN_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validTxnList", org.apache.thrift.protocol.TType.STRING, (short)5); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -124635,13 +126384,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_fi private String tbl_name; // required private String filter; // required private int max_parts; // required + private String validTxnList; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "db_name"), TBL_NAME((short)2, "tbl_name"), FILTER((short)3, "filter"), - MAX_PARTS((short)4, "max_parts"); + MAX_PARTS((short)4, "max_parts"), + VALID_TXN_LIST((short)5, "validTxnList"); private static final Map byName = new HashMap(); @@ -124664,6 +126415,8 @@ public static _Fields findByThriftId(int fieldId) { return FILTER; case 4: // MAX_PARTS return MAX_PARTS; + case 5: // VALID_TXN_LIST + return VALID_TXN_LIST; default: return null; } @@ -124717,6 +126470,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.MAX_PARTS, new org.apache.thrift.meta_data.FieldMetaData("max_parts", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); + tmpMap.put(_Fields.VALID_TXN_LIST, new org.apache.thrift.meta_data.FieldMetaData("validTxnList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_part_specs_by_filter_args.class, metaDataMap); } @@ -124730,7 +126485,8 @@ public get_part_specs_by_filter_args( String db_name, String tbl_name, String filter, - int max_parts) + int max_parts, + String validTxnList) { this(); this.db_name = db_name; @@ -124738,6 +126494,7 @@ public get_part_specs_by_filter_args( this.filter = filter; this.max_parts = max_parts; setMax_partsIsSet(true); + this.validTxnList = validTxnList; } /** @@ -124755,6 +126512,9 @@ public get_part_specs_by_filter_args(get_part_specs_by_filter_args other) { this.filter = other.filter; } this.max_parts = other.max_parts; + if (other.isSetValidTxnList()) { + this.validTxnList = other.validTxnList; + } } public get_part_specs_by_filter_args deepCopy() { @@ -124768,6 
+126528,7 @@ public void clear() { this.filter = null; this.max_parts = -1; + this.validTxnList = null; } public String getDb_name() { @@ -124861,6 +126622,29 @@ public void setMax_partsIsSet(boolean value) { __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MAX_PARTS_ISSET_ID, value); } + public String getValidTxnList() { + return this.validTxnList; + } + + public void setValidTxnList(String validTxnList) { + this.validTxnList = validTxnList; + } + + public void unsetValidTxnList() { + this.validTxnList = null; + } + + /** Returns true if field validTxnList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidTxnList() { + return this.validTxnList != null; + } + + public void setValidTxnListIsSet(boolean value) { + if (!value) { + this.validTxnList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -124895,6 +126679,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_TXN_LIST: + if (value == null) { + unsetValidTxnList(); + } else { + setValidTxnList((String)value); + } + break; + } } @@ -124912,6 +126704,9 @@ public Object getFieldValue(_Fields field) { case MAX_PARTS: return getMax_parts(); + case VALID_TXN_LIST: + return getValidTxnList(); + } throw new IllegalStateException(); } @@ -124931,6 +126726,8 @@ public boolean isSet(_Fields field) { return isSetFilter(); case MAX_PARTS: return isSetMax_parts(); + case VALID_TXN_LIST: + return isSetValidTxnList(); } throw new IllegalStateException(); } @@ -124984,6 +126781,15 @@ public boolean equals(get_part_specs_by_filter_args that) { return false; } + boolean this_present_validTxnList = true && this.isSetValidTxnList(); + boolean that_present_validTxnList = true && that.isSetValidTxnList(); + if (this_present_validTxnList || that_present_validTxnList) { + if (!(this_present_validTxnList && that_present_validTxnList)) + return false; + if (!this.validTxnList.equals(that.validTxnList)) + return false; + } + return true; } @@ -125011,6 +126817,11 @@ public int hashCode() { if (present_max_parts) list.add(max_parts); + boolean present_validTxnList = true && (isSetValidTxnList()); + list.add(present_validTxnList); + if (present_validTxnList) + list.add(validTxnList); + return list.hashCode(); } @@ -125062,6 +126873,16 @@ public int compareTo(get_part_specs_by_filter_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidTxnList()).compareTo(other.isSetValidTxnList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidTxnList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validTxnList, other.validTxnList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -125109,6 +126930,14 @@ public String toString() { sb.append("max_parts:"); sb.append(this.max_parts); first = false; + if (!first) sb.append(", "); + sb.append("validTxnList:"); + if (this.validTxnList == null) { + sb.append("null"); + } else { + sb.append(this.validTxnList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -125186,6 +127015,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_part_specs_by_f org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 5: // VALID_TXN_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validTxnList = iprot.readString(); + struct.setValidTxnListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, 
schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -125217,6 +127054,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_part_specs_by_ oprot.writeFieldBegin(MAX_PARTS_FIELD_DESC); oprot.writeI32(struct.max_parts); oprot.writeFieldEnd(); + if (struct.validTxnList != null) { + oprot.writeFieldBegin(VALID_TXN_LIST_FIELD_DESC); + oprot.writeString(struct.validTxnList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -125247,7 +127089,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_f if (struct.isSetMax_parts()) { optionals.set(3); } - oprot.writeBitSet(optionals, 4); + if (struct.isSetValidTxnList()) { + optionals.set(4); + } + oprot.writeBitSet(optionals, 5); if (struct.isSetDb_name()) { oprot.writeString(struct.db_name); } @@ -125260,12 +127105,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_f if (struct.isSetMax_parts()) { oprot.writeI32(struct.max_parts); } + if (struct.isSetValidTxnList()) { + oprot.writeString(struct.validTxnList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_filter_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(4); + BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); @@ -125282,6 +127130,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_fi struct.max_parts = iprot.readI32(); struct.setMax_partsIsSet(true); } + if (incoming.get(4)) { + struct.validTxnList = iprot.readString(); + struct.setValidTxnListIsSet(true); + } } } @@ -125764,14 +127616,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_part_specs_by_f case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1566 = iprot.readListBegin(); - struct.success = new ArrayList(_list1566.size); - PartitionSpec _elem1567; - for (int _i1568 = 0; _i1568 < _list1566.size; ++_i1568) + org.apache.thrift.protocol.TList _list1574 = iprot.readListBegin(); + struct.success = new ArrayList(_list1574.size); + PartitionSpec _elem1575; + for (int _i1576 = 0; _i1576 < _list1574.size; ++_i1576) { - _elem1567 = new PartitionSpec(); - _elem1567.read(iprot); - struct.success.add(_elem1567); + _elem1575 = new PartitionSpec(); + _elem1575.read(iprot); + struct.success.add(_elem1575); } iprot.readListEnd(); } @@ -125815,9 +127667,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_part_specs_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (PartitionSpec _iter1569 : struct.success) + for (PartitionSpec _iter1577 : struct.success) { - _iter1569.write(oprot); + _iter1577.write(oprot); } oprot.writeListEnd(); } @@ -125864,9 +127716,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_f if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (PartitionSpec _iter1570 : struct.success) + for (PartitionSpec _iter1578 : struct.success) { - _iter1570.write(oprot); + _iter1578.write(oprot); } } } @@ -125884,14 +127736,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_fi BitSet incoming = 
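/*
 * The validTxnList plumbing just completed for get_part_specs_by_filter_args
 * now repeats, hunk for hunk, for get_num_partitions_by_filter_args and
 * get_partitions_by_names_args below: field descriptor, _Fields constant,
 * metaDataMap entry, constructor and copy-constructor wiring, accessors,
 * equals/hashCode/compareTo/toString, and both protocol schemes. Only the
 * field id and the neighboring fields differ per struct.
 */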
iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1571 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1571.size); - PartitionSpec _elem1572; - for (int _i1573 = 0; _i1573 < _list1571.size; ++_i1573) + org.apache.thrift.protocol.TList _list1579 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1579.size); + PartitionSpec _elem1580; + for (int _i1581 = 0; _i1581 < _list1579.size; ++_i1581) { - _elem1572 = new PartitionSpec(); - _elem1572.read(iprot); - struct.success.add(_elem1572); + _elem1580 = new PartitionSpec(); + _elem1580.read(iprot); + struct.success.add(_elem1580); } } struct.setSuccessIsSet(true); @@ -126855,6 +128707,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_ex private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField FILTER_FIELD_DESC = new org.apache.thrift.protocol.TField("filter", org.apache.thrift.protocol.TType.STRING, (short)3); + private static final org.apache.thrift.protocol.TField VALID_TXN_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validTxnList", org.apache.thrift.protocol.TType.STRING, (short)4); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -126865,12 +128718,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_ex private String db_name; // required private String tbl_name; // required private String filter; // required + private String validTxnList; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "db_name"), TBL_NAME((short)2, "tbl_name"), - FILTER((short)3, "filter"); + FILTER((short)3, "filter"), + VALID_TXN_LIST((short)4, "validTxnList"); private static final Map byName = new HashMap(); @@ -126891,6 +128746,8 @@ public static _Fields findByThriftId(int fieldId) { return TBL_NAME; case 3: // FILTER return FILTER; + case 4: // VALID_TXN_LIST + return VALID_TXN_LIST; default: return null; } @@ -126940,6 +128797,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.FILTER, new org.apache.thrift.meta_data.FieldMetaData("filter", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.VALID_TXN_LIST, new org.apache.thrift.meta_data.FieldMetaData("validTxnList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_num_partitions_by_filter_args.class, metaDataMap); } @@ -126950,12 +128809,14 @@ public get_num_partitions_by_filter_args() { public get_num_partitions_by_filter_args( String db_name, String tbl_name, - String filter) + String filter, + String validTxnList) { this(); this.db_name = db_name; this.tbl_name = tbl_name; this.filter = filter; + this.validTxnList = validTxnList; } /** @@ -126971,6 +128832,9 @@ public get_num_partitions_by_filter_args(get_num_partitions_by_filter_args other if (other.isSetFilter()) { this.filter = other.filter; } + if (other.isSetValidTxnList()) { + this.validTxnList = other.validTxnList; + } } public get_num_partitions_by_filter_args deepCopy() { @@ -126982,6 +128846,7 @@ public void clear() { this.db_name = null; this.tbl_name = null; this.filter = null; + this.validTxnList = null; } public String getDb_name() { @@ -127053,6 +128918,29 @@ public void setFilterIsSet(boolean value) { } } + public String getValidTxnList() { + return this.validTxnList; + } + + public void setValidTxnList(String validTxnList) { + this.validTxnList = validTxnList; + } + + public void unsetValidTxnList() { + this.validTxnList = null; + } + + /** Returns true if field validTxnList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidTxnList() { + return this.validTxnList != null; + } + + public void setValidTxnListIsSet(boolean value) { + if (!value) { + this.validTxnList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -127079,6 +128967,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_TXN_LIST: + if (value == null) { + unsetValidTxnList(); + } else { + setValidTxnList((String)value); + } + break; + } } @@ -127093,6 +128989,9 @@ public Object getFieldValue(_Fields field) { case FILTER: return getFilter(); + case VALID_TXN_LIST: + return getValidTxnList(); + } throw new IllegalStateException(); } @@ -127110,6 +129009,8 @@ public boolean isSet(_Fields field) { return isSetTbl_name(); case FILTER: return isSetFilter(); + case VALID_TXN_LIST: + return isSetValidTxnList(); } throw new IllegalStateException(); } @@ -127154,6 +129055,15 @@ public boolean equals(get_num_partitions_by_filter_args that) { return false; } + boolean this_present_validTxnList = true 
&& this.isSetValidTxnList(); + boolean that_present_validTxnList = true && that.isSetValidTxnList(); + if (this_present_validTxnList || that_present_validTxnList) { + if (!(this_present_validTxnList && that_present_validTxnList)) + return false; + if (!this.validTxnList.equals(that.validTxnList)) + return false; + } + return true; } @@ -127176,6 +129086,11 @@ public int hashCode() { if (present_filter) list.add(filter); + boolean present_validTxnList = true && (isSetValidTxnList()); + list.add(present_validTxnList); + if (present_validTxnList) + list.add(validTxnList); + return list.hashCode(); } @@ -127217,6 +129132,16 @@ public int compareTo(get_num_partitions_by_filter_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidTxnList()).compareTo(other.isSetValidTxnList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidTxnList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validTxnList, other.validTxnList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -127260,6 +129185,14 @@ public String toString() { sb.append(this.filter); } first = false; + if (!first) sb.append(", "); + sb.append("validTxnList:"); + if (this.validTxnList == null) { + sb.append("null"); + } else { + sb.append(this.validTxnList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -127327,6 +129260,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_num_partitions_ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 4: // VALID_TXN_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validTxnList = iprot.readString(); + struct.setValidTxnListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -127355,6 +129296,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_num_partitions oprot.writeString(struct.filter); oprot.writeFieldEnd(); } + if (struct.validTxnList != null) { + oprot.writeFieldBegin(VALID_TXN_LIST_FIELD_DESC); + oprot.writeString(struct.validTxnList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -127382,7 +129328,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_num_partitions_ if (struct.isSetFilter()) { optionals.set(2); } - oprot.writeBitSet(optionals, 3); + if (struct.isSetValidTxnList()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); if (struct.isSetDb_name()) { oprot.writeString(struct.db_name); } @@ -127392,12 +129341,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_num_partitions_ if (struct.isSetFilter()) { oprot.writeString(struct.filter); } + if (struct.isSetValidTxnList()) { + oprot.writeString(struct.validTxnList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, get_num_partitions_by_filter_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(3); + BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); @@ -127410,6 +129362,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_num_partitions_b struct.filter = iprot.readString(); struct.setFilterIsSet(true); } + if (incoming.get(3)) { + struct.validTxnList = iprot.readString(); + 
struct.setValidTxnListIsSet(true); + } } } @@ -127991,6 +129947,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_num_partitions_b private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("names", org.apache.thrift.protocol.TType.LIST, (short)3); + private static final org.apache.thrift.protocol.TField VALID_TXN_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validTxnList", org.apache.thrift.protocol.TType.STRING, (short)4); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -128001,12 +129958,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_num_partitions_b private String db_name; // required private String tbl_name; // required private List names; // required + private String validTxnList; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "db_name"), TBL_NAME((short)2, "tbl_name"), - NAMES((short)3, "names"); + NAMES((short)3, "names"), + VALID_TXN_LIST((short)4, "validTxnList"); private static final Map byName = new HashMap(); @@ -128027,6 +129986,8 @@ public static _Fields findByThriftId(int fieldId) { return TBL_NAME; case 3: // NAMES return NAMES; + case 4: // VALID_TXN_LIST + return VALID_TXN_LIST; default: return null; } @@ -128077,6 +130038,8 @@ public String getFieldName() { tmpMap.put(_Fields.NAMES, new org.apache.thrift.meta_data.FieldMetaData("names", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); + tmpMap.put(_Fields.VALID_TXN_LIST, new org.apache.thrift.meta_data.FieldMetaData("validTxnList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_partitions_by_names_args.class, metaDataMap); } @@ -128087,12 +130050,14 @@ public get_partitions_by_names_args() { public get_partitions_by_names_args( String db_name, String tbl_name, - List names) + List names, + String validTxnList) { this(); this.db_name = db_name; this.tbl_name = tbl_name; this.names = names; + this.validTxnList = validTxnList; } /** @@ -128109,6 +130074,9 @@ public get_partitions_by_names_args(get_partitions_by_names_args other) { List __this__names = new ArrayList(other.names); this.names = __this__names; } + if (other.isSetValidTxnList()) { + this.validTxnList = other.validTxnList; + } } public get_partitions_by_names_args deepCopy() { @@ -128120,6 +130088,7 @@ public void clear() { this.db_name = null; this.tbl_name = null; this.names = null; + this.validTxnList = null; } public String getDb_name() { @@ -128206,6 +130175,29 @@ public void setNamesIsSet(boolean value) { } } + public String getValidTxnList() { + return this.validTxnList; + } + + public void 
setValidTxnList(String validTxnList) { + this.validTxnList = validTxnList; + } + + public void unsetValidTxnList() { + this.validTxnList = null; + } + + /** Returns true if field validTxnList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidTxnList() { + return this.validTxnList != null; + } + + public void setValidTxnListIsSet(boolean value) { + if (!value) { + this.validTxnList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -128232,6 +130224,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_TXN_LIST: + if (value == null) { + unsetValidTxnList(); + } else { + setValidTxnList((String)value); + } + break; + } } @@ -128246,6 +130246,9 @@ public Object getFieldValue(_Fields field) { case NAMES: return getNames(); + case VALID_TXN_LIST: + return getValidTxnList(); + } throw new IllegalStateException(); } @@ -128263,6 +130266,8 @@ public boolean isSet(_Fields field) { return isSetTbl_name(); case NAMES: return isSetNames(); + case VALID_TXN_LIST: + return isSetValidTxnList(); } throw new IllegalStateException(); } @@ -128307,6 +130312,15 @@ public boolean equals(get_partitions_by_names_args that) { return false; } + boolean this_present_validTxnList = true && this.isSetValidTxnList(); + boolean that_present_validTxnList = true && that.isSetValidTxnList(); + if (this_present_validTxnList || that_present_validTxnList) { + if (!(this_present_validTxnList && that_present_validTxnList)) + return false; + if (!this.validTxnList.equals(that.validTxnList)) + return false; + } + return true; } @@ -128329,6 +130343,11 @@ public int hashCode() { if (present_names) list.add(names); + boolean present_validTxnList = true && (isSetValidTxnList()); + list.add(present_validTxnList); + if (present_validTxnList) + list.add(validTxnList); + return list.hashCode(); } @@ -128370,6 +130389,16 @@ public int compareTo(get_partitions_by_names_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidTxnList()).compareTo(other.isSetValidTxnList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidTxnList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validTxnList, other.validTxnList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -128413,6 +130442,14 @@ public String toString() { sb.append(this.names); } first = false; + if (!first) sb.append(", "); + sb.append("validTxnList:"); + if (this.validTxnList == null) { + sb.append("null"); + } else { + sb.append(this.validTxnList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -128475,13 +130512,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_n case 3: // NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1574 = iprot.readListBegin(); - struct.names = new ArrayList(_list1574.size); - String _elem1575; - for (int _i1576 = 0; _i1576 < _list1574.size; ++_i1576) + org.apache.thrift.protocol.TList _list1582 = iprot.readListBegin(); + struct.names = new ArrayList(_list1582.size); + String _elem1583; + for (int _i1584 = 0; _i1584 < _list1582.size; ++_i1584) { - _elem1575 = iprot.readString(); - struct.names.add(_elem1575); + _elem1583 = iprot.readString(); + struct.names.add(_elem1583); } iprot.readListEnd(); } @@ -128490,6 +130527,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_n 
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 4: // VALID_TXN_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validTxnList = iprot.readString(); + struct.setValidTxnListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -128517,14 +130562,19 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.names.size())); - for (String _iter1577 : struct.names) + for (String _iter1585 : struct.names) { - oprot.writeString(_iter1577); + oprot.writeString(_iter1585); } oprot.writeListEnd(); } oprot.writeFieldEnd(); } + if (struct.validTxnList != null) { + oprot.writeFieldBegin(VALID_TXN_LIST_FIELD_DESC); + oprot.writeString(struct.validTxnList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -128552,7 +130602,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_n if (struct.isSetNames()) { optionals.set(2); } - oprot.writeBitSet(optionals, 3); + if (struct.isSetValidTxnList()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); if (struct.isSetDb_name()) { oprot.writeString(struct.db_name); } @@ -128562,18 +130615,21 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_n if (struct.isSetNames()) { { oprot.writeI32(struct.names.size()); - for (String _iter1578 : struct.names) + for (String _iter1586 : struct.names) { - oprot.writeString(_iter1578); + oprot.writeString(_iter1586); } } } + if (struct.isSetValidTxnList()) { + oprot.writeString(struct.validTxnList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_names_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(3); + BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); @@ -128584,17 +130640,21 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_na } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1579 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.names = new ArrayList(_list1579.size); - String _elem1580; - for (int _i1581 = 0; _i1581 < _list1579.size; ++_i1581) + org.apache.thrift.protocol.TList _list1587 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.names = new ArrayList(_list1587.size); + String _elem1588; + for (int _i1589 = 0; _i1589 < _list1587.size; ++_i1589) { - _elem1580 = iprot.readString(); - struct.names.add(_elem1580); + _elem1588 = iprot.readString(); + struct.names.add(_elem1588); } } struct.setNamesIsSet(true); } + if (incoming.get(3)) { + struct.validTxnList = iprot.readString(); + struct.setValidTxnListIsSet(true); + } } } @@ -129077,14 +131137,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_n case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1582 = iprot.readListBegin(); - struct.success = new ArrayList(_list1582.size); - Partition _elem1583; - for (int 
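/*
 * Thrift field ids are scoped per struct, so the new string takes id 5 in the
 * four-field filter variants (db_name, tbl_name, filter, max_parts) but id 4
 * in the three-field get_num_partitions_by_filter_args and
 * get_partitions_by_names_args, and the tuple BitSets widen from 4 to 5 and
 * from 3 to 4 accordingly.
 */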
_i1584 = 0; _i1584 < _list1582.size; ++_i1584) + org.apache.thrift.protocol.TList _list1590 = iprot.readListBegin(); + struct.success = new ArrayList(_list1590.size); + Partition _elem1591; + for (int _i1592 = 0; _i1592 < _list1590.size; ++_i1592) { - _elem1583 = new Partition(); - _elem1583.read(iprot); - struct.success.add(_elem1583); + _elem1591 = new Partition(); + _elem1591.read(iprot); + struct.success.add(_elem1591); } iprot.readListEnd(); } @@ -129128,9 +131188,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1585 : struct.success) + for (Partition _iter1593 : struct.success) { - _iter1585.write(oprot); + _iter1593.write(oprot); } oprot.writeListEnd(); } @@ -129177,9 +131237,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_n if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1586 : struct.success) + for (Partition _iter1594 : struct.success) { - _iter1586.write(oprot); + _iter1594.write(oprot); } } } @@ -129197,14 +131257,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_na BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1587 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1587.size); - Partition _elem1588; - for (int _i1589 = 0; _i1589 < _list1587.size; ++_i1589) + org.apache.thrift.protocol.TList _list1595 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1595.size); + Partition _elem1596; + for (int _i1597 = 0; _i1597 < _list1595.size; ++_i1597) { - _elem1588 = new Partition(); - _elem1588.read(iprot); - struct.success.add(_elem1588); + _elem1596 = new Partition(); + _elem1596.read(iprot); + struct.success.add(_elem1596); } } struct.setSuccessIsSet(true); @@ -131692,14 +133752,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_partitions_ar case 3: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1590 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list1590.size); - Partition _elem1591; - for (int _i1592 = 0; _i1592 < _list1590.size; ++_i1592) + org.apache.thrift.protocol.TList _list1598 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list1598.size); + Partition _elem1599; + for (int _i1600 = 0; _i1600 < _list1598.size; ++_i1600) { - _elem1591 = new Partition(); - _elem1591.read(iprot); - struct.new_parts.add(_elem1591); + _elem1599 = new Partition(); + _elem1599.read(iprot); + struct.new_parts.add(_elem1599); } iprot.readListEnd(); } @@ -131735,9 +133795,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partitions_a oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter1593 : struct.new_parts) + for (Partition _iter1601 : struct.new_parts) { - _iter1593.write(oprot); + _iter1601.write(oprot); } oprot.writeListEnd(); } @@ -131780,9 +133840,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_partitions_ar if 
(struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter1594 : struct.new_parts) + for (Partition _iter1602 : struct.new_parts) { - _iter1594.write(oprot); + _iter1602.write(oprot); } } } @@ -131802,14 +133862,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1595 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list1595.size); - Partition _elem1596; - for (int _i1597 = 0; _i1597 < _list1595.size; ++_i1597) + org.apache.thrift.protocol.TList _list1603 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list1603.size); + Partition _elem1604; + for (int _i1605 = 0; _i1605 < _list1603.size; ++_i1605) { - _elem1596 = new Partition(); - _elem1596.read(iprot); - struct.new_parts.add(_elem1596); + _elem1604 = new Partition(); + _elem1604.read(iprot); + struct.new_parts.add(_elem1604); } } struct.setNew_partsIsSet(true); @@ -132862,14 +134922,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_partitions_wi case 3: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1598 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list1598.size); - Partition _elem1599; - for (int _i1600 = 0; _i1600 < _list1598.size; ++_i1600) + org.apache.thrift.protocol.TList _list1606 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list1606.size); + Partition _elem1607; + for (int _i1608 = 0; _i1608 < _list1606.size; ++_i1608) { - _elem1599 = new Partition(); - _elem1599.read(iprot); - struct.new_parts.add(_elem1599); + _elem1607 = new Partition(); + _elem1607.read(iprot); + struct.new_parts.add(_elem1607); } iprot.readListEnd(); } @@ -132914,9 +134974,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partitions_w oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter1601 : struct.new_parts) + for (Partition _iter1609 : struct.new_parts) { - _iter1601.write(oprot); + _iter1609.write(oprot); } oprot.writeListEnd(); } @@ -132967,9 +135027,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_partitions_wi if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter1602 : struct.new_parts) + for (Partition _iter1610 : struct.new_parts) { - _iter1602.write(oprot); + _iter1610.write(oprot); } } } @@ -132992,14 +135052,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_wit } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1603 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list1603.size); - Partition _elem1604; - for (int _i1605 = 0; _i1605 < _list1603.size; ++_i1605) + org.apache.thrift.protocol.TList _list1611 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list1611.size); + Partition _elem1612; + for (int _i1613 = 0; _i1613 < _list1611.size; ++_i1613) { - _elem1604 = new Partition(); - _elem1604.read(iprot); - struct.new_parts.add(_elem1604); + _elem1612 = new Partition(); + 
_elem1612.read(iprot); + struct.new_parts.add(_elem1612); } } struct.setNew_partsIsSet(true); @@ -136138,13 +138198,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, rename_partition_ar case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1606 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1606.size); - String _elem1607; - for (int _i1608 = 0; _i1608 < _list1606.size; ++_i1608) + org.apache.thrift.protocol.TList _list1614 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1614.size); + String _elem1615; + for (int _i1616 = 0; _i1616 < _list1614.size; ++_i1616) { - _elem1607 = iprot.readString(); - struct.part_vals.add(_elem1607); + _elem1615 = iprot.readString(); + struct.part_vals.add(_elem1615); } iprot.readListEnd(); } @@ -136189,9 +138249,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, rename_partition_a oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1609 : struct.part_vals) + for (String _iter1617 : struct.part_vals) { - oprot.writeString(_iter1609); + oprot.writeString(_iter1617); } oprot.writeListEnd(); } @@ -136242,9 +138302,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, rename_partition_ar if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1610 : struct.part_vals) + for (String _iter1618 : struct.part_vals) { - oprot.writeString(_iter1610); + oprot.writeString(_iter1618); } } } @@ -136267,13 +138327,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, rename_partition_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1611 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1611.size); - String _elem1612; - for (int _i1613 = 0; _i1613 < _list1611.size; ++_i1613) + org.apache.thrift.protocol.TList _list1619 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1619.size); + String _elem1620; + for (int _i1621 = 0; _i1621 < _list1619.size; ++_i1621) { - _elem1612 = iprot.readString(); - struct.part_vals.add(_elem1612); + _elem1620 = iprot.readString(); + struct.part_vals.add(_elem1620); } } struct.setPart_valsIsSet(true); @@ -138085,13 +140145,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_has_ case 1: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1614 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1614.size); - String _elem1615; - for (int _i1616 = 0; _i1616 < _list1614.size; ++_i1616) + org.apache.thrift.protocol.TList _list1622 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1622.size); + String _elem1623; + for (int _i1624 = 0; _i1624 < _list1622.size; ++_i1624) { - _elem1615 = iprot.readString(); - struct.part_vals.add(_elem1615); + _elem1623 = iprot.readString(); + struct.part_vals.add(_elem1623); } iprot.readListEnd(); } @@ -138125,9 +140185,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_has oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, 
struct.part_vals.size())); - for (String _iter1617 : struct.part_vals) + for (String _iter1625 : struct.part_vals) { - oprot.writeString(_iter1617); + oprot.writeString(_iter1625); } oprot.writeListEnd(); } @@ -138164,9 +140224,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_has_ if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1618 : struct.part_vals) + for (String _iter1626 : struct.part_vals) { - oprot.writeString(_iter1618); + oprot.writeString(_iter1626); } } } @@ -138181,13 +140241,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_has_v BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1619 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1619.size); - String _elem1620; - for (int _i1621 = 0; _i1621 < _list1619.size; ++_i1621) + org.apache.thrift.protocol.TList _list1627 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1627.size); + String _elem1628; + for (int _i1629 = 0; _i1629 < _list1627.size; ++_i1629) { - _elem1620 = iprot.readString(); - struct.part_vals.add(_elem1620); + _elem1628 = iprot.readString(); + struct.part_vals.add(_elem1628); } } struct.setPart_valsIsSet(true); @@ -140342,13 +142402,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_to_v case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1622 = iprot.readListBegin(); - struct.success = new ArrayList(_list1622.size); - String _elem1623; - for (int _i1624 = 0; _i1624 < _list1622.size; ++_i1624) + org.apache.thrift.protocol.TList _list1630 = iprot.readListBegin(); + struct.success = new ArrayList(_list1630.size); + String _elem1631; + for (int _i1632 = 0; _i1632 < _list1630.size; ++_i1632) { - _elem1623 = iprot.readString(); - struct.success.add(_elem1623); + _elem1631 = iprot.readString(); + struct.success.add(_elem1631); } iprot.readListEnd(); } @@ -140383,9 +142443,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_to_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1625 : struct.success) + for (String _iter1633 : struct.success) { - oprot.writeString(_iter1625); + oprot.writeString(_iter1633); } oprot.writeListEnd(); } @@ -140424,9 +142484,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_to_v if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1626 : struct.success) + for (String _iter1634 : struct.success) { - oprot.writeString(_iter1626); + oprot.writeString(_iter1634); } } } @@ -140441,13 +142501,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_to_va BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1627 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1627.size); - String _elem1628; - for (int _i1629 = 0; _i1629 < _list1627.size; ++_i1629) + org.apache.thrift.protocol.TList _list1635 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, 
iprot.readI32()); + struct.success = new ArrayList(_list1635.size); + String _elem1636; + for (int _i1637 = 0; _i1637 < _list1635.size; ++_i1637) { - _elem1628 = iprot.readString(); - struct.success.add(_elem1628); + _elem1636 = iprot.readString(); + struct.success.add(_elem1636); } } struct.setSuccessIsSet(true); @@ -141210,15 +143270,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_to_s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1630 = iprot.readMapBegin(); - struct.success = new HashMap(2*_map1630.size); - String _key1631; - String _val1632; - for (int _i1633 = 0; _i1633 < _map1630.size; ++_i1633) + org.apache.thrift.protocol.TMap _map1638 = iprot.readMapBegin(); + struct.success = new HashMap(2*_map1638.size); + String _key1639; + String _val1640; + for (int _i1641 = 0; _i1641 < _map1638.size; ++_i1641) { - _key1631 = iprot.readString(); - _val1632 = iprot.readString(); - struct.success.put(_key1631, _val1632); + _key1639 = iprot.readString(); + _val1640 = iprot.readString(); + struct.success.put(_key1639, _val1640); } iprot.readMapEnd(); } @@ -141253,10 +143313,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_to_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (Map.Entry _iter1634 : struct.success.entrySet()) + for (Map.Entry _iter1642 : struct.success.entrySet()) { - oprot.writeString(_iter1634.getKey()); - oprot.writeString(_iter1634.getValue()); + oprot.writeString(_iter1642.getKey()); + oprot.writeString(_iter1642.getValue()); } oprot.writeMapEnd(); } @@ -141295,10 +143355,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_to_s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Map.Entry _iter1635 : struct.success.entrySet()) + for (Map.Entry _iter1643 : struct.success.entrySet()) { - oprot.writeString(_iter1635.getKey()); - oprot.writeString(_iter1635.getValue()); + oprot.writeString(_iter1643.getKey()); + oprot.writeString(_iter1643.getValue()); } } } @@ -141313,15 +143373,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_to_sp BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map1636 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new HashMap(2*_map1636.size); - String _key1637; - String _val1638; - for (int _i1639 = 0; _i1639 < _map1636.size; ++_i1639) + org.apache.thrift.protocol.TMap _map1644 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new HashMap(2*_map1644.size); + String _key1645; + String _val1646; + for (int _i1647 = 0; _i1647 < _map1644.size; ++_i1647) { - _key1637 = iprot.readString(); - _val1638 = iprot.readString(); - struct.success.put(_key1637, _val1638); + _key1645 = iprot.readString(); + _val1646 = iprot.readString(); + struct.success.put(_key1645, _val1646); } } struct.setSuccessIsSet(true); @@ -141916,15 +143976,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, markPartitionForEve case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - 
org.apache.thrift.protocol.TMap _map1640 = iprot.readMapBegin(); - struct.part_vals = new HashMap(2*_map1640.size); - String _key1641; - String _val1642; - for (int _i1643 = 0; _i1643 < _map1640.size; ++_i1643) + org.apache.thrift.protocol.TMap _map1648 = iprot.readMapBegin(); + struct.part_vals = new HashMap(2*_map1648.size); + String _key1649; + String _val1650; + for (int _i1651 = 0; _i1651 < _map1648.size; ++_i1651) { - _key1641 = iprot.readString(); - _val1642 = iprot.readString(); - struct.part_vals.put(_key1641, _val1642); + _key1649 = iprot.readString(); + _val1650 = iprot.readString(); + struct.part_vals.put(_key1649, _val1650); } iprot.readMapEnd(); } @@ -141968,10 +144028,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, markPartitionForEv oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (Map.Entry _iter1644 : struct.part_vals.entrySet()) + for (Map.Entry _iter1652 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1644.getKey()); - oprot.writeString(_iter1644.getValue()); + oprot.writeString(_iter1652.getKey()); + oprot.writeString(_iter1652.getValue()); } oprot.writeMapEnd(); } @@ -142022,10 +144082,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, markPartitionForEve if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (Map.Entry _iter1645 : struct.part_vals.entrySet()) + for (Map.Entry _iter1653 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1645.getKey()); - oprot.writeString(_iter1645.getValue()); + oprot.writeString(_iter1653.getKey()); + oprot.writeString(_iter1653.getValue()); } } } @@ -142048,15 +144108,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, markPartitionForEven } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map1646 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new HashMap(2*_map1646.size); - String _key1647; - String _val1648; - for (int _i1649 = 0; _i1649 < _map1646.size; ++_i1649) + org.apache.thrift.protocol.TMap _map1654 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new HashMap(2*_map1654.size); + String _key1655; + String _val1656; + for (int _i1657 = 0; _i1657 < _map1654.size; ++_i1657) { - _key1647 = iprot.readString(); - _val1648 = iprot.readString(); - struct.part_vals.put(_key1647, _val1648); + _key1655 = iprot.readString(); + _val1656 = iprot.readString(); + struct.part_vals.put(_key1655, _val1656); } } struct.setPart_valsIsSet(true); @@ -143540,15 +145600,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, isPartitionMarkedFo case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1650 = iprot.readMapBegin(); - struct.part_vals = new HashMap(2*_map1650.size); - String _key1651; - String _val1652; - for (int _i1653 = 0; _i1653 < _map1650.size; ++_i1653) + org.apache.thrift.protocol.TMap _map1658 = iprot.readMapBegin(); + struct.part_vals = new HashMap(2*_map1658.size); + String _key1659; + String _val1660; + for (int _i1661 = 0; _i1661 < _map1658.size; ++_i1661) { - _key1651 = iprot.readString(); - _val1652 = iprot.readString(); - struct.part_vals.put(_key1651, 
_val1652); + _key1659 = iprot.readString(); + _val1660 = iprot.readString(); + struct.part_vals.put(_key1659, _val1660); } iprot.readMapEnd(); } @@ -143592,10 +145652,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, isPartitionMarkedF oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (Map.Entry _iter1654 : struct.part_vals.entrySet()) + for (Map.Entry _iter1662 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1654.getKey()); - oprot.writeString(_iter1654.getValue()); + oprot.writeString(_iter1662.getKey()); + oprot.writeString(_iter1662.getValue()); } oprot.writeMapEnd(); } @@ -143646,10 +145706,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, isPartitionMarkedFo if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (Map.Entry _iter1655 : struct.part_vals.entrySet()) + for (Map.Entry _iter1663 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1655.getKey()); - oprot.writeString(_iter1655.getValue()); + oprot.writeString(_iter1663.getKey()); + oprot.writeString(_iter1663.getValue()); } } } @@ -143672,15 +145732,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, isPartitionMarkedFor } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map1656 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new HashMap(2*_map1656.size); - String _key1657; - String _val1658; - for (int _i1659 = 0; _i1659 < _map1656.size; ++_i1659) + org.apache.thrift.protocol.TMap _map1664 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new HashMap(2*_map1664.size); + String _key1665; + String _val1666; + for (int _i1667 = 0; _i1667 < _map1664.size; ++_i1667) { - _key1657 = iprot.readString(); - _val1658 = iprot.readString(); - struct.part_vals.put(_key1657, _val1658); + _key1665 = iprot.readString(); + _val1666 = iprot.readString(); + struct.part_vals.put(_key1665, _val1666); } } struct.setPart_valsIsSet(true); @@ -154912,6 +156972,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, update_partition_col private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField COL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("col_name", org.apache.thrift.protocol.TType.STRING, (short)3); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)4); private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>(); static { @@ -154922,12 +156983,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, update_partition_col private String db_name; // required private String tbl_name; // required private String col_name; // required + private String validWriteIdList; // required /** The set of fields this struct contains, along with
convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "db_name"), TBL_NAME((short)2, "tbl_name"), - COL_NAME((short)3, "col_name"); + COL_NAME((short)3, "col_name"), + VALID_WRITE_ID_LIST((short)4, "validWriteIdList"); private static final Map byName = new HashMap(); @@ -154948,6 +157011,8 @@ public static _Fields findByThriftId(int fieldId) { return TBL_NAME; case 3: // COL_NAME return COL_NAME; + case 4: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; default: return null; } @@ -154997,6 +157062,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.COL_NAME, new org.apache.thrift.meta_data.FieldMetaData("col_name", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_table_column_statistics_args.class, metaDataMap); } @@ -155007,12 +157074,14 @@ public get_table_column_statistics_args() { public get_table_column_statistics_args( String db_name, String tbl_name, - String col_name) + String col_name, + String validWriteIdList) { this(); this.db_name = db_name; this.tbl_name = tbl_name; this.col_name = col_name; + this.validWriteIdList = validWriteIdList; } /** @@ -155028,6 +157097,9 @@ public get_table_column_statistics_args(get_table_column_statistics_args other) if (other.isSetCol_name()) { this.col_name = other.col_name; } + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } } public get_table_column_statistics_args deepCopy() { @@ -155039,6 +157111,7 @@ public void clear() { this.db_name = null; this.tbl_name = null; this.col_name = null; + this.validWriteIdList = null; } public String getDb_name() { @@ -155110,6 +157183,29 @@ public void setCol_nameIsSet(boolean value) { } } + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -155136,6 +157232,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + } } @@ -155150,6 +157254,9 @@ public Object getFieldValue(_Fields field) { case COL_NAME: return getCol_name(); + case VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + } throw new IllegalStateException(); } @@ -155167,6 +157274,8 @@ public boolean isSet(_Fields field) { return isSetTbl_name(); case COL_NAME: return isSetCol_name(); + case 
VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); } throw new IllegalStateException(); } @@ -155211,6 +157320,15 @@ public boolean equals(get_table_column_statistics_args that) { return false; } + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if (!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + return true; } @@ -155233,6 +157351,11 @@ public int hashCode() { if (present_col_name) list.add(col_name); + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + return list.hashCode(); } @@ -155274,6 +157397,16 @@ public int compareTo(get_table_column_statistics_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -155317,6 +157450,14 @@ public String toString() { sb.append(this.col_name); } first = false; + if (!first) sb.append(", "); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -155384,6 +157525,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_column_st org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 4: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -155412,6 +157561,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_column_s oprot.writeString(struct.col_name); oprot.writeFieldEnd(); } + if (struct.validWriteIdList != null) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -155439,7 +157593,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_column_st if (struct.isSetCol_name()) { optionals.set(2); } - oprot.writeBitSet(optionals, 3); + if (struct.isSetValidWriteIdList()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); if (struct.isSetDb_name()) { oprot.writeString(struct.db_name); } @@ -155449,12 +157606,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_column_st if (struct.isSetCol_name()) { oprot.writeString(struct.col_name); } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, get_table_column_statistics_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(3); + 
BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); @@ -155467,6 +157627,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_column_sta struct.col_name = iprot.readString(); struct.setCol_nameIsSet(true); } + if (incoming.get(3)) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } } } @@ -156266,6 +158430,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_column_sta private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField PART_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("part_name", org.apache.thrift.protocol.TType.STRING, (short)3); private static final org.apache.thrift.protocol.TField COL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("col_name", org.apache.thrift.protocol.TType.STRING, (short)4); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)5); private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>(); static { @@ -156277,13 +158442,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_column_sta private String tbl_name; // required private String part_name; // required private String col_name; // required + private String validWriteIdList; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "db_name"), TBL_NAME((short)2, "tbl_name"), PART_NAME((short)3, "part_name"), - COL_NAME((short)4, "col_name"); + COL_NAME((short)4, "col_name"), + VALID_WRITE_ID_LIST((short)5, "validWriteIdList"); private static final Map byName = new HashMap(); @@ -156306,6 +158473,8 @@ public static _Fields findByThriftId(int fieldId) { return PART_NAME; case 4: // COL_NAME return COL_NAME; + case 5: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; default: return null; } @@ -156357,6 +158526,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.COL_NAME, new org.apache.thrift.meta_data.FieldMetaData("col_name", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_partition_column_statistics_args.class, metaDataMap); } @@ -156368,13 +158539,15 @@ public get_partition_column_statistics_args( String db_name, String tbl_name, String part_name, - String col_name) + String col_name, + String validWriteIdList) { this(); this.db_name = db_name; this.tbl_name = tbl_name; this.part_name = part_name; this.col_name = col_name; + this.validWriteIdList = validWriteIdList; } /** @@ -156393,6 +158566,9 @@ public
get_partition_column_statistics_args(get_partition_column_statistics_args if (other.isSetCol_name()) { this.col_name = other.col_name; } + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } } public get_partition_column_statistics_args deepCopy() { @@ -156405,6 +158581,7 @@ public void clear() { this.tbl_name = null; this.part_name = null; this.col_name = null; + this.validWriteIdList = null; } public String getDb_name() { @@ -156499,6 +158676,29 @@ public void setCol_nameIsSet(boolean value) { } } + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -156533,6 +158733,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + } } @@ -156550,6 +158758,9 @@ public Object getFieldValue(_Fields field) { case COL_NAME: return getCol_name(); + case VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + } throw new IllegalStateException(); } @@ -156569,6 +158780,8 @@ public boolean isSet(_Fields field) { return isSetPart_name(); case COL_NAME: return isSetCol_name(); + case VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); } throw new IllegalStateException(); } @@ -156622,6 +158835,15 @@ public boolean equals(get_partition_column_statistics_args that) { return false; } + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if (!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + return true; } @@ -156649,6 +158871,11 @@ public int hashCode() { if (present_col_name) list.add(col_name); + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + return list.hashCode(); } @@ -156700,6 +158927,16 @@ public int compareTo(get_partition_column_statistics_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -156751,6 +158988,14 @@ public String toString() { sb.append(this.col_name); } first = false; + if (!first) sb.append(", "); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -156826,6 +159071,14 @@ 
public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_colum org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 5: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -156859,6 +159112,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_colu oprot.writeString(struct.col_name); oprot.writeFieldEnd(); } + if (struct.validWriteIdList != null) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -156889,7 +159147,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_colum if (struct.isSetCol_name()) { optionals.set(3); } - oprot.writeBitSet(optionals, 4); + if (struct.isSetValidWriteIdList()) { + optionals.set(4); + } + oprot.writeBitSet(optionals, 5); if (struct.isSetDb_name()) { oprot.writeString(struct.db_name); } @@ -156902,12 +159163,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_colum if (struct.isSetCol_name()) { oprot.writeString(struct.col_name); } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_column_statistics_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(4); + BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); @@ -156924,6 +159188,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_column struct.col_name = iprot.readString(); struct.setCol_nameIsSet(true); } + if (incoming.get(4)) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } } } @@ -168336,13 +170604,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_functions_resul case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1660 = iprot.readListBegin(); - struct.success = new ArrayList(_list1660.size); - String _elem1661; - for (int _i1662 = 0; _i1662 < _list1660.size; ++_i1662) + org.apache.thrift.protocol.TList _list1668 = iprot.readListBegin(); + struct.success = new ArrayList(_list1668.size); + String _elem1669; + for (int _i1670 = 0; _i1670 < _list1668.size; ++_i1670) { - _elem1661 = iprot.readString(); - struct.success.add(_elem1661); + _elem1669 = iprot.readString(); + struct.success.add(_elem1669); } iprot.readListEnd(); } @@ -168377,9 +170645,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_functions_resu oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1663 : struct.success) + for (String _iter1671 : struct.success) { - oprot.writeString(_iter1663); + oprot.writeString(_iter1671); } oprot.writeListEnd(); } @@ -168418,9 +170686,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_functions_resul if 
(struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1664 : struct.success) + for (String _iter1672 : struct.success) { - oprot.writeString(_iter1664); + oprot.writeString(_iter1672); } } } @@ -168435,13 +170703,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_functions_result BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1665 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1665.size); - String _elem1666; - for (int _i1667 = 0; _i1667 < _list1665.size; ++_i1667) + org.apache.thrift.protocol.TList _list1673 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1673.size); + String _elem1674; + for (int _i1675 = 0; _i1675 < _list1673.size; ++_i1675) { - _elem1666 = iprot.readString(); - struct.success.add(_elem1666); + _elem1674 = iprot.readString(); + struct.success.add(_elem1674); } } struct.setSuccessIsSet(true); @@ -172496,13 +174764,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_role_names_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1668 = iprot.readListBegin(); - struct.success = new ArrayList(_list1668.size); - String _elem1669; - for (int _i1670 = 0; _i1670 < _list1668.size; ++_i1670) + org.apache.thrift.protocol.TList _list1676 = iprot.readListBegin(); + struct.success = new ArrayList(_list1676.size); + String _elem1677; + for (int _i1678 = 0; _i1678 < _list1676.size; ++_i1678) { - _elem1669 = iprot.readString(); - struct.success.add(_elem1669); + _elem1677 = iprot.readString(); + struct.success.add(_elem1677); } iprot.readListEnd(); } @@ -172537,9 +174805,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_role_names_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1671 : struct.success) + for (String _iter1679 : struct.success) { - oprot.writeString(_iter1671); + oprot.writeString(_iter1679); } oprot.writeListEnd(); } @@ -172578,9 +174846,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_role_names_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1672 : struct.success) + for (String _iter1680 : struct.success) { - oprot.writeString(_iter1672); + oprot.writeString(_iter1680); } } } @@ -172595,13 +174863,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_role_names_resul BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1673 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1673.size); - String _elem1674; - for (int _i1675 = 0; _i1675 < _list1673.size; ++_i1675) + org.apache.thrift.protocol.TList _list1681 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1681.size); + String _elem1682; + for (int _i1683 = 0; _i1683 < _list1681.size; ++_i1683) { - _elem1674 = iprot.readString(); - struct.success.add(_elem1674); + _elem1682 = iprot.readString(); + struct.success.add(_elem1682); } } struct.setSuccessIsSet(true); @@ -175892,14 +178160,14 
@@ public void read(org.apache.thrift.protocol.TProtocol iprot, list_roles_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1676 = iprot.readListBegin(); - struct.success = new ArrayList(_list1676.size); - Role _elem1677; - for (int _i1678 = 0; _i1678 < _list1676.size; ++_i1678) + org.apache.thrift.protocol.TList _list1684 = iprot.readListBegin(); + struct.success = new ArrayList(_list1684.size); + Role _elem1685; + for (int _i1686 = 0; _i1686 < _list1684.size; ++_i1686) { - _elem1677 = new Role(); - _elem1677.read(iprot); - struct.success.add(_elem1677); + _elem1685 = new Role(); + _elem1685.read(iprot); + struct.success.add(_elem1685); } iprot.readListEnd(); } @@ -175934,9 +178202,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, list_roles_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Role _iter1679 : struct.success) + for (Role _iter1687 : struct.success) { - _iter1679.write(oprot); + _iter1687.write(oprot); } oprot.writeListEnd(); } @@ -175975,9 +178243,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, list_roles_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Role _iter1680 : struct.success) + for (Role _iter1688 : struct.success) { - _iter1680.write(oprot); + _iter1688.write(oprot); } } } @@ -175992,14 +178260,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, list_roles_result st BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1681 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1681.size); - Role _elem1682; - for (int _i1683 = 0; _i1683 < _list1681.size; ++_i1683) + org.apache.thrift.protocol.TList _list1689 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1689.size); + Role _elem1690; + for (int _i1691 = 0; _i1691 < _list1689.size; ++_i1691) { - _elem1682 = new Role(); - _elem1682.read(iprot); - struct.success.add(_elem1682); + _elem1690 = new Role(); + _elem1690.read(iprot); + struct.success.add(_elem1690); } } struct.setSuccessIsSet(true); @@ -179004,13 +181272,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_privilege_set_a case 3: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1684 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1684.size); - String _elem1685; - for (int _i1686 = 0; _i1686 < _list1684.size; ++_i1686) + org.apache.thrift.protocol.TList _list1692 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1692.size); + String _elem1693; + for (int _i1694 = 0; _i1694 < _list1692.size; ++_i1694) { - _elem1685 = iprot.readString(); - struct.group_names.add(_elem1685); + _elem1693 = iprot.readString(); + struct.group_names.add(_elem1693); } iprot.readListEnd(); } @@ -179046,9 +181314,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_privilege_set_ oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1687 : struct.group_names) + for (String _iter1695 : 
struct.group_names) { - oprot.writeString(_iter1687); + oprot.writeString(_iter1695); } oprot.writeListEnd(); } @@ -179091,9 +181359,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_privilege_set_a if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1688 : struct.group_names) + for (String _iter1696 : struct.group_names) { - oprot.writeString(_iter1688); + oprot.writeString(_iter1696); } } } @@ -179114,13 +181382,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_privilege_set_ar } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1689 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1689.size); - String _elem1690; - for (int _i1691 = 0; _i1691 < _list1689.size; ++_i1691) + org.apache.thrift.protocol.TList _list1697 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1697.size); + String _elem1698; + for (int _i1699 = 0; _i1699 < _list1697.size; ++_i1699) { - _elem1690 = iprot.readString(); - struct.group_names.add(_elem1690); + _elem1698 = iprot.readString(); + struct.group_names.add(_elem1698); } } struct.setGroup_namesIsSet(true); @@ -180578,14 +182846,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, list_privileges_res case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1692 = iprot.readListBegin(); - struct.success = new ArrayList(_list1692.size); - HiveObjectPrivilege _elem1693; - for (int _i1694 = 0; _i1694 < _list1692.size; ++_i1694) + org.apache.thrift.protocol.TList _list1700 = iprot.readListBegin(); + struct.success = new ArrayList(_list1700.size); + HiveObjectPrivilege _elem1701; + for (int _i1702 = 0; _i1702 < _list1700.size; ++_i1702) { - _elem1693 = new HiveObjectPrivilege(); - _elem1693.read(iprot); - struct.success.add(_elem1693); + _elem1701 = new HiveObjectPrivilege(); + _elem1701.read(iprot); + struct.success.add(_elem1701); } iprot.readListEnd(); } @@ -180620,9 +182888,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, list_privileges_re oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (HiveObjectPrivilege _iter1695 : struct.success) + for (HiveObjectPrivilege _iter1703 : struct.success) { - _iter1695.write(oprot); + _iter1703.write(oprot); } oprot.writeListEnd(); } @@ -180661,9 +182929,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, list_privileges_res if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (HiveObjectPrivilege _iter1696 : struct.success) + for (HiveObjectPrivilege _iter1704 : struct.success) { - _iter1696.write(oprot); + _iter1704.write(oprot); } } } @@ -180678,14 +182946,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, list_privileges_resu BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1697 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1697.size); - HiveObjectPrivilege _elem1698; - for (int _i1699 = 0; _i1699 < _list1697.size; ++_i1699) + org.apache.thrift.protocol.TList _list1705 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1705.size); + HiveObjectPrivilege _elem1706; + for (int _i1707 = 0; _i1707 < _list1705.size; ++_i1707) { - _elem1698 = new HiveObjectPrivilege(); - _elem1698.read(iprot); - struct.success.add(_elem1698); + _elem1706 = new HiveObjectPrivilege(); + _elem1706.read(iprot); + struct.success.add(_elem1706); } } struct.setSuccessIsSet(true); @@ -184632,13 +186900,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, set_ugi_args struct case 2: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1700 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1700.size); - String _elem1701; - for (int _i1702 = 0; _i1702 < _list1700.size; ++_i1702) + org.apache.thrift.protocol.TList _list1708 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1708.size); + String _elem1709; + for (int _i1710 = 0; _i1710 < _list1708.size; ++_i1710) { - _elem1701 = iprot.readString(); - struct.group_names.add(_elem1701); + _elem1709 = iprot.readString(); + struct.group_names.add(_elem1709); } iprot.readListEnd(); } @@ -184669,9 +186937,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, set_ugi_args struc oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1703 : struct.group_names) + for (String _iter1711 : struct.group_names) { - oprot.writeString(_iter1703); + oprot.writeString(_iter1711); } oprot.writeListEnd(); } @@ -184708,9 +186976,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, set_ugi_args struct if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1704 : struct.group_names) + for (String _iter1712 : struct.group_names) { - oprot.writeString(_iter1704); + oprot.writeString(_iter1712); } } } @@ -184726,13 +186994,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, set_ugi_args struct) } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list1705 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1705.size); - String _elem1706; - for (int _i1707 = 0; _i1707 < _list1705.size; ++_i1707) + org.apache.thrift.protocol.TList _list1713 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1713.size); + String _elem1714; + for (int _i1715 = 0; _i1715 < _list1713.size; ++_i1715) { - _elem1706 = iprot.readString(); - struct.group_names.add(_elem1706); + _elem1714 = iprot.readString(); + struct.group_names.add(_elem1714); } } struct.setGroup_namesIsSet(true); @@ -185135,13 +187403,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, set_ugi_result stru case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1708 = iprot.readListBegin(); - struct.success = new ArrayList(_list1708.size); - String _elem1709; - for (int _i1710 = 0; _i1710 < _list1708.size; ++_i1710) + org.apache.thrift.protocol.TList _list1716 = iprot.readListBegin(); + struct.success = new ArrayList(_list1716.size); + String _elem1717; + for (int _i1718 = 0; _i1718 < _list1716.size; ++_i1718) { - _elem1709 = iprot.readString(); - 
struct.success.add(_elem1709); + _elem1717 = iprot.readString(); + struct.success.add(_elem1717); } iprot.readListEnd(); } @@ -185176,9 +187444,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, set_ugi_result str oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1711 : struct.success) + for (String _iter1719 : struct.success) { - oprot.writeString(_iter1711); + oprot.writeString(_iter1719); } oprot.writeListEnd(); } @@ -185217,9 +187485,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, set_ugi_result stru if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1712 : struct.success) + for (String _iter1720 : struct.success) { - oprot.writeString(_iter1712); + oprot.writeString(_iter1720); } } } @@ -185234,13 +187502,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, set_ugi_result struc BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1713 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1713.size); - String _elem1714; - for (int _i1715 = 0; _i1715 < _list1713.size; ++_i1715) + org.apache.thrift.protocol.TList _list1721 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1721.size); + String _elem1722; + for (int _i1723 = 0; _i1723 < _list1721.size; ++_i1723) { - _elem1714 = iprot.readString(); - struct.success.add(_elem1714); + _elem1722 = iprot.readString(); + struct.success.add(_elem1722); } } struct.setSuccessIsSet(true); @@ -190531,13 +192799,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_token_ident case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1716 = iprot.readListBegin(); - struct.success = new ArrayList(_list1716.size); - String _elem1717; - for (int _i1718 = 0; _i1718 < _list1716.size; ++_i1718) + org.apache.thrift.protocol.TList _list1724 = iprot.readListBegin(); + struct.success = new ArrayList(_list1724.size); + String _elem1725; + for (int _i1726 = 0; _i1726 < _list1724.size; ++_i1726) { - _elem1717 = iprot.readString(); - struct.success.add(_elem1717); + _elem1725 = iprot.readString(); + struct.success.add(_elem1725); } iprot.readListEnd(); } @@ -190563,9 +192831,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_token_iden oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1719 : struct.success) + for (String _iter1727 : struct.success) { - oprot.writeString(_iter1719); + oprot.writeString(_iter1727); } oprot.writeListEnd(); } @@ -190596,9 +192864,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_token_ident if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1720 : struct.success) + for (String _iter1728 : struct.success) { - oprot.writeString(_iter1720); + oprot.writeString(_iter1728); } } } @@ -190610,13 +192878,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_token_identi BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1721 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1721.size); - String _elem1722; - for (int _i1723 = 0; _i1723 < _list1721.size; ++_i1723) + org.apache.thrift.protocol.TList _list1729 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1729.size); + String _elem1730; + for (int _i1731 = 0; _i1731 < _list1729.size; ++_i1731) { - _elem1722 = iprot.readString(); - struct.success.add(_elem1722); + _elem1730 = iprot.readString(); + struct.success.add(_elem1730); } } struct.setSuccessIsSet(true); @@ -193646,13 +195914,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_master_keys_res case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1724 = iprot.readListBegin(); - struct.success = new ArrayList(_list1724.size); - String _elem1725; - for (int _i1726 = 0; _i1726 < _list1724.size; ++_i1726) + org.apache.thrift.protocol.TList _list1732 = iprot.readListBegin(); + struct.success = new ArrayList(_list1732.size); + String _elem1733; + for (int _i1734 = 0; _i1734 < _list1732.size; ++_i1734) { - _elem1725 = iprot.readString(); - struct.success.add(_elem1725); + _elem1733 = iprot.readString(); + struct.success.add(_elem1733); } iprot.readListEnd(); } @@ -193678,9 +195946,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_master_keys_re oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1727 : struct.success) + for (String _iter1735 : struct.success) { - oprot.writeString(_iter1727); + oprot.writeString(_iter1735); } oprot.writeListEnd(); } @@ -193711,9 +195979,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_master_keys_res if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1728 : struct.success) + for (String _iter1736 : struct.success) { - oprot.writeString(_iter1728); + oprot.writeString(_iter1736); } } } @@ -193725,13 +195993,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_master_keys_resu BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1729 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1729.size); - String _elem1730; - for (int _i1731 = 0; _i1731 < _list1729.size; ++_i1731) + org.apache.thrift.protocol.TList _list1737 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1737.size); + String _elem1738; + for (int _i1739 = 0; _i1739 < _list1737.size; ++_i1739) { - _elem1730 = iprot.readString(); - struct.success.add(_elem1730); + _elem1738 = iprot.readString(); + struct.success.add(_elem1738); } } struct.setSuccessIsSet(true); @@ -210852,13 +213120,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, find_columns_with_s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1732 = iprot.readListBegin(); - struct.success = new ArrayList(_list1732.size); - String _elem1733; - for (int _i1734 = 0; _i1734 < _list1732.size; ++_i1734) + org.apache.thrift.protocol.TList _list1740 = iprot.readListBegin(); + 
struct.success = new ArrayList(_list1740.size); + String _elem1741; + for (int _i1742 = 0; _i1742 < _list1740.size; ++_i1742) { - _elem1733 = iprot.readString(); - struct.success.add(_elem1733); + _elem1741 = iprot.readString(); + struct.success.add(_elem1741); } iprot.readListEnd(); } @@ -210884,9 +213152,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, find_columns_with_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1735 : struct.success) + for (String _iter1743 : struct.success) { - oprot.writeString(_iter1735); + oprot.writeString(_iter1743); } oprot.writeListEnd(); } @@ -210917,9 +213185,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, find_columns_with_s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1736 : struct.success) + for (String _iter1744 : struct.success) { - oprot.writeString(_iter1736); + oprot.writeString(_iter1744); } } } @@ -210931,13 +213199,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, find_columns_with_st BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1737 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1737.size); - String _elem1738; - for (int _i1739 = 0; _i1739 < _list1737.size; ++_i1739) + org.apache.thrift.protocol.TList _list1745 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1745.size); + String _elem1746; + for (int _i1747 = 0; _i1747 < _list1745.size; ++_i1747) { - _elem1738 = iprot.readString(); - struct.success.add(_elem1738); + _elem1746 = iprot.readString(); + struct.success.add(_elem1746); } } struct.setSuccessIsSet(true); @@ -247823,14 +250091,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_schema_all_vers case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1740 = iprot.readListBegin(); - struct.success = new ArrayList(_list1740.size); - SchemaVersion _elem1741; - for (int _i1742 = 0; _i1742 < _list1740.size; ++_i1742) + org.apache.thrift.protocol.TList _list1748 = iprot.readListBegin(); + struct.success = new ArrayList(_list1748.size); + SchemaVersion _elem1749; + for (int _i1750 = 0; _i1750 < _list1748.size; ++_i1750) { - _elem1741 = new SchemaVersion(); - _elem1741.read(iprot); - struct.success.add(_elem1741); + _elem1749 = new SchemaVersion(); + _elem1749.read(iprot); + struct.success.add(_elem1749); } iprot.readListEnd(); } @@ -247874,9 +250142,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_schema_all_ver oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (SchemaVersion _iter1743 : struct.success) + for (SchemaVersion _iter1751 : struct.success) { - _iter1743.write(oprot); + _iter1751.write(oprot); } oprot.writeListEnd(); } @@ -247923,9 +250191,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_schema_all_vers if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (SchemaVersion _iter1744 : struct.success) + for (SchemaVersion _iter1752 : struct.success) { - _iter1744.write(oprot); + 
_iter1752.write(oprot); } } } @@ -247943,14 +250211,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_all_versi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1745 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1745.size); - SchemaVersion _elem1746; - for (int _i1747 = 0; _i1747 < _list1745.size; ++_i1747) + org.apache.thrift.protocol.TList _list1753 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1753.size); + SchemaVersion _elem1754; + for (int _i1755 = 0; _i1755 < _list1753.size; ++_i1755) { - _elem1746 = new SchemaVersion(); - _elem1746.read(iprot); - struct.success.add(_elem1746); + _elem1754 = new SchemaVersion(); + _elem1754.read(iprot); + struct.success.add(_elem1754); } } struct.setSuccessIsSet(true); @@ -256493,14 +258761,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_runtime_stats_r case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1748 = iprot.readListBegin(); - struct.success = new ArrayList(_list1748.size); - RuntimeStat _elem1749; - for (int _i1750 = 0; _i1750 < _list1748.size; ++_i1750) + org.apache.thrift.protocol.TList _list1756 = iprot.readListBegin(); + struct.success = new ArrayList(_list1756.size); + RuntimeStat _elem1757; + for (int _i1758 = 0; _i1758 < _list1756.size; ++_i1758) { - _elem1749 = new RuntimeStat(); - _elem1749.read(iprot); - struct.success.add(_elem1749); + _elem1757 = new RuntimeStat(); + _elem1757.read(iprot); + struct.success.add(_elem1757); } iprot.readListEnd(); } @@ -256535,9 +258803,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_runtime_stats_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (RuntimeStat _iter1751 : struct.success) + for (RuntimeStat _iter1759 : struct.success) { - _iter1751.write(oprot); + _iter1759.write(oprot); } oprot.writeListEnd(); } @@ -256576,9 +258844,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_runtime_stats_r if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (RuntimeStat _iter1752 : struct.success) + for (RuntimeStat _iter1760 : struct.success) { - _iter1752.write(oprot); + _iter1760.write(oprot); } } } @@ -256593,14 +258861,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_runtime_stats_re BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1753 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1753.size); - RuntimeStat _elem1754; - for (int _i1755 = 0; _i1755 < _list1753.size; ++_i1755) + org.apache.thrift.protocol.TList _list1761 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1761.size); + RuntimeStat _elem1762; + for (int _i1763 = 0; _i1763 < _list1761.size; ++_i1763) { - _elem1754 = new RuntimeStat(); - _elem1754.read(iprot); - struct.success.add(_elem1754); + _elem1762 = new RuntimeStat(); + _elem1762.read(iprot); + struct.success.add(_elem1762); } } struct.setSuccessIsSet(true); diff --git 
a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMFullResourcePlan.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMFullResourcePlan.java index 080111d85b..a3fc814746 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMFullResourcePlan.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMFullResourcePlan.java @@ -755,14 +755,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMFullResourcePlan case 2: // POOLS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list968 = iprot.readListBegin(); - struct.pools = new ArrayList(_list968.size); - WMPool _elem969; - for (int _i970 = 0; _i970 < _list968.size; ++_i970) + org.apache.thrift.protocol.TList _list976 = iprot.readListBegin(); + struct.pools = new ArrayList(_list976.size); + WMPool _elem977; + for (int _i978 = 0; _i978 < _list976.size; ++_i978) { - _elem969 = new WMPool(); - _elem969.read(iprot); - struct.pools.add(_elem969); + _elem977 = new WMPool(); + _elem977.read(iprot); + struct.pools.add(_elem977); } iprot.readListEnd(); } @@ -774,14 +774,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMFullResourcePlan case 3: // MAPPINGS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list971 = iprot.readListBegin(); - struct.mappings = new ArrayList(_list971.size); - WMMapping _elem972; - for (int _i973 = 0; _i973 < _list971.size; ++_i973) + org.apache.thrift.protocol.TList _list979 = iprot.readListBegin(); + struct.mappings = new ArrayList(_list979.size); + WMMapping _elem980; + for (int _i981 = 0; _i981 < _list979.size; ++_i981) { - _elem972 = new WMMapping(); - _elem972.read(iprot); - struct.mappings.add(_elem972); + _elem980 = new WMMapping(); + _elem980.read(iprot); + struct.mappings.add(_elem980); } iprot.readListEnd(); } @@ -793,14 +793,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMFullResourcePlan case 4: // TRIGGERS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list974 = iprot.readListBegin(); - struct.triggers = new ArrayList(_list974.size); - WMTrigger _elem975; - for (int _i976 = 0; _i976 < _list974.size; ++_i976) + org.apache.thrift.protocol.TList _list982 = iprot.readListBegin(); + struct.triggers = new ArrayList(_list982.size); + WMTrigger _elem983; + for (int _i984 = 0; _i984 < _list982.size; ++_i984) { - _elem975 = new WMTrigger(); - _elem975.read(iprot); - struct.triggers.add(_elem975); + _elem983 = new WMTrigger(); + _elem983.read(iprot); + struct.triggers.add(_elem983); } iprot.readListEnd(); } @@ -812,14 +812,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMFullResourcePlan case 5: // POOL_TRIGGERS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list977 = iprot.readListBegin(); - struct.poolTriggers = new ArrayList(_list977.size); - WMPoolTrigger _elem978; - for (int _i979 = 0; _i979 < _list977.size; ++_i979) + org.apache.thrift.protocol.TList _list985 = iprot.readListBegin(); + struct.poolTriggers = new ArrayList(_list985.size); + WMPoolTrigger _elem986; + for (int _i987 = 0; _i987 < _list985.size; ++_i987) { - _elem978 = new WMPoolTrigger(); - _elem978.read(iprot); - 
struct.poolTriggers.add(_elem978); + _elem986 = new WMPoolTrigger(); + _elem986.read(iprot); + struct.poolTriggers.add(_elem986); } iprot.readListEnd(); } @@ -850,9 +850,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMFullResourcePlan oprot.writeFieldBegin(POOLS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.pools.size())); - for (WMPool _iter980 : struct.pools) + for (WMPool _iter988 : struct.pools) { - _iter980.write(oprot); + _iter988.write(oprot); } oprot.writeListEnd(); } @@ -863,9 +863,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMFullResourcePlan oprot.writeFieldBegin(MAPPINGS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.mappings.size())); - for (WMMapping _iter981 : struct.mappings) + for (WMMapping _iter989 : struct.mappings) { - _iter981.write(oprot); + _iter989.write(oprot); } oprot.writeListEnd(); } @@ -877,9 +877,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMFullResourcePlan oprot.writeFieldBegin(TRIGGERS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.triggers.size())); - for (WMTrigger _iter982 : struct.triggers) + for (WMTrigger _iter990 : struct.triggers) { - _iter982.write(oprot); + _iter990.write(oprot); } oprot.writeListEnd(); } @@ -891,9 +891,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMFullResourcePlan oprot.writeFieldBegin(POOL_TRIGGERS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.poolTriggers.size())); - for (WMPoolTrigger _iter983 : struct.poolTriggers) + for (WMPoolTrigger _iter991 : struct.poolTriggers) { - _iter983.write(oprot); + _iter991.write(oprot); } oprot.writeListEnd(); } @@ -920,9 +920,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, WMFullResourcePlan struct.plan.write(oprot); { oprot.writeI32(struct.pools.size()); - for (WMPool _iter984 : struct.pools) + for (WMPool _iter992 : struct.pools) { - _iter984.write(oprot); + _iter992.write(oprot); } } BitSet optionals = new BitSet(); @@ -939,27 +939,27 @@ public void write(org.apache.thrift.protocol.TProtocol prot, WMFullResourcePlan if (struct.isSetMappings()) { { oprot.writeI32(struct.mappings.size()); - for (WMMapping _iter985 : struct.mappings) + for (WMMapping _iter993 : struct.mappings) { - _iter985.write(oprot); + _iter993.write(oprot); } } } if (struct.isSetTriggers()) { { oprot.writeI32(struct.triggers.size()); - for (WMTrigger _iter986 : struct.triggers) + for (WMTrigger _iter994 : struct.triggers) { - _iter986.write(oprot); + _iter994.write(oprot); } } } if (struct.isSetPoolTriggers()) { { oprot.writeI32(struct.poolTriggers.size()); - for (WMPoolTrigger _iter987 : struct.poolTriggers) + for (WMPoolTrigger _iter995 : struct.poolTriggers) { - _iter987.write(oprot); + _iter995.write(oprot); } } } @@ -972,56 +972,56 @@ public void read(org.apache.thrift.protocol.TProtocol prot, WMFullResourcePlan s struct.plan.read(iprot); struct.setPlanIsSet(true); { - org.apache.thrift.protocol.TList _list988 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.pools = new ArrayList(_list988.size); - WMPool _elem989; - for (int _i990 = 0; _i990 < _list988.size; ++_i990) + org.apache.thrift.protocol.TList _list996 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.pools = new ArrayList(_list996.size); + WMPool _elem997; + for (int _i998 = 0; _i998 < _list996.size; ++_i998) { - _elem989 = new WMPool(); - _elem989.read(iprot); - struct.pools.add(_elem989); + _elem997 = new WMPool(); + _elem997.read(iprot); + struct.pools.add(_elem997); } } struct.setPoolsIsSet(true); BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list991 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.mappings = new ArrayList(_list991.size); - WMMapping _elem992; - for (int _i993 = 0; _i993 < _list991.size; ++_i993) + org.apache.thrift.protocol.TList _list999 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.mappings = new ArrayList(_list999.size); + WMMapping _elem1000; + for (int _i1001 = 0; _i1001 < _list999.size; ++_i1001) { - _elem992 = new WMMapping(); - _elem992.read(iprot); - struct.mappings.add(_elem992); + _elem1000 = new WMMapping(); + _elem1000.read(iprot); + struct.mappings.add(_elem1000); } } struct.setMappingsIsSet(true); } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list994 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.triggers = new ArrayList(_list994.size); - WMTrigger _elem995; - for (int _i996 = 0; _i996 < _list994.size; ++_i996) + org.apache.thrift.protocol.TList _list1002 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.triggers = new ArrayList(_list1002.size); + WMTrigger _elem1003; + for (int _i1004 = 0; _i1004 < _list1002.size; ++_i1004) { - _elem995 = new WMTrigger(); - _elem995.read(iprot); - struct.triggers.add(_elem995); + _elem1003 = new WMTrigger(); + _elem1003.read(iprot); + struct.triggers.add(_elem1003); } } struct.setTriggersIsSet(true); } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list997 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.poolTriggers = new ArrayList(_list997.size); - WMPoolTrigger _elem998; - for (int _i999 = 0; _i999 < _list997.size; ++_i999) + org.apache.thrift.protocol.TList _list1005 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.poolTriggers = new ArrayList(_list1005.size); + WMPoolTrigger _elem1006; + for (int _i1007 = 0; _i1007 < _list1005.size; ++_i1007) { - _elem998 = new WMPoolTrigger(); - _elem998.read(iprot); - struct.poolTriggers.add(_elem998); + _elem1006 = new WMPoolTrigger(); + _elem1006.read(iprot); + struct.poolTriggers.add(_elem1006); } } struct.setPoolTriggersIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetAllResourcePlanResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetAllResourcePlanResponse.java index d0174005ca..b98906e395 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetAllResourcePlanResponse.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetAllResourcePlanResponse.java @@ -346,14 +346,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMGetAllResourcePla case 1: // 
RESOURCE_PLANS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1000 = iprot.readListBegin(); - struct.resourcePlans = new ArrayList(_list1000.size); - WMResourcePlan _elem1001; - for (int _i1002 = 0; _i1002 < _list1000.size; ++_i1002) + org.apache.thrift.protocol.TList _list1008 = iprot.readListBegin(); + struct.resourcePlans = new ArrayList(_list1008.size); + WMResourcePlan _elem1009; + for (int _i1010 = 0; _i1010 < _list1008.size; ++_i1010) { - _elem1001 = new WMResourcePlan(); - _elem1001.read(iprot); - struct.resourcePlans.add(_elem1001); + _elem1009 = new WMResourcePlan(); + _elem1009.read(iprot); + struct.resourcePlans.add(_elem1009); } iprot.readListEnd(); } @@ -380,9 +380,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMGetAllResourcePl oprot.writeFieldBegin(RESOURCE_PLANS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.resourcePlans.size())); - for (WMResourcePlan _iter1003 : struct.resourcePlans) + for (WMResourcePlan _iter1011 : struct.resourcePlans) { - _iter1003.write(oprot); + _iter1011.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, WMGetAllResourcePla if (struct.isSetResourcePlans()) { { oprot.writeI32(struct.resourcePlans.size()); - for (WMResourcePlan _iter1004 : struct.resourcePlans) + for (WMResourcePlan _iter1012 : struct.resourcePlans) { - _iter1004.write(oprot); + _iter1012.write(oprot); } } } @@ -428,14 +428,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, WMGetAllResourcePlan BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1005 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.resourcePlans = new ArrayList(_list1005.size); - WMResourcePlan _elem1006; - for (int _i1007 = 0; _i1007 < _list1005.size; ++_i1007) + org.apache.thrift.protocol.TList _list1013 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.resourcePlans = new ArrayList(_list1013.size); + WMResourcePlan _elem1014; + for (int _i1015 = 0; _i1015 < _list1013.size; ++_i1015) { - _elem1006 = new WMResourcePlan(); - _elem1006.read(iprot); - struct.resourcePlans.add(_elem1006); + _elem1014 = new WMResourcePlan(); + _elem1014.read(iprot); + struct.resourcePlans.add(_elem1014); } } struct.setResourcePlansIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetTriggersForResourePlanResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetTriggersForResourePlanResponse.java index e5425909d4..52b4177f9e 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetTriggersForResourePlanResponse.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetTriggersForResourePlanResponse.java @@ -346,14 +346,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMGetTriggersForRes case 1: // TRIGGERS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1024 = iprot.readListBegin(); - struct.triggers = new ArrayList(_list1024.size); - WMTrigger _elem1025; - for (int _i1026 = 0; _i1026 < 
_list1024.size; ++_i1026) + org.apache.thrift.protocol.TList _list1032 = iprot.readListBegin(); + struct.triggers = new ArrayList(_list1032.size); + WMTrigger _elem1033; + for (int _i1034 = 0; _i1034 < _list1032.size; ++_i1034) { - _elem1025 = new WMTrigger(); - _elem1025.read(iprot); - struct.triggers.add(_elem1025); + _elem1033 = new WMTrigger(); + _elem1033.read(iprot); + struct.triggers.add(_elem1033); } iprot.readListEnd(); } @@ -380,9 +380,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMGetTriggersForRe oprot.writeFieldBegin(TRIGGERS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.triggers.size())); - for (WMTrigger _iter1027 : struct.triggers) + for (WMTrigger _iter1035 : struct.triggers) { - _iter1027.write(oprot); + _iter1035.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, WMGetTriggersForRes if (struct.isSetTriggers()) { { oprot.writeI32(struct.triggers.size()); - for (WMTrigger _iter1028 : struct.triggers) + for (WMTrigger _iter1036 : struct.triggers) { - _iter1028.write(oprot); + _iter1036.write(oprot); } } } @@ -428,14 +428,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, WMGetTriggersForReso BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1029 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.triggers = new ArrayList(_list1029.size); - WMTrigger _elem1030; - for (int _i1031 = 0; _i1031 < _list1029.size; ++_i1031) + org.apache.thrift.protocol.TList _list1037 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.triggers = new ArrayList(_list1037.size); + WMTrigger _elem1038; + for (int _i1039 = 0; _i1039 < _list1037.size; ++_i1039) { - _elem1030 = new WMTrigger(); - _elem1030.read(iprot); - struct.triggers.add(_elem1030); + _elem1038 = new WMTrigger(); + _elem1038.read(iprot); + struct.triggers.add(_elem1038); } } struct.setTriggersIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMValidateResourcePlanResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMValidateResourcePlanResponse.java index b12c2284a2..cbba529992 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMValidateResourcePlanResponse.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMValidateResourcePlanResponse.java @@ -441,13 +441,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMValidateResourceP case 1: // ERRORS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1008 = iprot.readListBegin(); - struct.errors = new ArrayList(_list1008.size); - String _elem1009; - for (int _i1010 = 0; _i1010 < _list1008.size; ++_i1010) + org.apache.thrift.protocol.TList _list1016 = iprot.readListBegin(); + struct.errors = new ArrayList(_list1016.size); + String _elem1017; + for (int _i1018 = 0; _i1018 < _list1016.size; ++_i1018) { - _elem1009 = iprot.readString(); - struct.errors.add(_elem1009); + _elem1017 = iprot.readString(); + struct.errors.add(_elem1017); } iprot.readListEnd(); } @@ -459,13 +459,13 @@ public void 
read(org.apache.thrift.protocol.TProtocol iprot, WMValidateResourceP case 2: // WARNINGS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1011 = iprot.readListBegin(); - struct.warnings = new ArrayList(_list1011.size); - String _elem1012; - for (int _i1013 = 0; _i1013 < _list1011.size; ++_i1013) + org.apache.thrift.protocol.TList _list1019 = iprot.readListBegin(); + struct.warnings = new ArrayList(_list1019.size); + String _elem1020; + for (int _i1021 = 0; _i1021 < _list1019.size; ++_i1021) { - _elem1012 = iprot.readString(); - struct.warnings.add(_elem1012); + _elem1020 = iprot.readString(); + struct.warnings.add(_elem1020); } iprot.readListEnd(); } @@ -492,9 +492,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMValidateResource oprot.writeFieldBegin(ERRORS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.errors.size())); - for (String _iter1014 : struct.errors) + for (String _iter1022 : struct.errors) { - oprot.writeString(_iter1014); + oprot.writeString(_iter1022); } oprot.writeListEnd(); } @@ -506,9 +506,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMValidateResource oprot.writeFieldBegin(WARNINGS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.warnings.size())); - for (String _iter1015 : struct.warnings) + for (String _iter1023 : struct.warnings) { - oprot.writeString(_iter1015); + oprot.writeString(_iter1023); } oprot.writeListEnd(); } @@ -543,18 +543,18 @@ public void write(org.apache.thrift.protocol.TProtocol prot, WMValidateResourceP if (struct.isSetErrors()) { { oprot.writeI32(struct.errors.size()); - for (String _iter1016 : struct.errors) + for (String _iter1024 : struct.errors) { - oprot.writeString(_iter1016); + oprot.writeString(_iter1024); } } } if (struct.isSetWarnings()) { { oprot.writeI32(struct.warnings.size()); - for (String _iter1017 : struct.warnings) + for (String _iter1025 : struct.warnings) { - oprot.writeString(_iter1017); + oprot.writeString(_iter1025); } } } @@ -566,26 +566,26 @@ public void read(org.apache.thrift.protocol.TProtocol prot, WMValidateResourcePl BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1018 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.errors = new ArrayList(_list1018.size); - String _elem1019; - for (int _i1020 = 0; _i1020 < _list1018.size; ++_i1020) + org.apache.thrift.protocol.TList _list1026 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.errors = new ArrayList(_list1026.size); + String _elem1027; + for (int _i1028 = 0; _i1028 < _list1026.size; ++_i1028) { - _elem1019 = iprot.readString(); - struct.errors.add(_elem1019); + _elem1027 = iprot.readString(); + struct.errors.add(_elem1027); } } struct.setErrorsIsSet(true); } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list1021 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.warnings = new ArrayList(_list1021.size); - String _elem1022; - for (int _i1023 = 0; _i1023 < _list1021.size; ++_i1023) + org.apache.thrift.protocol.TList _list1029 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.warnings = new ArrayList(_list1029.size); + String _elem1030; + 
for (int _i1031 = 0; _i1031 < _list1029.size; ++_i1031) { - _elem1022 = iprot.readString(); - struct.warnings.add(_elem1022); + _elem1030 = iprot.readString(); + struct.warnings.add(_elem1030); } } struct.setWarningsIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WriteNotificationLogRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WriteNotificationLogRequest.java index 726eed490c..bc6f3791f9 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WriteNotificationLogRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WriteNotificationLogRequest.java @@ -813,13 +813,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WriteNotificationLo case 6: // PARTITION_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list828 = iprot.readListBegin(); - struct.partitionVals = new ArrayList(_list828.size); - String _elem829; - for (int _i830 = 0; _i830 < _list828.size; ++_i830) + org.apache.thrift.protocol.TList _list836 = iprot.readListBegin(); + struct.partitionVals = new ArrayList(_list836.size); + String _elem837; + for (int _i838 = 0; _i838 < _list836.size; ++_i838) { - _elem829 = iprot.readString(); - struct.partitionVals.add(_elem829); + _elem837 = iprot.readString(); + struct.partitionVals.add(_elem837); } iprot.readListEnd(); } @@ -867,9 +867,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WriteNotificationL oprot.writeFieldBegin(PARTITION_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partitionVals.size())); - for (String _iter831 : struct.partitionVals) + for (String _iter839 : struct.partitionVals) { - oprot.writeString(_iter831); + oprot.writeString(_iter839); } oprot.writeListEnd(); } @@ -906,9 +906,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, WriteNotificationLo if (struct.isSetPartitionVals()) { { oprot.writeI32(struct.partitionVals.size()); - for (String _iter832 : struct.partitionVals) + for (String _iter840 : struct.partitionVals) { - oprot.writeString(_iter832); + oprot.writeString(_iter840); } } } @@ -931,13 +931,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, WriteNotificationLog BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list833 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionVals = new ArrayList(_list833.size); - String _elem834; - for (int _i835 = 0; _i835 < _list833.size; ++_i835) + org.apache.thrift.protocol.TList _list841 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionVals = new ArrayList(_list841.size); + String _elem842; + for (int _i843 = 0; _i843 < _list841.size; ++_i843) { - _elem834 = iprot.readString(); - struct.partitionVals.add(_elem834); + _elem842 = iprot.readString(); + struct.partitionVals.add(_elem842); } } struct.setPartitionValsIsSet(true);
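[Editor's note] The churn in the generated Java hunks above is mechanical: regenerating the Thrift bindings after the IDL change shifts the numeric suffix of every generated temporary (_list…, _elem…, _i…, _iter…), and the deserialization pattern is identical on both sides of each hunk. For reference, a minimal sketch of the TupleScheme read path these structs share: a BitSet marks which optional fields are present, and each present list is rebuilt from an i32 count. The class, method, and field names here are illustrative, not from this patch.

import java.util.ArrayList;
import java.util.BitSet;
import java.util.List;

import org.apache.thrift.TException;
import org.apache.thrift.protocol.TList;
import org.apache.thrift.protocol.TTupleProtocol;
import org.apache.thrift.protocol.TType;

public final class TupleSchemeReadSketch {
  // Reads one optional list<string> field, mirroring the generated pattern:
  // readBitSet(n) yields one presence bit per optional field; a set bit means
  // the field follows on the wire as an i32 count plus that many elements.
  public static List<String> readOptionalStringList(TTupleProtocol iprot) throws TException {
    BitSet incoming = iprot.readBitSet(1);
    List<String> values = null;
    if (incoming.get(0)) {
      // The tuple encoding omits the element-type byte; the reader supplies
      // it from the IDL, so only the size is read back here.
      TList list = new TList(TType.STRING, iprot.readI32());
      values = new ArrayList<>(list.size);
      for (int i = 0; i < list.size; ++i) {
        values.add(iprot.readString());
      }
    }
    return values;
  }
}

The writer side is symmetric (writeBitSet, then writeI32(size), then the elements), which is what the _iter… loops above emit.

diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php index da3b409b6c..9256574bf9 100644 ---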
a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php @@ -137,41 +137,45 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf { /** * @param string $db_name * @param string $table_name + * @param string $validWriteIdList * @return \metastore\FieldSchema[] * @throws \metastore\MetaException * @throws \metastore\UnknownTableException * @throws \metastore\UnknownDBException */ - public function get_fields($db_name, $table_name); + public function get_fields($db_name, $table_name, $validWriteIdList); /** * @param string $db_name * @param string $table_name * @param \metastore\EnvironmentContext $environment_context + * @param string $validWriteIdList * @return \metastore\FieldSchema[] * @throws \metastore\MetaException * @throws \metastore\UnknownTableException * @throws \metastore\UnknownDBException */ - public function get_fields_with_environment_context($db_name, $table_name, \metastore\EnvironmentContext $environment_context); + public function get_fields_with_environment_context($db_name, $table_name, \metastore\EnvironmentContext $environment_context, $validWriteIdList); /** * @param string $db_name * @param string $table_name + * @param string $validWriteIdList * @return \metastore\FieldSchema[] * @throws \metastore\MetaException * @throws \metastore\UnknownTableException * @throws \metastore\UnknownDBException */ - public function get_schema($db_name, $table_name); + public function get_schema($db_name, $table_name, $validWriteIdList); /** * @param string $db_name * @param string $table_name * @param \metastore\EnvironmentContext $environment_context + * @param string $validWriteIdList * @return \metastore\FieldSchema[] * @throws \metastore\MetaException * @throws \metastore\UnknownTableException * @throws \metastore\UnknownDBException */ - public function get_schema_with_environment_context($db_name, $table_name, \metastore\EnvironmentContext $environment_context); + public function get_schema_with_environment_context($db_name, $table_name, \metastore\EnvironmentContext $environment_context, $validWriteIdList); /** * @param \metastore\Table $tbl * @throws \metastore\AlreadyExistsException @@ -326,11 +330,12 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf { /** * @param string $dbname * @param string $tbl_name + * @param string $validWriteIdList * @return \metastore\Table * @throws \metastore\MetaException * @throws \metastore\NoSuchObjectException */ - public function get_table($dbname, $tbl_name); + public function get_table($dbname, $tbl_name, $validWriteIdList); /** * @param string $dbname * @param string[] $tbl_names @@ -556,11 +561,12 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf { * @param string $db_name * @param string $tbl_name * @param string[] $part_vals + * @param string $validTxnList * @return \metastore\Partition * @throws \metastore\MetaException * @throws \metastore\NoSuchObjectException */ - public function get_partition($db_name, $tbl_name, array $part_vals); + public function get_partition($db_name, $tbl_name, array $part_vals, $validTxnList); /** * @param array $partitionSpecs * @param string $source_db @@ -593,58 +599,64 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf { * @param string[] $part_vals * @param string $user_name * @param string[] $group_names + * @param string $validTxnList * @return \metastore\Partition * @throws \metastore\MetaException * @throws 
\metastore\NoSuchObjectException */ - public function get_partition_with_auth($db_name, $tbl_name, array $part_vals, $user_name, array $group_names); + public function get_partition_with_auth($db_name, $tbl_name, array $part_vals, $user_name, array $group_names, $validTxnList); /** * @param string $db_name * @param string $tbl_name * @param string $part_name + * @param string $validTxnList * @return \metastore\Partition * @throws \metastore\MetaException * @throws \metastore\NoSuchObjectException */ - public function get_partition_by_name($db_name, $tbl_name, $part_name); + public function get_partition_by_name($db_name, $tbl_name, $part_name, $validTxnList); /** * @param string $db_name * @param string $tbl_name * @param int $max_parts + * @param string $validTxnList * @return \metastore\Partition[] * @throws \metastore\NoSuchObjectException * @throws \metastore\MetaException */ - public function get_partitions($db_name, $tbl_name, $max_parts); + public function get_partitions($db_name, $tbl_name, $max_parts, $validTxnList); /** * @param string $db_name * @param string $tbl_name * @param int $max_parts * @param string $user_name * @param string[] $group_names + * @param string $validTxnList * @return \metastore\Partition[] * @throws \metastore\NoSuchObjectException * @throws \metastore\MetaException */ - public function get_partitions_with_auth($db_name, $tbl_name, $max_parts, $user_name, array $group_names); + public function get_partitions_with_auth($db_name, $tbl_name, $max_parts, $user_name, array $group_names, $validTxnList); /** * @param string $db_name * @param string $tbl_name * @param int $max_parts + * @param string $validTxnList * @return \metastore\PartitionSpec[] * @throws \metastore\NoSuchObjectException * @throws \metastore\MetaException */ - public function get_partitions_pspec($db_name, $tbl_name, $max_parts); + public function get_partitions_pspec($db_name, $tbl_name, $max_parts, $validTxnList); /** * @param string $db_name * @param string $tbl_name * @param int $max_parts + * @param string $validTxnList * @return string[] * @throws \metastore\NoSuchObjectException * @throws \metastore\MetaException */ - public function get_partition_names($db_name, $tbl_name, $max_parts); + public function get_partition_names($db_name, $tbl_name, $max_parts, $validTxnList); /** * @param \metastore\PartitionValuesRequest $request * @return \metastore\PartitionValuesResponse @@ -657,11 +669,12 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf { * @param string $tbl_name * @param string[] $part_vals * @param int $max_parts + * @param string $validTxnList * @return \metastore\Partition[] * @throws \metastore\MetaException * @throws \metastore\NoSuchObjectException */ - public function get_partitions_ps($db_name, $tbl_name, array $part_vals, $max_parts); + public function get_partitions_ps($db_name, $tbl_name, array $part_vals, $max_parts, $validTxnList); /** * @param string $db_name * @param string $tbl_name @@ -669,41 +682,45 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf { * @param int $max_parts * @param string $user_name * @param string[] $group_names + * @param string $validTxnList * @return \metastore\Partition[] * @throws \metastore\NoSuchObjectException * @throws \metastore\MetaException */ - public function get_partitions_ps_with_auth($db_name, $tbl_name, array $part_vals, $max_parts, $user_name, array $group_names); + public function get_partitions_ps_with_auth($db_name, $tbl_name, array $part_vals, $max_parts, $user_name, array $group_names, 
$validTxnList); /** * @param string $db_name * @param string $tbl_name * @param string[] $part_vals * @param int $max_parts + * @param string $validTxnList * @return string[] * @throws \metastore\MetaException * @throws \metastore\NoSuchObjectException */ - public function get_partition_names_ps($db_name, $tbl_name, array $part_vals, $max_parts); + public function get_partition_names_ps($db_name, $tbl_name, array $part_vals, $max_parts, $validTxnList); /** * @param string $db_name * @param string $tbl_name * @param string $filter * @param int $max_parts + * @param string $validTxnList * @return \metastore\Partition[] * @throws \metastore\MetaException * @throws \metastore\NoSuchObjectException */ - public function get_partitions_by_filter($db_name, $tbl_name, $filter, $max_parts); + public function get_partitions_by_filter($db_name, $tbl_name, $filter, $max_parts, $validTxnList); /** * @param string $db_name * @param string $tbl_name * @param string $filter * @param int $max_parts + * @param string $validTxnList * @return \metastore\PartitionSpec[] * @throws \metastore\MetaException * @throws \metastore\NoSuchObjectException */ - public function get_part_specs_by_filter($db_name, $tbl_name, $filter, $max_parts); + public function get_part_specs_by_filter($db_name, $tbl_name, $filter, $max_parts, $validTxnList); /** * @param \metastore\PartitionsByExprRequest $req * @return \metastore\PartitionsByExprResult @@ -715,20 +732,22 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf { * @param string $db_name * @param string $tbl_name * @param string $filter + * @param string $validTxnList * @return int * @throws \metastore\MetaException * @throws \metastore\NoSuchObjectException */ - public function get_num_partitions_by_filter($db_name, $tbl_name, $filter); + public function get_num_partitions_by_filter($db_name, $tbl_name, $filter, $validTxnList); /** * @param string $db_name * @param string $tbl_name * @param string[] $names + * @param string $validTxnList * @return \metastore\Partition[] * @throws \metastore\MetaException * @throws \metastore\NoSuchObjectException */ - public function get_partitions_by_names($db_name, $tbl_name, array $names); + public function get_partitions_by_names($db_name, $tbl_name, array $names, $validTxnList); /** * @param \metastore\GetPartitionsByNamesRequest $req * @return \metastore\GetPartitionsByNamesResult @@ -928,25 +947,27 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf { * @param string $db_name * @param string $tbl_name * @param string $col_name + * @param string $validWriteIdList * @return \metastore\ColumnStatistics * @throws \metastore\NoSuchObjectException * @throws \metastore\MetaException * @throws \metastore\InvalidInputException * @throws \metastore\InvalidObjectException */ - public function get_table_column_statistics($db_name, $tbl_name, $col_name); + public function get_table_column_statistics($db_name, $tbl_name, $col_name, $validWriteIdList); /** * @param string $db_name * @param string $tbl_name * @param string $part_name * @param string $col_name + * @param string $validWriteIdList * @return \metastore\ColumnStatistics * @throws \metastore\NoSuchObjectException * @throws \metastore\MetaException * @throws \metastore\InvalidInputException * @throws \metastore\InvalidObjectException */ - public function get_partition_column_statistics($db_name, $tbl_name, $part_name, $col_name); + public function get_partition_column_statistics($db_name, $tbl_name, $part_name, $col_name, $validWriteIdList); /** * @param 
\metastore\TableStatsRequest $request * @return \metastore\TableStatsResult @@ -2633,17 +2654,18 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("get_type_all failed: unknown result"); } - public function get_fields($db_name, $table_name) + public function get_fields($db_name, $table_name, $validWriteIdList) { - $this->send_get_fields($db_name, $table_name); + $this->send_get_fields($db_name, $table_name, $validWriteIdList); return $this->recv_get_fields(); } - public function send_get_fields($db_name, $table_name) + public function send_get_fields($db_name, $table_name, $validWriteIdList) { $args = new \metastore\ThriftHiveMetastore_get_fields_args(); $args->db_name = $db_name; $args->table_name = $table_name; + $args->validWriteIdList = $validWriteIdList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -2694,18 +2716,19 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("get_fields failed: unknown result"); } - public function get_fields_with_environment_context($db_name, $table_name, \metastore\EnvironmentContext $environment_context) + public function get_fields_with_environment_context($db_name, $table_name, \metastore\EnvironmentContext $environment_context, $validWriteIdList) { - $this->send_get_fields_with_environment_context($db_name, $table_name, $environment_context); + $this->send_get_fields_with_environment_context($db_name, $table_name, $environment_context, $validWriteIdList); return $this->recv_get_fields_with_environment_context(); } - public function send_get_fields_with_environment_context($db_name, $table_name, \metastore\EnvironmentContext $environment_context) + public function send_get_fields_with_environment_context($db_name, $table_name, \metastore\EnvironmentContext $environment_context, $validWriteIdList) { $args = new \metastore\ThriftHiveMetastore_get_fields_with_environment_context_args(); $args->db_name = $db_name; $args->table_name = $table_name; $args->environment_context = $environment_context; + $args->validWriteIdList = $validWriteIdList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -2756,17 +2779,18 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("get_fields_with_environment_context failed: unknown result"); } - public function get_schema($db_name, $table_name) + public function get_schema($db_name, $table_name, $validWriteIdList) { - $this->send_get_schema($db_name, $table_name); + $this->send_get_schema($db_name, $table_name, $validWriteIdList); return $this->recv_get_schema(); } - public function send_get_schema($db_name, $table_name) + public function send_get_schema($db_name, $table_name, $validWriteIdList) { $args = new \metastore\ThriftHiveMetastore_get_schema_args(); $args->db_name = $db_name; $args->table_name = $table_name; + $args->validWriteIdList = $validWriteIdList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -2817,18 +2841,19 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("get_schema failed: unknown result"); } - public function get_schema_with_environment_context($db_name, $table_name, \metastore\EnvironmentContext 
$environment_context) + public function get_schema_with_environment_context($db_name, $table_name, \metastore\EnvironmentContext $environment_context, $validWriteIdList) { - $this->send_get_schema_with_environment_context($db_name, $table_name, $environment_context); + $this->send_get_schema_with_environment_context($db_name, $table_name, $environment_context, $validWriteIdList); return $this->recv_get_schema_with_environment_context(); } - public function send_get_schema_with_environment_context($db_name, $table_name, \metastore\EnvironmentContext $environment_context) + public function send_get_schema_with_environment_context($db_name, $table_name, \metastore\EnvironmentContext $environment_context, $validWriteIdList) { $args = new \metastore\ThriftHiveMetastore_get_schema_with_environment_context_args(); $args->db_name = $db_name; $args->table_name = $table_name; $args->environment_context = $environment_context; + $args->validWriteIdList = $validWriteIdList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -4052,17 +4077,18 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("get_all_tables failed: unknown result"); } - public function get_table($dbname, $tbl_name) + public function get_table($dbname, $tbl_name, $validWriteIdList) { - $this->send_get_table($dbname, $tbl_name); + $this->send_get_table($dbname, $tbl_name, $validWriteIdList); return $this->recv_get_table(); } - public function send_get_table($dbname, $tbl_name) + public function send_get_table($dbname, $tbl_name, $validWriteIdList) { $args = new \metastore\ThriftHiveMetastore_get_table_args(); $args->dbname = $dbname; $args->tbl_name = $tbl_name; + $args->validWriteIdList = $validWriteIdList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -5593,18 +5619,19 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("drop_partitions_req failed: unknown result"); } - public function get_partition($db_name, $tbl_name, array $part_vals) + public function get_partition($db_name, $tbl_name, array $part_vals, $validTxnList) { - $this->send_get_partition($db_name, $tbl_name, $part_vals); + $this->send_get_partition($db_name, $tbl_name, $part_vals, $validTxnList); return $this->recv_get_partition(); } - public function send_get_partition($db_name, $tbl_name, array $part_vals) + public function send_get_partition($db_name, $tbl_name, array $part_vals, $validTxnList) { $args = new \metastore\ThriftHiveMetastore_get_partition_args(); $args->db_name = $db_name; $args->tbl_name = $tbl_name; $args->part_vals = $part_vals; + $args->validTxnList = $validTxnList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -5786,13 +5813,13 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("exchange_partitions failed: unknown result"); } - public function get_partition_with_auth($db_name, $tbl_name, array $part_vals, $user_name, array $group_names) + public function get_partition_with_auth($db_name, $tbl_name, array $part_vals, $user_name, array $group_names, $validTxnList) { - $this->send_get_partition_with_auth($db_name, $tbl_name, $part_vals, $user_name, $group_names); + $this->send_get_partition_with_auth($db_name, 
$tbl_name, $part_vals, $user_name, $group_names, $validTxnList); return $this->recv_get_partition_with_auth(); } - public function send_get_partition_with_auth($db_name, $tbl_name, array $part_vals, $user_name, array $group_names) + public function send_get_partition_with_auth($db_name, $tbl_name, array $part_vals, $user_name, array $group_names, $validTxnList) { $args = new \metastore\ThriftHiveMetastore_get_partition_with_auth_args(); $args->db_name = $db_name; @@ -5800,6 +5827,7 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas $args->part_vals = $part_vals; $args->user_name = $user_name; $args->group_names = $group_names; + $args->validTxnList = $validTxnList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -5847,18 +5875,19 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("get_partition_with_auth failed: unknown result"); } - public function get_partition_by_name($db_name, $tbl_name, $part_name) + public function get_partition_by_name($db_name, $tbl_name, $part_name, $validTxnList) { - $this->send_get_partition_by_name($db_name, $tbl_name, $part_name); + $this->send_get_partition_by_name($db_name, $tbl_name, $part_name, $validTxnList); return $this->recv_get_partition_by_name(); } - public function send_get_partition_by_name($db_name, $tbl_name, $part_name) + public function send_get_partition_by_name($db_name, $tbl_name, $part_name, $validTxnList) { $args = new \metastore\ThriftHiveMetastore_get_partition_by_name_args(); $args->db_name = $db_name; $args->tbl_name = $tbl_name; $args->part_name = $part_name; + $args->validTxnList = $validTxnList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -5906,18 +5935,19 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("get_partition_by_name failed: unknown result"); } - public function get_partitions($db_name, $tbl_name, $max_parts) + public function get_partitions($db_name, $tbl_name, $max_parts, $validTxnList) { - $this->send_get_partitions($db_name, $tbl_name, $max_parts); + $this->send_get_partitions($db_name, $tbl_name, $max_parts, $validTxnList); return $this->recv_get_partitions(); } - public function send_get_partitions($db_name, $tbl_name, $max_parts) + public function send_get_partitions($db_name, $tbl_name, $max_parts, $validTxnList) { $args = new \metastore\ThriftHiveMetastore_get_partitions_args(); $args->db_name = $db_name; $args->tbl_name = $tbl_name; $args->max_parts = $max_parts; + $args->validTxnList = $validTxnList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -5965,13 +5995,13 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("get_partitions failed: unknown result"); } - public function get_partitions_with_auth($db_name, $tbl_name, $max_parts, $user_name, array $group_names) + public function get_partitions_with_auth($db_name, $tbl_name, $max_parts, $user_name, array $group_names, $validTxnList) { - $this->send_get_partitions_with_auth($db_name, $tbl_name, $max_parts, $user_name, $group_names); + $this->send_get_partitions_with_auth($db_name, $tbl_name, $max_parts, $user_name, $group_names, $validTxnList); return 
$this->recv_get_partitions_with_auth(); } - public function send_get_partitions_with_auth($db_name, $tbl_name, $max_parts, $user_name, array $group_names) + public function send_get_partitions_with_auth($db_name, $tbl_name, $max_parts, $user_name, array $group_names, $validTxnList) { $args = new \metastore\ThriftHiveMetastore_get_partitions_with_auth_args(); $args->db_name = $db_name; @@ -5979,6 +6009,7 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas $args->max_parts = $max_parts; $args->user_name = $user_name; $args->group_names = $group_names; + $args->validTxnList = $validTxnList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -6026,18 +6057,19 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("get_partitions_with_auth failed: unknown result"); } - public function get_partitions_pspec($db_name, $tbl_name, $max_parts) + public function get_partitions_pspec($db_name, $tbl_name, $max_parts, $validTxnList) { - $this->send_get_partitions_pspec($db_name, $tbl_name, $max_parts); + $this->send_get_partitions_pspec($db_name, $tbl_name, $max_parts, $validTxnList); return $this->recv_get_partitions_pspec(); } - public function send_get_partitions_pspec($db_name, $tbl_name, $max_parts) + public function send_get_partitions_pspec($db_name, $tbl_name, $max_parts, $validTxnList) { $args = new \metastore\ThriftHiveMetastore_get_partitions_pspec_args(); $args->db_name = $db_name; $args->tbl_name = $tbl_name; $args->max_parts = $max_parts; + $args->validTxnList = $validTxnList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -6085,18 +6117,19 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("get_partitions_pspec failed: unknown result"); } - public function get_partition_names($db_name, $tbl_name, $max_parts) + public function get_partition_names($db_name, $tbl_name, $max_parts, $validTxnList) { - $this->send_get_partition_names($db_name, $tbl_name, $max_parts); + $this->send_get_partition_names($db_name, $tbl_name, $max_parts, $validTxnList); return $this->recv_get_partition_names(); } - public function send_get_partition_names($db_name, $tbl_name, $max_parts) + public function send_get_partition_names($db_name, $tbl_name, $max_parts, $validTxnList) { $args = new \metastore\ThriftHiveMetastore_get_partition_names_args(); $args->db_name = $db_name; $args->tbl_name = $tbl_name; $args->max_parts = $max_parts; + $args->validTxnList = $validTxnList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -6201,19 +6234,20 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("get_partition_values failed: unknown result"); } - public function get_partitions_ps($db_name, $tbl_name, array $part_vals, $max_parts) + public function get_partitions_ps($db_name, $tbl_name, array $part_vals, $max_parts, $validTxnList) { - $this->send_get_partitions_ps($db_name, $tbl_name, $part_vals, $max_parts); + $this->send_get_partitions_ps($db_name, $tbl_name, $part_vals, $max_parts, $validTxnList); return $this->recv_get_partitions_ps(); } - public function send_get_partitions_ps($db_name, $tbl_name, array $part_vals, $max_parts) + public function 
send_get_partitions_ps($db_name, $tbl_name, array $part_vals, $max_parts, $validTxnList) { $args = new \metastore\ThriftHiveMetastore_get_partitions_ps_args(); $args->db_name = $db_name; $args->tbl_name = $tbl_name; $args->part_vals = $part_vals; $args->max_parts = $max_parts; + $args->validTxnList = $validTxnList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -6261,13 +6295,13 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("get_partitions_ps failed: unknown result"); } - public function get_partitions_ps_with_auth($db_name, $tbl_name, array $part_vals, $max_parts, $user_name, array $group_names) + public function get_partitions_ps_with_auth($db_name, $tbl_name, array $part_vals, $max_parts, $user_name, array $group_names, $validTxnList) { - $this->send_get_partitions_ps_with_auth($db_name, $tbl_name, $part_vals, $max_parts, $user_name, $group_names); + $this->send_get_partitions_ps_with_auth($db_name, $tbl_name, $part_vals, $max_parts, $user_name, $group_names, $validTxnList); return $this->recv_get_partitions_ps_with_auth(); } - public function send_get_partitions_ps_with_auth($db_name, $tbl_name, array $part_vals, $max_parts, $user_name, array $group_names) + public function send_get_partitions_ps_with_auth($db_name, $tbl_name, array $part_vals, $max_parts, $user_name, array $group_names, $validTxnList) { $args = new \metastore\ThriftHiveMetastore_get_partitions_ps_with_auth_args(); $args->db_name = $db_name; @@ -6276,6 +6310,7 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas $args->max_parts = $max_parts; $args->user_name = $user_name; $args->group_names = $group_names; + $args->validTxnList = $validTxnList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -6323,19 +6358,20 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("get_partitions_ps_with_auth failed: unknown result"); } - public function get_partition_names_ps($db_name, $tbl_name, array $part_vals, $max_parts) + public function get_partition_names_ps($db_name, $tbl_name, array $part_vals, $max_parts, $validTxnList) { - $this->send_get_partition_names_ps($db_name, $tbl_name, $part_vals, $max_parts); + $this->send_get_partition_names_ps($db_name, $tbl_name, $part_vals, $max_parts, $validTxnList); return $this->recv_get_partition_names_ps(); } - public function send_get_partition_names_ps($db_name, $tbl_name, array $part_vals, $max_parts) + public function send_get_partition_names_ps($db_name, $tbl_name, array $part_vals, $max_parts, $validTxnList) { $args = new \metastore\ThriftHiveMetastore_get_partition_names_ps_args(); $args->db_name = $db_name; $args->tbl_name = $tbl_name; $args->part_vals = $part_vals; $args->max_parts = $max_parts; + $args->validTxnList = $validTxnList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -6383,19 +6419,20 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("get_partition_names_ps failed: unknown result"); } - public function get_partitions_by_filter($db_name, $tbl_name, $filter, $max_parts) + public function get_partitions_by_filter($db_name, $tbl_name, $filter, $max_parts, $validTxnList) { - 
$this->send_get_partitions_by_filter($db_name, $tbl_name, $filter, $max_parts); + $this->send_get_partitions_by_filter($db_name, $tbl_name, $filter, $max_parts, $validTxnList); return $this->recv_get_partitions_by_filter(); } - public function send_get_partitions_by_filter($db_name, $tbl_name, $filter, $max_parts) + public function send_get_partitions_by_filter($db_name, $tbl_name, $filter, $max_parts, $validTxnList) { $args = new \metastore\ThriftHiveMetastore_get_partitions_by_filter_args(); $args->db_name = $db_name; $args->tbl_name = $tbl_name; $args->filter = $filter; $args->max_parts = $max_parts; + $args->validTxnList = $validTxnList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -6443,19 +6480,20 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("get_partitions_by_filter failed: unknown result"); } - public function get_part_specs_by_filter($db_name, $tbl_name, $filter, $max_parts) + public function get_part_specs_by_filter($db_name, $tbl_name, $filter, $max_parts, $validTxnList) { - $this->send_get_part_specs_by_filter($db_name, $tbl_name, $filter, $max_parts); + $this->send_get_part_specs_by_filter($db_name, $tbl_name, $filter, $max_parts, $validTxnList); return $this->recv_get_part_specs_by_filter(); } - public function send_get_part_specs_by_filter($db_name, $tbl_name, $filter, $max_parts) + public function send_get_part_specs_by_filter($db_name, $tbl_name, $filter, $max_parts, $validTxnList) { $args = new \metastore\ThriftHiveMetastore_get_part_specs_by_filter_args(); $args->db_name = $db_name; $args->tbl_name = $tbl_name; $args->filter = $filter; $args->max_parts = $max_parts; + $args->validTxnList = $validTxnList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -6560,18 +6598,19 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("get_partitions_by_expr failed: unknown result"); } - public function get_num_partitions_by_filter($db_name, $tbl_name, $filter) + public function get_num_partitions_by_filter($db_name, $tbl_name, $filter, $validTxnList) { - $this->send_get_num_partitions_by_filter($db_name, $tbl_name, $filter); + $this->send_get_num_partitions_by_filter($db_name, $tbl_name, $filter, $validTxnList); return $this->recv_get_num_partitions_by_filter(); } - public function send_get_num_partitions_by_filter($db_name, $tbl_name, $filter) + public function send_get_num_partitions_by_filter($db_name, $tbl_name, $filter, $validTxnList) { $args = new \metastore\ThriftHiveMetastore_get_num_partitions_by_filter_args(); $args->db_name = $db_name; $args->tbl_name = $tbl_name; $args->filter = $filter; + $args->validTxnList = $validTxnList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -6619,18 +6658,19 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("get_num_partitions_by_filter failed: unknown result"); } - public function get_partitions_by_names($db_name, $tbl_name, array $names) + public function get_partitions_by_names($db_name, $tbl_name, array $names, $validTxnList) { - $this->send_get_partitions_by_names($db_name, $tbl_name, $names); + $this->send_get_partitions_by_names($db_name, $tbl_name, $names, $validTxnList); return 
$this->recv_get_partitions_by_names(); } - public function send_get_partitions_by_names($db_name, $tbl_name, array $names) + public function send_get_partitions_by_names($db_name, $tbl_name, array $names, $validTxnList) { $args = new \metastore\ThriftHiveMetastore_get_partitions_by_names_args(); $args->db_name = $db_name; $args->tbl_name = $tbl_name; $args->names = $names; + $args->validTxnList = $validTxnList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -8085,18 +8125,19 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("update_partition_column_statistics_req failed: unknown result"); } - public function get_table_column_statistics($db_name, $tbl_name, $col_name) + public function get_table_column_statistics($db_name, $tbl_name, $col_name, $validWriteIdList) { - $this->send_get_table_column_statistics($db_name, $tbl_name, $col_name); + $this->send_get_table_column_statistics($db_name, $tbl_name, $col_name, $validWriteIdList); return $this->recv_get_table_column_statistics(); } - public function send_get_table_column_statistics($db_name, $tbl_name, $col_name) + public function send_get_table_column_statistics($db_name, $tbl_name, $col_name, $validWriteIdList) { $args = new \metastore\ThriftHiveMetastore_get_table_column_statistics_args(); $args->db_name = $db_name; $args->tbl_name = $tbl_name; $args->col_name = $col_name; + $args->validWriteIdList = $validWriteIdList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -8150,19 +8191,20 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("get_table_column_statistics failed: unknown result"); } - public function get_partition_column_statistics($db_name, $tbl_name, $part_name, $col_name) + public function get_partition_column_statistics($db_name, $tbl_name, $part_name, $col_name, $validWriteIdList) { - $this->send_get_partition_column_statistics($db_name, $tbl_name, $part_name, $col_name); + $this->send_get_partition_column_statistics($db_name, $tbl_name, $part_name, $col_name, $validWriteIdList); return $this->recv_get_partition_column_statistics(); } - public function send_get_partition_column_statistics($db_name, $tbl_name, $part_name, $col_name) + public function send_get_partition_column_statistics($db_name, $tbl_name, $part_name, $col_name, $validWriteIdList) { $args = new \metastore\ThriftHiveMetastore_get_partition_column_statistics_args(); $args->db_name = $db_name; $args->tbl_name = $tbl_name; $args->part_name = $part_name; $args->col_name = $col_name; + $args->validWriteIdList = $validWriteIdList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -16594,14 +16636,14 @@ class ThriftHiveMetastore_get_databases_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1022 = 0; - $_etype1025 = 0; - $xfer += $input->readListBegin($_etype1025, $_size1022); - for ($_i1026 = 0; $_i1026 < $_size1022; ++$_i1026) + $_size1029 = 0; + $_etype1032 = 0; + $xfer += $input->readListBegin($_etype1032, $_size1029); + for ($_i1033 = 0; $_i1033 < $_size1029; ++$_i1033) { - $elem1027 = null; - $xfer += $input->readString($elem1027); - $this->success []= $elem1027; + $elem1034 = null; + $xfer += $input->readString($elem1034); + $this->success []= 
$elem1034; } $xfer += $input->readListEnd(); } else { @@ -16637,9 +16679,9 @@ class ThriftHiveMetastore_get_databases_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1028) + foreach ($this->success as $iter1035) { - $xfer += $output->writeString($iter1028); + $xfer += $output->writeString($iter1035); } } $output->writeListEnd(); @@ -16770,14 +16812,14 @@ class ThriftHiveMetastore_get_all_databases_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1029 = 0; - $_etype1032 = 0; - $xfer += $input->readListBegin($_etype1032, $_size1029); - for ($_i1033 = 0; $_i1033 < $_size1029; ++$_i1033) + $_size1036 = 0; + $_etype1039 = 0; + $xfer += $input->readListBegin($_etype1039, $_size1036); + for ($_i1040 = 0; $_i1040 < $_size1036; ++$_i1040) { - $elem1034 = null; - $xfer += $input->readString($elem1034); - $this->success []= $elem1034; + $elem1041 = null; + $xfer += $input->readString($elem1041); + $this->success []= $elem1041; } $xfer += $input->readListEnd(); } else { @@ -16813,9 +16855,9 @@ class ThriftHiveMetastore_get_all_databases_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1035) + foreach ($this->success as $iter1042) { - $xfer += $output->writeString($iter1035); + $xfer += $output->writeString($iter1042); } } $output->writeListEnd(); @@ -17816,18 +17858,18 @@ class ThriftHiveMetastore_get_type_all_result { case 0: if ($ftype == TType::MAP) { $this->success = array(); - $_size1036 = 0; - $_ktype1037 = 0; - $_vtype1038 = 0; - $xfer += $input->readMapBegin($_ktype1037, $_vtype1038, $_size1036); - for ($_i1040 = 0; $_i1040 < $_size1036; ++$_i1040) + $_size1043 = 0; + $_ktype1044 = 0; + $_vtype1045 = 0; + $xfer += $input->readMapBegin($_ktype1044, $_vtype1045, $_size1043); + for ($_i1047 = 0; $_i1047 < $_size1043; ++$_i1047) { - $key1041 = ''; - $val1042 = new \metastore\Type(); - $xfer += $input->readString($key1041); - $val1042 = new \metastore\Type(); - $xfer += $val1042->read($input); - $this->success[$key1041] = $val1042; + $key1048 = ''; + $val1049 = new \metastore\Type(); + $xfer += $input->readString($key1048); + $val1049 = new \metastore\Type(); + $xfer += $val1049->read($input); + $this->success[$key1048] = $val1049; } $xfer += $input->readMapEnd(); } else { @@ -17863,10 +17905,10 @@ class ThriftHiveMetastore_get_type_all_result { { $output->writeMapBegin(TType::STRING, TType::STRUCT, count($this->success)); { - foreach ($this->success as $kiter1043 => $viter1044) + foreach ($this->success as $kiter1050 => $viter1051) { - $xfer += $output->writeString($kiter1043); - $xfer += $viter1044->write($output); + $xfer += $output->writeString($kiter1050); + $xfer += $viter1051->write($output); } } $output->writeMapEnd(); @@ -17896,6 +17938,10 @@ class ThriftHiveMetastore_get_fields_args { * @var string */ public $table_name = null; + /** + * @var string + */ + public $validWriteIdList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -17908,6 +17954,10 @@ class ThriftHiveMetastore_get_fields_args { 'var' => 'table_name', 'type' => TType::STRING, ), + 3 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -17917,6 +17967,9 @@ class ThriftHiveMetastore_get_fields_args { if (isset($vals['table_name'])) { $this->table_name = $vals['table_name']; } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } } } @@ -17953,6 
+18006,13 @@ class ThriftHiveMetastore_get_fields_args { $xfer += $input->skip($ftype); } break; + case 3: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -17976,6 +18036,11 @@ class ThriftHiveMetastore_get_fields_args { $xfer += $output->writeString($this->table_name); $xfer += $output->writeFieldEnd(); } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 3); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -18070,15 +18135,15 @@ class ThriftHiveMetastore_get_fields_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1045 = 0; - $_etype1048 = 0; - $xfer += $input->readListBegin($_etype1048, $_size1045); - for ($_i1049 = 0; $_i1049 < $_size1045; ++$_i1049) + $_size1052 = 0; + $_etype1055 = 0; + $xfer += $input->readListBegin($_etype1055, $_size1052); + for ($_i1056 = 0; $_i1056 < $_size1052; ++$_i1056) { - $elem1050 = null; - $elem1050 = new \metastore\FieldSchema(); - $xfer += $elem1050->read($input); - $this->success []= $elem1050; + $elem1057 = null; + $elem1057 = new \metastore\FieldSchema(); + $xfer += $elem1057->read($input); + $this->success []= $elem1057; } $xfer += $input->readListEnd(); } else { @@ -18130,9 +18195,9 @@ class ThriftHiveMetastore_get_fields_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1051) + foreach ($this->success as $iter1058) { - $xfer += $iter1051->write($output); + $xfer += $iter1058->write($output); } } $output->writeListEnd(); @@ -18176,6 +18241,10 @@ class ThriftHiveMetastore_get_fields_with_environment_context_args { * @var \metastore\EnvironmentContext */ public $environment_context = null; + /** + * @var string + */ + public $validWriteIdList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -18193,6 +18262,10 @@ class ThriftHiveMetastore_get_fields_with_environment_context_args { 'type' => TType::STRUCT, 'class' => '\metastore\EnvironmentContext', ), + 4 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -18205,6 +18278,9 @@ class ThriftHiveMetastore_get_fields_with_environment_context_args { if (isset($vals['environment_context'])) { $this->environment_context = $vals['environment_context']; } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } } } @@ -18249,6 +18325,13 @@ class ThriftHiveMetastore_get_fields_with_environment_context_args { $xfer += $input->skip($ftype); } break; + case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -18280,6 +18363,11 @@ class ThriftHiveMetastore_get_fields_with_environment_context_args { $xfer += $this->environment_context->write($output); $xfer += $output->writeFieldEnd(); } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 4); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -18374,15 +18462,15 @@ class 
ThriftHiveMetastore_get_fields_with_environment_context_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1052 = 0; - $_etype1055 = 0; - $xfer += $input->readListBegin($_etype1055, $_size1052); - for ($_i1056 = 0; $_i1056 < $_size1052; ++$_i1056) + $_size1059 = 0; + $_etype1062 = 0; + $xfer += $input->readListBegin($_etype1062, $_size1059); + for ($_i1063 = 0; $_i1063 < $_size1059; ++$_i1063) { - $elem1057 = null; - $elem1057 = new \metastore\FieldSchema(); - $xfer += $elem1057->read($input); - $this->success []= $elem1057; + $elem1064 = null; + $elem1064 = new \metastore\FieldSchema(); + $xfer += $elem1064->read($input); + $this->success []= $elem1064; } $xfer += $input->readListEnd(); } else { @@ -18434,9 +18522,9 @@ class ThriftHiveMetastore_get_fields_with_environment_context_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1058) + foreach ($this->success as $iter1065) { - $xfer += $iter1058->write($output); + $xfer += $iter1065->write($output); } } $output->writeListEnd(); @@ -18476,6 +18564,10 @@ class ThriftHiveMetastore_get_schema_args { * @var string */ public $table_name = null; + /** + * @var string + */ + public $validWriteIdList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -18488,6 +18580,10 @@ class ThriftHiveMetastore_get_schema_args { 'var' => 'table_name', 'type' => TType::STRING, ), + 3 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -18497,6 +18593,9 @@ class ThriftHiveMetastore_get_schema_args { if (isset($vals['table_name'])) { $this->table_name = $vals['table_name']; } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } } } @@ -18533,6 +18632,13 @@ class ThriftHiveMetastore_get_schema_args { $xfer += $input->skip($ftype); } break; + case 3: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -18556,6 +18662,11 @@ class ThriftHiveMetastore_get_schema_args { $xfer += $output->writeString($this->table_name); $xfer += $output->writeFieldEnd(); } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 3); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -18650,15 +18761,15 @@ class ThriftHiveMetastore_get_schema_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1059 = 0; - $_etype1062 = 0; - $xfer += $input->readListBegin($_etype1062, $_size1059); - for ($_i1063 = 0; $_i1063 < $_size1059; ++$_i1063) + $_size1066 = 0; + $_etype1069 = 0; + $xfer += $input->readListBegin($_etype1069, $_size1066); + for ($_i1070 = 0; $_i1070 < $_size1066; ++$_i1070) { - $elem1064 = null; - $elem1064 = new \metastore\FieldSchema(); - $xfer += $elem1064->read($input); - $this->success []= $elem1064; + $elem1071 = null; + $elem1071 = new \metastore\FieldSchema(); + $xfer += $elem1071->read($input); + $this->success []= $elem1071; } $xfer += $input->readListEnd(); } else { @@ -18710,9 +18821,9 @@ class ThriftHiveMetastore_get_schema_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1065) + foreach ($this->success as $iter1072) { - $xfer += 
$iter1065->write($output); + $xfer += $iter1072->write($output); } } $output->writeListEnd(); @@ -18756,6 +18867,10 @@ class ThriftHiveMetastore_get_schema_with_environment_context_args { * @var \metastore\EnvironmentContext */ public $environment_context = null; + /** + * @var string + */ + public $validWriteIdList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -18773,6 +18888,10 @@ class ThriftHiveMetastore_get_schema_with_environment_context_args { 'type' => TType::STRUCT, 'class' => '\metastore\EnvironmentContext', ), + 4 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -18785,6 +18904,9 @@ class ThriftHiveMetastore_get_schema_with_environment_context_args { if (isset($vals['environment_context'])) { $this->environment_context = $vals['environment_context']; } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } } } @@ -18829,6 +18951,13 @@ class ThriftHiveMetastore_get_schema_with_environment_context_args { $xfer += $input->skip($ftype); } break; + case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -18860,6 +18989,11 @@ class ThriftHiveMetastore_get_schema_with_environment_context_args { $xfer += $this->environment_context->write($output); $xfer += $output->writeFieldEnd(); } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 4); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -18954,15 +19088,15 @@ class ThriftHiveMetastore_get_schema_with_environment_context_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1066 = 0; - $_etype1069 = 0; - $xfer += $input->readListBegin($_etype1069, $_size1066); - for ($_i1070 = 0; $_i1070 < $_size1066; ++$_i1070) + $_size1073 = 0; + $_etype1076 = 0; + $xfer += $input->readListBegin($_etype1076, $_size1073); + for ($_i1077 = 0; $_i1077 < $_size1073; ++$_i1077) { - $elem1071 = null; - $elem1071 = new \metastore\FieldSchema(); - $xfer += $elem1071->read($input); - $this->success []= $elem1071; + $elem1078 = null; + $elem1078 = new \metastore\FieldSchema(); + $xfer += $elem1078->read($input); + $this->success []= $elem1078; } $xfer += $input->readListEnd(); } else { @@ -19014,9 +19148,9 @@ class ThriftHiveMetastore_get_schema_with_environment_context_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1072) + foreach ($this->success as $iter1079) { - $xfer += $iter1072->write($output); + $xfer += $iter1079->write($output); } } $output->writeListEnd(); @@ -19688,15 +19822,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 2: if ($ftype == TType::LST) { $this->primaryKeys = array(); - $_size1073 = 0; - $_etype1076 = 0; - $xfer += $input->readListBegin($_etype1076, $_size1073); - for ($_i1077 = 0; $_i1077 < $_size1073; ++$_i1077) + $_size1080 = 0; + $_etype1083 = 0; + $xfer += $input->readListBegin($_etype1083, $_size1080); + for ($_i1084 = 0; $_i1084 < $_size1080; ++$_i1084) { - $elem1078 = null; - $elem1078 = new \metastore\SQLPrimaryKey(); - $xfer += $elem1078->read($input); - $this->primaryKeys []= $elem1078; + $elem1085 = null; + $elem1085 = new 
\metastore\SQLPrimaryKey(); + $xfer += $elem1085->read($input); + $this->primaryKeys []= $elem1085; } $xfer += $input->readListEnd(); } else { @@ -19706,15 +19840,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 3: if ($ftype == TType::LST) { $this->foreignKeys = array(); - $_size1079 = 0; - $_etype1082 = 0; - $xfer += $input->readListBegin($_etype1082, $_size1079); - for ($_i1083 = 0; $_i1083 < $_size1079; ++$_i1083) + $_size1086 = 0; + $_etype1089 = 0; + $xfer += $input->readListBegin($_etype1089, $_size1086); + for ($_i1090 = 0; $_i1090 < $_size1086; ++$_i1090) { - $elem1084 = null; - $elem1084 = new \metastore\SQLForeignKey(); - $xfer += $elem1084->read($input); - $this->foreignKeys []= $elem1084; + $elem1091 = null; + $elem1091 = new \metastore\SQLForeignKey(); + $xfer += $elem1091->read($input); + $this->foreignKeys []= $elem1091; } $xfer += $input->readListEnd(); } else { @@ -19724,15 +19858,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 4: if ($ftype == TType::LST) { $this->uniqueConstraints = array(); - $_size1085 = 0; - $_etype1088 = 0; - $xfer += $input->readListBegin($_etype1088, $_size1085); - for ($_i1089 = 0; $_i1089 < $_size1085; ++$_i1089) + $_size1092 = 0; + $_etype1095 = 0; + $xfer += $input->readListBegin($_etype1095, $_size1092); + for ($_i1096 = 0; $_i1096 < $_size1092; ++$_i1096) { - $elem1090 = null; - $elem1090 = new \metastore\SQLUniqueConstraint(); - $xfer += $elem1090->read($input); - $this->uniqueConstraints []= $elem1090; + $elem1097 = null; + $elem1097 = new \metastore\SQLUniqueConstraint(); + $xfer += $elem1097->read($input); + $this->uniqueConstraints []= $elem1097; } $xfer += $input->readListEnd(); } else { @@ -19742,15 +19876,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 5: if ($ftype == TType::LST) { $this->notNullConstraints = array(); - $_size1091 = 0; - $_etype1094 = 0; - $xfer += $input->readListBegin($_etype1094, $_size1091); - for ($_i1095 = 0; $_i1095 < $_size1091; ++$_i1095) + $_size1098 = 0; + $_etype1101 = 0; + $xfer += $input->readListBegin($_etype1101, $_size1098); + for ($_i1102 = 0; $_i1102 < $_size1098; ++$_i1102) { - $elem1096 = null; - $elem1096 = new \metastore\SQLNotNullConstraint(); - $xfer += $elem1096->read($input); - $this->notNullConstraints []= $elem1096; + $elem1103 = null; + $elem1103 = new \metastore\SQLNotNullConstraint(); + $xfer += $elem1103->read($input); + $this->notNullConstraints []= $elem1103; } $xfer += $input->readListEnd(); } else { @@ -19760,15 +19894,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 6: if ($ftype == TType::LST) { $this->defaultConstraints = array(); - $_size1097 = 0; - $_etype1100 = 0; - $xfer += $input->readListBegin($_etype1100, $_size1097); - for ($_i1101 = 0; $_i1101 < $_size1097; ++$_i1101) + $_size1104 = 0; + $_etype1107 = 0; + $xfer += $input->readListBegin($_etype1107, $_size1104); + for ($_i1108 = 0; $_i1108 < $_size1104; ++$_i1108) { - $elem1102 = null; - $elem1102 = new \metastore\SQLDefaultConstraint(); - $xfer += $elem1102->read($input); - $this->defaultConstraints []= $elem1102; + $elem1109 = null; + $elem1109 = new \metastore\SQLDefaultConstraint(); + $xfer += $elem1109->read($input); + $this->defaultConstraints []= $elem1109; } $xfer += $input->readListEnd(); } else { @@ -19778,15 +19912,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 7: if ($ftype == TType::LST) { $this->checkConstraints = array(); - $_size1103 = 0; - $_etype1106 = 0; - $xfer += 
$input->readListBegin($_etype1106, $_size1103); - for ($_i1107 = 0; $_i1107 < $_size1103; ++$_i1107) + $_size1110 = 0; + $_etype1113 = 0; + $xfer += $input->readListBegin($_etype1113, $_size1110); + for ($_i1114 = 0; $_i1114 < $_size1110; ++$_i1114) { - $elem1108 = null; - $elem1108 = new \metastore\SQLCheckConstraint(); - $xfer += $elem1108->read($input); - $this->checkConstraints []= $elem1108; + $elem1115 = null; + $elem1115 = new \metastore\SQLCheckConstraint(); + $xfer += $elem1115->read($input); + $this->checkConstraints []= $elem1115; } $xfer += $input->readListEnd(); } else { @@ -19822,9 +19956,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->primaryKeys)); { - foreach ($this->primaryKeys as $iter1109) + foreach ($this->primaryKeys as $iter1116) { - $xfer += $iter1109->write($output); + $xfer += $iter1116->write($output); } } $output->writeListEnd(); @@ -19839,9 +19973,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->foreignKeys)); { - foreach ($this->foreignKeys as $iter1110) + foreach ($this->foreignKeys as $iter1117) { - $xfer += $iter1110->write($output); + $xfer += $iter1117->write($output); } } $output->writeListEnd(); @@ -19856,9 +19990,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->uniqueConstraints)); { - foreach ($this->uniqueConstraints as $iter1111) + foreach ($this->uniqueConstraints as $iter1118) { - $xfer += $iter1111->write($output); + $xfer += $iter1118->write($output); } } $output->writeListEnd(); @@ -19873,9 +20007,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->notNullConstraints)); { - foreach ($this->notNullConstraints as $iter1112) + foreach ($this->notNullConstraints as $iter1119) { - $xfer += $iter1112->write($output); + $xfer += $iter1119->write($output); } } $output->writeListEnd(); @@ -19890,9 +20024,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->defaultConstraints)); { - foreach ($this->defaultConstraints as $iter1113) + foreach ($this->defaultConstraints as $iter1120) { - $xfer += $iter1113->write($output); + $xfer += $iter1120->write($output); } } $output->writeListEnd(); @@ -19907,9 +20041,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->checkConstraints)); { - foreach ($this->checkConstraints as $iter1114) + foreach ($this->checkConstraints as $iter1121) { - $xfer += $iter1114->write($output); + $xfer += $iter1121->write($output); } } $output->writeListEnd(); @@ -22141,14 +22275,14 @@ class ThriftHiveMetastore_truncate_table_args { case 3: if ($ftype == TType::LST) { $this->partNames = array(); - $_size1115 = 0; - $_etype1118 = 0; - $xfer += $input->readListBegin($_etype1118, $_size1115); - for ($_i1119 = 0; $_i1119 < $_size1115; ++$_i1119) + $_size1122 = 0; + $_etype1125 = 0; + $xfer += $input->readListBegin($_etype1125, $_size1122); + for ($_i1126 = 0; $_i1126 < $_size1122; ++$_i1126) { - $elem1120 = null; - $xfer += $input->readString($elem1120); - $this->partNames []= $elem1120; + $elem1127 = null; + $xfer += $input->readString($elem1127); + $this->partNames []= $elem1127; } $xfer += $input->readListEnd(); } else { @@ -22186,9 +22320,9 @@ class ThriftHiveMetastore_truncate_table_args { { 
$output->writeListBegin(TType::STRING, count($this->partNames)); { - foreach ($this->partNames as $iter1121) + foreach ($this->partNames as $iter1128) { - $xfer += $output->writeString($iter1121); + $xfer += $output->writeString($iter1128); } } $output->writeListEnd(); @@ -22624,14 +22758,14 @@ class ThriftHiveMetastore_get_tables_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1122 = 0; - $_etype1125 = 0; - $xfer += $input->readListBegin($_etype1125, $_size1122); - for ($_i1126 = 0; $_i1126 < $_size1122; ++$_i1126) + $_size1129 = 0; + $_etype1132 = 0; + $xfer += $input->readListBegin($_etype1132, $_size1129); + for ($_i1133 = 0; $_i1133 < $_size1129; ++$_i1133) { - $elem1127 = null; - $xfer += $input->readString($elem1127); - $this->success []= $elem1127; + $elem1134 = null; + $xfer += $input->readString($elem1134); + $this->success []= $elem1134; } $xfer += $input->readListEnd(); } else { @@ -22667,9 +22801,9 @@ class ThriftHiveMetastore_get_tables_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1128) + foreach ($this->success as $iter1135) { - $xfer += $output->writeString($iter1128); + $xfer += $output->writeString($iter1135); } } $output->writeListEnd(); @@ -22871,14 +23005,14 @@ class ThriftHiveMetastore_get_tables_by_type_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1129 = 0; - $_etype1132 = 0; - $xfer += $input->readListBegin($_etype1132, $_size1129); - for ($_i1133 = 0; $_i1133 < $_size1129; ++$_i1133) + $_size1136 = 0; + $_etype1139 = 0; + $xfer += $input->readListBegin($_etype1139, $_size1136); + for ($_i1140 = 0; $_i1140 < $_size1136; ++$_i1140) { - $elem1134 = null; - $xfer += $input->readString($elem1134); - $this->success []= $elem1134; + $elem1141 = null; + $xfer += $input->readString($elem1141); + $this->success []= $elem1141; } $xfer += $input->readListEnd(); } else { @@ -22914,9 +23048,9 @@ class ThriftHiveMetastore_get_tables_by_type_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1135) + foreach ($this->success as $iter1142) { - $xfer += $output->writeString($iter1135); + $xfer += $output->writeString($iter1142); } } $output->writeListEnd(); @@ -23048,15 +23182,15 @@ class ThriftHiveMetastore_get_all_materialized_view_objects_for_rewriting_result case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1136 = 0; - $_etype1139 = 0; - $xfer += $input->readListBegin($_etype1139, $_size1136); - for ($_i1140 = 0; $_i1140 < $_size1136; ++$_i1140) + $_size1143 = 0; + $_etype1146 = 0; + $xfer += $input->readListBegin($_etype1146, $_size1143); + for ($_i1147 = 0; $_i1147 < $_size1143; ++$_i1147) { - $elem1141 = null; - $elem1141 = new \metastore\Table(); - $xfer += $elem1141->read($input); - $this->success []= $elem1141; + $elem1148 = null; + $elem1148 = new \metastore\Table(); + $xfer += $elem1148->read($input); + $this->success []= $elem1148; } $xfer += $input->readListEnd(); } else { @@ -23092,9 +23226,9 @@ class ThriftHiveMetastore_get_all_materialized_view_objects_for_rewriting_result { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1142) + foreach ($this->success as $iter1149) { - $xfer += $iter1142->write($output); + $xfer += $iter1149->write($output); } } $output->writeListEnd(); @@ -23250,14 +23384,14 @@ class ThriftHiveMetastore_get_materialized_views_for_rewriting_result { case 0: if ($ftype == TType::LST) { 
$this->success = array(); - $_size1143 = 0; - $_etype1146 = 0; - $xfer += $input->readListBegin($_etype1146, $_size1143); - for ($_i1147 = 0; $_i1147 < $_size1143; ++$_i1147) + $_size1150 = 0; + $_etype1153 = 0; + $xfer += $input->readListBegin($_etype1153, $_size1150); + for ($_i1154 = 0; $_i1154 < $_size1150; ++$_i1154) { - $elem1148 = null; - $xfer += $input->readString($elem1148); - $this->success []= $elem1148; + $elem1155 = null; + $xfer += $input->readString($elem1155); + $this->success []= $elem1155; } $xfer += $input->readListEnd(); } else { @@ -23293,9 +23427,9 @@ class ThriftHiveMetastore_get_materialized_views_for_rewriting_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1149) + foreach ($this->success as $iter1156) { - $xfer += $output->writeString($iter1149); + $xfer += $output->writeString($iter1156); } } $output->writeListEnd(); @@ -23400,14 +23534,14 @@ class ThriftHiveMetastore_get_table_meta_args { case 3: if ($ftype == TType::LST) { $this->tbl_types = array(); - $_size1150 = 0; - $_etype1153 = 0; - $xfer += $input->readListBegin($_etype1153, $_size1150); - for ($_i1154 = 0; $_i1154 < $_size1150; ++$_i1154) + $_size1157 = 0; + $_etype1160 = 0; + $xfer += $input->readListBegin($_etype1160, $_size1157); + for ($_i1161 = 0; $_i1161 < $_size1157; ++$_i1161) { - $elem1155 = null; - $xfer += $input->readString($elem1155); - $this->tbl_types []= $elem1155; + $elem1162 = null; + $xfer += $input->readString($elem1162); + $this->tbl_types []= $elem1162; } $xfer += $input->readListEnd(); } else { @@ -23445,9 +23579,9 @@ class ThriftHiveMetastore_get_table_meta_args { { $output->writeListBegin(TType::STRING, count($this->tbl_types)); { - foreach ($this->tbl_types as $iter1156) + foreach ($this->tbl_types as $iter1163) { - $xfer += $output->writeString($iter1156); + $xfer += $output->writeString($iter1163); } } $output->writeListEnd(); @@ -23524,15 +23658,15 @@ class ThriftHiveMetastore_get_table_meta_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1157 = 0; - $_etype1160 = 0; - $xfer += $input->readListBegin($_etype1160, $_size1157); - for ($_i1161 = 0; $_i1161 < $_size1157; ++$_i1161) + $_size1164 = 0; + $_etype1167 = 0; + $xfer += $input->readListBegin($_etype1167, $_size1164); + for ($_i1168 = 0; $_i1168 < $_size1164; ++$_i1168) { - $elem1162 = null; - $elem1162 = new \metastore\TableMeta(); - $xfer += $elem1162->read($input); - $this->success []= $elem1162; + $elem1169 = null; + $elem1169 = new \metastore\TableMeta(); + $xfer += $elem1169->read($input); + $this->success []= $elem1169; } $xfer += $input->readListEnd(); } else { @@ -23568,9 +23702,9 @@ class ThriftHiveMetastore_get_table_meta_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1163) + foreach ($this->success as $iter1170) { - $xfer += $iter1163->write($output); + $xfer += $iter1170->write($output); } } $output->writeListEnd(); @@ -23726,14 +23860,14 @@ class ThriftHiveMetastore_get_all_tables_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1164 = 0; - $_etype1167 = 0; - $xfer += $input->readListBegin($_etype1167, $_size1164); - for ($_i1168 = 0; $_i1168 < $_size1164; ++$_i1168) + $_size1171 = 0; + $_etype1174 = 0; + $xfer += $input->readListBegin($_etype1174, $_size1171); + for ($_i1175 = 0; $_i1175 < $_size1171; ++$_i1175) { - $elem1169 = null; - $xfer += $input->readString($elem1169); - $this->success []= $elem1169; + $elem1176 = 
null; + $xfer += $input->readString($elem1176); + $this->success []= $elem1176; } $xfer += $input->readListEnd(); } else { @@ -23769,9 +23903,9 @@ class ThriftHiveMetastore_get_all_tables_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1170) + foreach ($this->success as $iter1177) { - $xfer += $output->writeString($iter1170); + $xfer += $output->writeString($iter1177); } } $output->writeListEnd(); @@ -23801,6 +23935,10 @@ class ThriftHiveMetastore_get_table_args { * @var string */ public $tbl_name = null; + /** + * @var string + */ + public $validWriteIdList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -23813,6 +23951,10 @@ class ThriftHiveMetastore_get_table_args { 'var' => 'tbl_name', 'type' => TType::STRING, ), + 3 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -23822,6 +23964,9 @@ class ThriftHiveMetastore_get_table_args { if (isset($vals['tbl_name'])) { $this->tbl_name = $vals['tbl_name']; } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } } } @@ -23858,6 +24003,13 @@ class ThriftHiveMetastore_get_table_args { $xfer += $input->skip($ftype); } break; + case 3: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -23881,6 +24033,11 @@ class ThriftHiveMetastore_get_table_args { $xfer += $output->writeString($this->tbl_name); $xfer += $output->writeFieldEnd(); } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 3); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -24086,14 +24243,14 @@ class ThriftHiveMetastore_get_table_objects_by_name_args { case 2: if ($ftype == TType::LST) { $this->tbl_names = array(); - $_size1171 = 0; - $_etype1174 = 0; - $xfer += $input->readListBegin($_etype1174, $_size1171); - for ($_i1175 = 0; $_i1175 < $_size1171; ++$_i1175) + $_size1178 = 0; + $_etype1181 = 0; + $xfer += $input->readListBegin($_etype1181, $_size1178); + for ($_i1182 = 0; $_i1182 < $_size1178; ++$_i1182) { - $elem1176 = null; - $xfer += $input->readString($elem1176); - $this->tbl_names []= $elem1176; + $elem1183 = null; + $xfer += $input->readString($elem1183); + $this->tbl_names []= $elem1183; } $xfer += $input->readListEnd(); } else { @@ -24126,9 +24283,9 @@ class ThriftHiveMetastore_get_table_objects_by_name_args { { $output->writeListBegin(TType::STRING, count($this->tbl_names)); { - foreach ($this->tbl_names as $iter1177) + foreach ($this->tbl_names as $iter1184) { - $xfer += $output->writeString($iter1177); + $xfer += $output->writeString($iter1184); } } $output->writeListEnd(); @@ -24193,15 +24350,15 @@ class ThriftHiveMetastore_get_table_objects_by_name_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1178 = 0; - $_etype1181 = 0; - $xfer += $input->readListBegin($_etype1181, $_size1178); - for ($_i1182 = 0; $_i1182 < $_size1178; ++$_i1182) + $_size1185 = 0; + $_etype1188 = 0; + $xfer += $input->readListBegin($_etype1188, $_size1185); + for ($_i1189 = 0; $_i1189 < $_size1185; ++$_i1189) { - $elem1183 = null; - $elem1183 = new \metastore\Table(); - $xfer += $elem1183->read($input); - $this->success []= $elem1183; + 
$elem1190 = null; + $elem1190 = new \metastore\Table(); + $xfer += $elem1190->read($input); + $this->success []= $elem1190; } $xfer += $input->readListEnd(); } else { @@ -24229,9 +24386,9 @@ class ThriftHiveMetastore_get_table_objects_by_name_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1184) + foreach ($this->success as $iter1191) { - $xfer += $iter1184->write($output); + $xfer += $iter1191->write($output); } } $output->writeListEnd(); @@ -24388,15 +24545,15 @@ class ThriftHiveMetastore_get_tables_ext_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1185 = 0; - $_etype1188 = 0; - $xfer += $input->readListBegin($_etype1188, $_size1185); - for ($_i1189 = 0; $_i1189 < $_size1185; ++$_i1189) + $_size1192 = 0; + $_etype1195 = 0; + $xfer += $input->readListBegin($_etype1195, $_size1192); + for ($_i1196 = 0; $_i1196 < $_size1192; ++$_i1196) { - $elem1190 = null; - $elem1190 = new \metastore\ExtendedTableInfo(); - $xfer += $elem1190->read($input); - $this->success []= $elem1190; + $elem1197 = null; + $elem1197 = new \metastore\ExtendedTableInfo(); + $xfer += $elem1197->read($input); + $this->success []= $elem1197; } $xfer += $input->readListEnd(); } else { @@ -24432,9 +24589,9 @@ class ThriftHiveMetastore_get_tables_ext_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1191) + foreach ($this->success as $iter1198) { - $xfer += $iter1191->write($output); + $xfer += $iter1198->write($output); } } $output->writeListEnd(); @@ -25639,14 +25796,14 @@ class ThriftHiveMetastore_get_table_names_by_filter_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1192 = 0; - $_etype1195 = 0; - $xfer += $input->readListBegin($_etype1195, $_size1192); - for ($_i1196 = 0; $_i1196 < $_size1192; ++$_i1196) + $_size1199 = 0; + $_etype1202 = 0; + $xfer += $input->readListBegin($_etype1202, $_size1199); + for ($_i1203 = 0; $_i1203 < $_size1199; ++$_i1203) { - $elem1197 = null; - $xfer += $input->readString($elem1197); - $this->success []= $elem1197; + $elem1204 = null; + $xfer += $input->readString($elem1204); + $this->success []= $elem1204; } $xfer += $input->readListEnd(); } else { @@ -25698,9 +25855,9 @@ class ThriftHiveMetastore_get_table_names_by_filter_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1198) + foreach ($this->success as $iter1205) { - $xfer += $output->writeString($iter1198); + $xfer += $output->writeString($iter1205); } } $output->writeListEnd(); @@ -27223,15 +27380,15 @@ class ThriftHiveMetastore_add_partitions_args { case 1: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size1199 = 0; - $_etype1202 = 0; - $xfer += $input->readListBegin($_etype1202, $_size1199); - for ($_i1203 = 0; $_i1203 < $_size1199; ++$_i1203) + $_size1206 = 0; + $_etype1209 = 0; + $xfer += $input->readListBegin($_etype1209, $_size1206); + for ($_i1210 = 0; $_i1210 < $_size1206; ++$_i1210) { - $elem1204 = null; - $elem1204 = new \metastore\Partition(); - $xfer += $elem1204->read($input); - $this->new_parts []= $elem1204; + $elem1211 = null; + $elem1211 = new \metastore\Partition(); + $xfer += $elem1211->read($input); + $this->new_parts []= $elem1211; } $xfer += $input->readListEnd(); } else { @@ -27259,9 +27416,9 @@ class ThriftHiveMetastore_add_partitions_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter1205) + 
foreach ($this->new_parts as $iter1212) { - $xfer += $iter1205->write($output); + $xfer += $iter1212->write($output); } } $output->writeListEnd(); @@ -27476,15 +27633,15 @@ class ThriftHiveMetastore_add_partitions_pspec_args { case 1: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size1206 = 0; - $_etype1209 = 0; - $xfer += $input->readListBegin($_etype1209, $_size1206); - for ($_i1210 = 0; $_i1210 < $_size1206; ++$_i1210) + $_size1213 = 0; + $_etype1216 = 0; + $xfer += $input->readListBegin($_etype1216, $_size1213); + for ($_i1217 = 0; $_i1217 < $_size1213; ++$_i1217) { - $elem1211 = null; - $elem1211 = new \metastore\PartitionSpec(); - $xfer += $elem1211->read($input); - $this->new_parts []= $elem1211; + $elem1218 = null; + $elem1218 = new \metastore\PartitionSpec(); + $xfer += $elem1218->read($input); + $this->new_parts []= $elem1218; } $xfer += $input->readListEnd(); } else { @@ -27512,9 +27669,9 @@ class ThriftHiveMetastore_add_partitions_pspec_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter1212) + foreach ($this->new_parts as $iter1219) { - $xfer += $iter1212->write($output); + $xfer += $iter1219->write($output); } } $output->writeListEnd(); @@ -27764,14 +27921,14 @@ class ThriftHiveMetastore_append_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1213 = 0; - $_etype1216 = 0; - $xfer += $input->readListBegin($_etype1216, $_size1213); - for ($_i1217 = 0; $_i1217 < $_size1213; ++$_i1217) + $_size1220 = 0; + $_etype1223 = 0; + $xfer += $input->readListBegin($_etype1223, $_size1220); + for ($_i1224 = 0; $_i1224 < $_size1220; ++$_i1224) { - $elem1218 = null; - $xfer += $input->readString($elem1218); - $this->part_vals []= $elem1218; + $elem1225 = null; + $xfer += $input->readString($elem1225); + $this->part_vals []= $elem1225; } $xfer += $input->readListEnd(); } else { @@ -27809,9 +27966,9 @@ class ThriftHiveMetastore_append_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1219) + foreach ($this->part_vals as $iter1226) { - $xfer += $output->writeString($iter1219); + $xfer += $output->writeString($iter1226); } } $output->writeListEnd(); @@ -28313,14 +28470,14 @@ class ThriftHiveMetastore_append_partition_with_environment_context_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1220 = 0; - $_etype1223 = 0; - $xfer += $input->readListBegin($_etype1223, $_size1220); - for ($_i1224 = 0; $_i1224 < $_size1220; ++$_i1224) + $_size1227 = 0; + $_etype1230 = 0; + $xfer += $input->readListBegin($_etype1230, $_size1227); + for ($_i1231 = 0; $_i1231 < $_size1227; ++$_i1231) { - $elem1225 = null; - $xfer += $input->readString($elem1225); - $this->part_vals []= $elem1225; + $elem1232 = null; + $xfer += $input->readString($elem1232); + $this->part_vals []= $elem1232; } $xfer += $input->readListEnd(); } else { @@ -28366,9 +28523,9 @@ class ThriftHiveMetastore_append_partition_with_environment_context_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1226) + foreach ($this->part_vals as $iter1233) { - $xfer += $output->writeString($iter1226); + $xfer += $output->writeString($iter1233); } } $output->writeListEnd(); @@ -29222,14 +29379,14 @@ class ThriftHiveMetastore_drop_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1227 = 0; - $_etype1230 = 0; - $xfer += 
$input->readListBegin($_etype1230, $_size1227); - for ($_i1231 = 0; $_i1231 < $_size1227; ++$_i1231) + $_size1234 = 0; + $_etype1237 = 0; + $xfer += $input->readListBegin($_etype1237, $_size1234); + for ($_i1238 = 0; $_i1238 < $_size1234; ++$_i1238) { - $elem1232 = null; - $xfer += $input->readString($elem1232); - $this->part_vals []= $elem1232; + $elem1239 = null; + $xfer += $input->readString($elem1239); + $this->part_vals []= $elem1239; } $xfer += $input->readListEnd(); } else { @@ -29274,9 +29431,9 @@ class ThriftHiveMetastore_drop_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1233) + foreach ($this->part_vals as $iter1240) { - $xfer += $output->writeString($iter1233); + $xfer += $output->writeString($iter1240); } } $output->writeListEnd(); @@ -29529,14 +29686,14 @@ class ThriftHiveMetastore_drop_partition_with_environment_context_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1234 = 0; - $_etype1237 = 0; - $xfer += $input->readListBegin($_etype1237, $_size1234); - for ($_i1238 = 0; $_i1238 < $_size1234; ++$_i1238) + $_size1241 = 0; + $_etype1244 = 0; + $xfer += $input->readListBegin($_etype1244, $_size1241); + for ($_i1245 = 0; $_i1245 < $_size1241; ++$_i1245) { - $elem1239 = null; - $xfer += $input->readString($elem1239); - $this->part_vals []= $elem1239; + $elem1246 = null; + $xfer += $input->readString($elem1246); + $this->part_vals []= $elem1246; } $xfer += $input->readListEnd(); } else { @@ -29589,9 +29746,9 @@ class ThriftHiveMetastore_drop_partition_with_environment_context_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1240) + foreach ($this->part_vals as $iter1247) { - $xfer += $output->writeString($iter1240); + $xfer += $output->writeString($iter1247); } } $output->writeListEnd(); @@ -30534,6 +30691,10 @@ class ThriftHiveMetastore_get_partition_args { * @var string[] */ public $part_vals = null; + /** + * @var string + */ + public $validTxnList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -30554,6 +30715,10 @@ class ThriftHiveMetastore_get_partition_args { 'type' => TType::STRING, ), ), + 4 => array( + 'var' => 'validTxnList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -30566,6 +30731,9 @@ class ThriftHiveMetastore_get_partition_args { if (isset($vals['part_vals'])) { $this->part_vals = $vals['part_vals']; } + if (isset($vals['validTxnList'])) { + $this->validTxnList = $vals['validTxnList']; + } } } @@ -30605,20 +30773,27 @@ class ThriftHiveMetastore_get_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1241 = 0; - $_etype1244 = 0; - $xfer += $input->readListBegin($_etype1244, $_size1241); - for ($_i1245 = 0; $_i1245 < $_size1241; ++$_i1245) + $_size1248 = 0; + $_etype1251 = 0; + $xfer += $input->readListBegin($_etype1251, $_size1248); + for ($_i1252 = 0; $_i1252 < $_size1248; ++$_i1252) { - $elem1246 = null; - $xfer += $input->readString($elem1246); - $this->part_vals []= $elem1246; + $elem1253 = null; + $xfer += $input->readString($elem1253); + $this->part_vals []= $elem1253; } $xfer += $input->readListEnd(); } else { $xfer += $input->skip($ftype); } break; + case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validTxnList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -30650,15 +30825,20 @@ class 
ThriftHiveMetastore_get_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1247) + foreach ($this->part_vals as $iter1254) { - $xfer += $output->writeString($iter1247); + $xfer += $output->writeString($iter1254); } } $output->writeListEnd(); } $xfer += $output->writeFieldEnd(); } + if ($this->validTxnList !== null) { + $xfer += $output->writeFieldBegin('validTxnList', TType::STRING, 4); + $xfer += $output->writeString($this->validTxnList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -30894,17 +31074,17 @@ class ThriftHiveMetastore_exchange_partition_args { case 1: if ($ftype == TType::MAP) { $this->partitionSpecs = array(); - $_size1248 = 0; - $_ktype1249 = 0; - $_vtype1250 = 0; - $xfer += $input->readMapBegin($_ktype1249, $_vtype1250, $_size1248); - for ($_i1252 = 0; $_i1252 < $_size1248; ++$_i1252) + $_size1255 = 0; + $_ktype1256 = 0; + $_vtype1257 = 0; + $xfer += $input->readMapBegin($_ktype1256, $_vtype1257, $_size1255); + for ($_i1259 = 0; $_i1259 < $_size1255; ++$_i1259) { - $key1253 = ''; - $val1254 = ''; - $xfer += $input->readString($key1253); - $xfer += $input->readString($val1254); - $this->partitionSpecs[$key1253] = $val1254; + $key1260 = ''; + $val1261 = ''; + $xfer += $input->readString($key1260); + $xfer += $input->readString($val1261); + $this->partitionSpecs[$key1260] = $val1261; } $xfer += $input->readMapEnd(); } else { @@ -30960,10 +31140,10 @@ class ThriftHiveMetastore_exchange_partition_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs)); { - foreach ($this->partitionSpecs as $kiter1255 => $viter1256) + foreach ($this->partitionSpecs as $kiter1262 => $viter1263) { - $xfer += $output->writeString($kiter1255); - $xfer += $output->writeString($viter1256); + $xfer += $output->writeString($kiter1262); + $xfer += $output->writeString($viter1263); } } $output->writeMapEnd(); @@ -31275,17 +31455,17 @@ class ThriftHiveMetastore_exchange_partitions_args { case 1: if ($ftype == TType::MAP) { $this->partitionSpecs = array(); - $_size1257 = 0; - $_ktype1258 = 0; - $_vtype1259 = 0; - $xfer += $input->readMapBegin($_ktype1258, $_vtype1259, $_size1257); - for ($_i1261 = 0; $_i1261 < $_size1257; ++$_i1261) + $_size1264 = 0; + $_ktype1265 = 0; + $_vtype1266 = 0; + $xfer += $input->readMapBegin($_ktype1265, $_vtype1266, $_size1264); + for ($_i1268 = 0; $_i1268 < $_size1264; ++$_i1268) { - $key1262 = ''; - $val1263 = ''; - $xfer += $input->readString($key1262); - $xfer += $input->readString($val1263); - $this->partitionSpecs[$key1262] = $val1263; + $key1269 = ''; + $val1270 = ''; + $xfer += $input->readString($key1269); + $xfer += $input->readString($val1270); + $this->partitionSpecs[$key1269] = $val1270; } $xfer += $input->readMapEnd(); } else { @@ -31341,10 +31521,10 @@ class ThriftHiveMetastore_exchange_partitions_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs)); { - foreach ($this->partitionSpecs as $kiter1264 => $viter1265) + foreach ($this->partitionSpecs as $kiter1271 => $viter1272) { - $xfer += $output->writeString($kiter1264); - $xfer += $output->writeString($viter1265); + $xfer += $output->writeString($kiter1271); + $xfer += $output->writeString($viter1272); } } $output->writeMapEnd(); @@ -31477,15 +31657,15 @@ class ThriftHiveMetastore_exchange_partitions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1266 = 0; 
- $_etype1269 = 0; - $xfer += $input->readListBegin($_etype1269, $_size1266); - for ($_i1270 = 0; $_i1270 < $_size1266; ++$_i1270) + $_size1273 = 0; + $_etype1276 = 0; + $xfer += $input->readListBegin($_etype1276, $_size1273); + for ($_i1277 = 0; $_i1277 < $_size1273; ++$_i1277) { - $elem1271 = null; - $elem1271 = new \metastore\Partition(); - $xfer += $elem1271->read($input); - $this->success []= $elem1271; + $elem1278 = null; + $elem1278 = new \metastore\Partition(); + $xfer += $elem1278->read($input); + $this->success []= $elem1278; } $xfer += $input->readListEnd(); } else { @@ -31545,9 +31725,9 @@ class ThriftHiveMetastore_exchange_partitions_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1272) + foreach ($this->success as $iter1279) { - $xfer += $iter1272->write($output); + $xfer += $iter1279->write($output); } } $output->writeListEnd(); @@ -31604,6 +31784,10 @@ class ThriftHiveMetastore_get_partition_with_auth_args { * @var string[] */ public $group_names = null; + /** + * @var string + */ + public $validTxnList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -31636,6 +31820,10 @@ class ThriftHiveMetastore_get_partition_with_auth_args { 'type' => TType::STRING, ), ), + 6 => array( + 'var' => 'validTxnList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -31654,6 +31842,9 @@ class ThriftHiveMetastore_get_partition_with_auth_args { if (isset($vals['group_names'])) { $this->group_names = $vals['group_names']; } + if (isset($vals['validTxnList'])) { + $this->validTxnList = $vals['validTxnList']; + } } } @@ -31693,14 +31884,14 @@ class ThriftHiveMetastore_get_partition_with_auth_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1273 = 0; - $_etype1276 = 0; - $xfer += $input->readListBegin($_etype1276, $_size1273); - for ($_i1277 = 0; $_i1277 < $_size1273; ++$_i1277) + $_size1280 = 0; + $_etype1283 = 0; + $xfer += $input->readListBegin($_etype1283, $_size1280); + for ($_i1284 = 0; $_i1284 < $_size1280; ++$_i1284) { - $elem1278 = null; - $xfer += $input->readString($elem1278); - $this->part_vals []= $elem1278; + $elem1285 = null; + $xfer += $input->readString($elem1285); + $this->part_vals []= $elem1285; } $xfer += $input->readListEnd(); } else { @@ -31717,20 +31908,27 @@ class ThriftHiveMetastore_get_partition_with_auth_args { case 5: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1279 = 0; - $_etype1282 = 0; - $xfer += $input->readListBegin($_etype1282, $_size1279); - for ($_i1283 = 0; $_i1283 < $_size1279; ++$_i1283) + $_size1286 = 0; + $_etype1289 = 0; + $xfer += $input->readListBegin($_etype1289, $_size1286); + for ($_i1290 = 0; $_i1290 < $_size1286; ++$_i1290) { - $elem1284 = null; - $xfer += $input->readString($elem1284); - $this->group_names []= $elem1284; + $elem1291 = null; + $xfer += $input->readString($elem1291); + $this->group_names []= $elem1291; } $xfer += $input->readListEnd(); } else { $xfer += $input->skip($ftype); } break; + case 6: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validTxnList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -31762,9 +31960,9 @@ class ThriftHiveMetastore_get_partition_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1285) + foreach ($this->part_vals as $iter1292) { - $xfer += $output->writeString($iter1285); + $xfer += 
$output->writeString($iter1292); } } $output->writeListEnd(); @@ -31784,15 +31982,20 @@ class ThriftHiveMetastore_get_partition_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1286) + foreach ($this->group_names as $iter1293) { - $xfer += $output->writeString($iter1286); + $xfer += $output->writeString($iter1293); } } $output->writeListEnd(); } $xfer += $output->writeFieldEnd(); } + if ($this->validTxnList !== null) { + $xfer += $output->writeFieldBegin('validTxnList', TType::STRING, 6); + $xfer += $output->writeString($this->validTxnList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -31945,6 +32148,10 @@ class ThriftHiveMetastore_get_partition_by_name_args { * @var string */ public $part_name = null; + /** + * @var string + */ + public $validTxnList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -31961,6 +32168,10 @@ class ThriftHiveMetastore_get_partition_by_name_args { 'var' => 'part_name', 'type' => TType::STRING, ), + 4 => array( + 'var' => 'validTxnList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -31973,6 +32184,9 @@ class ThriftHiveMetastore_get_partition_by_name_args { if (isset($vals['part_name'])) { $this->part_name = $vals['part_name']; } + if (isset($vals['validTxnList'])) { + $this->validTxnList = $vals['validTxnList']; + } } } @@ -32016,6 +32230,13 @@ class ThriftHiveMetastore_get_partition_by_name_args { $xfer += $input->skip($ftype); } break; + case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validTxnList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -32044,6 +32265,11 @@ class ThriftHiveMetastore_get_partition_by_name_args { $xfer += $output->writeString($this->part_name); $xfer += $output->writeFieldEnd(); } + if ($this->validTxnList !== null) { + $xfer += $output->writeFieldBegin('validTxnList', TType::STRING, 4); + $xfer += $output->writeString($this->validTxnList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -32196,6 +32422,10 @@ class ThriftHiveMetastore_get_partitions_args { * @var int */ public $max_parts = -1; + /** + * @var string + */ + public $validTxnList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -32212,6 +32442,10 @@ class ThriftHiveMetastore_get_partitions_args { 'var' => 'max_parts', 'type' => TType::I16, ), + 4 => array( + 'var' => 'validTxnList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -32224,6 +32458,9 @@ class ThriftHiveMetastore_get_partitions_args { if (isset($vals['max_parts'])) { $this->max_parts = $vals['max_parts']; } + if (isset($vals['validTxnList'])) { + $this->validTxnList = $vals['validTxnList']; + } } } @@ -32267,6 +32504,13 @@ class ThriftHiveMetastore_get_partitions_args { $xfer += $input->skip($ftype); } break; + case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validTxnList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -32295,6 +32539,11 @@ class ThriftHiveMetastore_get_partitions_args { $xfer += $output->writeI16($this->max_parts); $xfer += $output->writeFieldEnd(); } + if ($this->validTxnList !== null) { + $xfer += $output->writeFieldBegin('validTxnList', TType::STRING, 4); + $xfer += 
$output->writeString($this->validTxnList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -32377,15 +32626,15 @@ class ThriftHiveMetastore_get_partitions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1287 = 0; - $_etype1290 = 0; - $xfer += $input->readListBegin($_etype1290, $_size1287); - for ($_i1291 = 0; $_i1291 < $_size1287; ++$_i1291) + $_size1294 = 0; + $_etype1297 = 0; + $xfer += $input->readListBegin($_etype1297, $_size1294); + for ($_i1298 = 0; $_i1298 < $_size1294; ++$_i1298) { - $elem1292 = null; - $elem1292 = new \metastore\Partition(); - $xfer += $elem1292->read($input); - $this->success []= $elem1292; + $elem1299 = null; + $elem1299 = new \metastore\Partition(); + $xfer += $elem1299->read($input); + $this->success []= $elem1299; } $xfer += $input->readListEnd(); } else { @@ -32429,9 +32678,9 @@ class ThriftHiveMetastore_get_partitions_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1293) + foreach ($this->success as $iter1300) { - $xfer += $iter1293->write($output); + $xfer += $iter1300->write($output); } } $output->writeListEnd(); @@ -32478,6 +32727,10 @@ class ThriftHiveMetastore_get_partitions_with_auth_args { * @var string[] */ public $group_names = null; + /** + * @var string + */ + public $validTxnList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -32506,6 +32759,10 @@ class ThriftHiveMetastore_get_partitions_with_auth_args { 'type' => TType::STRING, ), ), + 6 => array( + 'var' => 'validTxnList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -32524,6 +32781,9 @@ class ThriftHiveMetastore_get_partitions_with_auth_args { if (isset($vals['group_names'])) { $this->group_names = $vals['group_names']; } + if (isset($vals['validTxnList'])) { + $this->validTxnList = $vals['validTxnList']; + } } } @@ -32577,20 +32837,27 @@ class ThriftHiveMetastore_get_partitions_with_auth_args { case 5: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1294 = 0; - $_etype1297 = 0; - $xfer += $input->readListBegin($_etype1297, $_size1294); - for ($_i1298 = 0; $_i1298 < $_size1294; ++$_i1298) + $_size1301 = 0; + $_etype1304 = 0; + $xfer += $input->readListBegin($_etype1304, $_size1301); + for ($_i1305 = 0; $_i1305 < $_size1301; ++$_i1305) { - $elem1299 = null; - $xfer += $input->readString($elem1299); - $this->group_names []= $elem1299; + $elem1306 = null; + $xfer += $input->readString($elem1306); + $this->group_names []= $elem1306; } $xfer += $input->readListEnd(); } else { $xfer += $input->skip($ftype); } break; + case 6: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validTxnList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -32632,15 +32899,20 @@ class ThriftHiveMetastore_get_partitions_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1300) + foreach ($this->group_names as $iter1307) { - $xfer += $output->writeString($iter1300); + $xfer += $output->writeString($iter1307); } } $output->writeListEnd(); } $xfer += $output->writeFieldEnd(); } + if ($this->validTxnList !== null) { + $xfer += $output->writeFieldBegin('validTxnList', TType::STRING, 6); + $xfer += $output->writeString($this->validTxnList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += 
$output->writeStructEnd(); return $xfer; @@ -32723,15 +32995,15 @@ class ThriftHiveMetastore_get_partitions_with_auth_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1301 = 0; - $_etype1304 = 0; - $xfer += $input->readListBegin($_etype1304, $_size1301); - for ($_i1305 = 0; $_i1305 < $_size1301; ++$_i1305) + $_size1308 = 0; + $_etype1311 = 0; + $xfer += $input->readListBegin($_etype1311, $_size1308); + for ($_i1312 = 0; $_i1312 < $_size1308; ++$_i1312) { - $elem1306 = null; - $elem1306 = new \metastore\Partition(); - $xfer += $elem1306->read($input); - $this->success []= $elem1306; + $elem1313 = null; + $elem1313 = new \metastore\Partition(); + $xfer += $elem1313->read($input); + $this->success []= $elem1313; } $xfer += $input->readListEnd(); } else { @@ -32775,9 +33047,9 @@ class ThriftHiveMetastore_get_partitions_with_auth_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1307) + foreach ($this->success as $iter1314) { - $xfer += $iter1307->write($output); + $xfer += $iter1314->write($output); } } $output->writeListEnd(); @@ -32816,6 +33088,10 @@ class ThriftHiveMetastore_get_partitions_pspec_args { * @var int */ public $max_parts = -1; + /** + * @var string + */ + public $validTxnList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -32832,6 +33108,10 @@ class ThriftHiveMetastore_get_partitions_pspec_args { 'var' => 'max_parts', 'type' => TType::I32, ), + 4 => array( + 'var' => 'validTxnList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -32844,6 +33124,9 @@ class ThriftHiveMetastore_get_partitions_pspec_args { if (isset($vals['max_parts'])) { $this->max_parts = $vals['max_parts']; } + if (isset($vals['validTxnList'])) { + $this->validTxnList = $vals['validTxnList']; + } } } @@ -32887,6 +33170,13 @@ class ThriftHiveMetastore_get_partitions_pspec_args { $xfer += $input->skip($ftype); } break; + case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validTxnList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -32915,6 +33205,11 @@ class ThriftHiveMetastore_get_partitions_pspec_args { $xfer += $output->writeI32($this->max_parts); $xfer += $output->writeFieldEnd(); } + if ($this->validTxnList !== null) { + $xfer += $output->writeFieldBegin('validTxnList', TType::STRING, 4); + $xfer += $output->writeString($this->validTxnList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -32997,15 +33292,15 @@ class ThriftHiveMetastore_get_partitions_pspec_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1308 = 0; - $_etype1311 = 0; - $xfer += $input->readListBegin($_etype1311, $_size1308); - for ($_i1312 = 0; $_i1312 < $_size1308; ++$_i1312) + $_size1315 = 0; + $_etype1318 = 0; + $xfer += $input->readListBegin($_etype1318, $_size1315); + for ($_i1319 = 0; $_i1319 < $_size1315; ++$_i1319) { - $elem1313 = null; - $elem1313 = new \metastore\PartitionSpec(); - $xfer += $elem1313->read($input); - $this->success []= $elem1313; + $elem1320 = null; + $elem1320 = new \metastore\PartitionSpec(); + $xfer += $elem1320->read($input); + $this->success []= $elem1320; } $xfer += $input->readListEnd(); } else { @@ -33049,9 +33344,9 @@ class ThriftHiveMetastore_get_partitions_pspec_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as 
$iter1314) + foreach ($this->success as $iter1321) { - $xfer += $iter1314->write($output); + $xfer += $iter1321->write($output); } } $output->writeListEnd(); @@ -33090,6 +33385,10 @@ class ThriftHiveMetastore_get_partition_names_args { * @var int */ public $max_parts = -1; + /** + * @var string + */ + public $validTxnList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -33106,6 +33405,10 @@ class ThriftHiveMetastore_get_partition_names_args { 'var' => 'max_parts', 'type' => TType::I16, ), + 4 => array( + 'var' => 'validTxnList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -33118,6 +33421,9 @@ class ThriftHiveMetastore_get_partition_names_args { if (isset($vals['max_parts'])) { $this->max_parts = $vals['max_parts']; } + if (isset($vals['validTxnList'])) { + $this->validTxnList = $vals['validTxnList']; + } } } @@ -33161,6 +33467,13 @@ class ThriftHiveMetastore_get_partition_names_args { $xfer += $input->skip($ftype); } break; + case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validTxnList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -33189,6 +33502,11 @@ class ThriftHiveMetastore_get_partition_names_args { $xfer += $output->writeI16($this->max_parts); $xfer += $output->writeFieldEnd(); } + if ($this->validTxnList !== null) { + $xfer += $output->writeFieldBegin('validTxnList', TType::STRING, 4); + $xfer += $output->writeString($this->validTxnList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -33270,14 +33588,14 @@ class ThriftHiveMetastore_get_partition_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1315 = 0; - $_etype1318 = 0; - $xfer += $input->readListBegin($_etype1318, $_size1315); - for ($_i1319 = 0; $_i1319 < $_size1315; ++$_i1319) + $_size1322 = 0; + $_etype1325 = 0; + $xfer += $input->readListBegin($_etype1325, $_size1322); + for ($_i1326 = 0; $_i1326 < $_size1322; ++$_i1326) { - $elem1320 = null; - $xfer += $input->readString($elem1320); - $this->success []= $elem1320; + $elem1327 = null; + $xfer += $input->readString($elem1327); + $this->success []= $elem1327; } $xfer += $input->readListEnd(); } else { @@ -33321,9 +33639,9 @@ class ThriftHiveMetastore_get_partition_names_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1321) + foreach ($this->success as $iter1328) { - $xfer += $output->writeString($iter1321); + $xfer += $output->writeString($iter1328); } } $output->writeListEnd(); @@ -33576,6 +33894,10 @@ class ThriftHiveMetastore_get_partitions_ps_args { * @var int */ public $max_parts = -1; + /** + * @var string + */ + public $validTxnList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -33600,6 +33922,10 @@ class ThriftHiveMetastore_get_partitions_ps_args { 'var' => 'max_parts', 'type' => TType::I16, ), + 5 => array( + 'var' => 'validTxnList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -33615,6 +33941,9 @@ class ThriftHiveMetastore_get_partitions_ps_args { if (isset($vals['max_parts'])) { $this->max_parts = $vals['max_parts']; } + if (isset($vals['validTxnList'])) { + $this->validTxnList = $vals['validTxnList']; + } } } @@ -33654,14 +33983,14 @@ class ThriftHiveMetastore_get_partitions_ps_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1322 = 0; - $_etype1325 = 0; - $xfer 
+= $input->readListBegin($_etype1325, $_size1322); - for ($_i1326 = 0; $_i1326 < $_size1322; ++$_i1326) + $_size1329 = 0; + $_etype1332 = 0; + $xfer += $input->readListBegin($_etype1332, $_size1329); + for ($_i1333 = 0; $_i1333 < $_size1329; ++$_i1333) { - $elem1327 = null; - $xfer += $input->readString($elem1327); - $this->part_vals []= $elem1327; + $elem1334 = null; + $xfer += $input->readString($elem1334); + $this->part_vals []= $elem1334; } $xfer += $input->readListEnd(); } else { @@ -33675,6 +34004,13 @@ class ThriftHiveMetastore_get_partitions_ps_args { $xfer += $input->skip($ftype); } break; + case 5: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validTxnList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -33706,9 +34042,9 @@ class ThriftHiveMetastore_get_partitions_ps_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1328) + foreach ($this->part_vals as $iter1335) { - $xfer += $output->writeString($iter1328); + $xfer += $output->writeString($iter1335); } } $output->writeListEnd(); @@ -33720,6 +34056,11 @@ class ThriftHiveMetastore_get_partitions_ps_args { $xfer += $output->writeI16($this->max_parts); $xfer += $output->writeFieldEnd(); } + if ($this->validTxnList !== null) { + $xfer += $output->writeFieldBegin('validTxnList', TType::STRING, 5); + $xfer += $output->writeString($this->validTxnList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -33802,15 +34143,15 @@ class ThriftHiveMetastore_get_partitions_ps_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1329 = 0; - $_etype1332 = 0; - $xfer += $input->readListBegin($_etype1332, $_size1329); - for ($_i1333 = 0; $_i1333 < $_size1329; ++$_i1333) + $_size1336 = 0; + $_etype1339 = 0; + $xfer += $input->readListBegin($_etype1339, $_size1336); + for ($_i1340 = 0; $_i1340 < $_size1336; ++$_i1340) { - $elem1334 = null; - $elem1334 = new \metastore\Partition(); - $xfer += $elem1334->read($input); - $this->success []= $elem1334; + $elem1341 = null; + $elem1341 = new \metastore\Partition(); + $xfer += $elem1341->read($input); + $this->success []= $elem1341; } $xfer += $input->readListEnd(); } else { @@ -33854,9 +34195,9 @@ class ThriftHiveMetastore_get_partitions_ps_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1335) + foreach ($this->success as $iter1342) { - $xfer += $iter1335->write($output); + $xfer += $iter1342->write($output); } } $output->writeListEnd(); @@ -33907,6 +34248,10 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { * @var string[] */ public $group_names = null; + /** + * @var string + */ + public $validTxnList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -33943,6 +34288,10 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { 'type' => TType::STRING, ), ), + 7 => array( + 'var' => 'validTxnList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -33964,6 +34313,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { if (isset($vals['group_names'])) { $this->group_names = $vals['group_names']; } + if (isset($vals['validTxnList'])) { + $this->validTxnList = $vals['validTxnList']; + } } } @@ -34003,14 +34355,14 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { case 3: if ($ftype == TType::LST) { $this->part_vals 
= array(); - $_size1336 = 0; - $_etype1339 = 0; - $xfer += $input->readListBegin($_etype1339, $_size1336); - for ($_i1340 = 0; $_i1340 < $_size1336; ++$_i1340) + $_size1343 = 0; + $_etype1346 = 0; + $xfer += $input->readListBegin($_etype1346, $_size1343); + for ($_i1347 = 0; $_i1347 < $_size1343; ++$_i1347) { - $elem1341 = null; - $xfer += $input->readString($elem1341); - $this->part_vals []= $elem1341; + $elem1348 = null; + $xfer += $input->readString($elem1348); + $this->part_vals []= $elem1348; } $xfer += $input->readListEnd(); } else { @@ -34034,20 +34386,27 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { case 6: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1342 = 0; - $_etype1345 = 0; - $xfer += $input->readListBegin($_etype1345, $_size1342); - for ($_i1346 = 0; $_i1346 < $_size1342; ++$_i1346) + $_size1349 = 0; + $_etype1352 = 0; + $xfer += $input->readListBegin($_etype1352, $_size1349); + for ($_i1353 = 0; $_i1353 < $_size1349; ++$_i1353) { - $elem1347 = null; - $xfer += $input->readString($elem1347); - $this->group_names []= $elem1347; + $elem1354 = null; + $xfer += $input->readString($elem1354); + $this->group_names []= $elem1354; } $xfer += $input->readListEnd(); } else { $xfer += $input->skip($ftype); } break; + case 7: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validTxnList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -34079,9 +34438,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1348) + foreach ($this->part_vals as $iter1355) { - $xfer += $output->writeString($iter1348); + $xfer += $output->writeString($iter1355); } } $output->writeListEnd(); @@ -34106,15 +34465,20 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1349) + foreach ($this->group_names as $iter1356) { - $xfer += $output->writeString($iter1349); + $xfer += $output->writeString($iter1356); } } $output->writeListEnd(); } $xfer += $output->writeFieldEnd(); } + if ($this->validTxnList !== null) { + $xfer += $output->writeFieldBegin('validTxnList', TType::STRING, 7); + $xfer += $output->writeString($this->validTxnList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -34197,15 +34561,15 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1350 = 0; - $_etype1353 = 0; - $xfer += $input->readListBegin($_etype1353, $_size1350); - for ($_i1354 = 0; $_i1354 < $_size1350; ++$_i1354) + $_size1357 = 0; + $_etype1360 = 0; + $xfer += $input->readListBegin($_etype1360, $_size1357); + for ($_i1361 = 0; $_i1361 < $_size1357; ++$_i1361) { - $elem1355 = null; - $elem1355 = new \metastore\Partition(); - $xfer += $elem1355->read($input); - $this->success []= $elem1355; + $elem1362 = null; + $elem1362 = new \metastore\Partition(); + $xfer += $elem1362->read($input); + $this->success []= $elem1362; } $xfer += $input->readListEnd(); } else { @@ -34249,9 +34613,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1356) + foreach ($this->success as $iter1363) { - $xfer += 
$iter1356->write($output); + $xfer += $iter1363->write($output); } } $output->writeListEnd(); @@ -34294,6 +34658,10 @@ class ThriftHiveMetastore_get_partition_names_ps_args { * @var int */ public $max_parts = -1; + /** + * @var string + */ + public $validTxnList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -34318,6 +34686,10 @@ class ThriftHiveMetastore_get_partition_names_ps_args { 'var' => 'max_parts', 'type' => TType::I16, ), + 5 => array( + 'var' => 'validTxnList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -34333,6 +34705,9 @@ class ThriftHiveMetastore_get_partition_names_ps_args { if (isset($vals['max_parts'])) { $this->max_parts = $vals['max_parts']; } + if (isset($vals['validTxnList'])) { + $this->validTxnList = $vals['validTxnList']; + } } } @@ -34372,14 +34747,14 @@ class ThriftHiveMetastore_get_partition_names_ps_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1357 = 0; - $_etype1360 = 0; - $xfer += $input->readListBegin($_etype1360, $_size1357); - for ($_i1361 = 0; $_i1361 < $_size1357; ++$_i1361) + $_size1364 = 0; + $_etype1367 = 0; + $xfer += $input->readListBegin($_etype1367, $_size1364); + for ($_i1368 = 0; $_i1368 < $_size1364; ++$_i1368) { - $elem1362 = null; - $xfer += $input->readString($elem1362); - $this->part_vals []= $elem1362; + $elem1369 = null; + $xfer += $input->readString($elem1369); + $this->part_vals []= $elem1369; } $xfer += $input->readListEnd(); } else { @@ -34393,6 +34768,13 @@ class ThriftHiveMetastore_get_partition_names_ps_args { $xfer += $input->skip($ftype); } break; + case 5: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validTxnList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -34424,9 +34806,9 @@ class ThriftHiveMetastore_get_partition_names_ps_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1363) + foreach ($this->part_vals as $iter1370) { - $xfer += $output->writeString($iter1363); + $xfer += $output->writeString($iter1370); } } $output->writeListEnd(); @@ -34438,6 +34820,11 @@ class ThriftHiveMetastore_get_partition_names_ps_args { $xfer += $output->writeI16($this->max_parts); $xfer += $output->writeFieldEnd(); } + if ($this->validTxnList !== null) { + $xfer += $output->writeFieldBegin('validTxnList', TType::STRING, 5); + $xfer += $output->writeString($this->validTxnList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -34519,14 +34906,14 @@ class ThriftHiveMetastore_get_partition_names_ps_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1364 = 0; - $_etype1367 = 0; - $xfer += $input->readListBegin($_etype1367, $_size1364); - for ($_i1368 = 0; $_i1368 < $_size1364; ++$_i1368) + $_size1371 = 0; + $_etype1374 = 0; + $xfer += $input->readListBegin($_etype1374, $_size1371); + for ($_i1375 = 0; $_i1375 < $_size1371; ++$_i1375) { - $elem1369 = null; - $xfer += $input->readString($elem1369); - $this->success []= $elem1369; + $elem1376 = null; + $xfer += $input->readString($elem1376); + $this->success []= $elem1376; } $xfer += $input->readListEnd(); } else { @@ -34570,9 +34957,9 @@ class ThriftHiveMetastore_get_partition_names_ps_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1370) + foreach ($this->success as $iter1377) { - $xfer += 
$output->writeString($iter1370); + $xfer += $output->writeString($iter1377); } } $output->writeListEnd(); @@ -34615,6 +35002,10 @@ class ThriftHiveMetastore_get_partitions_by_filter_args { * @var int */ public $max_parts = -1; + /** + * @var string + */ + public $validTxnList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -34635,6 +35026,10 @@ class ThriftHiveMetastore_get_partitions_by_filter_args { 'var' => 'max_parts', 'type' => TType::I16, ), + 5 => array( + 'var' => 'validTxnList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -34650,6 +35045,9 @@ class ThriftHiveMetastore_get_partitions_by_filter_args { if (isset($vals['max_parts'])) { $this->max_parts = $vals['max_parts']; } + if (isset($vals['validTxnList'])) { + $this->validTxnList = $vals['validTxnList']; + } } } @@ -34700,6 +35098,13 @@ class ThriftHiveMetastore_get_partitions_by_filter_args { $xfer += $input->skip($ftype); } break; + case 5: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validTxnList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -34733,6 +35138,11 @@ class ThriftHiveMetastore_get_partitions_by_filter_args { $xfer += $output->writeI16($this->max_parts); $xfer += $output->writeFieldEnd(); } + if ($this->validTxnList !== null) { + $xfer += $output->writeFieldBegin('validTxnList', TType::STRING, 5); + $xfer += $output->writeString($this->validTxnList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -34815,15 +35225,15 @@ class ThriftHiveMetastore_get_partitions_by_filter_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1371 = 0; - $_etype1374 = 0; - $xfer += $input->readListBegin($_etype1374, $_size1371); - for ($_i1375 = 0; $_i1375 < $_size1371; ++$_i1375) + $_size1378 = 0; + $_etype1381 = 0; + $xfer += $input->readListBegin($_etype1381, $_size1378); + for ($_i1382 = 0; $_i1382 < $_size1378; ++$_i1382) { - $elem1376 = null; - $elem1376 = new \metastore\Partition(); - $xfer += $elem1376->read($input); - $this->success []= $elem1376; + $elem1383 = null; + $elem1383 = new \metastore\Partition(); + $xfer += $elem1383->read($input); + $this->success []= $elem1383; } $xfer += $input->readListEnd(); } else { @@ -34867,9 +35277,9 @@ class ThriftHiveMetastore_get_partitions_by_filter_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1377) + foreach ($this->success as $iter1384) { - $xfer += $iter1377->write($output); + $xfer += $iter1384->write($output); } } $output->writeListEnd(); @@ -34912,6 +35322,10 @@ class ThriftHiveMetastore_get_part_specs_by_filter_args { * @var int */ public $max_parts = -1; + /** + * @var string + */ + public $validTxnList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -34932,6 +35346,10 @@ class ThriftHiveMetastore_get_part_specs_by_filter_args { 'var' => 'max_parts', 'type' => TType::I32, ), + 5 => array( + 'var' => 'validTxnList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -34947,6 +35365,9 @@ class ThriftHiveMetastore_get_part_specs_by_filter_args { if (isset($vals['max_parts'])) { $this->max_parts = $vals['max_parts']; } + if (isset($vals['validTxnList'])) { + $this->validTxnList = $vals['validTxnList']; + } } } @@ -34997,6 +35418,13 @@ class ThriftHiveMetastore_get_part_specs_by_filter_args { $xfer += $input->skip($ftype); } 
break; + case 5: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validTxnList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -35030,6 +35458,11 @@ class ThriftHiveMetastore_get_part_specs_by_filter_args { $xfer += $output->writeI32($this->max_parts); $xfer += $output->writeFieldEnd(); } + if ($this->validTxnList !== null) { + $xfer += $output->writeFieldBegin('validTxnList', TType::STRING, 5); + $xfer += $output->writeString($this->validTxnList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -35112,15 +35545,15 @@ class ThriftHiveMetastore_get_part_specs_by_filter_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1378 = 0; - $_etype1381 = 0; - $xfer += $input->readListBegin($_etype1381, $_size1378); - for ($_i1382 = 0; $_i1382 < $_size1378; ++$_i1382) + $_size1385 = 0; + $_etype1388 = 0; + $xfer += $input->readListBegin($_etype1388, $_size1385); + for ($_i1389 = 0; $_i1389 < $_size1385; ++$_i1389) { - $elem1383 = null; - $elem1383 = new \metastore\PartitionSpec(); - $xfer += $elem1383->read($input); - $this->success []= $elem1383; + $elem1390 = null; + $elem1390 = new \metastore\PartitionSpec(); + $xfer += $elem1390->read($input); + $this->success []= $elem1390; } $xfer += $input->readListEnd(); } else { @@ -35164,9 +35597,9 @@ class ThriftHiveMetastore_get_part_specs_by_filter_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1384) + foreach ($this->success as $iter1391) { - $xfer += $iter1384->write($output); + $xfer += $iter1391->write($output); } } $output->writeListEnd(); @@ -35415,6 +35848,10 @@ class ThriftHiveMetastore_get_num_partitions_by_filter_args { * @var string */ public $filter = null; + /** + * @var string + */ + public $validTxnList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -35431,6 +35868,10 @@ class ThriftHiveMetastore_get_num_partitions_by_filter_args { 'var' => 'filter', 'type' => TType::STRING, ), + 4 => array( + 'var' => 'validTxnList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -35443,6 +35884,9 @@ class ThriftHiveMetastore_get_num_partitions_by_filter_args { if (isset($vals['filter'])) { $this->filter = $vals['filter']; } + if (isset($vals['validTxnList'])) { + $this->validTxnList = $vals['validTxnList']; + } } } @@ -35486,6 +35930,13 @@ class ThriftHiveMetastore_get_num_partitions_by_filter_args { $xfer += $input->skip($ftype); } break; + case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validTxnList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -35514,6 +35965,11 @@ class ThriftHiveMetastore_get_num_partitions_by_filter_args { $xfer += $output->writeString($this->filter); $xfer += $output->writeFieldEnd(); } + if ($this->validTxnList !== null) { + $xfer += $output->writeFieldBegin('validTxnList', TType::STRING, 4); + $xfer += $output->writeString($this->validTxnList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -35661,6 +36117,10 @@ class ThriftHiveMetastore_get_partitions_by_names_args { * @var string[] */ public $names = null; + /** + * @var string + */ + public $validTxnList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -35681,6 +36141,10 
@@ class ThriftHiveMetastore_get_partitions_by_names_args { 'type' => TType::STRING, ), ), + 4 => array( + 'var' => 'validTxnList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -35693,6 +36157,9 @@ class ThriftHiveMetastore_get_partitions_by_names_args { if (isset($vals['names'])) { $this->names = $vals['names']; } + if (isset($vals['validTxnList'])) { + $this->validTxnList = $vals['validTxnList']; + } } } @@ -35732,20 +36199,27 @@ class ThriftHiveMetastore_get_partitions_by_names_args { case 3: if ($ftype == TType::LST) { $this->names = array(); - $_size1385 = 0; - $_etype1388 = 0; - $xfer += $input->readListBegin($_etype1388, $_size1385); - for ($_i1389 = 0; $_i1389 < $_size1385; ++$_i1389) + $_size1392 = 0; + $_etype1395 = 0; + $xfer += $input->readListBegin($_etype1395, $_size1392); + for ($_i1396 = 0; $_i1396 < $_size1392; ++$_i1396) { - $elem1390 = null; - $xfer += $input->readString($elem1390); - $this->names []= $elem1390; + $elem1397 = null; + $xfer += $input->readString($elem1397); + $this->names []= $elem1397; } $xfer += $input->readListEnd(); } else { $xfer += $input->skip($ftype); } break; + case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validTxnList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -35777,15 +36251,20 @@ class ThriftHiveMetastore_get_partitions_by_names_args { { $output->writeListBegin(TType::STRING, count($this->names)); { - foreach ($this->names as $iter1391) + foreach ($this->names as $iter1398) { - $xfer += $output->writeString($iter1391); + $xfer += $output->writeString($iter1398); } } $output->writeListEnd(); } $xfer += $output->writeFieldEnd(); } + if ($this->validTxnList !== null) { + $xfer += $output->writeFieldBegin('validTxnList', TType::STRING, 4); + $xfer += $output->writeString($this->validTxnList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -35868,15 +36347,15 @@ class ThriftHiveMetastore_get_partitions_by_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1392 = 0; - $_etype1395 = 0; - $xfer += $input->readListBegin($_etype1395, $_size1392); - for ($_i1396 = 0; $_i1396 < $_size1392; ++$_i1396) + $_size1399 = 0; + $_etype1402 = 0; + $xfer += $input->readListBegin($_etype1402, $_size1399); + for ($_i1403 = 0; $_i1403 < $_size1399; ++$_i1403) { - $elem1397 = null; - $elem1397 = new \metastore\Partition(); - $xfer += $elem1397->read($input); - $this->success []= $elem1397; + $elem1404 = null; + $elem1404 = new \metastore\Partition(); + $xfer += $elem1404->read($input); + $this->success []= $elem1404; } $xfer += $input->readListEnd(); } else { @@ -35920,9 +36399,9 @@ class ThriftHiveMetastore_get_partitions_by_names_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1398) + foreach ($this->success as $iter1405) { - $xfer += $iter1398->write($output); + $xfer += $iter1405->write($output); } } $output->writeListEnd(); @@ -36471,15 +36950,15 @@ class ThriftHiveMetastore_alter_partitions_args { case 3: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size1399 = 0; - $_etype1402 = 0; - $xfer += $input->readListBegin($_etype1402, $_size1399); - for ($_i1403 = 0; $_i1403 < $_size1399; ++$_i1403) + $_size1406 = 0; + $_etype1409 = 0; + $xfer += $input->readListBegin($_etype1409, $_size1406); + for ($_i1410 = 0; $_i1410 < $_size1406; ++$_i1410) { - $elem1404 = 
null; - $elem1404 = new \metastore\Partition(); - $xfer += $elem1404->read($input); - $this->new_parts []= $elem1404; + $elem1411 = null; + $elem1411 = new \metastore\Partition(); + $xfer += $elem1411->read($input); + $this->new_parts []= $elem1411; } $xfer += $input->readListEnd(); } else { @@ -36517,9 +36996,9 @@ class ThriftHiveMetastore_alter_partitions_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter1405) + foreach ($this->new_parts as $iter1412) { - $xfer += $iter1405->write($output); + $xfer += $iter1412->write($output); } } $output->writeListEnd(); @@ -36734,15 +37213,15 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_args { case 3: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size1406 = 0; - $_etype1409 = 0; - $xfer += $input->readListBegin($_etype1409, $_size1406); - for ($_i1410 = 0; $_i1410 < $_size1406; ++$_i1410) + $_size1413 = 0; + $_etype1416 = 0; + $xfer += $input->readListBegin($_etype1416, $_size1413); + for ($_i1417 = 0; $_i1417 < $_size1413; ++$_i1417) { - $elem1411 = null; - $elem1411 = new \metastore\Partition(); - $xfer += $elem1411->read($input); - $this->new_parts []= $elem1411; + $elem1418 = null; + $elem1418 = new \metastore\Partition(); + $xfer += $elem1418->read($input); + $this->new_parts []= $elem1418; } $xfer += $input->readListEnd(); } else { @@ -36788,9 +37267,9 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter1412) + foreach ($this->new_parts as $iter1419) { - $xfer += $iter1412->write($output); + $xfer += $iter1419->write($output); } } $output->writeListEnd(); @@ -37478,14 +37957,14 @@ class ThriftHiveMetastore_rename_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1413 = 0; - $_etype1416 = 0; - $xfer += $input->readListBegin($_etype1416, $_size1413); - for ($_i1417 = 0; $_i1417 < $_size1413; ++$_i1417) + $_size1420 = 0; + $_etype1423 = 0; + $xfer += $input->readListBegin($_etype1423, $_size1420); + for ($_i1424 = 0; $_i1424 < $_size1420; ++$_i1424) { - $elem1418 = null; - $xfer += $input->readString($elem1418); - $this->part_vals []= $elem1418; + $elem1425 = null; + $xfer += $input->readString($elem1425); + $this->part_vals []= $elem1425; } $xfer += $input->readListEnd(); } else { @@ -37531,9 +38010,9 @@ class ThriftHiveMetastore_rename_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1419) + foreach ($this->part_vals as $iter1426) { - $xfer += $output->writeString($iter1419); + $xfer += $output->writeString($iter1426); } } $output->writeListEnd(); @@ -37928,14 +38407,14 @@ class ThriftHiveMetastore_partition_name_has_valid_characters_args { case 1: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1420 = 0; - $_etype1423 = 0; - $xfer += $input->readListBegin($_etype1423, $_size1420); - for ($_i1424 = 0; $_i1424 < $_size1420; ++$_i1424) + $_size1427 = 0; + $_etype1430 = 0; + $xfer += $input->readListBegin($_etype1430, $_size1427); + for ($_i1431 = 0; $_i1431 < $_size1427; ++$_i1431) { - $elem1425 = null; - $xfer += $input->readString($elem1425); - $this->part_vals []= $elem1425; + $elem1432 = null; + $xfer += $input->readString($elem1432); + $this->part_vals []= $elem1432; } $xfer += $input->readListEnd(); } else { @@ -37970,9 +38449,9 @@ class 
ThriftHiveMetastore_partition_name_has_valid_characters_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1426) + foreach ($this->part_vals as $iter1433) { - $xfer += $output->writeString($iter1426); + $xfer += $output->writeString($iter1433); } } $output->writeListEnd(); @@ -38426,14 +38905,14 @@ class ThriftHiveMetastore_partition_name_to_vals_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1427 = 0; - $_etype1430 = 0; - $xfer += $input->readListBegin($_etype1430, $_size1427); - for ($_i1431 = 0; $_i1431 < $_size1427; ++$_i1431) + $_size1434 = 0; + $_etype1437 = 0; + $xfer += $input->readListBegin($_etype1437, $_size1434); + for ($_i1438 = 0; $_i1438 < $_size1434; ++$_i1438) { - $elem1432 = null; - $xfer += $input->readString($elem1432); - $this->success []= $elem1432; + $elem1439 = null; + $xfer += $input->readString($elem1439); + $this->success []= $elem1439; } $xfer += $input->readListEnd(); } else { @@ -38469,9 +38948,9 @@ class ThriftHiveMetastore_partition_name_to_vals_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1433) + foreach ($this->success as $iter1440) { - $xfer += $output->writeString($iter1433); + $xfer += $output->writeString($iter1440); } } $output->writeListEnd(); @@ -38631,17 +39110,17 @@ class ThriftHiveMetastore_partition_name_to_spec_result { case 0: if ($ftype == TType::MAP) { $this->success = array(); - $_size1434 = 0; - $_ktype1435 = 0; - $_vtype1436 = 0; - $xfer += $input->readMapBegin($_ktype1435, $_vtype1436, $_size1434); - for ($_i1438 = 0; $_i1438 < $_size1434; ++$_i1438) + $_size1441 = 0; + $_ktype1442 = 0; + $_vtype1443 = 0; + $xfer += $input->readMapBegin($_ktype1442, $_vtype1443, $_size1441); + for ($_i1445 = 0; $_i1445 < $_size1441; ++$_i1445) { - $key1439 = ''; - $val1440 = ''; - $xfer += $input->readString($key1439); - $xfer += $input->readString($val1440); - $this->success[$key1439] = $val1440; + $key1446 = ''; + $val1447 = ''; + $xfer += $input->readString($key1446); + $xfer += $input->readString($val1447); + $this->success[$key1446] = $val1447; } $xfer += $input->readMapEnd(); } else { @@ -38677,10 +39156,10 @@ class ThriftHiveMetastore_partition_name_to_spec_result { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->success)); { - foreach ($this->success as $kiter1441 => $viter1442) + foreach ($this->success as $kiter1448 => $viter1449) { - $xfer += $output->writeString($kiter1441); - $xfer += $output->writeString($viter1442); + $xfer += $output->writeString($kiter1448); + $xfer += $output->writeString($viter1449); } } $output->writeMapEnd(); @@ -38800,17 +39279,17 @@ class ThriftHiveMetastore_markPartitionForEvent_args { case 3: if ($ftype == TType::MAP) { $this->part_vals = array(); - $_size1443 = 0; - $_ktype1444 = 0; - $_vtype1445 = 0; - $xfer += $input->readMapBegin($_ktype1444, $_vtype1445, $_size1443); - for ($_i1447 = 0; $_i1447 < $_size1443; ++$_i1447) + $_size1450 = 0; + $_ktype1451 = 0; + $_vtype1452 = 0; + $xfer += $input->readMapBegin($_ktype1451, $_vtype1452, $_size1450); + for ($_i1454 = 0; $_i1454 < $_size1450; ++$_i1454) { - $key1448 = ''; - $val1449 = ''; - $xfer += $input->readString($key1448); - $xfer += $input->readString($val1449); - $this->part_vals[$key1448] = $val1449; + $key1455 = ''; + $val1456 = ''; + $xfer += $input->readString($key1455); + $xfer += $input->readString($val1456); + $this->part_vals[$key1455] = $val1456; } $xfer += 
$input->readMapEnd(); } else { @@ -38855,10 +39334,10 @@ class ThriftHiveMetastore_markPartitionForEvent_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $kiter1450 => $viter1451) + foreach ($this->part_vals as $kiter1457 => $viter1458) { - $xfer += $output->writeString($kiter1450); - $xfer += $output->writeString($viter1451); + $xfer += $output->writeString($kiter1457); + $xfer += $output->writeString($viter1458); } } $output->writeMapEnd(); @@ -39180,17 +39659,17 @@ class ThriftHiveMetastore_isPartitionMarkedForEvent_args { case 3: if ($ftype == TType::MAP) { $this->part_vals = array(); - $_size1452 = 0; - $_ktype1453 = 0; - $_vtype1454 = 0; - $xfer += $input->readMapBegin($_ktype1453, $_vtype1454, $_size1452); - for ($_i1456 = 0; $_i1456 < $_size1452; ++$_i1456) + $_size1459 = 0; + $_ktype1460 = 0; + $_vtype1461 = 0; + $xfer += $input->readMapBegin($_ktype1460, $_vtype1461, $_size1459); + for ($_i1463 = 0; $_i1463 < $_size1459; ++$_i1463) { - $key1457 = ''; - $val1458 = ''; - $xfer += $input->readString($key1457); - $xfer += $input->readString($val1458); - $this->part_vals[$key1457] = $val1458; + $key1464 = ''; + $val1465 = ''; + $xfer += $input->readString($key1464); + $xfer += $input->readString($val1465); + $this->part_vals[$key1464] = $val1465; } $xfer += $input->readMapEnd(); } else { @@ -39235,10 +39714,10 @@ class ThriftHiveMetastore_isPartitionMarkedForEvent_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $kiter1459 => $viter1460) + foreach ($this->part_vals as $kiter1466 => $viter1467) { - $xfer += $output->writeString($kiter1459); - $xfer += $output->writeString($viter1460); + $xfer += $output->writeString($kiter1466); + $xfer += $output->writeString($viter1467); } } $output->writeMapEnd(); @@ -41787,6 +42266,10 @@ class ThriftHiveMetastore_get_table_column_statistics_args { * @var string */ public $col_name = null; + /** + * @var string + */ + public $validWriteIdList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -41803,6 +42286,10 @@ class ThriftHiveMetastore_get_table_column_statistics_args { 'var' => 'col_name', 'type' => TType::STRING, ), + 4 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -41815,6 +42302,9 @@ class ThriftHiveMetastore_get_table_column_statistics_args { if (isset($vals['col_name'])) { $this->col_name = $vals['col_name']; } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } } } @@ -41858,6 +42348,13 @@ class ThriftHiveMetastore_get_table_column_statistics_args { $xfer += $input->skip($ftype); } break; + case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -41886,6 +42383,11 @@ class ThriftHiveMetastore_get_table_column_statistics_args { $xfer += $output->writeString($this->col_name); $xfer += $output->writeFieldEnd(); } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 4); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -42092,6 +42594,10 @@ class ThriftHiveMetastore_get_partition_column_statistics_args { * @var string */ 
public $col_name = null; + /** + * @var string + */ + public $validWriteIdList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -42112,6 +42618,10 @@ class ThriftHiveMetastore_get_partition_column_statistics_args { 'var' => 'col_name', 'type' => TType::STRING, ), + 5 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -42127,6 +42637,9 @@ class ThriftHiveMetastore_get_partition_column_statistics_args { if (isset($vals['col_name'])) { $this->col_name = $vals['col_name']; } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } } } @@ -42177,6 +42690,13 @@ class ThriftHiveMetastore_get_partition_column_statistics_args { $xfer += $input->skip($ftype); } break; + case 5: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -42210,6 +42730,11 @@ class ThriftHiveMetastore_get_partition_column_statistics_args { $xfer += $output->writeString($this->col_name); $xfer += $output->writeFieldEnd(); } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 5); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -44717,14 +45242,14 @@ class ThriftHiveMetastore_get_functions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1461 = 0; - $_etype1464 = 0; - $xfer += $input->readListBegin($_etype1464, $_size1461); - for ($_i1465 = 0; $_i1465 < $_size1461; ++$_i1465) + $_size1468 = 0; + $_etype1471 = 0; + $xfer += $input->readListBegin($_etype1471, $_size1468); + for ($_i1472 = 0; $_i1472 < $_size1468; ++$_i1472) { - $elem1466 = null; - $xfer += $input->readString($elem1466); - $this->success []= $elem1466; + $elem1473 = null; + $xfer += $input->readString($elem1473); + $this->success []= $elem1473; } $xfer += $input->readListEnd(); } else { @@ -44760,9 +45285,9 @@ class ThriftHiveMetastore_get_functions_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1467) + foreach ($this->success as $iter1474) { - $xfer += $output->writeString($iter1467); + $xfer += $output->writeString($iter1474); } } $output->writeListEnd(); @@ -45631,14 +46156,14 @@ class ThriftHiveMetastore_get_role_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1468 = 0; - $_etype1471 = 0; - $xfer += $input->readListBegin($_etype1471, $_size1468); - for ($_i1472 = 0; $_i1472 < $_size1468; ++$_i1472) + $_size1475 = 0; + $_etype1478 = 0; + $xfer += $input->readListBegin($_etype1478, $_size1475); + for ($_i1479 = 0; $_i1479 < $_size1475; ++$_i1479) { - $elem1473 = null; - $xfer += $input->readString($elem1473); - $this->success []= $elem1473; + $elem1480 = null; + $xfer += $input->readString($elem1480); + $this->success []= $elem1480; } $xfer += $input->readListEnd(); } else { @@ -45674,9 +46199,9 @@ class ThriftHiveMetastore_get_role_names_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1474) + foreach ($this->success as $iter1481) { - $xfer += $output->writeString($iter1474); + $xfer += $output->writeString($iter1481); } } $output->writeListEnd(); @@ -46367,15 +46892,15 @@ class 
ThriftHiveMetastore_list_roles_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1475 = 0; - $_etype1478 = 0; - $xfer += $input->readListBegin($_etype1478, $_size1475); - for ($_i1479 = 0; $_i1479 < $_size1475; ++$_i1479) + $_size1482 = 0; + $_etype1485 = 0; + $xfer += $input->readListBegin($_etype1485, $_size1482); + for ($_i1486 = 0; $_i1486 < $_size1482; ++$_i1486) { - $elem1480 = null; - $elem1480 = new \metastore\Role(); - $xfer += $elem1480->read($input); - $this->success []= $elem1480; + $elem1487 = null; + $elem1487 = new \metastore\Role(); + $xfer += $elem1487->read($input); + $this->success []= $elem1487; } $xfer += $input->readListEnd(); } else { @@ -46411,9 +46936,9 @@ class ThriftHiveMetastore_list_roles_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1481) + foreach ($this->success as $iter1488) { - $xfer += $iter1481->write($output); + $xfer += $iter1488->write($output); } } $output->writeListEnd(); @@ -47075,14 +47600,14 @@ class ThriftHiveMetastore_get_privilege_set_args { case 3: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1482 = 0; - $_etype1485 = 0; - $xfer += $input->readListBegin($_etype1485, $_size1482); - for ($_i1486 = 0; $_i1486 < $_size1482; ++$_i1486) + $_size1489 = 0; + $_etype1492 = 0; + $xfer += $input->readListBegin($_etype1492, $_size1489); + for ($_i1493 = 0; $_i1493 < $_size1489; ++$_i1493) { - $elem1487 = null; - $xfer += $input->readString($elem1487); - $this->group_names []= $elem1487; + $elem1494 = null; + $xfer += $input->readString($elem1494); + $this->group_names []= $elem1494; } $xfer += $input->readListEnd(); } else { @@ -47123,9 +47648,9 @@ class ThriftHiveMetastore_get_privilege_set_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1488) + foreach ($this->group_names as $iter1495) { - $xfer += $output->writeString($iter1488); + $xfer += $output->writeString($iter1495); } } $output->writeListEnd(); @@ -47433,15 +47958,15 @@ class ThriftHiveMetastore_list_privileges_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1489 = 0; - $_etype1492 = 0; - $xfer += $input->readListBegin($_etype1492, $_size1489); - for ($_i1493 = 0; $_i1493 < $_size1489; ++$_i1493) + $_size1496 = 0; + $_etype1499 = 0; + $xfer += $input->readListBegin($_etype1499, $_size1496); + for ($_i1500 = 0; $_i1500 < $_size1496; ++$_i1500) { - $elem1494 = null; - $elem1494 = new \metastore\HiveObjectPrivilege(); - $xfer += $elem1494->read($input); - $this->success []= $elem1494; + $elem1501 = null; + $elem1501 = new \metastore\HiveObjectPrivilege(); + $xfer += $elem1501->read($input); + $this->success []= $elem1501; } $xfer += $input->readListEnd(); } else { @@ -47477,9 +48002,9 @@ class ThriftHiveMetastore_list_privileges_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1495) + foreach ($this->success as $iter1502) { - $xfer += $iter1495->write($output); + $xfer += $iter1502->write($output); } } $output->writeListEnd(); @@ -48347,14 +48872,14 @@ class ThriftHiveMetastore_set_ugi_args { case 2: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1496 = 0; - $_etype1499 = 0; - $xfer += $input->readListBegin($_etype1499, $_size1496); - for ($_i1500 = 0; $_i1500 < $_size1496; ++$_i1500) + $_size1503 = 0; + $_etype1506 = 0; + $xfer += $input->readListBegin($_etype1506, $_size1503); + for ($_i1507 = 0; $_i1507 < 
$_size1503; ++$_i1507) { - $elem1501 = null; - $xfer += $input->readString($elem1501); - $this->group_names []= $elem1501; + $elem1508 = null; + $xfer += $input->readString($elem1508); + $this->group_names []= $elem1508; } $xfer += $input->readListEnd(); } else { @@ -48387,9 +48912,9 @@ class ThriftHiveMetastore_set_ugi_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1502) + foreach ($this->group_names as $iter1509) { - $xfer += $output->writeString($iter1502); + $xfer += $output->writeString($iter1509); } } $output->writeListEnd(); @@ -48465,14 +48990,14 @@ class ThriftHiveMetastore_set_ugi_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1503 = 0; - $_etype1506 = 0; - $xfer += $input->readListBegin($_etype1506, $_size1503); - for ($_i1507 = 0; $_i1507 < $_size1503; ++$_i1507) + $_size1510 = 0; + $_etype1513 = 0; + $xfer += $input->readListBegin($_etype1513, $_size1510); + for ($_i1514 = 0; $_i1514 < $_size1510; ++$_i1514) { - $elem1508 = null; - $xfer += $input->readString($elem1508); - $this->success []= $elem1508; + $elem1515 = null; + $xfer += $input->readString($elem1515); + $this->success []= $elem1515; } $xfer += $input->readListEnd(); } else { @@ -48508,9 +49033,9 @@ class ThriftHiveMetastore_set_ugi_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1509) + foreach ($this->success as $iter1516) { - $xfer += $output->writeString($iter1509); + $xfer += $output->writeString($iter1516); } } $output->writeListEnd(); @@ -49627,14 +50152,14 @@ class ThriftHiveMetastore_get_all_token_identifiers_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1510 = 0; - $_etype1513 = 0; - $xfer += $input->readListBegin($_etype1513, $_size1510); - for ($_i1514 = 0; $_i1514 < $_size1510; ++$_i1514) + $_size1517 = 0; + $_etype1520 = 0; + $xfer += $input->readListBegin($_etype1520, $_size1517); + for ($_i1521 = 0; $_i1521 < $_size1517; ++$_i1521) { - $elem1515 = null; - $xfer += $input->readString($elem1515); - $this->success []= $elem1515; + $elem1522 = null; + $xfer += $input->readString($elem1522); + $this->success []= $elem1522; } $xfer += $input->readListEnd(); } else { @@ -49662,9 +50187,9 @@ class ThriftHiveMetastore_get_all_token_identifiers_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1516) + foreach ($this->success as $iter1523) { - $xfer += $output->writeString($iter1516); + $xfer += $output->writeString($iter1523); } } $output->writeListEnd(); @@ -50303,14 +50828,14 @@ class ThriftHiveMetastore_get_master_keys_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1517 = 0; - $_etype1520 = 0; - $xfer += $input->readListBegin($_etype1520, $_size1517); - for ($_i1521 = 0; $_i1521 < $_size1517; ++$_i1521) + $_size1524 = 0; + $_etype1527 = 0; + $xfer += $input->readListBegin($_etype1527, $_size1524); + for ($_i1528 = 0; $_i1528 < $_size1524; ++$_i1528) { - $elem1522 = null; - $xfer += $input->readString($elem1522); - $this->success []= $elem1522; + $elem1529 = null; + $xfer += $input->readString($elem1529); + $this->success []= $elem1529; } $xfer += $input->readListEnd(); } else { @@ -50338,9 +50863,9 @@ class ThriftHiveMetastore_get_master_keys_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1523) + foreach ($this->success as $iter1530) { - $xfer += 
$output->writeString($iter1523); + $xfer += $output->writeString($iter1530); } } $output->writeListEnd(); @@ -54094,14 +54619,14 @@ class ThriftHiveMetastore_find_columns_with_stats_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1524 = 0; - $_etype1527 = 0; - $xfer += $input->readListBegin($_etype1527, $_size1524); - for ($_i1528 = 0; $_i1528 < $_size1524; ++$_i1528) + $_size1531 = 0; + $_etype1534 = 0; + $xfer += $input->readListBegin($_etype1534, $_size1531); + for ($_i1535 = 0; $_i1535 < $_size1531; ++$_i1535) { - $elem1529 = null; - $xfer += $input->readString($elem1529); - $this->success []= $elem1529; + $elem1536 = null; + $xfer += $input->readString($elem1536); + $this->success []= $elem1536; } $xfer += $input->readListEnd(); } else { @@ -54129,9 +54654,9 @@ class ThriftHiveMetastore_find_columns_with_stats_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1530) + foreach ($this->success as $iter1537) { - $xfer += $output->writeString($iter1530); + $xfer += $output->writeString($iter1537); } } $output->writeListEnd(); @@ -62302,15 +62827,15 @@ class ThriftHiveMetastore_get_schema_all_versions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1531 = 0; - $_etype1534 = 0; - $xfer += $input->readListBegin($_etype1534, $_size1531); - for ($_i1535 = 0; $_i1535 < $_size1531; ++$_i1535) + $_size1538 = 0; + $_etype1541 = 0; + $xfer += $input->readListBegin($_etype1541, $_size1538); + for ($_i1542 = 0; $_i1542 < $_size1538; ++$_i1542) { - $elem1536 = null; - $elem1536 = new \metastore\SchemaVersion(); - $xfer += $elem1536->read($input); - $this->success []= $elem1536; + $elem1543 = null; + $elem1543 = new \metastore\SchemaVersion(); + $xfer += $elem1543->read($input); + $this->success []= $elem1543; } $xfer += $input->readListEnd(); } else { @@ -62354,9 +62879,9 @@ class ThriftHiveMetastore_get_schema_all_versions_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1537) + foreach ($this->success as $iter1544) { - $xfer += $iter1537->write($output); + $xfer += $iter1544->write($output); } } $output->writeListEnd(); @@ -64225,15 +64750,15 @@ class ThriftHiveMetastore_get_runtime_stats_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1538 = 0; - $_etype1541 = 0; - $xfer += $input->readListBegin($_etype1541, $_size1538); - for ($_i1542 = 0; $_i1542 < $_size1538; ++$_i1542) + $_size1545 = 0; + $_etype1548 = 0; + $xfer += $input->readListBegin($_etype1548, $_size1545); + for ($_i1549 = 0; $_i1549 < $_size1545; ++$_i1549) { - $elem1543 = null; - $elem1543 = new \metastore\RuntimeStat(); - $xfer += $elem1543->read($input); - $this->success []= $elem1543; + $elem1550 = null; + $elem1550 = new \metastore\RuntimeStat(); + $xfer += $elem1550->read($input); + $this->success []= $elem1550; } $xfer += $input->readListEnd(); } else { @@ -64269,9 +64794,9 @@ class ThriftHiveMetastore_get_runtime_stats_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1544) + foreach ($this->success as $iter1551) { - $xfer += $iter1544->write($output); + $xfer += $iter1551->write($output); } } $output->writeListEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php index 6e1d41b8ae..fbe8192038 100644 --- 
a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php @@ -13893,6 +13893,10 @@ class PartitionsByExprRequest { * @var string */ public $catName = null; + /** + * @var string + */ + public $validWriteIdList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -13921,6 +13925,10 @@ class PartitionsByExprRequest { 'var' => 'catName', 'type' => TType::STRING, ), + 7 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -13942,6 +13950,9 @@ class PartitionsByExprRequest { if (isset($vals['catName'])) { $this->catName = $vals['catName']; } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } } } @@ -14006,6 +14017,13 @@ class PartitionsByExprRequest { $xfer += $input->skip($ftype); } break; + case 7: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -14049,6 +14067,11 @@ class PartitionsByExprRequest { $xfer += $output->writeString($this->catName); $xfer += $output->writeFieldEnd(); } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 7); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -15802,6 +15825,10 @@ class PartitionValuesRequest { * @var string */ public $catName = null; + /** + * @var string + */ + public $validWriteIdList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -15852,6 +15879,10 @@ class PartitionValuesRequest { 'var' => 'catName', 'type' => TType::STRING, ), + 10 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -15882,6 +15913,9 @@ class PartitionValuesRequest { if (isset($vals['catName'])) { $this->catName = $vals['catName']; } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } } } @@ -15989,6 +16023,13 @@ class PartitionValuesRequest { $xfer += $input->skip($ftype); } break; + case 10: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -16071,6 +16112,11 @@ class PartitionValuesRequest { $xfer += $output->writeString($this->catName); $xfer += $output->writeFieldEnd(); } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 10); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -16309,6 +16355,10 @@ class GetPartitionsByNamesRequest { * @var string */ public $processorIdentifier = null; + /** + * @var string + */ + public $validWriteIdList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -16345,6 +16395,10 @@ class GetPartitionsByNamesRequest { 'var' => 'processorIdentifier', 'type' => TType::STRING, ), + 7 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -16366,6 +16420,9 @@ class GetPartitionsByNamesRequest { if 
(isset($vals['processorIdentifier'])) { $this->processorIdentifier = $vals['processorIdentifier']; } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } } } @@ -16450,6 +16507,13 @@ class GetPartitionsByNamesRequest { $xfer += $input->skip($ftype); } break; + case 7: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -16517,6 +16581,11 @@ class GetPartitionsByNamesRequest { $xfer += $output->writeString($this->processorIdentifier); $xfer += $output->writeFieldEnd(); } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 7); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -19394,6 +19463,207 @@ class TableValidWriteIds { } +class TableWriteId { + static $_TSPEC; + + /** + * @var string + */ + public $fullTableName = null; + /** + * @var int + */ + public $writeId = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'fullTableName', + 'type' => TType::STRING, + ), + 2 => array( + 'var' => 'writeId', + 'type' => TType::I64, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['fullTableName'])) { + $this->fullTableName = $vals['fullTableName']; + } + if (isset($vals['writeId'])) { + $this->writeId = $vals['writeId']; + } + } + } + + public function getName() { + return 'TableWriteId'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->fullTableName); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->writeId); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('TableWriteId'); + if ($this->fullTableName !== null) { + $xfer += $output->writeFieldBegin('fullTableName', TType::STRING, 1); + $xfer += $output->writeString($this->fullTableName); + $xfer += $output->writeFieldEnd(); + } + if ($this->writeId !== null) { + $xfer += $output->writeFieldBegin('writeId', TType::I64, 2); + $xfer += $output->writeI64($this->writeId); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class GetTxnTableWriteIdsResponse { + static $_TSPEC; + + /** + * @var \metastore\TableWriteId[] + */ + public $tableWriteIds = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'tableWriteIds', + 'type' => TType::LST, + 'etype' => TType::STRUCT, + 'elem' => array( + 'type' => TType::STRUCT, + 'class' => '\metastore\TableWriteId', + ), + ), + ); + } + if (is_array($vals)) { + if (isset($vals['tableWriteIds'])) { + $this->tableWriteIds = $vals['tableWriteIds']; 
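// [Aside] TableWriteId and GetTxnTableWriteIdsResponse, whose generated body
// completes just below, are the only brand-new classes in this stretch of
// Types.php; the remaining Types.php hunks either add the optional
// validWriteIdList string field to existing request structs or renumber the
// size/etype/elem/iter temporaries that the Thrift generator re-emits whenever
// the IDL changes. A minimal consumption sketch, assuming $resp is a
// GetTxnTableWriteIdsResponse already deserialized from the wire (variable
// names here are illustrative, not part of the patch):
//
//   $writeIdByTable = array();
//   foreach ($resp->tableWriteIds as $tw) {
//     // $tw is a \metastore\TableWriteId: field 1 is the fully qualified
//     // table name (string), field 2 the allocated write ID (i64).
//     $writeIdByTable[$tw->fullTableName] = $tw->writeId;
//   }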
+ } + } + } + + public function getName() { + return 'GetTxnTableWriteIdsResponse'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::LST) { + $this->tableWriteIds = array(); + $_size608 = 0; + $_etype611 = 0; + $xfer += $input->readListBegin($_etype611, $_size608); + for ($_i612 = 0; $_i612 < $_size608; ++$_i612) + { + $elem613 = null; + $elem613 = new \metastore\TableWriteId(); + $xfer += $elem613->read($input); + $this->tableWriteIds []= $elem613; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('GetTxnTableWriteIdsResponse'); + if ($this->tableWriteIds !== null) { + if (!is_array($this->tableWriteIds)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('tableWriteIds', TType::LST, 1); + { + $output->writeListBegin(TType::STRUCT, count($this->tableWriteIds)); + { + foreach ($this->tableWriteIds as $iter614) + { + $xfer += $iter614->write($output); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + class GetValidWriteIdsResponse { static $_TSPEC; @@ -19445,15 +19715,15 @@ class GetValidWriteIdsResponse { case 1: if ($ftype == TType::LST) { $this->tblValidWriteIds = array(); - $_size608 = 0; - $_etype611 = 0; - $xfer += $input->readListBegin($_etype611, $_size608); - for ($_i612 = 0; $_i612 < $_size608; ++$_i612) + $_size615 = 0; + $_etype618 = 0; + $xfer += $input->readListBegin($_etype618, $_size615); + for ($_i619 = 0; $_i619 < $_size615; ++$_i619) { - $elem613 = null; - $elem613 = new \metastore\TableValidWriteIds(); - $xfer += $elem613->read($input); - $this->tblValidWriteIds []= $elem613; + $elem620 = null; + $elem620 = new \metastore\TableValidWriteIds(); + $xfer += $elem620->read($input); + $this->tblValidWriteIds []= $elem620; } $xfer += $input->readListEnd(); } else { @@ -19481,9 +19751,9 @@ class GetValidWriteIdsResponse { { $output->writeListBegin(TType::STRUCT, count($this->tblValidWriteIds)); { - foreach ($this->tblValidWriteIds as $iter614) + foreach ($this->tblValidWriteIds as $iter621) { - $xfer += $iter614->write($output); + $xfer += $iter621->write($output); } } $output->writeListEnd(); @@ -19708,14 +19978,14 @@ class AllocateTableWriteIdsRequest { case 3: if ($ftype == TType::LST) { $this->txnIds = array(); - $_size615 = 0; - $_etype618 = 0; - $xfer += $input->readListBegin($_etype618, $_size615); - for ($_i619 = 0; $_i619 < $_size615; ++$_i619) + $_size622 = 0; + $_etype625 = 0; + $xfer += $input->readListBegin($_etype625, $_size622); + for ($_i626 = 0; $_i626 < $_size622; ++$_i626) { - $elem620 = null; - $xfer += $input->readI64($elem620); - $this->txnIds []= $elem620; + $elem627 = null; + $xfer += $input->readI64($elem627); + $this->txnIds []= $elem627; } $xfer += $input->readListEnd(); } else { @@ -19732,15 +20002,15 @@ class AllocateTableWriteIdsRequest { case 5: if ($ftype == TType::LST) { 
$this->srcTxnToWriteIdList = array(); - $_size621 = 0; - $_etype624 = 0; - $xfer += $input->readListBegin($_etype624, $_size621); - for ($_i625 = 0; $_i625 < $_size621; ++$_i625) + $_size628 = 0; + $_etype631 = 0; + $xfer += $input->readListBegin($_etype631, $_size628); + for ($_i632 = 0; $_i632 < $_size628; ++$_i632) { - $elem626 = null; - $elem626 = new \metastore\TxnToWriteId(); - $xfer += $elem626->read($input); - $this->srcTxnToWriteIdList []= $elem626; + $elem633 = null; + $elem633 = new \metastore\TxnToWriteId(); + $xfer += $elem633->read($input); + $this->srcTxnToWriteIdList []= $elem633; } $xfer += $input->readListEnd(); } else { @@ -19778,9 +20048,9 @@ class AllocateTableWriteIdsRequest { { $output->writeListBegin(TType::I64, count($this->txnIds)); { - foreach ($this->txnIds as $iter627) + foreach ($this->txnIds as $iter634) { - $xfer += $output->writeI64($iter627); + $xfer += $output->writeI64($iter634); } } $output->writeListEnd(); @@ -19800,9 +20070,9 @@ class AllocateTableWriteIdsRequest { { $output->writeListBegin(TType::STRUCT, count($this->srcTxnToWriteIdList)); { - foreach ($this->srcTxnToWriteIdList as $iter628) + foreach ($this->srcTxnToWriteIdList as $iter635) { - $xfer += $iter628->write($output); + $xfer += $iter635->write($output); } } $output->writeListEnd(); @@ -19867,15 +20137,15 @@ class AllocateTableWriteIdsResponse { case 1: if ($ftype == TType::LST) { $this->txnToWriteIds = array(); - $_size629 = 0; - $_etype632 = 0; - $xfer += $input->readListBegin($_etype632, $_size629); - for ($_i633 = 0; $_i633 < $_size629; ++$_i633) + $_size636 = 0; + $_etype639 = 0; + $xfer += $input->readListBegin($_etype639, $_size636); + for ($_i640 = 0; $_i640 < $_size636; ++$_i640) { - $elem634 = null; - $elem634 = new \metastore\TxnToWriteId(); - $xfer += $elem634->read($input); - $this->txnToWriteIds []= $elem634; + $elem641 = null; + $elem641 = new \metastore\TxnToWriteId(); + $xfer += $elem641->read($input); + $this->txnToWriteIds []= $elem641; } $xfer += $input->readListEnd(); } else { @@ -19903,9 +20173,9 @@ class AllocateTableWriteIdsResponse { { $output->writeListBegin(TType::STRUCT, count($this->txnToWriteIds)); { - foreach ($this->txnToWriteIds as $iter635) + foreach ($this->txnToWriteIds as $iter642) { - $xfer += $iter635->write($output); + $xfer += $iter642->write($output); } } $output->writeListEnd(); @@ -20250,15 +20520,15 @@ class LockRequest { case 1: if ($ftype == TType::LST) { $this->component = array(); - $_size636 = 0; - $_etype639 = 0; - $xfer += $input->readListBegin($_etype639, $_size636); - for ($_i640 = 0; $_i640 < $_size636; ++$_i640) + $_size643 = 0; + $_etype646 = 0; + $xfer += $input->readListBegin($_etype646, $_size643); + for ($_i647 = 0; $_i647 < $_size643; ++$_i647) { - $elem641 = null; - $elem641 = new \metastore\LockComponent(); - $xfer += $elem641->read($input); - $this->component []= $elem641; + $elem648 = null; + $elem648 = new \metastore\LockComponent(); + $xfer += $elem648->read($input); + $this->component []= $elem648; } $xfer += $input->readListEnd(); } else { @@ -20314,9 +20584,9 @@ class LockRequest { { $output->writeListBegin(TType::STRUCT, count($this->component)); { - foreach ($this->component as $iter642) + foreach ($this->component as $iter649) { - $xfer += $iter642->write($output); + $xfer += $iter649->write($output); } } $output->writeListEnd(); @@ -21259,15 +21529,15 @@ class ShowLocksResponse { case 1: if ($ftype == TType::LST) { $this->locks = array(); - $_size643 = 0; - $_etype646 = 0; - $xfer += 
$input->readListBegin($_etype646, $_size643); - for ($_i647 = 0; $_i647 < $_size643; ++$_i647) + $_size650 = 0; + $_etype653 = 0; + $xfer += $input->readListBegin($_etype653, $_size650); + for ($_i654 = 0; $_i654 < $_size650; ++$_i654) { - $elem648 = null; - $elem648 = new \metastore\ShowLocksResponseElement(); - $xfer += $elem648->read($input); - $this->locks []= $elem648; + $elem655 = null; + $elem655 = new \metastore\ShowLocksResponseElement(); + $xfer += $elem655->read($input); + $this->locks []= $elem655; } $xfer += $input->readListEnd(); } else { @@ -21295,9 +21565,9 @@ class ShowLocksResponse { { $output->writeListBegin(TType::STRUCT, count($this->locks)); { - foreach ($this->locks as $iter649) + foreach ($this->locks as $iter656) { - $xfer += $iter649->write($output); + $xfer += $iter656->write($output); } } $output->writeListEnd(); @@ -21572,17 +21842,17 @@ class HeartbeatTxnRangeResponse { case 1: if ($ftype == TType::SET) { $this->aborted = array(); - $_size650 = 0; - $_etype653 = 0; - $xfer += $input->readSetBegin($_etype653, $_size650); - for ($_i654 = 0; $_i654 < $_size650; ++$_i654) + $_size657 = 0; + $_etype660 = 0; + $xfer += $input->readSetBegin($_etype660, $_size657); + for ($_i661 = 0; $_i661 < $_size657; ++$_i661) { - $elem655 = null; - $xfer += $input->readI64($elem655); - if (is_scalar($elem655)) { - $this->aborted[$elem655] = true; + $elem662 = null; + $xfer += $input->readI64($elem662); + if (is_scalar($elem662)) { + $this->aborted[$elem662] = true; } else { - $this->aborted []= $elem655; + $this->aborted []= $elem662; } } $xfer += $input->readSetEnd(); @@ -21593,17 +21863,17 @@ class HeartbeatTxnRangeResponse { case 2: if ($ftype == TType::SET) { $this->nosuch = array(); - $_size656 = 0; - $_etype659 = 0; - $xfer += $input->readSetBegin($_etype659, $_size656); - for ($_i660 = 0; $_i660 < $_size656; ++$_i660) + $_size663 = 0; + $_etype666 = 0; + $xfer += $input->readSetBegin($_etype666, $_size663); + for ($_i667 = 0; $_i667 < $_size663; ++$_i667) { - $elem661 = null; - $xfer += $input->readI64($elem661); - if (is_scalar($elem661)) { - $this->nosuch[$elem661] = true; + $elem668 = null; + $xfer += $input->readI64($elem668); + if (is_scalar($elem668)) { + $this->nosuch[$elem668] = true; } else { - $this->nosuch []= $elem661; + $this->nosuch []= $elem668; } } $xfer += $input->readSetEnd(); @@ -21632,12 +21902,12 @@ class HeartbeatTxnRangeResponse { { $output->writeSetBegin(TType::I64, count($this->aborted)); { - foreach ($this->aborted as $iter662 => $iter663) + foreach ($this->aborted as $iter669 => $iter670) { - if (is_scalar($iter663)) { - $xfer += $output->writeI64($iter662); + if (is_scalar($iter670)) { + $xfer += $output->writeI64($iter669); } else { - $xfer += $output->writeI64($iter663); + $xfer += $output->writeI64($iter670); } } } @@ -21653,12 +21923,12 @@ class HeartbeatTxnRangeResponse { { $output->writeSetBegin(TType::I64, count($this->nosuch)); { - foreach ($this->nosuch as $iter664 => $iter665) + foreach ($this->nosuch as $iter671 => $iter672) { - if (is_scalar($iter665)) { - $xfer += $output->writeI64($iter664); + if (is_scalar($iter672)) { + $xfer += $output->writeI64($iter671); } else { - $xfer += $output->writeI64($iter665); + $xfer += $output->writeI64($iter672); } } } @@ -21817,17 +22087,17 @@ class CompactionRequest { case 6: if ($ftype == TType::MAP) { $this->properties = array(); - $_size666 = 0; - $_ktype667 = 0; - $_vtype668 = 0; - $xfer += $input->readMapBegin($_ktype667, $_vtype668, $_size666); - for ($_i670 = 0; $_i670 < $_size666; 
++$_i670) + $_size673 = 0; + $_ktype674 = 0; + $_vtype675 = 0; + $xfer += $input->readMapBegin($_ktype674, $_vtype675, $_size673); + for ($_i677 = 0; $_i677 < $_size673; ++$_i677) { - $key671 = ''; - $val672 = ''; - $xfer += $input->readString($key671); - $xfer += $input->readString($val672); - $this->properties[$key671] = $val672; + $key678 = ''; + $val679 = ''; + $xfer += $input->readString($key678); + $xfer += $input->readString($val679); + $this->properties[$key678] = $val679; } $xfer += $input->readMapEnd(); } else { @@ -21880,10 +22150,10 @@ class CompactionRequest { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->properties)); { - foreach ($this->properties as $kiter673 => $viter674) + foreach ($this->properties as $kiter680 => $viter681) { - $xfer += $output->writeString($kiter673); - $xfer += $output->writeString($viter674); + $xfer += $output->writeString($kiter680); + $xfer += $output->writeString($viter681); } } $output->writeMapEnd(); @@ -22878,15 +23148,15 @@ class ShowCompactResponse { case 1: if ($ftype == TType::LST) { $this->compacts = array(); - $_size675 = 0; - $_etype678 = 0; - $xfer += $input->readListBegin($_etype678, $_size675); - for ($_i679 = 0; $_i679 < $_size675; ++$_i679) + $_size682 = 0; + $_etype685 = 0; + $xfer += $input->readListBegin($_etype685, $_size682); + for ($_i686 = 0; $_i686 < $_size682; ++$_i686) { - $elem680 = null; - $elem680 = new \metastore\ShowCompactResponseElement(); - $xfer += $elem680->read($input); - $this->compacts []= $elem680; + $elem687 = null; + $elem687 = new \metastore\ShowCompactResponseElement(); + $xfer += $elem687->read($input); + $this->compacts []= $elem687; } $xfer += $input->readListEnd(); } else { @@ -22914,9 +23184,9 @@ class ShowCompactResponse { { $output->writeListBegin(TType::STRUCT, count($this->compacts)); { - foreach ($this->compacts as $iter681) + foreach ($this->compacts as $iter688) { - $xfer += $iter681->write($output); + $xfer += $iter688->write($output); } } $output->writeListEnd(); @@ -23063,14 +23333,14 @@ class AddDynamicPartitions { case 5: if ($ftype == TType::LST) { $this->partitionnames = array(); - $_size682 = 0; - $_etype685 = 0; - $xfer += $input->readListBegin($_etype685, $_size682); - for ($_i686 = 0; $_i686 < $_size682; ++$_i686) + $_size689 = 0; + $_etype692 = 0; + $xfer += $input->readListBegin($_etype692, $_size689); + for ($_i693 = 0; $_i693 < $_size689; ++$_i693) { - $elem687 = null; - $xfer += $input->readString($elem687); - $this->partitionnames []= $elem687; + $elem694 = null; + $xfer += $input->readString($elem694); + $this->partitionnames []= $elem694; } $xfer += $input->readListEnd(); } else { @@ -23125,9 +23395,9 @@ class AddDynamicPartitions { { $output->writeListBegin(TType::STRING, count($this->partitionnames)); { - foreach ($this->partitionnames as $iter688) + foreach ($this->partitionnames as $iter695) { - $xfer += $output->writeString($iter688); + $xfer += $output->writeString($iter695); } } $output->writeListEnd(); @@ -23422,14 +23692,14 @@ class NotificationEventRequest { case 3: if ($ftype == TType::LST) { $this->eventTypeSkipList = array(); - $_size689 = 0; - $_etype692 = 0; - $xfer += $input->readListBegin($_etype692, $_size689); - for ($_i693 = 0; $_i693 < $_size689; ++$_i693) - { - $elem694 = null; - $xfer += $input->readString($elem694); - $this->eventTypeSkipList []= $elem694; + $_size696 = 0; + $_etype699 = 0; + $xfer += $input->readListBegin($_etype699, $_size696); + for ($_i700 = 0; $_i700 < $_size696; ++$_i700) + { + $elem701 = null; + $xfer += 
$input->readString($elem701); + $this->eventTypeSkipList []= $elem701; } $xfer += $input->readListEnd(); } else { @@ -23467,9 +23737,9 @@ class NotificationEventRequest { { $output->writeListBegin(TType::STRING, count($this->eventTypeSkipList)); { - foreach ($this->eventTypeSkipList as $iter695) + foreach ($this->eventTypeSkipList as $iter702) { - $xfer += $output->writeString($iter695); + $xfer += $output->writeString($iter702); } } $output->writeListEnd(); @@ -23770,15 +24040,15 @@ class NotificationEventResponse { case 1: if ($ftype == TType::LST) { $this->events = array(); - $_size696 = 0; - $_etype699 = 0; - $xfer += $input->readListBegin($_etype699, $_size696); - for ($_i700 = 0; $_i700 < $_size696; ++$_i700) + $_size703 = 0; + $_etype706 = 0; + $xfer += $input->readListBegin($_etype706, $_size703); + for ($_i707 = 0; $_i707 < $_size703; ++$_i707) { - $elem701 = null; - $elem701 = new \metastore\NotificationEvent(); - $xfer += $elem701->read($input); - $this->events []= $elem701; + $elem708 = null; + $elem708 = new \metastore\NotificationEvent(); + $xfer += $elem708->read($input); + $this->events []= $elem708; } $xfer += $input->readListEnd(); } else { @@ -23806,9 +24076,9 @@ class NotificationEventResponse { { $output->writeListBegin(TType::STRUCT, count($this->events)); { - foreach ($this->events as $iter702) + foreach ($this->events as $iter709) { - $xfer += $iter702->write($output); + $xfer += $iter709->write($output); } } $output->writeListEnd(); @@ -24237,14 +24507,14 @@ class InsertEventRequestData { case 2: if ($ftype == TType::LST) { $this->filesAdded = array(); - $_size703 = 0; - $_etype706 = 0; - $xfer += $input->readListBegin($_etype706, $_size703); - for ($_i707 = 0; $_i707 < $_size703; ++$_i707) + $_size710 = 0; + $_etype713 = 0; + $xfer += $input->readListBegin($_etype713, $_size710); + for ($_i714 = 0; $_i714 < $_size710; ++$_i714) { - $elem708 = null; - $xfer += $input->readString($elem708); - $this->filesAdded []= $elem708; + $elem715 = null; + $xfer += $input->readString($elem715); + $this->filesAdded []= $elem715; } $xfer += $input->readListEnd(); } else { @@ -24254,14 +24524,14 @@ class InsertEventRequestData { case 3: if ($ftype == TType::LST) { $this->filesAddedChecksum = array(); - $_size709 = 0; - $_etype712 = 0; - $xfer += $input->readListBegin($_etype712, $_size709); - for ($_i713 = 0; $_i713 < $_size709; ++$_i713) + $_size716 = 0; + $_etype719 = 0; + $xfer += $input->readListBegin($_etype719, $_size716); + for ($_i720 = 0; $_i720 < $_size716; ++$_i720) { - $elem714 = null; - $xfer += $input->readString($elem714); - $this->filesAddedChecksum []= $elem714; + $elem721 = null; + $xfer += $input->readString($elem721); + $this->filesAddedChecksum []= $elem721; } $xfer += $input->readListEnd(); } else { @@ -24271,14 +24541,14 @@ class InsertEventRequestData { case 4: if ($ftype == TType::LST) { $this->subDirectoryList = array(); - $_size715 = 0; - $_etype718 = 0; - $xfer += $input->readListBegin($_etype718, $_size715); - for ($_i719 = 0; $_i719 < $_size715; ++$_i719) + $_size722 = 0; + $_etype725 = 0; + $xfer += $input->readListBegin($_etype725, $_size722); + for ($_i726 = 0; $_i726 < $_size722; ++$_i726) { - $elem720 = null; - $xfer += $input->readString($elem720); - $this->subDirectoryList []= $elem720; + $elem727 = null; + $xfer += $input->readString($elem727); + $this->subDirectoryList []= $elem727; } $xfer += $input->readListEnd(); } else { @@ -24311,9 +24581,9 @@ class InsertEventRequestData { { $output->writeListBegin(TType::STRING, 
count($this->filesAdded)); { - foreach ($this->filesAdded as $iter721) + foreach ($this->filesAdded as $iter728) { - $xfer += $output->writeString($iter721); + $xfer += $output->writeString($iter728); } } $output->writeListEnd(); @@ -24328,9 +24598,9 @@ class InsertEventRequestData { { $output->writeListBegin(TType::STRING, count($this->filesAddedChecksum)); { - foreach ($this->filesAddedChecksum as $iter722) + foreach ($this->filesAddedChecksum as $iter729) { - $xfer += $output->writeString($iter722); + $xfer += $output->writeString($iter729); } } $output->writeListEnd(); @@ -24345,9 +24615,9 @@ class InsertEventRequestData { { $output->writeListBegin(TType::STRING, count($this->subDirectoryList)); { - foreach ($this->subDirectoryList as $iter723) + foreach ($this->subDirectoryList as $iter730) { - $xfer += $output->writeString($iter723); + $xfer += $output->writeString($iter730); } } $output->writeListEnd(); @@ -24576,14 +24846,14 @@ class FireEventRequest { case 5: if ($ftype == TType::LST) { $this->partitionVals = array(); - $_size724 = 0; - $_etype727 = 0; - $xfer += $input->readListBegin($_etype727, $_size724); - for ($_i728 = 0; $_i728 < $_size724; ++$_i728) + $_size731 = 0; + $_etype734 = 0; + $xfer += $input->readListBegin($_etype734, $_size731); + for ($_i735 = 0; $_i735 < $_size731; ++$_i735) { - $elem729 = null; - $xfer += $input->readString($elem729); - $this->partitionVals []= $elem729; + $elem736 = null; + $xfer += $input->readString($elem736); + $this->partitionVals []= $elem736; } $xfer += $input->readListEnd(); } else { @@ -24641,9 +24911,9 @@ class FireEventRequest { { $output->writeListBegin(TType::STRING, count($this->partitionVals)); { - foreach ($this->partitionVals as $iter730) + foreach ($this->partitionVals as $iter737) { - $xfer += $output->writeString($iter730); + $xfer += $output->writeString($iter737); } } $output->writeListEnd(); @@ -24854,14 +25124,14 @@ class WriteNotificationLogRequest { case 6: if ($ftype == TType::LST) { $this->partitionVals = array(); - $_size731 = 0; - $_etype734 = 0; - $xfer += $input->readListBegin($_etype734, $_size731); - for ($_i735 = 0; $_i735 < $_size731; ++$_i735) + $_size738 = 0; + $_etype741 = 0; + $xfer += $input->readListBegin($_etype741, $_size738); + for ($_i742 = 0; $_i742 < $_size738; ++$_i742) { - $elem736 = null; - $xfer += $input->readString($elem736); - $this->partitionVals []= $elem736; + $elem743 = null; + $xfer += $input->readString($elem743); + $this->partitionVals []= $elem743; } $xfer += $input->readListEnd(); } else { @@ -24917,9 +25187,9 @@ class WriteNotificationLogRequest { { $output->writeListBegin(TType::STRING, count($this->partitionVals)); { - foreach ($this->partitionVals as $iter737) + foreach ($this->partitionVals as $iter744) { - $xfer += $output->writeString($iter737); + $xfer += $output->writeString($iter744); } } $output->writeListEnd(); @@ -25147,18 +25417,18 @@ class GetFileMetadataByExprResult { case 1: if ($ftype == TType::MAP) { $this->metadata = array(); - $_size738 = 0; - $_ktype739 = 0; - $_vtype740 = 0; - $xfer += $input->readMapBegin($_ktype739, $_vtype740, $_size738); - for ($_i742 = 0; $_i742 < $_size738; ++$_i742) + $_size745 = 0; + $_ktype746 = 0; + $_vtype747 = 0; + $xfer += $input->readMapBegin($_ktype746, $_vtype747, $_size745); + for ($_i749 = 0; $_i749 < $_size745; ++$_i749) { - $key743 = 0; - $val744 = new \metastore\MetadataPpdResult(); - $xfer += $input->readI64($key743); - $val744 = new \metastore\MetadataPpdResult(); - $xfer += $val744->read($input); - 
$this->metadata[$key743] = $val744; + $key750 = 0; + $val751 = new \metastore\MetadataPpdResult(); + $xfer += $input->readI64($key750); + $val751 = new \metastore\MetadataPpdResult(); + $xfer += $val751->read($input); + $this->metadata[$key750] = $val751; } $xfer += $input->readMapEnd(); } else { @@ -25193,10 +25463,10 @@ class GetFileMetadataByExprResult { { $output->writeMapBegin(TType::I64, TType::STRUCT, count($this->metadata)); { - foreach ($this->metadata as $kiter745 => $viter746) + foreach ($this->metadata as $kiter752 => $viter753) { - $xfer += $output->writeI64($kiter745); - $xfer += $viter746->write($output); + $xfer += $output->writeI64($kiter752); + $xfer += $viter753->write($output); } } $output->writeMapEnd(); @@ -25298,14 +25568,14 @@ class GetFileMetadataByExprRequest { case 1: if ($ftype == TType::LST) { $this->fileIds = array(); - $_size747 = 0; - $_etype750 = 0; - $xfer += $input->readListBegin($_etype750, $_size747); - for ($_i751 = 0; $_i751 < $_size747; ++$_i751) + $_size754 = 0; + $_etype757 = 0; + $xfer += $input->readListBegin($_etype757, $_size754); + for ($_i758 = 0; $_i758 < $_size754; ++$_i758) { - $elem752 = null; - $xfer += $input->readI64($elem752); - $this->fileIds []= $elem752; + $elem759 = null; + $xfer += $input->readI64($elem759); + $this->fileIds []= $elem759; } $xfer += $input->readListEnd(); } else { @@ -25354,9 +25624,9 @@ class GetFileMetadataByExprRequest { { $output->writeListBegin(TType::I64, count($this->fileIds)); { - foreach ($this->fileIds as $iter753) + foreach ($this->fileIds as $iter760) { - $xfer += $output->writeI64($iter753); + $xfer += $output->writeI64($iter760); } } $output->writeListEnd(); @@ -25450,17 +25720,17 @@ class GetFileMetadataResult { case 1: if ($ftype == TType::MAP) { $this->metadata = array(); - $_size754 = 0; - $_ktype755 = 0; - $_vtype756 = 0; - $xfer += $input->readMapBegin($_ktype755, $_vtype756, $_size754); - for ($_i758 = 0; $_i758 < $_size754; ++$_i758) + $_size761 = 0; + $_ktype762 = 0; + $_vtype763 = 0; + $xfer += $input->readMapBegin($_ktype762, $_vtype763, $_size761); + for ($_i765 = 0; $_i765 < $_size761; ++$_i765) { - $key759 = 0; - $val760 = ''; - $xfer += $input->readI64($key759); - $xfer += $input->readString($val760); - $this->metadata[$key759] = $val760; + $key766 = 0; + $val767 = ''; + $xfer += $input->readI64($key766); + $xfer += $input->readString($val767); + $this->metadata[$key766] = $val767; } $xfer += $input->readMapEnd(); } else { @@ -25495,10 +25765,10 @@ class GetFileMetadataResult { { $output->writeMapBegin(TType::I64, TType::STRING, count($this->metadata)); { - foreach ($this->metadata as $kiter761 => $viter762) + foreach ($this->metadata as $kiter768 => $viter769) { - $xfer += $output->writeI64($kiter761); - $xfer += $output->writeString($viter762); + $xfer += $output->writeI64($kiter768); + $xfer += $output->writeString($viter769); } } $output->writeMapEnd(); @@ -25567,14 +25837,14 @@ class GetFileMetadataRequest { case 1: if ($ftype == TType::LST) { $this->fileIds = array(); - $_size763 = 0; - $_etype766 = 0; - $xfer += $input->readListBegin($_etype766, $_size763); - for ($_i767 = 0; $_i767 < $_size763; ++$_i767) + $_size770 = 0; + $_etype773 = 0; + $xfer += $input->readListBegin($_etype773, $_size770); + for ($_i774 = 0; $_i774 < $_size770; ++$_i774) { - $elem768 = null; - $xfer += $input->readI64($elem768); - $this->fileIds []= $elem768; + $elem775 = null; + $xfer += $input->readI64($elem775); + $this->fileIds []= $elem775; } $xfer += $input->readListEnd(); } else { @@ -25602,9 
+25872,9 @@ class GetFileMetadataRequest { { $output->writeListBegin(TType::I64, count($this->fileIds)); { - foreach ($this->fileIds as $iter769) + foreach ($this->fileIds as $iter776) { - $xfer += $output->writeI64($iter769); + $xfer += $output->writeI64($iter776); } } $output->writeListEnd(); @@ -25744,14 +26014,14 @@ class PutFileMetadataRequest { case 1: if ($ftype == TType::LST) { $this->fileIds = array(); - $_size770 = 0; - $_etype773 = 0; - $xfer += $input->readListBegin($_etype773, $_size770); - for ($_i774 = 0; $_i774 < $_size770; ++$_i774) + $_size777 = 0; + $_etype780 = 0; + $xfer += $input->readListBegin($_etype780, $_size777); + for ($_i781 = 0; $_i781 < $_size777; ++$_i781) { - $elem775 = null; - $xfer += $input->readI64($elem775); - $this->fileIds []= $elem775; + $elem782 = null; + $xfer += $input->readI64($elem782); + $this->fileIds []= $elem782; } $xfer += $input->readListEnd(); } else { @@ -25761,14 +26031,14 @@ class PutFileMetadataRequest { case 2: if ($ftype == TType::LST) { $this->metadata = array(); - $_size776 = 0; - $_etype779 = 0; - $xfer += $input->readListBegin($_etype779, $_size776); - for ($_i780 = 0; $_i780 < $_size776; ++$_i780) + $_size783 = 0; + $_etype786 = 0; + $xfer += $input->readListBegin($_etype786, $_size783); + for ($_i787 = 0; $_i787 < $_size783; ++$_i787) { - $elem781 = null; - $xfer += $input->readString($elem781); - $this->metadata []= $elem781; + $elem788 = null; + $xfer += $input->readString($elem788); + $this->metadata []= $elem788; } $xfer += $input->readListEnd(); } else { @@ -25803,9 +26073,9 @@ class PutFileMetadataRequest { { $output->writeListBegin(TType::I64, count($this->fileIds)); { - foreach ($this->fileIds as $iter782) + foreach ($this->fileIds as $iter789) { - $xfer += $output->writeI64($iter782); + $xfer += $output->writeI64($iter789); } } $output->writeListEnd(); @@ -25820,9 +26090,9 @@ class PutFileMetadataRequest { { $output->writeListBegin(TType::STRING, count($this->metadata)); { - foreach ($this->metadata as $iter783) + foreach ($this->metadata as $iter790) { - $xfer += $output->writeString($iter783); + $xfer += $output->writeString($iter790); } } $output->writeListEnd(); @@ -25941,14 +26211,14 @@ class ClearFileMetadataRequest { case 1: if ($ftype == TType::LST) { $this->fileIds = array(); - $_size784 = 0; - $_etype787 = 0; - $xfer += $input->readListBegin($_etype787, $_size784); - for ($_i788 = 0; $_i788 < $_size784; ++$_i788) + $_size791 = 0; + $_etype794 = 0; + $xfer += $input->readListBegin($_etype794, $_size791); + for ($_i795 = 0; $_i795 < $_size791; ++$_i795) { - $elem789 = null; - $xfer += $input->readI64($elem789); - $this->fileIds []= $elem789; + $elem796 = null; + $xfer += $input->readI64($elem796); + $this->fileIds []= $elem796; } $xfer += $input->readListEnd(); } else { @@ -25976,9 +26246,9 @@ class ClearFileMetadataRequest { { $output->writeListBegin(TType::I64, count($this->fileIds)); { - foreach ($this->fileIds as $iter790) + foreach ($this->fileIds as $iter797) { - $xfer += $output->writeI64($iter790); + $xfer += $output->writeI64($iter797); } } $output->writeListEnd(); @@ -26262,15 +26532,15 @@ class GetAllFunctionsResponse { case 1: if ($ftype == TType::LST) { $this->functions = array(); - $_size791 = 0; - $_etype794 = 0; - $xfer += $input->readListBegin($_etype794, $_size791); - for ($_i795 = 0; $_i795 < $_size791; ++$_i795) + $_size798 = 0; + $_etype801 = 0; + $xfer += $input->readListBegin($_etype801, $_size798); + for ($_i802 = 0; $_i802 < $_size798; ++$_i802) { - $elem796 = null; - $elem796 = new 
\metastore\Function(); - $xfer += $elem796->read($input); - $this->functions []= $elem796; + $elem803 = null; + $elem803 = new \metastore\Function(); + $xfer += $elem803->read($input); + $this->functions []= $elem803; } $xfer += $input->readListEnd(); } else { @@ -26298,9 +26568,9 @@ class GetAllFunctionsResponse { { $output->writeListBegin(TType::STRUCT, count($this->functions)); { - foreach ($this->functions as $iter797) + foreach ($this->functions as $iter804) { - $xfer += $iter797->write($output); + $xfer += $iter804->write($output); } } $output->writeListEnd(); @@ -26364,14 +26634,14 @@ class ClientCapabilities { case 1: if ($ftype == TType::LST) { $this->values = array(); - $_size798 = 0; - $_etype801 = 0; - $xfer += $input->readListBegin($_etype801, $_size798); - for ($_i802 = 0; $_i802 < $_size798; ++$_i802) + $_size805 = 0; + $_etype808 = 0; + $xfer += $input->readListBegin($_etype808, $_size805); + for ($_i809 = 0; $_i809 < $_size805; ++$_i809) { - $elem803 = null; - $xfer += $input->readI32($elem803); - $this->values []= $elem803; + $elem810 = null; + $xfer += $input->readI32($elem810); + $this->values []= $elem810; } $xfer += $input->readListEnd(); } else { @@ -26399,9 +26669,9 @@ class ClientCapabilities { { $output->writeListBegin(TType::I32, count($this->values)); { - foreach ($this->values as $iter804) + foreach ($this->values as $iter811) { - $xfer += $output->writeI32($iter804); + $xfer += $output->writeI32($iter811); } } $output->writeListEnd(); @@ -26586,14 +26856,14 @@ class GetTableRequest { case 8: if ($ftype == TType::LST) { $this->processorCapabilities = array(); - $_size805 = 0; - $_etype808 = 0; - $xfer += $input->readListBegin($_etype808, $_size805); - for ($_i809 = 0; $_i809 < $_size805; ++$_i809) + $_size812 = 0; + $_etype815 = 0; + $xfer += $input->readListBegin($_etype815, $_size812); + for ($_i816 = 0; $_i816 < $_size812; ++$_i816) { - $elem810 = null; - $xfer += $input->readString($elem810); - $this->processorCapabilities []= $elem810; + $elem817 = null; + $xfer += $input->readString($elem817); + $this->processorCapabilities []= $elem817; } $xfer += $input->readListEnd(); } else { @@ -26661,9 +26931,9 @@ class GetTableRequest { { $output->writeListBegin(TType::STRING, count($this->processorCapabilities)); { - foreach ($this->processorCapabilities as $iter811) + foreach ($this->processorCapabilities as $iter818) { - $xfer += $output->writeString($iter811); + $xfer += $output->writeString($iter818); } } $output->writeListEnd(); @@ -26902,14 +27172,14 @@ class GetTablesRequest { case 2: if ($ftype == TType::LST) { $this->tblNames = array(); - $_size812 = 0; - $_etype815 = 0; - $xfer += $input->readListBegin($_etype815, $_size812); - for ($_i816 = 0; $_i816 < $_size812; ++$_i816) + $_size819 = 0; + $_etype822 = 0; + $xfer += $input->readListBegin($_etype822, $_size819); + for ($_i823 = 0; $_i823 < $_size819; ++$_i823) { - $elem817 = null; - $xfer += $input->readString($elem817); - $this->tblNames []= $elem817; + $elem824 = null; + $xfer += $input->readString($elem824); + $this->tblNames []= $elem824; } $xfer += $input->readListEnd(); } else { @@ -26934,14 +27204,14 @@ class GetTablesRequest { case 5: if ($ftype == TType::LST) { $this->processorCapabilities = array(); - $_size818 = 0; - $_etype821 = 0; - $xfer += $input->readListBegin($_etype821, $_size818); - for ($_i822 = 0; $_i822 < $_size818; ++$_i822) + $_size825 = 0; + $_etype828 = 0; + $xfer += $input->readListBegin($_etype828, $_size825); + for ($_i829 = 0; $_i829 < $_size825; ++$_i829) { - $elem823 = 
null; - $xfer += $input->readString($elem823); - $this->processorCapabilities []= $elem823; + $elem830 = null; + $xfer += $input->readString($elem830); + $this->processorCapabilities []= $elem830; } $xfer += $input->readListEnd(); } else { @@ -26981,9 +27251,9 @@ class GetTablesRequest { { $output->writeListBegin(TType::STRING, count($this->tblNames)); { - foreach ($this->tblNames as $iter824) + foreach ($this->tblNames as $iter831) { - $xfer += $output->writeString($iter824); + $xfer += $output->writeString($iter831); } } $output->writeListEnd(); @@ -27011,9 +27281,9 @@ class GetTablesRequest { { $output->writeListBegin(TType::STRING, count($this->processorCapabilities)); { - foreach ($this->processorCapabilities as $iter825) + foreach ($this->processorCapabilities as $iter832) { - $xfer += $output->writeString($iter825); + $xfer += $output->writeString($iter832); } } $output->writeListEnd(); @@ -27083,15 +27353,15 @@ class GetTablesResult { case 1: if ($ftype == TType::LST) { $this->tables = array(); - $_size826 = 0; - $_etype829 = 0; - $xfer += $input->readListBegin($_etype829, $_size826); - for ($_i830 = 0; $_i830 < $_size826; ++$_i830) + $_size833 = 0; + $_etype836 = 0; + $xfer += $input->readListBegin($_etype836, $_size833); + for ($_i837 = 0; $_i837 < $_size833; ++$_i837) { - $elem831 = null; - $elem831 = new \metastore\Table(); - $xfer += $elem831->read($input); - $this->tables []= $elem831; + $elem838 = null; + $elem838 = new \metastore\Table(); + $xfer += $elem838->read($input); + $this->tables []= $elem838; } $xfer += $input->readListEnd(); } else { @@ -27119,9 +27389,9 @@ class GetTablesResult { { $output->writeListBegin(TType::STRUCT, count($this->tables)); { - foreach ($this->tables as $iter832) + foreach ($this->tables as $iter839) { - $xfer += $iter832->write($output); + $xfer += $iter839->write($output); } } $output->writeListEnd(); @@ -27286,14 +27556,14 @@ class GetTablesExtRequest { case 6: if ($ftype == TType::LST) { $this->processorCapabilities = array(); - $_size833 = 0; - $_etype836 = 0; - $xfer += $input->readListBegin($_etype836, $_size833); - for ($_i837 = 0; $_i837 < $_size833; ++$_i837) + $_size840 = 0; + $_etype843 = 0; + $xfer += $input->readListBegin($_etype843, $_size840); + for ($_i844 = 0; $_i844 < $_size840; ++$_i844) { - $elem838 = null; - $xfer += $input->readString($elem838); - $this->processorCapabilities []= $elem838; + $elem845 = null; + $xfer += $input->readString($elem845); + $this->processorCapabilities []= $elem845; } $xfer += $input->readListEnd(); } else { @@ -27353,9 +27623,9 @@ class GetTablesExtRequest { { $output->writeListBegin(TType::STRING, count($this->processorCapabilities)); { - foreach ($this->processorCapabilities as $iter839) + foreach ($this->processorCapabilities as $iter846) { - $xfer += $output->writeString($iter839); + $xfer += $output->writeString($iter846); } } $output->writeListEnd(); @@ -27475,14 +27745,14 @@ class ExtendedTableInfo { case 3: if ($ftype == TType::LST) { $this->requiredReadCapabilities = array(); - $_size840 = 0; - $_etype843 = 0; - $xfer += $input->readListBegin($_etype843, $_size840); - for ($_i844 = 0; $_i844 < $_size840; ++$_i844) + $_size847 = 0; + $_etype850 = 0; + $xfer += $input->readListBegin($_etype850, $_size847); + for ($_i851 = 0; $_i851 < $_size847; ++$_i851) { - $elem845 = null; - $xfer += $input->readString($elem845); - $this->requiredReadCapabilities []= $elem845; + $elem852 = null; + $xfer += $input->readString($elem852); + $this->requiredReadCapabilities []= $elem852; } $xfer += 
$input->readListEnd(); } else { @@ -27492,14 +27762,14 @@ class ExtendedTableInfo { case 4: if ($ftype == TType::LST) { $this->requiredWriteCapabilities = array(); - $_size846 = 0; - $_etype849 = 0; - $xfer += $input->readListBegin($_etype849, $_size846); - for ($_i850 = 0; $_i850 < $_size846; ++$_i850) + $_size853 = 0; + $_etype856 = 0; + $xfer += $input->readListBegin($_etype856, $_size853); + for ($_i857 = 0; $_i857 < $_size853; ++$_i857) { - $elem851 = null; - $xfer += $input->readString($elem851); - $this->requiredWriteCapabilities []= $elem851; + $elem858 = null; + $xfer += $input->readString($elem858); + $this->requiredWriteCapabilities []= $elem858; } $xfer += $input->readListEnd(); } else { @@ -27537,9 +27807,9 @@ class ExtendedTableInfo { { $output->writeListBegin(TType::STRING, count($this->requiredReadCapabilities)); { - foreach ($this->requiredReadCapabilities as $iter852) + foreach ($this->requiredReadCapabilities as $iter859) { - $xfer += $output->writeString($iter852); + $xfer += $output->writeString($iter859); } } $output->writeListEnd(); @@ -27554,9 +27824,9 @@ class ExtendedTableInfo { { $output->writeListBegin(TType::STRING, count($this->requiredWriteCapabilities)); { - foreach ($this->requiredWriteCapabilities as $iter853) + foreach ($this->requiredWriteCapabilities as $iter860) { - $xfer += $output->writeString($iter853); + $xfer += $output->writeString($iter860); } } $output->writeListEnd(); @@ -29363,15 +29633,15 @@ class WMFullResourcePlan { case 2: if ($ftype == TType::LST) { $this->pools = array(); - $_size854 = 0; - $_etype857 = 0; - $xfer += $input->readListBegin($_etype857, $_size854); - for ($_i858 = 0; $_i858 < $_size854; ++$_i858) + $_size861 = 0; + $_etype864 = 0; + $xfer += $input->readListBegin($_etype864, $_size861); + for ($_i865 = 0; $_i865 < $_size861; ++$_i865) { - $elem859 = null; - $elem859 = new \metastore\WMPool(); - $xfer += $elem859->read($input); - $this->pools []= $elem859; + $elem866 = null; + $elem866 = new \metastore\WMPool(); + $xfer += $elem866->read($input); + $this->pools []= $elem866; } $xfer += $input->readListEnd(); } else { @@ -29381,15 +29651,15 @@ class WMFullResourcePlan { case 3: if ($ftype == TType::LST) { $this->mappings = array(); - $_size860 = 0; - $_etype863 = 0; - $xfer += $input->readListBegin($_etype863, $_size860); - for ($_i864 = 0; $_i864 < $_size860; ++$_i864) + $_size867 = 0; + $_etype870 = 0; + $xfer += $input->readListBegin($_etype870, $_size867); + for ($_i871 = 0; $_i871 < $_size867; ++$_i871) { - $elem865 = null; - $elem865 = new \metastore\WMMapping(); - $xfer += $elem865->read($input); - $this->mappings []= $elem865; + $elem872 = null; + $elem872 = new \metastore\WMMapping(); + $xfer += $elem872->read($input); + $this->mappings []= $elem872; } $xfer += $input->readListEnd(); } else { @@ -29399,15 +29669,15 @@ class WMFullResourcePlan { case 4: if ($ftype == TType::LST) { $this->triggers = array(); - $_size866 = 0; - $_etype869 = 0; - $xfer += $input->readListBegin($_etype869, $_size866); - for ($_i870 = 0; $_i870 < $_size866; ++$_i870) + $_size873 = 0; + $_etype876 = 0; + $xfer += $input->readListBegin($_etype876, $_size873); + for ($_i877 = 0; $_i877 < $_size873; ++$_i877) { - $elem871 = null; - $elem871 = new \metastore\WMTrigger(); - $xfer += $elem871->read($input); - $this->triggers []= $elem871; + $elem878 = null; + $elem878 = new \metastore\WMTrigger(); + $xfer += $elem878->read($input); + $this->triggers []= $elem878; } $xfer += $input->readListEnd(); } else { @@ -29417,15 +29687,15 @@ class 
WMFullResourcePlan { case 5: if ($ftype == TType::LST) { $this->poolTriggers = array(); - $_size872 = 0; - $_etype875 = 0; - $xfer += $input->readListBegin($_etype875, $_size872); - for ($_i876 = 0; $_i876 < $_size872; ++$_i876) + $_size879 = 0; + $_etype882 = 0; + $xfer += $input->readListBegin($_etype882, $_size879); + for ($_i883 = 0; $_i883 < $_size879; ++$_i883) { - $elem877 = null; - $elem877 = new \metastore\WMPoolTrigger(); - $xfer += $elem877->read($input); - $this->poolTriggers []= $elem877; + $elem884 = null; + $elem884 = new \metastore\WMPoolTrigger(); + $xfer += $elem884->read($input); + $this->poolTriggers []= $elem884; } $xfer += $input->readListEnd(); } else { @@ -29461,9 +29731,9 @@ class WMFullResourcePlan { { $output->writeListBegin(TType::STRUCT, count($this->pools)); { - foreach ($this->pools as $iter878) + foreach ($this->pools as $iter885) { - $xfer += $iter878->write($output); + $xfer += $iter885->write($output); } } $output->writeListEnd(); @@ -29478,9 +29748,9 @@ class WMFullResourcePlan { { $output->writeListBegin(TType::STRUCT, count($this->mappings)); { - foreach ($this->mappings as $iter879) + foreach ($this->mappings as $iter886) { - $xfer += $iter879->write($output); + $xfer += $iter886->write($output); } } $output->writeListEnd(); @@ -29495,9 +29765,9 @@ class WMFullResourcePlan { { $output->writeListBegin(TType::STRUCT, count($this->triggers)); { - foreach ($this->triggers as $iter880) + foreach ($this->triggers as $iter887) { - $xfer += $iter880->write($output); + $xfer += $iter887->write($output); } } $output->writeListEnd(); @@ -29512,9 +29782,9 @@ class WMFullResourcePlan { { $output->writeListBegin(TType::STRUCT, count($this->poolTriggers)); { - foreach ($this->poolTriggers as $iter881) + foreach ($this->poolTriggers as $iter888) { - $xfer += $iter881->write($output); + $xfer += $iter888->write($output); } } $output->writeListEnd(); @@ -30140,15 +30410,15 @@ class WMGetAllResourcePlanResponse { case 1: if ($ftype == TType::LST) { $this->resourcePlans = array(); - $_size882 = 0; - $_etype885 = 0; - $xfer += $input->readListBegin($_etype885, $_size882); - for ($_i886 = 0; $_i886 < $_size882; ++$_i886) + $_size889 = 0; + $_etype892 = 0; + $xfer += $input->readListBegin($_etype892, $_size889); + for ($_i893 = 0; $_i893 < $_size889; ++$_i893) { - $elem887 = null; - $elem887 = new \metastore\WMResourcePlan(); - $xfer += $elem887->read($input); - $this->resourcePlans []= $elem887; + $elem894 = null; + $elem894 = new \metastore\WMResourcePlan(); + $xfer += $elem894->read($input); + $this->resourcePlans []= $elem894; } $xfer += $input->readListEnd(); } else { @@ -30176,9 +30446,9 @@ class WMGetAllResourcePlanResponse { { $output->writeListBegin(TType::STRUCT, count($this->resourcePlans)); { - foreach ($this->resourcePlans as $iter888) + foreach ($this->resourcePlans as $iter895) { - $xfer += $iter888->write($output); + $xfer += $iter895->write($output); } } $output->writeListEnd(); @@ -30630,14 +30900,14 @@ class WMValidateResourcePlanResponse { case 1: if ($ftype == TType::LST) { $this->errors = array(); - $_size889 = 0; - $_etype892 = 0; - $xfer += $input->readListBegin($_etype892, $_size889); - for ($_i893 = 0; $_i893 < $_size889; ++$_i893) + $_size896 = 0; + $_etype899 = 0; + $xfer += $input->readListBegin($_etype899, $_size896); + for ($_i900 = 0; $_i900 < $_size896; ++$_i900) { - $elem894 = null; - $xfer += $input->readString($elem894); - $this->errors []= $elem894; + $elem901 = null; + $xfer += $input->readString($elem901); + $this->errors []= $elem901; 
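Almost all of these gen-php/metastore/Types.php hunks are pure renumbering: the Thrift compiler numbers its scratch variables ($_size, $_etype, $_ktype, $_vtype, $elem, $key, $val, $iter) sequentially through the generated file, so the fields this patch adds earlier in hive_metastore.thrift shift every later counter by seven (e.g. $_size889 becomes $_size896) with no behavioural change. Every LIST field still crosses the wire the same way: readListBegin, a counted element loop, then readListEnd. A minimal standalone sketch of that pattern with the Apache Thrift Python library; the in-memory transport and sample values are illustrative, not taken from this patch:

    # Round-trip a list<string> the way the regenerated read()/write()
    # methods do; only the numbered temporaries differ per struct.
    from thrift.Thrift import TType
    from thrift.protocol import TBinaryProtocol
    from thrift.transport import TTransport

    wbuf = TTransport.TMemoryBuffer()
    wproto = TBinaryProtocol.TBinaryProtocol(wbuf)
    values = ["part=2019-08-03", "part=2019-08-04"]
    wproto.writeListBegin(TType.STRING, len(values))  # element type + count
    for v in values:
        wproto.writeString(v)
    wproto.writeListEnd()

    rbuf = TTransport.TMemoryBuffer(wbuf.getvalue())
    rproto = TBinaryProtocol.TBinaryProtocol(rbuf)
    etype, size = rproto.readListBegin()              # returns (etype, size)
    decoded = [rproto.readString() for _ in range(size)]
    rproto.readListEnd()
    assert decoded == values and etype == TType.STRING

Only the GetPartitionsRequest hunks further down, which add the validWriteIdList field, actually change the serialized format in this file.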
} $xfer += $input->readListEnd(); } else { @@ -30647,14 +30917,14 @@ class WMValidateResourcePlanResponse { case 2: if ($ftype == TType::LST) { $this->warnings = array(); - $_size895 = 0; - $_etype898 = 0; - $xfer += $input->readListBegin($_etype898, $_size895); - for ($_i899 = 0; $_i899 < $_size895; ++$_i899) + $_size902 = 0; + $_etype905 = 0; + $xfer += $input->readListBegin($_etype905, $_size902); + for ($_i906 = 0; $_i906 < $_size902; ++$_i906) { - $elem900 = null; - $xfer += $input->readString($elem900); - $this->warnings []= $elem900; + $elem907 = null; + $xfer += $input->readString($elem907); + $this->warnings []= $elem907; } $xfer += $input->readListEnd(); } else { @@ -30682,9 +30952,9 @@ class WMValidateResourcePlanResponse { { $output->writeListBegin(TType::STRING, count($this->errors)); { - foreach ($this->errors as $iter901) + foreach ($this->errors as $iter908) { - $xfer += $output->writeString($iter901); + $xfer += $output->writeString($iter908); } } $output->writeListEnd(); @@ -30699,9 +30969,9 @@ class WMValidateResourcePlanResponse { { $output->writeListBegin(TType::STRING, count($this->warnings)); { - foreach ($this->warnings as $iter902) + foreach ($this->warnings as $iter909) { - $xfer += $output->writeString($iter902); + $xfer += $output->writeString($iter909); } } $output->writeListEnd(); @@ -31443,15 +31713,15 @@ class WMGetTriggersForResourePlanResponse { case 1: if ($ftype == TType::LST) { $this->triggers = array(); - $_size903 = 0; - $_etype906 = 0; - $xfer += $input->readListBegin($_etype906, $_size903); - for ($_i907 = 0; $_i907 < $_size903; ++$_i907) + $_size910 = 0; + $_etype913 = 0; + $xfer += $input->readListBegin($_etype913, $_size910); + for ($_i914 = 0; $_i914 < $_size910; ++$_i914) { - $elem908 = null; - $elem908 = new \metastore\WMTrigger(); - $xfer += $elem908->read($input); - $this->triggers []= $elem908; + $elem915 = null; + $elem915 = new \metastore\WMTrigger(); + $xfer += $elem915->read($input); + $this->triggers []= $elem915; } $xfer += $input->readListEnd(); } else { @@ -31479,9 +31749,9 @@ class WMGetTriggersForResourePlanResponse { { $output->writeListBegin(TType::STRUCT, count($this->triggers)); { - foreach ($this->triggers as $iter909) + foreach ($this->triggers as $iter916) { - $xfer += $iter909->write($output); + $xfer += $iter916->write($output); } } $output->writeListEnd(); @@ -33111,15 +33381,15 @@ class SchemaVersion { case 4: if ($ftype == TType::LST) { $this->cols = array(); - $_size910 = 0; - $_etype913 = 0; - $xfer += $input->readListBegin($_etype913, $_size910); - for ($_i914 = 0; $_i914 < $_size910; ++$_i914) + $_size917 = 0; + $_etype920 = 0; + $xfer += $input->readListBegin($_etype920, $_size917); + for ($_i921 = 0; $_i921 < $_size917; ++$_i921) { - $elem915 = null; - $elem915 = new \metastore\FieldSchema(); - $xfer += $elem915->read($input); - $this->cols []= $elem915; + $elem922 = null; + $elem922 = new \metastore\FieldSchema(); + $xfer += $elem922->read($input); + $this->cols []= $elem922; } $xfer += $input->readListEnd(); } else { @@ -33208,9 +33478,9 @@ class SchemaVersion { { $output->writeListBegin(TType::STRUCT, count($this->cols)); { - foreach ($this->cols as $iter916) + foreach ($this->cols as $iter923) { - $xfer += $iter916->write($output); + $xfer += $iter923->write($output); } } $output->writeListEnd(); @@ -33532,15 +33802,15 @@ class FindSchemasByColsResp { case 1: if ($ftype == TType::LST) { $this->schemaVersions = array(); - $_size917 = 0; - $_etype920 = 0; - $xfer += $input->readListBegin($_etype920, $_size917); 
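MAP-typed fields renumber the same way (CompactionRequest.properties earlier in this file, GetFileMetadataResult.metadata above) and use the analogous wire shape: readMapBegin, a counted key/value loop, then readMapEnd. A companion sketch for an i64-to-string map like GetFileMetadataResult.metadata, again with illustrative values only:

    from thrift.Thrift import TType
    from thrift.protocol import TBinaryProtocol
    from thrift.transport import TTransport

    wbuf = TTransport.TMemoryBuffer()
    wproto = TBinaryProtocol.TBinaryProtocol(wbuf)
    metadata = {101: "footer-a", 102: "footer-b"}  # fileId -> metadata blob
    wproto.writeMapBegin(TType.I64, TType.STRING, len(metadata))
    for file_id, blob in metadata.items():
        wproto.writeI64(file_id)
        wproto.writeString(blob)
    wproto.writeMapEnd()

    rproto = TBinaryProtocol.TBinaryProtocol(TTransport.TMemoryBuffer(wbuf.getvalue()))
    ktype, vtype, size = rproto.readMapBegin()     # returns (ktype, vtype, size)
    decoded = {}
    for _ in range(size):
        file_id = rproto.readI64()                 # key first, then value
        decoded[file_id] = rproto.readString()
    rproto.readMapEnd()
    assert decoded == metadata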
- for ($_i921 = 0; $_i921 < $_size917; ++$_i921) + $_size924 = 0; + $_etype927 = 0; + $xfer += $input->readListBegin($_etype927, $_size924); + for ($_i928 = 0; $_i928 < $_size924; ++$_i928) { - $elem922 = null; - $elem922 = new \metastore\SchemaVersionDescriptor(); - $xfer += $elem922->read($input); - $this->schemaVersions []= $elem922; + $elem929 = null; + $elem929 = new \metastore\SchemaVersionDescriptor(); + $xfer += $elem929->read($input); + $this->schemaVersions []= $elem929; } $xfer += $input->readListEnd(); } else { @@ -33568,9 +33838,9 @@ class FindSchemasByColsResp { { $output->writeListBegin(TType::STRUCT, count($this->schemaVersions)); { - foreach ($this->schemaVersions as $iter923) + foreach ($this->schemaVersions as $iter930) { - $xfer += $iter923->write($output); + $xfer += $iter930->write($output); } } $output->writeListEnd(); @@ -34281,15 +34551,15 @@ class CreateTableRequest { case 3: if ($ftype == TType::LST) { $this->primaryKeys = array(); - $_size924 = 0; - $_etype927 = 0; - $xfer += $input->readListBegin($_etype927, $_size924); - for ($_i928 = 0; $_i928 < $_size924; ++$_i928) + $_size931 = 0; + $_etype934 = 0; + $xfer += $input->readListBegin($_etype934, $_size931); + for ($_i935 = 0; $_i935 < $_size931; ++$_i935) { - $elem929 = null; - $elem929 = new \metastore\SQLPrimaryKey(); - $xfer += $elem929->read($input); - $this->primaryKeys []= $elem929; + $elem936 = null; + $elem936 = new \metastore\SQLPrimaryKey(); + $xfer += $elem936->read($input); + $this->primaryKeys []= $elem936; } $xfer += $input->readListEnd(); } else { @@ -34299,15 +34569,15 @@ class CreateTableRequest { case 4: if ($ftype == TType::LST) { $this->foreignKeys = array(); - $_size930 = 0; - $_etype933 = 0; - $xfer += $input->readListBegin($_etype933, $_size930); - for ($_i934 = 0; $_i934 < $_size930; ++$_i934) + $_size937 = 0; + $_etype940 = 0; + $xfer += $input->readListBegin($_etype940, $_size937); + for ($_i941 = 0; $_i941 < $_size937; ++$_i941) { - $elem935 = null; - $elem935 = new \metastore\SQLForeignKey(); - $xfer += $elem935->read($input); - $this->foreignKeys []= $elem935; + $elem942 = null; + $elem942 = new \metastore\SQLForeignKey(); + $xfer += $elem942->read($input); + $this->foreignKeys []= $elem942; } $xfer += $input->readListEnd(); } else { @@ -34317,15 +34587,15 @@ class CreateTableRequest { case 5: if ($ftype == TType::LST) { $this->uniqueConstraints = array(); - $_size936 = 0; - $_etype939 = 0; - $xfer += $input->readListBegin($_etype939, $_size936); - for ($_i940 = 0; $_i940 < $_size936; ++$_i940) + $_size943 = 0; + $_etype946 = 0; + $xfer += $input->readListBegin($_etype946, $_size943); + for ($_i947 = 0; $_i947 < $_size943; ++$_i947) { - $elem941 = null; - $elem941 = new \metastore\SQLUniqueConstraint(); - $xfer += $elem941->read($input); - $this->uniqueConstraints []= $elem941; + $elem948 = null; + $elem948 = new \metastore\SQLUniqueConstraint(); + $xfer += $elem948->read($input); + $this->uniqueConstraints []= $elem948; } $xfer += $input->readListEnd(); } else { @@ -34335,15 +34605,15 @@ class CreateTableRequest { case 6: if ($ftype == TType::LST) { $this->notNullConstraints = array(); - $_size942 = 0; - $_etype945 = 0; - $xfer += $input->readListBegin($_etype945, $_size942); - for ($_i946 = 0; $_i946 < $_size942; ++$_i946) + $_size949 = 0; + $_etype952 = 0; + $xfer += $input->readListBegin($_etype952, $_size949); + for ($_i953 = 0; $_i953 < $_size949; ++$_i953) { - $elem947 = null; - $elem947 = new \metastore\SQLNotNullConstraint(); - $xfer += $elem947->read($input); - 
$this->notNullConstraints []= $elem947; + $elem954 = null; + $elem954 = new \metastore\SQLNotNullConstraint(); + $xfer += $elem954->read($input); + $this->notNullConstraints []= $elem954; } $xfer += $input->readListEnd(); } else { @@ -34353,15 +34623,15 @@ class CreateTableRequest { case 7: if ($ftype == TType::LST) { $this->defaultConstraints = array(); - $_size948 = 0; - $_etype951 = 0; - $xfer += $input->readListBegin($_etype951, $_size948); - for ($_i952 = 0; $_i952 < $_size948; ++$_i952) + $_size955 = 0; + $_etype958 = 0; + $xfer += $input->readListBegin($_etype958, $_size955); + for ($_i959 = 0; $_i959 < $_size955; ++$_i959) { - $elem953 = null; - $elem953 = new \metastore\SQLDefaultConstraint(); - $xfer += $elem953->read($input); - $this->defaultConstraints []= $elem953; + $elem960 = null; + $elem960 = new \metastore\SQLDefaultConstraint(); + $xfer += $elem960->read($input); + $this->defaultConstraints []= $elem960; } $xfer += $input->readListEnd(); } else { @@ -34371,15 +34641,15 @@ class CreateTableRequest { case 8: if ($ftype == TType::LST) { $this->checkConstraints = array(); - $_size954 = 0; - $_etype957 = 0; - $xfer += $input->readListBegin($_etype957, $_size954); - for ($_i958 = 0; $_i958 < $_size954; ++$_i958) + $_size961 = 0; + $_etype964 = 0; + $xfer += $input->readListBegin($_etype964, $_size961); + for ($_i965 = 0; $_i965 < $_size961; ++$_i965) { - $elem959 = null; - $elem959 = new \metastore\SQLCheckConstraint(); - $xfer += $elem959->read($input); - $this->checkConstraints []= $elem959; + $elem966 = null; + $elem966 = new \metastore\SQLCheckConstraint(); + $xfer += $elem966->read($input); + $this->checkConstraints []= $elem966; } $xfer += $input->readListEnd(); } else { @@ -34389,14 +34659,14 @@ class CreateTableRequest { case 9: if ($ftype == TType::LST) { $this->processorCapabilities = array(); - $_size960 = 0; - $_etype963 = 0; - $xfer += $input->readListBegin($_etype963, $_size960); - for ($_i964 = 0; $_i964 < $_size960; ++$_i964) + $_size967 = 0; + $_etype970 = 0; + $xfer += $input->readListBegin($_etype970, $_size967); + for ($_i971 = 0; $_i971 < $_size967; ++$_i971) { - $elem965 = null; - $xfer += $input->readString($elem965); - $this->processorCapabilities []= $elem965; + $elem972 = null; + $xfer += $input->readString($elem972); + $this->processorCapabilities []= $elem972; } $xfer += $input->readListEnd(); } else { @@ -34447,9 +34717,9 @@ class CreateTableRequest { { $output->writeListBegin(TType::STRUCT, count($this->primaryKeys)); { - foreach ($this->primaryKeys as $iter966) + foreach ($this->primaryKeys as $iter973) { - $xfer += $iter966->write($output); + $xfer += $iter973->write($output); } } $output->writeListEnd(); @@ -34464,9 +34734,9 @@ class CreateTableRequest { { $output->writeListBegin(TType::STRUCT, count($this->foreignKeys)); { - foreach ($this->foreignKeys as $iter967) + foreach ($this->foreignKeys as $iter974) { - $xfer += $iter967->write($output); + $xfer += $iter974->write($output); } } $output->writeListEnd(); @@ -34481,9 +34751,9 @@ class CreateTableRequest { { $output->writeListBegin(TType::STRUCT, count($this->uniqueConstraints)); { - foreach ($this->uniqueConstraints as $iter968) + foreach ($this->uniqueConstraints as $iter975) { - $xfer += $iter968->write($output); + $xfer += $iter975->write($output); } } $output->writeListEnd(); @@ -34498,9 +34768,9 @@ class CreateTableRequest { { $output->writeListBegin(TType::STRUCT, count($this->notNullConstraints)); { - foreach ($this->notNullConstraints as $iter969) + foreach 
($this->notNullConstraints as $iter976) { - $xfer += $iter969->write($output); + $xfer += $iter976->write($output); } } $output->writeListEnd(); @@ -34515,9 +34785,9 @@ class CreateTableRequest { { $output->writeListBegin(TType::STRUCT, count($this->defaultConstraints)); { - foreach ($this->defaultConstraints as $iter970) + foreach ($this->defaultConstraints as $iter977) { - $xfer += $iter970->write($output); + $xfer += $iter977->write($output); } } $output->writeListEnd(); @@ -34532,9 +34802,9 @@ class CreateTableRequest { { $output->writeListBegin(TType::STRUCT, count($this->checkConstraints)); { - foreach ($this->checkConstraints as $iter971) + foreach ($this->checkConstraints as $iter978) { - $xfer += $iter971->write($output); + $xfer += $iter978->write($output); } } $output->writeListEnd(); @@ -34549,9 +34819,9 @@ class CreateTableRequest { { $output->writeListBegin(TType::STRING, count($this->processorCapabilities)); { - foreach ($this->processorCapabilities as $iter972) + foreach ($this->processorCapabilities as $iter979) { - $xfer += $output->writeString($iter972); + $xfer += $output->writeString($iter979); } } $output->writeListEnd(); @@ -34709,15 +34979,15 @@ class AlterPartitionsRequest { case 4: if ($ftype == TType::LST) { $this->partitions = array(); - $_size973 = 0; - $_etype976 = 0; - $xfer += $input->readListBegin($_etype976, $_size973); - for ($_i977 = 0; $_i977 < $_size973; ++$_i977) + $_size980 = 0; + $_etype983 = 0; + $xfer += $input->readListBegin($_etype983, $_size980); + for ($_i984 = 0; $_i984 < $_size980; ++$_i984) { - $elem978 = null; - $elem978 = new \metastore\Partition(); - $xfer += $elem978->read($input); - $this->partitions []= $elem978; + $elem985 = null; + $elem985 = new \metastore\Partition(); + $xfer += $elem985->read($input); + $this->partitions []= $elem985; } $xfer += $input->readListEnd(); } else { @@ -34782,9 +35052,9 @@ class AlterPartitionsRequest { { $output->writeListBegin(TType::STRUCT, count($this->partitions)); { - foreach ($this->partitions as $iter979) + foreach ($this->partitions as $iter986) { - $xfer += $iter979->write($output); + $xfer += $iter986->write($output); } } $output->writeListEnd(); @@ -34993,14 +35263,14 @@ class RenamePartitionRequest { case 4: if ($ftype == TType::LST) { $this->partVals = array(); - $_size980 = 0; - $_etype983 = 0; - $xfer += $input->readListBegin($_etype983, $_size980); - for ($_i984 = 0; $_i984 < $_size980; ++$_i984) + $_size987 = 0; + $_etype990 = 0; + $xfer += $input->readListBegin($_etype990, $_size987); + for ($_i991 = 0; $_i991 < $_size987; ++$_i991) { - $elem985 = null; - $xfer += $input->readString($elem985); - $this->partVals []= $elem985; + $elem992 = null; + $xfer += $input->readString($elem992); + $this->partVals []= $elem992; } $xfer += $input->readListEnd(); } else { @@ -35058,9 +35328,9 @@ class RenamePartitionRequest { { $output->writeListBegin(TType::STRING, count($this->partVals)); { - foreach ($this->partVals as $iter986) + foreach ($this->partVals as $iter993) { - $xfer += $output->writeString($iter986); + $xfer += $output->writeString($iter993); } } $output->writeListEnd(); @@ -35482,14 +35752,14 @@ class GetPartitionsProjectionSpec { case 1: if ($ftype == TType::LST) { $this->fieldList = array(); - $_size987 = 0; - $_etype990 = 0; - $xfer += $input->readListBegin($_etype990, $_size987); - for ($_i991 = 0; $_i991 < $_size987; ++$_i991) + $_size994 = 0; + $_etype997 = 0; + $xfer += $input->readListBegin($_etype997, $_size994); + for ($_i998 = 0; $_i998 < $_size994; ++$_i998) { - $elem992 
= null;
-              $xfer += $input->readString($elem992);
-              $this->fieldList []= $elem992;
+              $elem999 = null;
+              $xfer += $input->readString($elem999);
+              $this->fieldList []= $elem999;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -35531,9 +35801,9 @@ class GetPartitionsProjectionSpec {
       {
         $output->writeListBegin(TType::STRING, count($this->fieldList));
         {
-          foreach ($this->fieldList as $iter993)
+          foreach ($this->fieldList as $iter1000)
           {
-            $xfer += $output->writeString($iter993);
+            $xfer += $output->writeString($iter1000);
           }
         }
         $output->writeListEnd();
@@ -35625,14 +35895,14 @@ class GetPartitionsFilterSpec {
         case 8:
           if ($ftype == TType::LST) {
             $this->filters = array();
-            $_size994 = 0;
-            $_etype997 = 0;
-            $xfer += $input->readListBegin($_etype997, $_size994);
-            for ($_i998 = 0; $_i998 < $_size994; ++$_i998)
+            $_size1001 = 0;
+            $_etype1004 = 0;
+            $xfer += $input->readListBegin($_etype1004, $_size1001);
+            for ($_i1005 = 0; $_i1005 < $_size1001; ++$_i1005)
             {
-              $elem999 = null;
-              $xfer += $input->readString($elem999);
-              $this->filters []= $elem999;
+              $elem1006 = null;
+              $xfer += $input->readString($elem1006);
+              $this->filters []= $elem1006;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -35665,9 +35935,9 @@ class GetPartitionsFilterSpec {
       {
         $output->writeListBegin(TType::STRING, count($this->filters));
         {
-          foreach ($this->filters as $iter1000)
+          foreach ($this->filters as $iter1007)
           {
-            $xfer += $output->writeString($iter1000);
+            $xfer += $output->writeString($iter1007);
           }
         }
         $output->writeListEnd();
@@ -35732,15 +36002,15 @@ class GetPartitionsResponse {
         case 1:
           if ($ftype == TType::LST) {
             $this->partitionSpec = array();
-            $_size1001 = 0;
-            $_etype1004 = 0;
-            $xfer += $input->readListBegin($_etype1004, $_size1001);
-            for ($_i1005 = 0; $_i1005 < $_size1001; ++$_i1005)
+            $_size1008 = 0;
+            $_etype1011 = 0;
+            $xfer += $input->readListBegin($_etype1011, $_size1008);
+            for ($_i1012 = 0; $_i1012 < $_size1008; ++$_i1012)
             {
-              $elem1006 = null;
-              $elem1006 = new \metastore\PartitionSpec();
-              $xfer += $elem1006->read($input);
-              $this->partitionSpec []= $elem1006;
+              $elem1013 = null;
+              $elem1013 = new \metastore\PartitionSpec();
+              $xfer += $elem1013->read($input);
+              $this->partitionSpec []= $elem1013;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -35768,9 +36038,9 @@ class GetPartitionsResponse {
       {
         $output->writeListBegin(TType::STRUCT, count($this->partitionSpec));
         {
-          foreach ($this->partitionSpec as $iter1007)
+          foreach ($this->partitionSpec as $iter1014)
           {
-            $xfer += $iter1007->write($output);
+            $xfer += $iter1014->write($output);
           }
         }
         $output->writeListEnd();
@@ -35827,6 +36097,10 @@ class GetPartitionsRequest {
    * @var string
    */
   public $processorIdentifier = null;
+  /**
+   * @var string
+   */
+  public $validWriteIdList = null;
 
   public function __construct($vals=null) {
     if (!isset(self::$_TSPEC)) {
@@ -35881,6 +36155,10 @@ class GetPartitionsRequest {
           'var' => 'processorIdentifier',
           'type' => TType::STRING,
           ),
+        11 => array(
+          'var' => 'validWriteIdList',
+          'type' => TType::STRING,
+          ),
         );
     }
     if (is_array($vals)) {
@@ -35914,6 +36192,9 @@ class GetPartitionsRequest {
       if (isset($vals['processorIdentifier'])) {
         $this->processorIdentifier = $vals['processorIdentifier'];
       }
+      if (isset($vals['validWriteIdList'])) {
+        $this->validWriteIdList = $vals['validWriteIdList'];
+      }
     }
   }
@@ -35974,14 +36255,14 @@ class GetPartitionsRequest {
         case 6:
           if ($ftype == TType::LST) {
             $this->groupNames = array();
-            $_size1008 = 0;
-            $_etype1011 = 0;
-            $xfer += $input->readListBegin($_etype1011, $_size1008);
-            for ($_i1012 = 0; $_i1012 < $_size1008; ++$_i1012)
+            $_size1015 = 0;
+            $_etype1018 = 0;
+            $xfer += $input->readListBegin($_etype1018, $_size1015);
+            for ($_i1019 = 0; $_i1019 < $_size1015; ++$_i1019)
             {
-              $elem1013 = null;
-              $xfer += $input->readString($elem1013);
-              $this->groupNames []= $elem1013;
+              $elem1020 = null;
+              $xfer += $input->readString($elem1020);
+              $this->groupNames []= $elem1020;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -36007,14 +36288,14 @@ class GetPartitionsRequest {
         case 9:
           if ($ftype == TType::LST) {
             $this->processorCapabilities = array();
-            $_size1014 = 0;
-            $_etype1017 = 0;
-            $xfer += $input->readListBegin($_etype1017, $_size1014);
-            for ($_i1018 = 0; $_i1018 < $_size1014; ++$_i1018)
+            $_size1021 = 0;
+            $_etype1024 = 0;
+            $xfer += $input->readListBegin($_etype1024, $_size1021);
+            for ($_i1025 = 0; $_i1025 < $_size1021; ++$_i1025)
             {
-              $elem1019 = null;
-              $xfer += $input->readString($elem1019);
-              $this->processorCapabilities []= $elem1019;
+              $elem1026 = null;
+              $xfer += $input->readString($elem1026);
+              $this->processorCapabilities []= $elem1026;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -36028,6 +36309,13 @@ class GetPartitionsRequest {
             $xfer += $input->skip($ftype);
           }
           break;
+        case 11:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->validWriteIdList);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
         default:
           $xfer += $input->skip($ftype);
           break;
@@ -36074,9 +36362,9 @@ class GetPartitionsRequest {
       {
         $output->writeListBegin(TType::STRING, count($this->groupNames));
         {
-          foreach ($this->groupNames as $iter1020)
+          foreach ($this->groupNames as $iter1027)
           {
-            $xfer += $output->writeString($iter1020);
+            $xfer += $output->writeString($iter1027);
           }
         }
         $output->writeListEnd();
@@ -36107,9 +36395,9 @@ class GetPartitionsRequest {
       {
         $output->writeListBegin(TType::STRING, count($this->processorCapabilities));
         {
-          foreach ($this->processorCapabilities as $iter1021)
+          foreach ($this->processorCapabilities as $iter1028)
          {
-            $xfer += $output->writeString($iter1021);
+            $xfer += $output->writeString($iter1028);
          }
        }
        $output->writeListEnd();
@@ -36121,6 +36409,11 @@ class GetPartitionsRequest {
      $xfer += $output->writeString($this->processorIdentifier);
      $xfer += $output->writeFieldEnd();
    }
+    if ($this->validWriteIdList !== null) {
+      $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 11);
+      $xfer += $output->writeString($this->validWriteIdList);
+      $xfer += $output->writeFieldEnd();
+    }
    $xfer += $output->writeFieldStop();
    $xfer += $output->writeStructEnd();
    return $xfer;
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
index 8e9a2c6718..d836469c70 100755
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
@@ -41,10 +41,10 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help':
   print('  bool create_type(Type type)')
   print('  bool drop_type(string type)')
   print('   get_type_all(string name)')
-  print('   get_fields(string db_name, string table_name)')
-  print('   get_fields_with_environment_context(string db_name, string table_name, EnvironmentContext environment_context)')
-  print('   get_schema(string db_name, string table_name)')
-  print('   get_schema_with_environment_context(string db_name, string table_name, EnvironmentContext
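The substantive change in the GetPartitionsRequest hunks above is the new optional field 11, validWriteIdList, read and written as a plain STRING. A hypothetical Python-side construction using the regenerated hive_metastore.ttypes module; the dbName/tblName field names and the snapshot-string layout (the <db.table>:<highWatermark>:<minOpenWriteId>:<open>:<aborted> form used by ValidReaderWriteIdList) are assumptions, not shown in this patch:

    from hive_metastore.ttypes import GetPartitionsRequest

    # The reader's write-id snapshot travels with the request so the
    # metastore can answer partition reads consistently for ACID tables.
    req = GetPartitionsRequest(
        dbName="default",                                   # assumed field name
        tblName="acid_tbl",                                 # assumed field name
        validWriteIdList="default.acid_tbl:4:9223372036854775807::",
    )

Because the field is optional, old servers skip the unknown field and old clients simply never send it, which is the standard Thrift evolution path.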
environment_context)') + print(' get_fields(string db_name, string table_name, string validWriteIdList)') + print(' get_fields_with_environment_context(string db_name, string table_name, EnvironmentContext environment_context, string validWriteIdList)') + print(' get_schema(string db_name, string table_name, string validWriteIdList)') + print(' get_schema_with_environment_context(string db_name, string table_name, EnvironmentContext environment_context, string validWriteIdList)') print(' void create_table(Table tbl)') print(' void create_table_with_environment_context(Table tbl, EnvironmentContext environment_context)') print(' void create_table_with_constraints(Table tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints)') @@ -66,7 +66,7 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help': print(' get_materialized_views_for_rewriting(string db_name)') print(' get_table_meta(string db_patterns, string tbl_patterns, tbl_types)') print(' get_all_tables(string db_name)') - print(' Table get_table(string dbname, string tbl_name)') + print(' Table get_table(string dbname, string tbl_name, string validWriteIdList)') print(' get_table_objects_by_name(string dbname, tbl_names)') print(' get_tables_ext(GetTablesExtRequest req)') print(' GetTableResult get_table_req(GetTableRequest req)') @@ -92,24 +92,24 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help': print(' bool drop_partition_by_name(string db_name, string tbl_name, string part_name, bool deleteData)') print(' bool drop_partition_by_name_with_environment_context(string db_name, string tbl_name, string part_name, bool deleteData, EnvironmentContext environment_context)') print(' DropPartitionsResult drop_partitions_req(DropPartitionsRequest req)') - print(' Partition get_partition(string db_name, string tbl_name, part_vals)') + print(' Partition get_partition(string db_name, string tbl_name, part_vals, string validTxnList)') print(' Partition exchange_partition( partitionSpecs, string source_db, string source_table_name, string dest_db, string dest_table_name)') print(' exchange_partitions( partitionSpecs, string source_db, string source_table_name, string dest_db, string dest_table_name)') - print(' Partition get_partition_with_auth(string db_name, string tbl_name, part_vals, string user_name, group_names)') - print(' Partition get_partition_by_name(string db_name, string tbl_name, string part_name)') - print(' get_partitions(string db_name, string tbl_name, i16 max_parts)') - print(' get_partitions_with_auth(string db_name, string tbl_name, i16 max_parts, string user_name, group_names)') - print(' get_partitions_pspec(string db_name, string tbl_name, i32 max_parts)') - print(' get_partition_names(string db_name, string tbl_name, i16 max_parts)') + print(' Partition get_partition_with_auth(string db_name, string tbl_name, part_vals, string user_name, group_names, string validTxnList)') + print(' Partition get_partition_by_name(string db_name, string tbl_name, string part_name, string validTxnList)') + print(' get_partitions(string db_name, string tbl_name, i16 max_parts, string validTxnList)') + print(' get_partitions_with_auth(string db_name, string tbl_name, i16 max_parts, string user_name, group_names, string validTxnList)') + print(' get_partitions_pspec(string db_name, string tbl_name, i32 max_parts, string validTxnList)') + print(' get_partition_names(string db_name, string tbl_name, i16 max_parts, string validTxnList)') print(' PartitionValuesResponse 
get_partition_values(PartitionValuesRequest request)') - print(' get_partitions_ps(string db_name, string tbl_name, part_vals, i16 max_parts)') - print(' get_partitions_ps_with_auth(string db_name, string tbl_name, part_vals, i16 max_parts, string user_name, group_names)') - print(' get_partition_names_ps(string db_name, string tbl_name, part_vals, i16 max_parts)') - print(' get_partitions_by_filter(string db_name, string tbl_name, string filter, i16 max_parts)') - print(' get_part_specs_by_filter(string db_name, string tbl_name, string filter, i32 max_parts)') + print(' get_partitions_ps(string db_name, string tbl_name, part_vals, i16 max_parts, string validTxnList)') + print(' get_partitions_ps_with_auth(string db_name, string tbl_name, part_vals, i16 max_parts, string user_name, group_names, string validTxnList)') + print(' get_partition_names_ps(string db_name, string tbl_name, part_vals, i16 max_parts, string validTxnList)') + print(' get_partitions_by_filter(string db_name, string tbl_name, string filter, i16 max_parts, string validTxnList)') + print(' get_part_specs_by_filter(string db_name, string tbl_name, string filter, i32 max_parts, string validTxnList)') print(' PartitionsByExprResult get_partitions_by_expr(PartitionsByExprRequest req)') - print(' i32 get_num_partitions_by_filter(string db_name, string tbl_name, string filter)') - print(' get_partitions_by_names(string db_name, string tbl_name, names)') + print(' i32 get_num_partitions_by_filter(string db_name, string tbl_name, string filter, string validTxnList)') + print(' get_partitions_by_names(string db_name, string tbl_name, names, string validTxnList)') print(' GetPartitionsByNamesResult get_partitions_by_names_req(GetPartitionsByNamesRequest req)') print(' void alter_partition(string db_name, string tbl_name, Partition new_part)') print(' void alter_partitions(string db_name, string tbl_name, new_parts)') @@ -134,8 +134,8 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help': print(' bool update_partition_column_statistics(ColumnStatistics stats_obj)') print(' SetPartitionsStatsResponse update_table_column_statistics_req(SetPartitionsStatsRequest req)') print(' SetPartitionsStatsResponse update_partition_column_statistics_req(SetPartitionsStatsRequest req)') - print(' ColumnStatistics get_table_column_statistics(string db_name, string tbl_name, string col_name)') - print(' ColumnStatistics get_partition_column_statistics(string db_name, string tbl_name, string part_name, string col_name)') + print(' ColumnStatistics get_table_column_statistics(string db_name, string tbl_name, string col_name, string validWriteIdList)') + print(' ColumnStatistics get_partition_column_statistics(string db_name, string tbl_name, string part_name, string col_name, string validWriteIdList)') print(' TableStatsResult get_table_statistics_req(TableStatsRequest request)') print(' PartitionsStatsResult get_partitions_statistics_req(PartitionsStatsRequest request)') print(' AggrStats get_aggr_stats_for(PartitionsStatsRequest request)') @@ -422,28 +422,28 @@ elif cmd == 'get_type_all': pp.pprint(client.get_type_all(args[0],)) elif cmd == 'get_fields': - if len(args) != 2: - print('get_fields requires 2 args') + if len(args) != 3: + print('get_fields requires 3 args') sys.exit(1) - pp.pprint(client.get_fields(args[0],args[1],)) + pp.pprint(client.get_fields(args[0],args[1],args[2],)) elif cmd == 'get_fields_with_environment_context': - if len(args) != 3: - print('get_fields_with_environment_context requires 3 args') + if len(args) != 4: + 
print('get_fields_with_environment_context requires 4 args') sys.exit(1) - pp.pprint(client.get_fields_with_environment_context(args[0],args[1],eval(args[2]),)) + pp.pprint(client.get_fields_with_environment_context(args[0],args[1],eval(args[2]),args[3],)) elif cmd == 'get_schema': - if len(args) != 2: - print('get_schema requires 2 args') + if len(args) != 3: + print('get_schema requires 3 args') sys.exit(1) - pp.pprint(client.get_schema(args[0],args[1],)) + pp.pprint(client.get_schema(args[0],args[1],args[2],)) elif cmd == 'get_schema_with_environment_context': - if len(args) != 3: - print('get_schema_with_environment_context requires 3 args') + if len(args) != 4: + print('get_schema_with_environment_context requires 4 args') sys.exit(1) - pp.pprint(client.get_schema_with_environment_context(args[0],args[1],eval(args[2]),)) + pp.pprint(client.get_schema_with_environment_context(args[0],args[1],eval(args[2]),args[3],)) elif cmd == 'create_table': if len(args) != 1: @@ -572,10 +572,10 @@ elif cmd == 'get_all_tables': pp.pprint(client.get_all_tables(args[0],)) elif cmd == 'get_table': - if len(args) != 2: - print('get_table requires 2 args') + if len(args) != 3: + print('get_table requires 3 args') sys.exit(1) - pp.pprint(client.get_table(args[0],args[1],)) + pp.pprint(client.get_table(args[0],args[1],args[2],)) elif cmd == 'get_table_objects_by_name': if len(args) != 2: @@ -728,10 +728,10 @@ elif cmd == 'drop_partitions_req': pp.pprint(client.drop_partitions_req(eval(args[0]),)) elif cmd == 'get_partition': - if len(args) != 3: - print('get_partition requires 3 args') + if len(args) != 4: + print('get_partition requires 4 args') sys.exit(1) - pp.pprint(client.get_partition(args[0],args[1],eval(args[2]),)) + pp.pprint(client.get_partition(args[0],args[1],eval(args[2]),args[3],)) elif cmd == 'exchange_partition': if len(args) != 5: @@ -746,40 +746,40 @@ elif cmd == 'exchange_partitions': pp.pprint(client.exchange_partitions(eval(args[0]),args[1],args[2],args[3],args[4],)) elif cmd == 'get_partition_with_auth': - if len(args) != 5: - print('get_partition_with_auth requires 5 args') + if len(args) != 6: + print('get_partition_with_auth requires 6 args') sys.exit(1) - pp.pprint(client.get_partition_with_auth(args[0],args[1],eval(args[2]),args[3],eval(args[4]),)) + pp.pprint(client.get_partition_with_auth(args[0],args[1],eval(args[2]),args[3],eval(args[4]),args[5],)) elif cmd == 'get_partition_by_name': - if len(args) != 3: - print('get_partition_by_name requires 3 args') + if len(args) != 4: + print('get_partition_by_name requires 4 args') sys.exit(1) - pp.pprint(client.get_partition_by_name(args[0],args[1],args[2],)) + pp.pprint(client.get_partition_by_name(args[0],args[1],args[2],args[3],)) elif cmd == 'get_partitions': - if len(args) != 3: - print('get_partitions requires 3 args') + if len(args) != 4: + print('get_partitions requires 4 args') sys.exit(1) - pp.pprint(client.get_partitions(args[0],args[1],eval(args[2]),)) + pp.pprint(client.get_partitions(args[0],args[1],eval(args[2]),args[3],)) elif cmd == 'get_partitions_with_auth': - if len(args) != 5: - print('get_partitions_with_auth requires 5 args') + if len(args) != 6: + print('get_partitions_with_auth requires 6 args') sys.exit(1) - pp.pprint(client.get_partitions_with_auth(args[0],args[1],eval(args[2]),args[3],eval(args[4]),)) + pp.pprint(client.get_partitions_with_auth(args[0],args[1],eval(args[2]),args[3],eval(args[4]),args[5],)) elif cmd == 'get_partitions_pspec': - if len(args) != 3: - print('get_partitions_pspec requires 3 
 elif cmd == 'get_partitions_pspec':
-  if len(args) != 3:
-    print('get_partitions_pspec requires 3 args')
+  if len(args) != 4:
+    print('get_partitions_pspec requires 4 args')
     sys.exit(1)
-  pp.pprint(client.get_partitions_pspec(args[0],args[1],eval(args[2]),))
+  pp.pprint(client.get_partitions_pspec(args[0],args[1],eval(args[2]),args[3],))

 elif cmd == 'get_partition_names':
-  if len(args) != 3:
-    print('get_partition_names requires 3 args')
+  if len(args) != 4:
+    print('get_partition_names requires 4 args')
     sys.exit(1)
-  pp.pprint(client.get_partition_names(args[0],args[1],eval(args[2]),))
+  pp.pprint(client.get_partition_names(args[0],args[1],eval(args[2]),args[3],))

 elif cmd == 'get_partition_values':
   if len(args) != 1:
@@ -788,34 +788,34 @@ elif cmd == 'get_partition_values':
   pp.pprint(client.get_partition_values(eval(args[0]),))

 elif cmd == 'get_partitions_ps':
-  if len(args) != 4:
-    print('get_partitions_ps requires 4 args')
+  if len(args) != 5:
+    print('get_partitions_ps requires 5 args')
     sys.exit(1)
-  pp.pprint(client.get_partitions_ps(args[0],args[1],eval(args[2]),eval(args[3]),))
+  pp.pprint(client.get_partitions_ps(args[0],args[1],eval(args[2]),eval(args[3]),args[4],))

 elif cmd == 'get_partitions_ps_with_auth':
-  if len(args) != 6:
-    print('get_partitions_ps_with_auth requires 6 args')
+  if len(args) != 7:
+    print('get_partitions_ps_with_auth requires 7 args')
     sys.exit(1)
-  pp.pprint(client.get_partitions_ps_with_auth(args[0],args[1],eval(args[2]),eval(args[3]),args[4],eval(args[5]),))
+  pp.pprint(client.get_partitions_ps_with_auth(args[0],args[1],eval(args[2]),eval(args[3]),args[4],eval(args[5]),args[6],))

 elif cmd == 'get_partition_names_ps':
-  if len(args) != 4:
-    print('get_partition_names_ps requires 4 args')
+  if len(args) != 5:
+    print('get_partition_names_ps requires 5 args')
     sys.exit(1)
-  pp.pprint(client.get_partition_names_ps(args[0],args[1],eval(args[2]),eval(args[3]),))
+  pp.pprint(client.get_partition_names_ps(args[0],args[1],eval(args[2]),eval(args[3]),args[4],))

 elif cmd == 'get_partitions_by_filter':
-  if len(args) != 4:
-    print('get_partitions_by_filter requires 4 args')
+  if len(args) != 5:
+    print('get_partitions_by_filter requires 5 args')
     sys.exit(1)
-  pp.pprint(client.get_partitions_by_filter(args[0],args[1],args[2],eval(args[3]),))
+  pp.pprint(client.get_partitions_by_filter(args[0],args[1],args[2],eval(args[3]),args[4],))

 elif cmd == 'get_part_specs_by_filter':
-  if len(args) != 4:
-    print('get_part_specs_by_filter requires 4 args')
+  if len(args) != 5:
+    print('get_part_specs_by_filter requires 5 args')
     sys.exit(1)
-  pp.pprint(client.get_part_specs_by_filter(args[0],args[1],args[2],eval(args[3]),))
+  pp.pprint(client.get_part_specs_by_filter(args[0],args[1],args[2],eval(args[3]),args[4],))

 elif cmd == 'get_partitions_by_expr':
   if len(args) != 1:
@@ -824,16 +824,16 @@ elif cmd == 'get_partitions_by_expr':
   pp.pprint(client.get_partitions_by_expr(eval(args[0]),))

 elif cmd == 'get_num_partitions_by_filter':
-  if len(args) != 3:
-    print('get_num_partitions_by_filter requires 3 args')
+  if len(args) != 4:
+    print('get_num_partitions_by_filter requires 4 args')
     sys.exit(1)
-  pp.pprint(client.get_num_partitions_by_filter(args[0],args[1],args[2],))
+  pp.pprint(client.get_num_partitions_by_filter(args[0],args[1],args[2],args[3],))
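What keeps this change wire-compatible is that the parameter is appended as a new, highest-numbered optional field in each generated args struct (see get_fields_args further down in this patch, where validWriteIdList becomes string field 3): write() emits the field only when it is set, and read() skips field ids it does not recognize, so a pre-patch client can still talk to a post-patch server and the handler simply sees None. Only the generated CLI is strict, because it compares len(args) exactly. A minimal struct-level sketch, using only the regenerated classes (table names are illustrative, and the validWriteIdList literal only hints at the shape Hive's ValidWriteIdList serialization produces):

from hive_metastore.ThriftHiveMetastore import get_fields_args

# An old-style caller leaves the new field at its default; field 3 is then
# never written to the wire.
args = get_fields_args(db_name='default', table_name='web_logs')
print(args.validWriteIdList)  # None

# A new-style caller sets it explicitly (illustrative value only).
args = get_fields_args(db_name='default', table_name='web_logs',
                       validWriteIdList='default.web_logs:7:7::')
print(args.validWriteIdList)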
 elif cmd == 'get_partitions_by_names':
-  if len(args) != 3:
-    print('get_partitions_by_names requires 3 args')
+  if len(args) != 4:
+    print('get_partitions_by_names requires 4 args')
     sys.exit(1)
-  pp.pprint(client.get_partitions_by_names(args[0],args[1],eval(args[2]),))
+  pp.pprint(client.get_partitions_by_names(args[0],args[1],eval(args[2]),args[3],))

 elif cmd == 'get_partitions_by_names_req':
   if len(args) != 1:
@@ -980,16 +980,16 @@ elif cmd == 'update_partition_column_statistics_req':
   pp.pprint(client.update_partition_column_statistics_req(eval(args[0]),))

 elif cmd == 'get_table_column_statistics':
-  if len(args) != 3:
-    print('get_table_column_statistics requires 3 args')
+  if len(args) != 4:
+    print('get_table_column_statistics requires 4 args')
     sys.exit(1)
-  pp.pprint(client.get_table_column_statistics(args[0],args[1],args[2],))
+  pp.pprint(client.get_table_column_statistics(args[0],args[1],args[2],args[3],))

 elif cmd == 'get_partition_column_statistics':
-  if len(args) != 4:
-    print('get_partition_column_statistics requires 4 args')
+  if len(args) != 5:
+    print('get_partition_column_statistics requires 5 args')
     sys.exit(1)
-  pp.pprint(client.get_partition_column_statistics(args[0],args[1],args[2],args[3],))
+  pp.pprint(client.get_partition_column_statistics(args[0],args[1],args[2],args[3],args[4],))

 elif cmd == 'get_table_statistics_req':
   if len(args) != 1:
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
index 11164a056f..64f9534fdb 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
@@ -138,37 +138,41 @@ def get_type_all(self, name):
     """
     pass

-  def get_fields(self, db_name, table_name):
+  def get_fields(self, db_name, table_name, validWriteIdList):
     """
     Parameters:
      - db_name
      - table_name
+     - validWriteIdList
     """
     pass

-  def get_fields_with_environment_context(self, db_name, table_name, environment_context):
+  def get_fields_with_environment_context(self, db_name, table_name, environment_context, validWriteIdList):
     """
     Parameters:
      - db_name
      - table_name
      - environment_context
+     - validWriteIdList
     """
     pass

-  def get_schema(self, db_name, table_name):
+  def get_schema(self, db_name, table_name, validWriteIdList):
     """
     Parameters:
      - db_name
      - table_name
+     - validWriteIdList
     """
     pass

-  def get_schema_with_environment_context(self, db_name, table_name, environment_context):
+  def get_schema_with_environment_context(self, db_name, table_name, environment_context, validWriteIdList):
     """
     Parameters:
      - db_name
      - table_name
      - environment_context
+     - validWriteIdList
     """
     pass

@@ -334,11 +338,12 @@ def get_all_tables(self, db_name):
     """
     pass

-  def get_table(self, dbname, tbl_name):
+  def get_table(self, dbname, tbl_name, validWriteIdList):
     """
     Parameters:
      - dbname
      - tbl_name
+     - validWriteIdList
     """
     pass

@@ -557,12 +562,13 @@ def drop_partitions_req(self, req):
     """
     pass

-  def get_partition(self, db_name, tbl_name, part_vals):
+  def get_partition(self, db_name, tbl_name, part_vals, validTxnList):
     """
     Parameters:
      - db_name
      - tbl_name
      - part_vals
+     - validTxnList
     """
     pass

@@ -588,7 +594,7 @@ def exchange_partitions(self, partitionSpecs, source_db, source_table_name, dest
     """
     pass

-  def get_partition_with_auth(self, db_name, tbl_name, part_vals, user_name, group_names):
+  def get_partition_with_auth(self, db_name, tbl_name, part_vals, user_name, group_names, validTxnList):
     """
     Parameters:
      - db_name
@@ -596,28 +602,31 @@ def get_partition_with_auth(self, db_name, tbl_name, part_vals, user_name, group
      - part_vals
      - user_name
      - group_names
+     - 
validTxnList """ pass - def get_partition_by_name(self, db_name, tbl_name, part_name): + def get_partition_by_name(self, db_name, tbl_name, part_name, validTxnList): """ Parameters: - db_name - tbl_name - part_name + - validTxnList """ pass - def get_partitions(self, db_name, tbl_name, max_parts): + def get_partitions(self, db_name, tbl_name, max_parts, validTxnList): """ Parameters: - db_name - tbl_name - max_parts + - validTxnList """ pass - def get_partitions_with_auth(self, db_name, tbl_name, max_parts, user_name, group_names): + def get_partitions_with_auth(self, db_name, tbl_name, max_parts, user_name, group_names, validTxnList): """ Parameters: - db_name @@ -625,24 +634,27 @@ def get_partitions_with_auth(self, db_name, tbl_name, max_parts, user_name, grou - max_parts - user_name - group_names + - validTxnList """ pass - def get_partitions_pspec(self, db_name, tbl_name, max_parts): + def get_partitions_pspec(self, db_name, tbl_name, max_parts, validTxnList): """ Parameters: - db_name - tbl_name - max_parts + - validTxnList """ pass - def get_partition_names(self, db_name, tbl_name, max_parts): + def get_partition_names(self, db_name, tbl_name, max_parts, validTxnList): """ Parameters: - db_name - tbl_name - max_parts + - validTxnList """ pass @@ -653,17 +665,18 @@ def get_partition_values(self, request): """ pass - def get_partitions_ps(self, db_name, tbl_name, part_vals, max_parts): + def get_partitions_ps(self, db_name, tbl_name, part_vals, max_parts, validTxnList): """ Parameters: - db_name - tbl_name - part_vals - max_parts + - validTxnList """ pass - def get_partitions_ps_with_auth(self, db_name, tbl_name, part_vals, max_parts, user_name, group_names): + def get_partitions_ps_with_auth(self, db_name, tbl_name, part_vals, max_parts, user_name, group_names, validTxnList): """ Parameters: - db_name @@ -672,36 +685,40 @@ def get_partitions_ps_with_auth(self, db_name, tbl_name, part_vals, max_parts, u - max_parts - user_name - group_names + - validTxnList """ pass - def get_partition_names_ps(self, db_name, tbl_name, part_vals, max_parts): + def get_partition_names_ps(self, db_name, tbl_name, part_vals, max_parts, validTxnList): """ Parameters: - db_name - tbl_name - part_vals - max_parts + - validTxnList """ pass - def get_partitions_by_filter(self, db_name, tbl_name, filter, max_parts): + def get_partitions_by_filter(self, db_name, tbl_name, filter, max_parts, validTxnList): """ Parameters: - db_name - tbl_name - filter - max_parts + - validTxnList """ pass - def get_part_specs_by_filter(self, db_name, tbl_name, filter, max_parts): + def get_part_specs_by_filter(self, db_name, tbl_name, filter, max_parts, validTxnList): """ Parameters: - db_name - tbl_name - filter - max_parts + - validTxnList """ pass @@ -712,21 +729,23 @@ def get_partitions_by_expr(self, req): """ pass - def get_num_partitions_by_filter(self, db_name, tbl_name, filter): + def get_num_partitions_by_filter(self, db_name, tbl_name, filter, validTxnList): """ Parameters: - db_name - tbl_name - filter + - validTxnList """ pass - def get_partitions_by_names(self, db_name, tbl_name, names): + def get_partitions_by_names(self, db_name, tbl_name, names, validTxnList): """ Parameters: - db_name - tbl_name - names + - validTxnList """ pass @@ -919,22 +938,24 @@ def update_partition_column_statistics_req(self, req): """ pass - def get_table_column_statistics(self, db_name, tbl_name, col_name): + def get_table_column_statistics(self, db_name, tbl_name, col_name, validWriteIdList): """ Parameters: - db_name - tbl_name - 
col_name + - validWriteIdList """ pass - def get_partition_column_statistics(self, db_name, tbl_name, part_name, col_name): + def get_partition_column_statistics(self, db_name, tbl_name, part_name, col_name, validWriteIdList): """ Parameters: - db_name - tbl_name - part_name - col_name + - validWriteIdList """ pass @@ -2321,20 +2342,22 @@ def recv_get_type_all(self): raise result.o2 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_type_all failed: unknown result") - def get_fields(self, db_name, table_name): + def get_fields(self, db_name, table_name, validWriteIdList): """ Parameters: - db_name - table_name + - validWriteIdList """ - self.send_get_fields(db_name, table_name) + self.send_get_fields(db_name, table_name, validWriteIdList) return self.recv_get_fields() - def send_get_fields(self, db_name, table_name): + def send_get_fields(self, db_name, table_name, validWriteIdList): self._oprot.writeMessageBegin('get_fields', TMessageType.CALL, self._seqid) args = get_fields_args() args.db_name = db_name args.table_name = table_name + args.validWriteIdList = validWriteIdList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -2360,22 +2383,24 @@ def recv_get_fields(self): raise result.o3 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_fields failed: unknown result") - def get_fields_with_environment_context(self, db_name, table_name, environment_context): + def get_fields_with_environment_context(self, db_name, table_name, environment_context, validWriteIdList): """ Parameters: - db_name - table_name - environment_context + - validWriteIdList """ - self.send_get_fields_with_environment_context(db_name, table_name, environment_context) + self.send_get_fields_with_environment_context(db_name, table_name, environment_context, validWriteIdList) return self.recv_get_fields_with_environment_context() - def send_get_fields_with_environment_context(self, db_name, table_name, environment_context): + def send_get_fields_with_environment_context(self, db_name, table_name, environment_context, validWriteIdList): self._oprot.writeMessageBegin('get_fields_with_environment_context', TMessageType.CALL, self._seqid) args = get_fields_with_environment_context_args() args.db_name = db_name args.table_name = table_name args.environment_context = environment_context + args.validWriteIdList = validWriteIdList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -2401,20 +2426,22 @@ def recv_get_fields_with_environment_context(self): raise result.o3 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_fields_with_environment_context failed: unknown result") - def get_schema(self, db_name, table_name): + def get_schema(self, db_name, table_name, validWriteIdList): """ Parameters: - db_name - table_name + - validWriteIdList """ - self.send_get_schema(db_name, table_name) + self.send_get_schema(db_name, table_name, validWriteIdList) return self.recv_get_schema() - def send_get_schema(self, db_name, table_name): + def send_get_schema(self, db_name, table_name, validWriteIdList): self._oprot.writeMessageBegin('get_schema', TMessageType.CALL, self._seqid) args = get_schema_args() args.db_name = db_name args.table_name = table_name + args.validWriteIdList = validWriteIdList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -2440,22 +2467,24 @@ def recv_get_schema(self): raise result.o3 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_schema 
failed: unknown result") - def get_schema_with_environment_context(self, db_name, table_name, environment_context): + def get_schema_with_environment_context(self, db_name, table_name, environment_context, validWriteIdList): """ Parameters: - db_name - table_name - environment_context + - validWriteIdList """ - self.send_get_schema_with_environment_context(db_name, table_name, environment_context) + self.send_get_schema_with_environment_context(db_name, table_name, environment_context, validWriteIdList) return self.recv_get_schema_with_environment_context() - def send_get_schema_with_environment_context(self, db_name, table_name, environment_context): + def send_get_schema_with_environment_context(self, db_name, table_name, environment_context, validWriteIdList): self._oprot.writeMessageBegin('get_schema_with_environment_context', TMessageType.CALL, self._seqid) args = get_schema_with_environment_context_args() args.db_name = db_name args.table_name = table_name args.environment_context = environment_context + args.validWriteIdList = validWriteIdList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -3221,20 +3250,22 @@ def recv_get_all_tables(self): raise result.o1 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_all_tables failed: unknown result") - def get_table(self, dbname, tbl_name): + def get_table(self, dbname, tbl_name, validWriteIdList): """ Parameters: - dbname - tbl_name + - validWriteIdList """ - self.send_get_table(dbname, tbl_name) + self.send_get_table(dbname, tbl_name, validWriteIdList) return self.recv_get_table() - def send_get_table(self, dbname, tbl_name): + def send_get_table(self, dbname, tbl_name, validWriteIdList): self._oprot.writeMessageBegin('get_table', TMessageType.CALL, self._seqid) args = get_table_args() args.dbname = dbname args.tbl_name = tbl_name + args.validWriteIdList = validWriteIdList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -4225,22 +4256,24 @@ def recv_drop_partitions_req(self): raise result.o2 raise TApplicationException(TApplicationException.MISSING_RESULT, "drop_partitions_req failed: unknown result") - def get_partition(self, db_name, tbl_name, part_vals): + def get_partition(self, db_name, tbl_name, part_vals, validTxnList): """ Parameters: - db_name - tbl_name - part_vals + - validTxnList """ - self.send_get_partition(db_name, tbl_name, part_vals) + self.send_get_partition(db_name, tbl_name, part_vals, validTxnList) return self.recv_get_partition() - def send_get_partition(self, db_name, tbl_name, part_vals): + def send_get_partition(self, db_name, tbl_name, part_vals, validTxnList): self._oprot.writeMessageBegin('get_partition', TMessageType.CALL, self._seqid) args = get_partition_args() args.db_name = db_name args.tbl_name = tbl_name args.part_vals = part_vals + args.validTxnList = validTxnList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -4358,7 +4391,7 @@ def recv_exchange_partitions(self): raise result.o4 raise TApplicationException(TApplicationException.MISSING_RESULT, "exchange_partitions failed: unknown result") - def get_partition_with_auth(self, db_name, tbl_name, part_vals, user_name, group_names): + def get_partition_with_auth(self, db_name, tbl_name, part_vals, user_name, group_names, validTxnList): """ Parameters: - db_name @@ -4366,11 +4399,12 @@ def get_partition_with_auth(self, db_name, tbl_name, part_vals, user_name, group - part_vals - user_name - group_names + - validTxnList """ - 
self.send_get_partition_with_auth(db_name, tbl_name, part_vals, user_name, group_names) + self.send_get_partition_with_auth(db_name, tbl_name, part_vals, user_name, group_names, validTxnList) return self.recv_get_partition_with_auth() - def send_get_partition_with_auth(self, db_name, tbl_name, part_vals, user_name, group_names): + def send_get_partition_with_auth(self, db_name, tbl_name, part_vals, user_name, group_names, validTxnList): self._oprot.writeMessageBegin('get_partition_with_auth', TMessageType.CALL, self._seqid) args = get_partition_with_auth_args() args.db_name = db_name @@ -4378,6 +4412,7 @@ def send_get_partition_with_auth(self, db_name, tbl_name, part_vals, user_name, args.part_vals = part_vals args.user_name = user_name args.group_names = group_names + args.validTxnList = validTxnList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -4401,22 +4436,24 @@ def recv_get_partition_with_auth(self): raise result.o2 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partition_with_auth failed: unknown result") - def get_partition_by_name(self, db_name, tbl_name, part_name): + def get_partition_by_name(self, db_name, tbl_name, part_name, validTxnList): """ Parameters: - db_name - tbl_name - part_name + - validTxnList """ - self.send_get_partition_by_name(db_name, tbl_name, part_name) + self.send_get_partition_by_name(db_name, tbl_name, part_name, validTxnList) return self.recv_get_partition_by_name() - def send_get_partition_by_name(self, db_name, tbl_name, part_name): + def send_get_partition_by_name(self, db_name, tbl_name, part_name, validTxnList): self._oprot.writeMessageBegin('get_partition_by_name', TMessageType.CALL, self._seqid) args = get_partition_by_name_args() args.db_name = db_name args.tbl_name = tbl_name args.part_name = part_name + args.validTxnList = validTxnList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -4440,22 +4477,24 @@ def recv_get_partition_by_name(self): raise result.o2 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partition_by_name failed: unknown result") - def get_partitions(self, db_name, tbl_name, max_parts): + def get_partitions(self, db_name, tbl_name, max_parts, validTxnList): """ Parameters: - db_name - tbl_name - max_parts + - validTxnList """ - self.send_get_partitions(db_name, tbl_name, max_parts) + self.send_get_partitions(db_name, tbl_name, max_parts, validTxnList) return self.recv_get_partitions() - def send_get_partitions(self, db_name, tbl_name, max_parts): + def send_get_partitions(self, db_name, tbl_name, max_parts, validTxnList): self._oprot.writeMessageBegin('get_partitions', TMessageType.CALL, self._seqid) args = get_partitions_args() args.db_name = db_name args.tbl_name = tbl_name args.max_parts = max_parts + args.validTxnList = validTxnList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -4479,7 +4518,7 @@ def recv_get_partitions(self): raise result.o2 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partitions failed: unknown result") - def get_partitions_with_auth(self, db_name, tbl_name, max_parts, user_name, group_names): + def get_partitions_with_auth(self, db_name, tbl_name, max_parts, user_name, group_names, validTxnList): """ Parameters: - db_name @@ -4487,11 +4526,12 @@ def get_partitions_with_auth(self, db_name, tbl_name, max_parts, user_name, grou - max_parts - user_name - group_names + - validTxnList """ - 
self.send_get_partitions_with_auth(db_name, tbl_name, max_parts, user_name, group_names) + self.send_get_partitions_with_auth(db_name, tbl_name, max_parts, user_name, group_names, validTxnList) return self.recv_get_partitions_with_auth() - def send_get_partitions_with_auth(self, db_name, tbl_name, max_parts, user_name, group_names): + def send_get_partitions_with_auth(self, db_name, tbl_name, max_parts, user_name, group_names, validTxnList): self._oprot.writeMessageBegin('get_partitions_with_auth', TMessageType.CALL, self._seqid) args = get_partitions_with_auth_args() args.db_name = db_name @@ -4499,6 +4539,7 @@ def send_get_partitions_with_auth(self, db_name, tbl_name, max_parts, user_name, args.max_parts = max_parts args.user_name = user_name args.group_names = group_names + args.validTxnList = validTxnList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -4522,22 +4563,24 @@ def recv_get_partitions_with_auth(self): raise result.o2 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partitions_with_auth failed: unknown result") - def get_partitions_pspec(self, db_name, tbl_name, max_parts): + def get_partitions_pspec(self, db_name, tbl_name, max_parts, validTxnList): """ Parameters: - db_name - tbl_name - max_parts + - validTxnList """ - self.send_get_partitions_pspec(db_name, tbl_name, max_parts) + self.send_get_partitions_pspec(db_name, tbl_name, max_parts, validTxnList) return self.recv_get_partitions_pspec() - def send_get_partitions_pspec(self, db_name, tbl_name, max_parts): + def send_get_partitions_pspec(self, db_name, tbl_name, max_parts, validTxnList): self._oprot.writeMessageBegin('get_partitions_pspec', TMessageType.CALL, self._seqid) args = get_partitions_pspec_args() args.db_name = db_name args.tbl_name = tbl_name args.max_parts = max_parts + args.validTxnList = validTxnList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -4561,22 +4604,24 @@ def recv_get_partitions_pspec(self): raise result.o2 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partitions_pspec failed: unknown result") - def get_partition_names(self, db_name, tbl_name, max_parts): + def get_partition_names(self, db_name, tbl_name, max_parts, validTxnList): """ Parameters: - db_name - tbl_name - max_parts + - validTxnList """ - self.send_get_partition_names(db_name, tbl_name, max_parts) + self.send_get_partition_names(db_name, tbl_name, max_parts, validTxnList) return self.recv_get_partition_names() - def send_get_partition_names(self, db_name, tbl_name, max_parts): + def send_get_partition_names(self, db_name, tbl_name, max_parts, validTxnList): self._oprot.writeMessageBegin('get_partition_names', TMessageType.CALL, self._seqid) args = get_partition_names_args() args.db_name = db_name args.tbl_name = tbl_name args.max_parts = max_parts + args.validTxnList = validTxnList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -4635,24 +4680,26 @@ def recv_get_partition_values(self): raise result.o2 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partition_values failed: unknown result") - def get_partitions_ps(self, db_name, tbl_name, part_vals, max_parts): + def get_partitions_ps(self, db_name, tbl_name, part_vals, max_parts, validTxnList): """ Parameters: - db_name - tbl_name - part_vals - max_parts + - validTxnList """ - self.send_get_partitions_ps(db_name, tbl_name, part_vals, max_parts) + self.send_get_partitions_ps(db_name, tbl_name, part_vals, 
max_parts, validTxnList) return self.recv_get_partitions_ps() - def send_get_partitions_ps(self, db_name, tbl_name, part_vals, max_parts): + def send_get_partitions_ps(self, db_name, tbl_name, part_vals, max_parts, validTxnList): self._oprot.writeMessageBegin('get_partitions_ps', TMessageType.CALL, self._seqid) args = get_partitions_ps_args() args.db_name = db_name args.tbl_name = tbl_name args.part_vals = part_vals args.max_parts = max_parts + args.validTxnList = validTxnList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -4676,7 +4723,7 @@ def recv_get_partitions_ps(self): raise result.o2 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partitions_ps failed: unknown result") - def get_partitions_ps_with_auth(self, db_name, tbl_name, part_vals, max_parts, user_name, group_names): + def get_partitions_ps_with_auth(self, db_name, tbl_name, part_vals, max_parts, user_name, group_names, validTxnList): """ Parameters: - db_name @@ -4685,11 +4732,12 @@ def get_partitions_ps_with_auth(self, db_name, tbl_name, part_vals, max_parts, u - max_parts - user_name - group_names + - validTxnList """ - self.send_get_partitions_ps_with_auth(db_name, tbl_name, part_vals, max_parts, user_name, group_names) + self.send_get_partitions_ps_with_auth(db_name, tbl_name, part_vals, max_parts, user_name, group_names, validTxnList) return self.recv_get_partitions_ps_with_auth() - def send_get_partitions_ps_with_auth(self, db_name, tbl_name, part_vals, max_parts, user_name, group_names): + def send_get_partitions_ps_with_auth(self, db_name, tbl_name, part_vals, max_parts, user_name, group_names, validTxnList): self._oprot.writeMessageBegin('get_partitions_ps_with_auth', TMessageType.CALL, self._seqid) args = get_partitions_ps_with_auth_args() args.db_name = db_name @@ -4698,6 +4746,7 @@ def send_get_partitions_ps_with_auth(self, db_name, tbl_name, part_vals, max_par args.max_parts = max_parts args.user_name = user_name args.group_names = group_names + args.validTxnList = validTxnList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -4721,24 +4770,26 @@ def recv_get_partitions_ps_with_auth(self): raise result.o2 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partitions_ps_with_auth failed: unknown result") - def get_partition_names_ps(self, db_name, tbl_name, part_vals, max_parts): + def get_partition_names_ps(self, db_name, tbl_name, part_vals, max_parts, validTxnList): """ Parameters: - db_name - tbl_name - part_vals - max_parts + - validTxnList """ - self.send_get_partition_names_ps(db_name, tbl_name, part_vals, max_parts) + self.send_get_partition_names_ps(db_name, tbl_name, part_vals, max_parts, validTxnList) return self.recv_get_partition_names_ps() - def send_get_partition_names_ps(self, db_name, tbl_name, part_vals, max_parts): + def send_get_partition_names_ps(self, db_name, tbl_name, part_vals, max_parts, validTxnList): self._oprot.writeMessageBegin('get_partition_names_ps', TMessageType.CALL, self._seqid) args = get_partition_names_ps_args() args.db_name = db_name args.tbl_name = tbl_name args.part_vals = part_vals args.max_parts = max_parts + args.validTxnList = validTxnList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -4762,24 +4813,26 @@ def recv_get_partition_names_ps(self): raise result.o2 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partition_names_ps failed: unknown result") - def get_partitions_by_filter(self, db_name, 
tbl_name, filter, max_parts): + def get_partitions_by_filter(self, db_name, tbl_name, filter, max_parts, validTxnList): """ Parameters: - db_name - tbl_name - filter - max_parts + - validTxnList """ - self.send_get_partitions_by_filter(db_name, tbl_name, filter, max_parts) + self.send_get_partitions_by_filter(db_name, tbl_name, filter, max_parts, validTxnList) return self.recv_get_partitions_by_filter() - def send_get_partitions_by_filter(self, db_name, tbl_name, filter, max_parts): + def send_get_partitions_by_filter(self, db_name, tbl_name, filter, max_parts, validTxnList): self._oprot.writeMessageBegin('get_partitions_by_filter', TMessageType.CALL, self._seqid) args = get_partitions_by_filter_args() args.db_name = db_name args.tbl_name = tbl_name args.filter = filter args.max_parts = max_parts + args.validTxnList = validTxnList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -4803,24 +4856,26 @@ def recv_get_partitions_by_filter(self): raise result.o2 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partitions_by_filter failed: unknown result") - def get_part_specs_by_filter(self, db_name, tbl_name, filter, max_parts): + def get_part_specs_by_filter(self, db_name, tbl_name, filter, max_parts, validTxnList): """ Parameters: - db_name - tbl_name - filter - max_parts + - validTxnList """ - self.send_get_part_specs_by_filter(db_name, tbl_name, filter, max_parts) + self.send_get_part_specs_by_filter(db_name, tbl_name, filter, max_parts, validTxnList) return self.recv_get_part_specs_by_filter() - def send_get_part_specs_by_filter(self, db_name, tbl_name, filter, max_parts): + def send_get_part_specs_by_filter(self, db_name, tbl_name, filter, max_parts, validTxnList): self._oprot.writeMessageBegin('get_part_specs_by_filter', TMessageType.CALL, self._seqid) args = get_part_specs_by_filter_args() args.db_name = db_name args.tbl_name = tbl_name args.filter = filter args.max_parts = max_parts + args.validTxnList = validTxnList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -4879,22 +4934,24 @@ def recv_get_partitions_by_expr(self): raise result.o2 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partitions_by_expr failed: unknown result") - def get_num_partitions_by_filter(self, db_name, tbl_name, filter): + def get_num_partitions_by_filter(self, db_name, tbl_name, filter, validTxnList): """ Parameters: - db_name - tbl_name - filter + - validTxnList """ - self.send_get_num_partitions_by_filter(db_name, tbl_name, filter) + self.send_get_num_partitions_by_filter(db_name, tbl_name, filter, validTxnList) return self.recv_get_num_partitions_by_filter() - def send_get_num_partitions_by_filter(self, db_name, tbl_name, filter): + def send_get_num_partitions_by_filter(self, db_name, tbl_name, filter, validTxnList): self._oprot.writeMessageBegin('get_num_partitions_by_filter', TMessageType.CALL, self._seqid) args = get_num_partitions_by_filter_args() args.db_name = db_name args.tbl_name = tbl_name args.filter = filter + args.validTxnList = validTxnList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -4918,22 +4975,24 @@ def recv_get_num_partitions_by_filter(self): raise result.o2 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_num_partitions_by_filter failed: unknown result") - def get_partitions_by_names(self, db_name, tbl_name, names): + def get_partitions_by_names(self, db_name, tbl_name, names, validTxnList): """ Parameters: - 
db_name - tbl_name - names + - validTxnList """ - self.send_get_partitions_by_names(db_name, tbl_name, names) + self.send_get_partitions_by_names(db_name, tbl_name, names, validTxnList) return self.recv_get_partitions_by_names() - def send_get_partitions_by_names(self, db_name, tbl_name, names): + def send_get_partitions_by_names(self, db_name, tbl_name, names, validTxnList): self._oprot.writeMessageBegin('get_partitions_by_names', TMessageType.CALL, self._seqid) args = get_partitions_by_names_args() args.db_name = db_name args.tbl_name = tbl_name args.names = names + args.validTxnList = validTxnList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -5851,22 +5910,24 @@ def recv_update_partition_column_statistics_req(self): raise result.o4 raise TApplicationException(TApplicationException.MISSING_RESULT, "update_partition_column_statistics_req failed: unknown result") - def get_table_column_statistics(self, db_name, tbl_name, col_name): + def get_table_column_statistics(self, db_name, tbl_name, col_name, validWriteIdList): """ Parameters: - db_name - tbl_name - col_name + - validWriteIdList """ - self.send_get_table_column_statistics(db_name, tbl_name, col_name) + self.send_get_table_column_statistics(db_name, tbl_name, col_name, validWriteIdList) return self.recv_get_table_column_statistics() - def send_get_table_column_statistics(self, db_name, tbl_name, col_name): + def send_get_table_column_statistics(self, db_name, tbl_name, col_name, validWriteIdList): self._oprot.writeMessageBegin('get_table_column_statistics', TMessageType.CALL, self._seqid) args = get_table_column_statistics_args() args.db_name = db_name args.tbl_name = tbl_name args.col_name = col_name + args.validWriteIdList = validWriteIdList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -5894,24 +5955,26 @@ def recv_get_table_column_statistics(self): raise result.o4 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_table_column_statistics failed: unknown result") - def get_partition_column_statistics(self, db_name, tbl_name, part_name, col_name): + def get_partition_column_statistics(self, db_name, tbl_name, part_name, col_name, validWriteIdList): """ Parameters: - db_name - tbl_name - part_name - col_name + - validWriteIdList """ - self.send_get_partition_column_statistics(db_name, tbl_name, part_name, col_name) + self.send_get_partition_column_statistics(db_name, tbl_name, part_name, col_name, validWriteIdList) return self.recv_get_partition_column_statistics() - def send_get_partition_column_statistics(self, db_name, tbl_name, part_name, col_name): + def send_get_partition_column_statistics(self, db_name, tbl_name, part_name, col_name, validWriteIdList): self._oprot.writeMessageBegin('get_partition_column_statistics', TMessageType.CALL, self._seqid) args = get_partition_column_statistics_args() args.db_name = db_name args.tbl_name = tbl_name args.part_name = part_name args.col_name = col_name + args.validWriteIdList = validWriteIdList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -10441,7 +10504,7 @@ def process_get_fields(self, seqid, iprot, oprot): iprot.readMessageEnd() result = get_fields_result() try: - result.success = self._handler.get_fields(args.db_name, args.table_name) + result.success = self._handler.get_fields(args.db_name, args.table_name, args.validWriteIdList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -10469,7 
+10532,7 @@ def process_get_fields_with_environment_context(self, seqid, iprot, oprot): iprot.readMessageEnd() result = get_fields_with_environment_context_result() try: - result.success = self._handler.get_fields_with_environment_context(args.db_name, args.table_name, args.environment_context) + result.success = self._handler.get_fields_with_environment_context(args.db_name, args.table_name, args.environment_context, args.validWriteIdList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -10497,7 +10560,7 @@ def process_get_schema(self, seqid, iprot, oprot): iprot.readMessageEnd() result = get_schema_result() try: - result.success = self._handler.get_schema(args.db_name, args.table_name) + result.success = self._handler.get_schema(args.db_name, args.table_name, args.validWriteIdList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -10525,7 +10588,7 @@ def process_get_schema_with_environment_context(self, seqid, iprot, oprot): iprot.readMessageEnd() result = get_schema_with_environment_context_result() try: - result.success = self._handler.get_schema_with_environment_context(args.db_name, args.table_name, args.environment_context) + result.success = self._handler.get_schema_with_environment_context(args.db_name, args.table_name, args.environment_context, args.validWriteIdList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -11078,7 +11141,7 @@ def process_get_table(self, seqid, iprot, oprot): iprot.readMessageEnd() result = get_table_result() try: - result.success = self._handler.get_table(args.dbname, args.tbl_name) + result.success = self._handler.get_table(args.dbname, args.tbl_name, args.validWriteIdList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -11758,7 +11821,7 @@ def process_get_partition(self, seqid, iprot, oprot): iprot.readMessageEnd() result = get_partition_result() try: - result.success = self._handler.get_partition(args.db_name, args.tbl_name, args.part_vals) + result.success = self._handler.get_partition(args.db_name, args.tbl_name, args.part_vals, args.validTxnList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -11845,7 +11908,7 @@ def process_get_partition_with_auth(self, seqid, iprot, oprot): iprot.readMessageEnd() result = get_partition_with_auth_result() try: - result.success = self._handler.get_partition_with_auth(args.db_name, args.tbl_name, args.part_vals, args.user_name, args.group_names) + result.success = self._handler.get_partition_with_auth(args.db_name, args.tbl_name, args.part_vals, args.user_name, args.group_names, args.validTxnList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -11870,7 +11933,7 @@ def process_get_partition_by_name(self, seqid, iprot, oprot): iprot.readMessageEnd() result = get_partition_by_name_result() try: - result.success = self._handler.get_partition_by_name(args.db_name, args.tbl_name, args.part_name) + result.success = self._handler.get_partition_by_name(args.db_name, args.tbl_name, args.part_name, args.validTxnList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -11895,7 +11958,7 @@ def process_get_partitions(self, seqid, iprot, oprot): iprot.readMessageEnd() result = 
get_partitions_result() try: - result.success = self._handler.get_partitions(args.db_name, args.tbl_name, args.max_parts) + result.success = self._handler.get_partitions(args.db_name, args.tbl_name, args.max_parts, args.validTxnList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -11920,7 +11983,7 @@ def process_get_partitions_with_auth(self, seqid, iprot, oprot): iprot.readMessageEnd() result = get_partitions_with_auth_result() try: - result.success = self._handler.get_partitions_with_auth(args.db_name, args.tbl_name, args.max_parts, args.user_name, args.group_names) + result.success = self._handler.get_partitions_with_auth(args.db_name, args.tbl_name, args.max_parts, args.user_name, args.group_names, args.validTxnList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -11945,7 +12008,7 @@ def process_get_partitions_pspec(self, seqid, iprot, oprot): iprot.readMessageEnd() result = get_partitions_pspec_result() try: - result.success = self._handler.get_partitions_pspec(args.db_name, args.tbl_name, args.max_parts) + result.success = self._handler.get_partitions_pspec(args.db_name, args.tbl_name, args.max_parts, args.validTxnList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -11970,7 +12033,7 @@ def process_get_partition_names(self, seqid, iprot, oprot): iprot.readMessageEnd() result = get_partition_names_result() try: - result.success = self._handler.get_partition_names(args.db_name, args.tbl_name, args.max_parts) + result.success = self._handler.get_partition_names(args.db_name, args.tbl_name, args.max_parts, args.validTxnList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -12020,7 +12083,7 @@ def process_get_partitions_ps(self, seqid, iprot, oprot): iprot.readMessageEnd() result = get_partitions_ps_result() try: - result.success = self._handler.get_partitions_ps(args.db_name, args.tbl_name, args.part_vals, args.max_parts) + result.success = self._handler.get_partitions_ps(args.db_name, args.tbl_name, args.part_vals, args.max_parts, args.validTxnList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -12045,7 +12108,7 @@ def process_get_partitions_ps_with_auth(self, seqid, iprot, oprot): iprot.readMessageEnd() result = get_partitions_ps_with_auth_result() try: - result.success = self._handler.get_partitions_ps_with_auth(args.db_name, args.tbl_name, args.part_vals, args.max_parts, args.user_name, args.group_names) + result.success = self._handler.get_partitions_ps_with_auth(args.db_name, args.tbl_name, args.part_vals, args.max_parts, args.user_name, args.group_names, args.validTxnList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -12070,7 +12133,7 @@ def process_get_partition_names_ps(self, seqid, iprot, oprot): iprot.readMessageEnd() result = get_partition_names_ps_result() try: - result.success = self._handler.get_partition_names_ps(args.db_name, args.tbl_name, args.part_vals, args.max_parts) + result.success = self._handler.get_partition_names_ps(args.db_name, args.tbl_name, args.part_vals, args.max_parts, args.validTxnList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -12095,7 +12158,7 @@ def process_get_partitions_by_filter(self, seqid, iprot, 
oprot): iprot.readMessageEnd() result = get_partitions_by_filter_result() try: - result.success = self._handler.get_partitions_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts) + result.success = self._handler.get_partitions_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts, args.validTxnList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -12120,7 +12183,7 @@ def process_get_part_specs_by_filter(self, seqid, iprot, oprot): iprot.readMessageEnd() result = get_part_specs_by_filter_result() try: - result.success = self._handler.get_part_specs_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts) + result.success = self._handler.get_part_specs_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts, args.validTxnList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -12170,7 +12233,7 @@ def process_get_num_partitions_by_filter(self, seqid, iprot, oprot): iprot.readMessageEnd() result = get_num_partitions_by_filter_result() try: - result.success = self._handler.get_num_partitions_by_filter(args.db_name, args.tbl_name, args.filter) + result.success = self._handler.get_num_partitions_by_filter(args.db_name, args.tbl_name, args.filter, args.validTxnList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -12195,7 +12258,7 @@ def process_get_partitions_by_names(self, seqid, iprot, oprot): iprot.readMessageEnd() result = get_partitions_by_names_result() try: - result.success = self._handler.get_partitions_by_names(args.db_name, args.tbl_name, args.names) + result.success = self._handler.get_partitions_by_names(args.db_name, args.tbl_name, args.names, args.validTxnList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -12856,7 +12919,7 @@ def process_get_table_column_statistics(self, seqid, iprot, oprot): iprot.readMessageEnd() result = get_table_column_statistics_result() try: - result.success = self._handler.get_table_column_statistics(args.db_name, args.tbl_name, args.col_name) + result.success = self._handler.get_table_column_statistics(args.db_name, args.tbl_name, args.col_name, args.validWriteIdList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -12887,7 +12950,7 @@ def process_get_partition_column_statistics(self, seqid, iprot, oprot): iprot.readMessageEnd() result = get_partition_column_statistics_result() try: - result.success = self._handler.get_partition_column_statistics(args.db_name, args.tbl_name, args.part_name, args.col_name) + result.success = self._handler.get_partition_column_statistics(args.db_name, args.tbl_name, args.part_name, args.col_name, args.validWriteIdList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -17214,10 +17277,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1022, _size1019) = iprot.readListBegin() - for _i1023 in xrange(_size1019): - _elem1024 = iprot.readString() - self.success.append(_elem1024) + (_etype1029, _size1026) = iprot.readListBegin() + for _i1030 in xrange(_size1026): + _elem1031 = iprot.readString() + self.success.append(_elem1031) iprot.readListEnd() else: iprot.skip(ftype) @@ -17240,8 +17303,8 @@ def write(self, oprot): if self.success is not None: 
oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1025 in self.success: - oprot.writeString(iter1025) + for iter1032 in self.success: + oprot.writeString(iter1032) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -17346,10 +17409,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1029, _size1026) = iprot.readListBegin() - for _i1030 in xrange(_size1026): - _elem1031 = iprot.readString() - self.success.append(_elem1031) + (_etype1036, _size1033) = iprot.readListBegin() + for _i1037 in xrange(_size1033): + _elem1038 = iprot.readString() + self.success.append(_elem1038) iprot.readListEnd() else: iprot.skip(ftype) @@ -17372,8 +17435,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1032 in self.success: - oprot.writeString(iter1032) + for iter1039 in self.success: + oprot.writeString(iter1039) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -18143,12 +18206,12 @@ def read(self, iprot): if fid == 0: if ftype == TType.MAP: self.success = {} - (_ktype1034, _vtype1035, _size1033 ) = iprot.readMapBegin() - for _i1037 in xrange(_size1033): - _key1038 = iprot.readString() - _val1039 = Type() - _val1039.read(iprot) - self.success[_key1038] = _val1039 + (_ktype1041, _vtype1042, _size1040 ) = iprot.readMapBegin() + for _i1044 in xrange(_size1040): + _key1045 = iprot.readString() + _val1046 = Type() + _val1046.read(iprot) + self.success[_key1045] = _val1046 iprot.readMapEnd() else: iprot.skip(ftype) @@ -18171,9 +18234,9 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.MAP, 0) oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.success)) - for kiter1040,viter1041 in self.success.items(): - oprot.writeString(kiter1040) - viter1041.write(oprot) + for kiter1047,viter1048 in self.success.items(): + oprot.writeString(kiter1047) + viter1048.write(oprot) oprot.writeMapEnd() oprot.writeFieldEnd() if self.o2 is not None: @@ -18209,17 +18272,20 @@ class get_fields_args: Attributes: - db_name - table_name + - validWriteIdList """ thrift_spec = ( None, # 0 (1, TType.STRING, 'db_name', None, None, ), # 1 (2, TType.STRING, 'table_name', None, None, ), # 2 + (3, TType.STRING, 'validWriteIdList', None, None, ), # 3 ) - def __init__(self, db_name=None, table_name=None,): + def __init__(self, db_name=None, table_name=None, validWriteIdList=None,): self.db_name = db_name self.table_name = table_name + self.validWriteIdList = validWriteIdList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -18240,6 +18306,11 @@ def read(self, iprot): self.table_name = iprot.readString() else: iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -18258,6 +18329,10 @@ def write(self, oprot): oprot.writeFieldBegin('table_name', TType.STRING, 2) oprot.writeString(self.table_name) oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 3) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -18269,6 +18344,7 @@ def 
__hash__(self): value = 17 value = (value * 31) ^ hash(self.db_name) value = (value * 31) ^ hash(self.table_name) + value = (value * 31) ^ hash(self.validWriteIdList) return value def __repr__(self): @@ -18316,11 +18392,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1045, _size1042) = iprot.readListBegin() - for _i1046 in xrange(_size1042): - _elem1047 = FieldSchema() - _elem1047.read(iprot) - self.success.append(_elem1047) + (_etype1052, _size1049) = iprot.readListBegin() + for _i1053 in xrange(_size1049): + _elem1054 = FieldSchema() + _elem1054.read(iprot) + self.success.append(_elem1054) iprot.readListEnd() else: iprot.skip(ftype) @@ -18355,8 +18431,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1048 in self.success: - iter1048.write(oprot) + for iter1055 in self.success: + iter1055.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -18403,6 +18479,7 @@ class get_fields_with_environment_context_args: - db_name - table_name - environment_context + - validWriteIdList """ thrift_spec = ( @@ -18410,12 +18487,14 @@ class get_fields_with_environment_context_args: (1, TType.STRING, 'db_name', None, None, ), # 1 (2, TType.STRING, 'table_name', None, None, ), # 2 (3, TType.STRUCT, 'environment_context', (EnvironmentContext, EnvironmentContext.thrift_spec), None, ), # 3 + (4, TType.STRING, 'validWriteIdList', None, None, ), # 4 ) - def __init__(self, db_name=None, table_name=None, environment_context=None,): + def __init__(self, db_name=None, table_name=None, environment_context=None, validWriteIdList=None,): self.db_name = db_name self.table_name = table_name self.environment_context = environment_context + self.validWriteIdList = validWriteIdList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -18442,6 +18521,11 @@ def read(self, iprot): self.environment_context.read(iprot) else: iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -18464,6 +18548,10 @@ def write(self, oprot): oprot.writeFieldBegin('environment_context', TType.STRUCT, 3) self.environment_context.write(oprot) oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 4) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -18476,6 +18564,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.db_name) value = (value * 31) ^ hash(self.table_name) value = (value * 31) ^ hash(self.environment_context) + value = (value * 31) ^ hash(self.validWriteIdList) return value def __repr__(self): @@ -18523,11 +18612,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1052, _size1049) = iprot.readListBegin() - for _i1053 in xrange(_size1049): - _elem1054 = FieldSchema() - _elem1054.read(iprot) - self.success.append(_elem1054) + (_etype1059, _size1056) = iprot.readListBegin() + for _i1060 in xrange(_size1056): + _elem1061 = FieldSchema() + _elem1061.read(iprot) + self.success.append(_elem1061) iprot.readListEnd() else: iprot.skip(ftype) @@ -18562,8 +18651,8 @@ def write(self, oprot): if 
self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1055 in self.success: - iter1055.write(oprot) + for iter1062 in self.success: + iter1062.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -18609,17 +18698,20 @@ class get_schema_args: Attributes: - db_name - table_name + - validWriteIdList """ thrift_spec = ( None, # 0 (1, TType.STRING, 'db_name', None, None, ), # 1 (2, TType.STRING, 'table_name', None, None, ), # 2 + (3, TType.STRING, 'validWriteIdList', None, None, ), # 3 ) - def __init__(self, db_name=None, table_name=None,): + def __init__(self, db_name=None, table_name=None, validWriteIdList=None,): self.db_name = db_name self.table_name = table_name + self.validWriteIdList = validWriteIdList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -18640,6 +18732,11 @@ def read(self, iprot): self.table_name = iprot.readString() else: iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -18658,6 +18755,10 @@ def write(self, oprot): oprot.writeFieldBegin('table_name', TType.STRING, 2) oprot.writeString(self.table_name) oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 3) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -18669,6 +18770,7 @@ def __hash__(self): value = 17 value = (value * 31) ^ hash(self.db_name) value = (value * 31) ^ hash(self.table_name) + value = (value * 31) ^ hash(self.validWriteIdList) return value def __repr__(self): @@ -18716,11 +18818,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1059, _size1056) = iprot.readListBegin() - for _i1060 in xrange(_size1056): - _elem1061 = FieldSchema() - _elem1061.read(iprot) - self.success.append(_elem1061) + (_etype1066, _size1063) = iprot.readListBegin() + for _i1067 in xrange(_size1063): + _elem1068 = FieldSchema() + _elem1068.read(iprot) + self.success.append(_elem1068) iprot.readListEnd() else: iprot.skip(ftype) @@ -18755,8 +18857,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1062 in self.success: - iter1062.write(oprot) + for iter1069 in self.success: + iter1069.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -18803,6 +18905,7 @@ class get_schema_with_environment_context_args: - db_name - table_name - environment_context + - validWriteIdList """ thrift_spec = ( @@ -18810,12 +18913,14 @@ class get_schema_with_environment_context_args: (1, TType.STRING, 'db_name', None, None, ), # 1 (2, TType.STRING, 'table_name', None, None, ), # 2 (3, TType.STRUCT, 'environment_context', (EnvironmentContext, EnvironmentContext.thrift_spec), None, ), # 3 + (4, TType.STRING, 'validWriteIdList', None, None, ), # 4 ) - def __init__(self, db_name=None, table_name=None, environment_context=None,): + def __init__(self, db_name=None, table_name=None, environment_context=None, validWriteIdList=None,): self.db_name = db_name self.table_name = table_name self.environment_context = environment_context + 
self.validWriteIdList = validWriteIdList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -18842,6 +18947,11 @@ def read(self, iprot): self.environment_context.read(iprot) else: iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -18864,6 +18974,10 @@ def write(self, oprot): oprot.writeFieldBegin('environment_context', TType.STRUCT, 3) self.environment_context.write(oprot) oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 4) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -18876,6 +18990,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.db_name) value = (value * 31) ^ hash(self.table_name) value = (value * 31) ^ hash(self.environment_context) + value = (value * 31) ^ hash(self.validWriteIdList) return value def __repr__(self): @@ -18923,11 +19038,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1066, _size1063) = iprot.readListBegin() - for _i1067 in xrange(_size1063): - _elem1068 = FieldSchema() - _elem1068.read(iprot) - self.success.append(_elem1068) + (_etype1073, _size1070) = iprot.readListBegin() + for _i1074 in xrange(_size1070): + _elem1075 = FieldSchema() + _elem1075.read(iprot) + self.success.append(_elem1075) iprot.readListEnd() else: iprot.skip(ftype) @@ -18962,8 +19077,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1069 in self.success: - iter1069.write(oprot) + for iter1076 in self.success: + iter1076.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -19416,66 +19531,66 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.primaryKeys = [] - (_etype1073, _size1070) = iprot.readListBegin() - for _i1074 in xrange(_size1070): - _elem1075 = SQLPrimaryKey() - _elem1075.read(iprot) - self.primaryKeys.append(_elem1075) + (_etype1080, _size1077) = iprot.readListBegin() + for _i1081 in xrange(_size1077): + _elem1082 = SQLPrimaryKey() + _elem1082.read(iprot) + self.primaryKeys.append(_elem1082) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.LIST: self.foreignKeys = [] - (_etype1079, _size1076) = iprot.readListBegin() - for _i1080 in xrange(_size1076): - _elem1081 = SQLForeignKey() - _elem1081.read(iprot) - self.foreignKeys.append(_elem1081) + (_etype1086, _size1083) = iprot.readListBegin() + for _i1087 in xrange(_size1083): + _elem1088 = SQLForeignKey() + _elem1088.read(iprot) + self.foreignKeys.append(_elem1088) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 4: if ftype == TType.LIST: self.uniqueConstraints = [] - (_etype1085, _size1082) = iprot.readListBegin() - for _i1086 in xrange(_size1082): - _elem1087 = SQLUniqueConstraint() - _elem1087.read(iprot) - self.uniqueConstraints.append(_elem1087) + (_etype1092, _size1089) = iprot.readListBegin() + for _i1093 in xrange(_size1089): + _elem1094 = SQLUniqueConstraint() + _elem1094.read(iprot) + self.uniqueConstraints.append(_elem1094) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 5: if ftype == TType.LIST: self.notNullConstraints = [] - 
(_etype1091, _size1088) = iprot.readListBegin() - for _i1092 in xrange(_size1088): - _elem1093 = SQLNotNullConstraint() - _elem1093.read(iprot) - self.notNullConstraints.append(_elem1093) + (_etype1098, _size1095) = iprot.readListBegin() + for _i1099 in xrange(_size1095): + _elem1100 = SQLNotNullConstraint() + _elem1100.read(iprot) + self.notNullConstraints.append(_elem1100) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 6: if ftype == TType.LIST: self.defaultConstraints = [] - (_etype1097, _size1094) = iprot.readListBegin() - for _i1098 in xrange(_size1094): - _elem1099 = SQLDefaultConstraint() - _elem1099.read(iprot) - self.defaultConstraints.append(_elem1099) + (_etype1104, _size1101) = iprot.readListBegin() + for _i1105 in xrange(_size1101): + _elem1106 = SQLDefaultConstraint() + _elem1106.read(iprot) + self.defaultConstraints.append(_elem1106) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 7: if ftype == TType.LIST: self.checkConstraints = [] - (_etype1103, _size1100) = iprot.readListBegin() - for _i1104 in xrange(_size1100): - _elem1105 = SQLCheckConstraint() - _elem1105.read(iprot) - self.checkConstraints.append(_elem1105) + (_etype1110, _size1107) = iprot.readListBegin() + for _i1111 in xrange(_size1107): + _elem1112 = SQLCheckConstraint() + _elem1112.read(iprot) + self.checkConstraints.append(_elem1112) iprot.readListEnd() else: iprot.skip(ftype) @@ -19496,43 +19611,43 @@ def write(self, oprot): if self.primaryKeys is not None: oprot.writeFieldBegin('primaryKeys', TType.LIST, 2) oprot.writeListBegin(TType.STRUCT, len(self.primaryKeys)) - for iter1106 in self.primaryKeys: - iter1106.write(oprot) + for iter1113 in self.primaryKeys: + iter1113.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.foreignKeys is not None: oprot.writeFieldBegin('foreignKeys', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.foreignKeys)) - for iter1107 in self.foreignKeys: - iter1107.write(oprot) + for iter1114 in self.foreignKeys: + iter1114.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.uniqueConstraints is not None: oprot.writeFieldBegin('uniqueConstraints', TType.LIST, 4) oprot.writeListBegin(TType.STRUCT, len(self.uniqueConstraints)) - for iter1108 in self.uniqueConstraints: - iter1108.write(oprot) + for iter1115 in self.uniqueConstraints: + iter1115.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.notNullConstraints is not None: oprot.writeFieldBegin('notNullConstraints', TType.LIST, 5) oprot.writeListBegin(TType.STRUCT, len(self.notNullConstraints)) - for iter1109 in self.notNullConstraints: - iter1109.write(oprot) + for iter1116 in self.notNullConstraints: + iter1116.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.defaultConstraints is not None: oprot.writeFieldBegin('defaultConstraints', TType.LIST, 6) oprot.writeListBegin(TType.STRUCT, len(self.defaultConstraints)) - for iter1110 in self.defaultConstraints: - iter1110.write(oprot) + for iter1117 in self.defaultConstraints: + iter1117.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.checkConstraints is not None: oprot.writeFieldBegin('checkConstraints', TType.LIST, 7) oprot.writeListBegin(TType.STRUCT, len(self.checkConstraints)) - for iter1111 in self.checkConstraints: - iter1111.write(oprot) + for iter1118 in self.checkConstraints: + iter1118.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -21266,10 +21381,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.partNames = [] - (_etype1115, 
_size1112) = iprot.readListBegin() - for _i1116 in xrange(_size1112): - _elem1117 = iprot.readString() - self.partNames.append(_elem1117) + (_etype1122, _size1119) = iprot.readListBegin() + for _i1123 in xrange(_size1119): + _elem1124 = iprot.readString() + self.partNames.append(_elem1124) iprot.readListEnd() else: iprot.skip(ftype) @@ -21294,8 +21409,8 @@ def write(self, oprot): if self.partNames is not None: oprot.writeFieldBegin('partNames', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.partNames)) - for iter1118 in self.partNames: - oprot.writeString(iter1118) + for iter1125 in self.partNames: + oprot.writeString(iter1125) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -21640,10 +21755,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1122, _size1119) = iprot.readListBegin() - for _i1123 in xrange(_size1119): - _elem1124 = iprot.readString() - self.success.append(_elem1124) + (_etype1129, _size1126) = iprot.readListBegin() + for _i1130 in xrange(_size1126): + _elem1131 = iprot.readString() + self.success.append(_elem1131) iprot.readListEnd() else: iprot.skip(ftype) @@ -21666,8 +21781,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1125 in self.success: - oprot.writeString(iter1125) + for iter1132 in self.success: + oprot.writeString(iter1132) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -21817,10 +21932,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1129, _size1126) = iprot.readListBegin() - for _i1130 in xrange(_size1126): - _elem1131 = iprot.readString() - self.success.append(_elem1131) + (_etype1136, _size1133) = iprot.readListBegin() + for _i1137 in xrange(_size1133): + _elem1138 = iprot.readString() + self.success.append(_elem1138) iprot.readListEnd() else: iprot.skip(ftype) @@ -21843,8 +21958,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1132 in self.success: - oprot.writeString(iter1132) + for iter1139 in self.success: + oprot.writeString(iter1139) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -21949,11 +22064,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1136, _size1133) = iprot.readListBegin() - for _i1137 in xrange(_size1133): - _elem1138 = Table() - _elem1138.read(iprot) - self.success.append(_elem1138) + (_etype1143, _size1140) = iprot.readListBegin() + for _i1144 in xrange(_size1140): + _elem1145 = Table() + _elem1145.read(iprot) + self.success.append(_elem1145) iprot.readListEnd() else: iprot.skip(ftype) @@ -21976,8 +22091,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1139 in self.success: - iter1139.write(oprot) + for iter1146 in self.success: + iter1146.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -22101,10 +22216,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1143, _size1140) = iprot.readListBegin() - for _i1144 in xrange(_size1140): - _elem1145 = iprot.readString() - self.success.append(_elem1145) + (_etype1150, _size1147) = iprot.readListBegin() + for _i1151 in xrange(_size1147): + _elem1152 = 
iprot.readString() + self.success.append(_elem1152) iprot.readListEnd() else: iprot.skip(ftype) @@ -22127,8 +22242,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1146 in self.success: - oprot.writeString(iter1146) + for iter1153 in self.success: + oprot.writeString(iter1153) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -22201,10 +22316,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.tbl_types = [] - (_etype1150, _size1147) = iprot.readListBegin() - for _i1151 in xrange(_size1147): - _elem1152 = iprot.readString() - self.tbl_types.append(_elem1152) + (_etype1157, _size1154) = iprot.readListBegin() + for _i1158 in xrange(_size1154): + _elem1159 = iprot.readString() + self.tbl_types.append(_elem1159) iprot.readListEnd() else: iprot.skip(ftype) @@ -22229,8 +22344,8 @@ def write(self, oprot): if self.tbl_types is not None: oprot.writeFieldBegin('tbl_types', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.tbl_types)) - for iter1153 in self.tbl_types: - oprot.writeString(iter1153) + for iter1160 in self.tbl_types: + oprot.writeString(iter1160) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -22286,11 +22401,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1157, _size1154) = iprot.readListBegin() - for _i1158 in xrange(_size1154): - _elem1159 = TableMeta() - _elem1159.read(iprot) - self.success.append(_elem1159) + (_etype1164, _size1161) = iprot.readListBegin() + for _i1165 in xrange(_size1161): + _elem1166 = TableMeta() + _elem1166.read(iprot) + self.success.append(_elem1166) iprot.readListEnd() else: iprot.skip(ftype) @@ -22313,8 +22428,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1160 in self.success: - iter1160.write(oprot) + for iter1167 in self.success: + iter1167.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -22438,10 +22553,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1164, _size1161) = iprot.readListBegin() - for _i1165 in xrange(_size1161): - _elem1166 = iprot.readString() - self.success.append(_elem1166) + (_etype1171, _size1168) = iprot.readListBegin() + for _i1172 in xrange(_size1168): + _elem1173 = iprot.readString() + self.success.append(_elem1173) iprot.readListEnd() else: iprot.skip(ftype) @@ -22464,8 +22579,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1167 in self.success: - oprot.writeString(iter1167) + for iter1174 in self.success: + oprot.writeString(iter1174) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -22501,17 +22616,20 @@ class get_table_args: Attributes: - dbname - tbl_name + - validWriteIdList """ thrift_spec = ( None, # 0 (1, TType.STRING, 'dbname', None, None, ), # 1 (2, TType.STRING, 'tbl_name', None, None, ), # 2 + (3, TType.STRING, 'validWriteIdList', None, None, ), # 3 ) - def __init__(self, dbname=None, tbl_name=None,): + def __init__(self, dbname=None, tbl_name=None, validWriteIdList=None,): self.dbname = dbname self.tbl_name = tbl_name + self.validWriteIdList = validWriteIdList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated 
and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -22532,6 +22650,11 @@ def read(self, iprot): self.tbl_name = iprot.readString() else: iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -22550,6 +22673,10 @@ def write(self, oprot): oprot.writeFieldBegin('tbl_name', TType.STRING, 2) oprot.writeString(self.tbl_name) oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 3) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -22561,6 +22688,7 @@ def __hash__(self): value = 17 value = (value * 31) ^ hash(self.dbname) value = (value * 31) ^ hash(self.tbl_name) + value = (value * 31) ^ hash(self.validWriteIdList) return value def __repr__(self): @@ -22701,10 +22829,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.tbl_names = [] - (_etype1171, _size1168) = iprot.readListBegin() - for _i1172 in xrange(_size1168): - _elem1173 = iprot.readString() - self.tbl_names.append(_elem1173) + (_etype1178, _size1175) = iprot.readListBegin() + for _i1179 in xrange(_size1175): + _elem1180 = iprot.readString() + self.tbl_names.append(_elem1180) iprot.readListEnd() else: iprot.skip(ftype) @@ -22725,8 +22853,8 @@ def write(self, oprot): if self.tbl_names is not None: oprot.writeFieldBegin('tbl_names', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.tbl_names)) - for iter1174 in self.tbl_names: - oprot.writeString(iter1174) + for iter1181 in self.tbl_names: + oprot.writeString(iter1181) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -22778,11 +22906,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1178, _size1175) = iprot.readListBegin() - for _i1179 in xrange(_size1175): - _elem1180 = Table() - _elem1180.read(iprot) - self.success.append(_elem1180) + (_etype1185, _size1182) = iprot.readListBegin() + for _i1186 in xrange(_size1182): + _elem1187 = Table() + _elem1187.read(iprot) + self.success.append(_elem1187) iprot.readListEnd() else: iprot.skip(ftype) @@ -22799,8 +22927,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1181 in self.success: - iter1181.write(oprot) + for iter1188 in self.success: + iter1188.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -22920,11 +23048,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1185, _size1182) = iprot.readListBegin() - for _i1186 in xrange(_size1182): - _elem1187 = ExtendedTableInfo() - _elem1187.read(iprot) - self.success.append(_elem1187) + (_etype1192, _size1189) = iprot.readListBegin() + for _i1193 in xrange(_size1189): + _elem1194 = ExtendedTableInfo() + _elem1194.read(iprot) + self.success.append(_elem1194) iprot.readListEnd() else: iprot.skip(ftype) @@ -22947,8 +23075,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1188 in self.success: - iter1188.write(oprot) + for iter1195 in self.success: + iter1195.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -23821,10 +23949,10 @@ def read(self, 
iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1192, _size1189) = iprot.readListBegin() - for _i1193 in xrange(_size1189): - _elem1194 = iprot.readString() - self.success.append(_elem1194) + (_etype1199, _size1196) = iprot.readListBegin() + for _i1200 in xrange(_size1196): + _elem1201 = iprot.readString() + self.success.append(_elem1201) iprot.readListEnd() else: iprot.skip(ftype) @@ -23859,8 +23987,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1195 in self.success: - oprot.writeString(iter1195) + for iter1202 in self.success: + oprot.writeString(iter1202) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -24989,11 +25117,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.new_parts = [] - (_etype1199, _size1196) = iprot.readListBegin() - for _i1200 in xrange(_size1196): - _elem1201 = Partition() - _elem1201.read(iprot) - self.new_parts.append(_elem1201) + (_etype1206, _size1203) = iprot.readListBegin() + for _i1207 in xrange(_size1203): + _elem1208 = Partition() + _elem1208.read(iprot) + self.new_parts.append(_elem1208) iprot.readListEnd() else: iprot.skip(ftype) @@ -25010,8 +25138,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter1202 in self.new_parts: - iter1202.write(oprot) + for iter1209 in self.new_parts: + iter1209.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -25169,11 +25297,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.new_parts = [] - (_etype1206, _size1203) = iprot.readListBegin() - for _i1207 in xrange(_size1203): - _elem1208 = PartitionSpec() - _elem1208.read(iprot) - self.new_parts.append(_elem1208) + (_etype1213, _size1210) = iprot.readListBegin() + for _i1214 in xrange(_size1210): + _elem1215 = PartitionSpec() + _elem1215.read(iprot) + self.new_parts.append(_elem1215) iprot.readListEnd() else: iprot.skip(ftype) @@ -25190,8 +25318,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter1209 in self.new_parts: - iter1209.write(oprot) + for iter1216 in self.new_parts: + iter1216.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -25365,10 +25493,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1213, _size1210) = iprot.readListBegin() - for _i1214 in xrange(_size1210): - _elem1215 = iprot.readString() - self.part_vals.append(_elem1215) + (_etype1220, _size1217) = iprot.readListBegin() + for _i1221 in xrange(_size1217): + _elem1222 = iprot.readString() + self.part_vals.append(_elem1222) iprot.readListEnd() else: iprot.skip(ftype) @@ -25393,8 +25521,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1216 in self.part_vals: - oprot.writeString(iter1216) + for iter1223 in self.part_vals: + oprot.writeString(iter1223) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -25747,10 +25875,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1220, _size1217) = iprot.readListBegin() - for _i1221 in xrange(_size1217): - _elem1222 = 
iprot.readString() - self.part_vals.append(_elem1222) + (_etype1227, _size1224) = iprot.readListBegin() + for _i1228 in xrange(_size1224): + _elem1229 = iprot.readString() + self.part_vals.append(_elem1229) iprot.readListEnd() else: iprot.skip(ftype) @@ -25781,8 +25909,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1223 in self.part_vals: - oprot.writeString(iter1223) + for iter1230 in self.part_vals: + oprot.writeString(iter1230) oprot.writeListEnd() oprot.writeFieldEnd() if self.environment_context is not None: @@ -26377,10 +26505,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1227, _size1224) = iprot.readListBegin() - for _i1228 in xrange(_size1224): - _elem1229 = iprot.readString() - self.part_vals.append(_elem1229) + (_etype1234, _size1231) = iprot.readListBegin() + for _i1235 in xrange(_size1231): + _elem1236 = iprot.readString() + self.part_vals.append(_elem1236) iprot.readListEnd() else: iprot.skip(ftype) @@ -26410,8 +26538,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1230 in self.part_vals: - oprot.writeString(iter1230) + for iter1237 in self.part_vals: + oprot.writeString(iter1237) oprot.writeListEnd() oprot.writeFieldEnd() if self.deleteData is not None: @@ -26584,10 +26712,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1234, _size1231) = iprot.readListBegin() - for _i1235 in xrange(_size1231): - _elem1236 = iprot.readString() - self.part_vals.append(_elem1236) + (_etype1241, _size1238) = iprot.readListBegin() + for _i1242 in xrange(_size1238): + _elem1243 = iprot.readString() + self.part_vals.append(_elem1243) iprot.readListEnd() else: iprot.skip(ftype) @@ -26623,8 +26751,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1237 in self.part_vals: - oprot.writeString(iter1237) + for iter1244 in self.part_vals: + oprot.writeString(iter1244) oprot.writeListEnd() oprot.writeFieldEnd() if self.deleteData is not None: @@ -27325,6 +27453,7 @@ class get_partition_args: - db_name - tbl_name - part_vals + - validTxnList """ thrift_spec = ( @@ -27332,12 +27461,14 @@ class get_partition_args: (1, TType.STRING, 'db_name', None, None, ), # 1 (2, TType.STRING, 'tbl_name', None, None, ), # 2 (3, TType.LIST, 'part_vals', (TType.STRING,None), None, ), # 3 + (4, TType.STRING, 'validTxnList', None, None, ), # 4 ) - def __init__(self, db_name=None, tbl_name=None, part_vals=None,): + def __init__(self, db_name=None, tbl_name=None, part_vals=None, validTxnList=None,): self.db_name = db_name self.tbl_name = tbl_name self.part_vals = part_vals + self.validTxnList = validTxnList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -27361,13 +27492,18 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1241, _size1238) = iprot.readListBegin() - for _i1242 in xrange(_size1238): - _elem1243 = iprot.readString() - self.part_vals.append(_elem1243) + (_etype1248, _size1245) = iprot.readListBegin() + for _i1249 in xrange(_size1245): + 
_elem1250 = iprot.readString() + self.part_vals.append(_elem1250) iprot.readListEnd() else: iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.validTxnList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -27389,10 +27525,14 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1244 in self.part_vals: - oprot.writeString(iter1244) + for iter1251 in self.part_vals: + oprot.writeString(iter1251) oprot.writeListEnd() oprot.writeFieldEnd() + if self.validTxnList is not None: + oprot.writeFieldBegin('validTxnList', TType.STRING, 4) + oprot.writeString(self.validTxnList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -27405,6 +27545,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.db_name) value = (value * 31) ^ hash(self.tbl_name) value = (value * 31) ^ hash(self.part_vals) + value = (value * 31) ^ hash(self.validTxnList) return value def __repr__(self): @@ -27549,11 +27690,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.partitionSpecs = {} - (_ktype1246, _vtype1247, _size1245 ) = iprot.readMapBegin() - for _i1249 in xrange(_size1245): - _key1250 = iprot.readString() - _val1251 = iprot.readString() - self.partitionSpecs[_key1250] = _val1251 + (_ktype1253, _vtype1254, _size1252 ) = iprot.readMapBegin() + for _i1256 in xrange(_size1252): + _key1257 = iprot.readString() + _val1258 = iprot.readString() + self.partitionSpecs[_key1257] = _val1258 iprot.readMapEnd() else: iprot.skip(ftype) @@ -27590,9 +27731,9 @@ def write(self, oprot): if self.partitionSpecs is not None: oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs)) - for kiter1252,viter1253 in self.partitionSpecs.items(): - oprot.writeString(kiter1252) - oprot.writeString(viter1253) + for kiter1259,viter1260 in self.partitionSpecs.items(): + oprot.writeString(kiter1259) + oprot.writeString(viter1260) oprot.writeMapEnd() oprot.writeFieldEnd() if self.source_db is not None: @@ -27797,11 +27938,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.partitionSpecs = {} - (_ktype1255, _vtype1256, _size1254 ) = iprot.readMapBegin() - for _i1258 in xrange(_size1254): - _key1259 = iprot.readString() - _val1260 = iprot.readString() - self.partitionSpecs[_key1259] = _val1260 + (_ktype1262, _vtype1263, _size1261 ) = iprot.readMapBegin() + for _i1265 in xrange(_size1261): + _key1266 = iprot.readString() + _val1267 = iprot.readString() + self.partitionSpecs[_key1266] = _val1267 iprot.readMapEnd() else: iprot.skip(ftype) @@ -27838,9 +27979,9 @@ def write(self, oprot): if self.partitionSpecs is not None: oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs)) - for kiter1261,viter1262 in self.partitionSpecs.items(): - oprot.writeString(kiter1261) - oprot.writeString(viter1262) + for kiter1268,viter1269 in self.partitionSpecs.items(): + oprot.writeString(kiter1268) + oprot.writeString(viter1269) oprot.writeMapEnd() oprot.writeFieldEnd() if self.source_db is not None: @@ -27923,11 +28064,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1266, _size1263) = iprot.readListBegin() - for _i1267 in xrange(_size1263): - _elem1268 = Partition() - _elem1268.read(iprot) - self.success.append(_elem1268) + (_etype1273, _size1270) 
= iprot.readListBegin() + for _i1274 in xrange(_size1270): + _elem1275 = Partition() + _elem1275.read(iprot) + self.success.append(_elem1275) iprot.readListEnd() else: iprot.skip(ftype) @@ -27968,8 +28109,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1269 in self.success: - iter1269.write(oprot) + for iter1276 in self.success: + iter1276.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -28023,6 +28164,7 @@ class get_partition_with_auth_args: - part_vals - user_name - group_names + - validTxnList """ thrift_spec = ( @@ -28032,14 +28174,16 @@ class get_partition_with_auth_args: (3, TType.LIST, 'part_vals', (TType.STRING,None), None, ), # 3 (4, TType.STRING, 'user_name', None, None, ), # 4 (5, TType.LIST, 'group_names', (TType.STRING,None), None, ), # 5 + (6, TType.STRING, 'validTxnList', None, None, ), # 6 ) - def __init__(self, db_name=None, tbl_name=None, part_vals=None, user_name=None, group_names=None,): + def __init__(self, db_name=None, tbl_name=None, part_vals=None, user_name=None, group_names=None, validTxnList=None,): self.db_name = db_name self.tbl_name = tbl_name self.part_vals = part_vals self.user_name = user_name self.group_names = group_names + self.validTxnList = validTxnList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -28063,10 +28207,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1273, _size1270) = iprot.readListBegin() - for _i1274 in xrange(_size1270): - _elem1275 = iprot.readString() - self.part_vals.append(_elem1275) + (_etype1280, _size1277) = iprot.readListBegin() + for _i1281 in xrange(_size1277): + _elem1282 = iprot.readString() + self.part_vals.append(_elem1282) iprot.readListEnd() else: iprot.skip(ftype) @@ -28078,13 +28222,18 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.group_names = [] - (_etype1279, _size1276) = iprot.readListBegin() - for _i1280 in xrange(_size1276): - _elem1281 = iprot.readString() - self.group_names.append(_elem1281) + (_etype1286, _size1283) = iprot.readListBegin() + for _i1287 in xrange(_size1283): + _elem1288 = iprot.readString() + self.group_names.append(_elem1288) iprot.readListEnd() else: iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.validTxnList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -28106,8 +28255,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1282 in self.part_vals: - oprot.writeString(iter1282) + for iter1289 in self.part_vals: + oprot.writeString(iter1289) oprot.writeListEnd() oprot.writeFieldEnd() if self.user_name is not None: @@ -28117,10 +28266,14 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1283 in self.group_names: - oprot.writeString(iter1283) + for iter1290 in self.group_names: + oprot.writeString(iter1290) oprot.writeListEnd() oprot.writeFieldEnd() + if self.validTxnList is not None: + oprot.writeFieldBegin('validTxnList', TType.STRING, 6) + oprot.writeString(self.validTxnList) + 
oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -28135,6 +28288,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.part_vals) value = (value * 31) ^ hash(self.user_name) value = (value * 31) ^ hash(self.group_names) + value = (value * 31) ^ hash(self.validTxnList) return value def __repr__(self): @@ -28247,6 +28401,7 @@ class get_partition_by_name_args: - db_name - tbl_name - part_name + - validTxnList """ thrift_spec = ( @@ -28254,12 +28409,14 @@ class get_partition_by_name_args: (1, TType.STRING, 'db_name', None, None, ), # 1 (2, TType.STRING, 'tbl_name', None, None, ), # 2 (3, TType.STRING, 'part_name', None, None, ), # 3 + (4, TType.STRING, 'validTxnList', None, None, ), # 4 ) - def __init__(self, db_name=None, tbl_name=None, part_name=None,): + def __init__(self, db_name=None, tbl_name=None, part_name=None, validTxnList=None,): self.db_name = db_name self.tbl_name = tbl_name self.part_name = part_name + self.validTxnList = validTxnList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -28285,6 +28442,11 @@ def read(self, iprot): self.part_name = iprot.readString() else: iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.validTxnList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -28307,6 +28469,10 @@ def write(self, oprot): oprot.writeFieldBegin('part_name', TType.STRING, 3) oprot.writeString(self.part_name) oprot.writeFieldEnd() + if self.validTxnList is not None: + oprot.writeFieldBegin('validTxnList', TType.STRING, 4) + oprot.writeString(self.validTxnList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -28319,6 +28485,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.db_name) value = (value * 31) ^ hash(self.tbl_name) value = (value * 31) ^ hash(self.part_name) + value = (value * 31) ^ hash(self.validTxnList) return value def __repr__(self): @@ -28431,6 +28598,7 @@ class get_partitions_args: - db_name - tbl_name - max_parts + - validTxnList """ thrift_spec = ( @@ -28438,12 +28606,14 @@ class get_partitions_args: (1, TType.STRING, 'db_name', None, None, ), # 1 (2, TType.STRING, 'tbl_name', None, None, ), # 2 (3, TType.I16, 'max_parts', None, -1, ), # 3 + (4, TType.STRING, 'validTxnList', None, None, ), # 4 ) - def __init__(self, db_name=None, tbl_name=None, max_parts=thrift_spec[3][4],): + def __init__(self, db_name=None, tbl_name=None, max_parts=thrift_spec[3][4], validTxnList=None,): self.db_name = db_name self.tbl_name = tbl_name self.max_parts = max_parts + self.validTxnList = validTxnList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -28469,6 +28639,11 @@ def read(self, iprot): self.max_parts = iprot.readI16() else: iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.validTxnList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -28491,6 +28666,10 @@ def write(self, oprot): oprot.writeFieldBegin('max_parts', TType.I16, 3) oprot.writeI16(self.max_parts) oprot.writeFieldEnd() + if self.validTxnList is not None: + oprot.writeFieldBegin('validTxnList', TType.STRING, 4) + oprot.writeString(self.validTxnList) + oprot.writeFieldEnd() oprot.writeFieldStop() 
oprot.writeStructEnd() @@ -28503,6 +28682,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.db_name) value = (value * 31) ^ hash(self.tbl_name) value = (value * 31) ^ hash(self.max_parts) + value = (value * 31) ^ hash(self.validTxnList) return value def __repr__(self): @@ -28547,11 +28727,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1287, _size1284) = iprot.readListBegin() - for _i1288 in xrange(_size1284): - _elem1289 = Partition() - _elem1289.read(iprot) - self.success.append(_elem1289) + (_etype1294, _size1291) = iprot.readListBegin() + for _i1295 in xrange(_size1291): + _elem1296 = Partition() + _elem1296.read(iprot) + self.success.append(_elem1296) iprot.readListEnd() else: iprot.skip(ftype) @@ -28580,8 +28760,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1290 in self.success: - iter1290.write(oprot) + for iter1297 in self.success: + iter1297.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -28625,6 +28805,7 @@ class get_partitions_with_auth_args: - max_parts - user_name - group_names + - validTxnList """ thrift_spec = ( @@ -28634,14 +28815,16 @@ class get_partitions_with_auth_args: (3, TType.I16, 'max_parts', None, -1, ), # 3 (4, TType.STRING, 'user_name', None, None, ), # 4 (5, TType.LIST, 'group_names', (TType.STRING,None), None, ), # 5 + (6, TType.STRING, 'validTxnList', None, None, ), # 6 ) - def __init__(self, db_name=None, tbl_name=None, max_parts=thrift_spec[3][4], user_name=None, group_names=None,): + def __init__(self, db_name=None, tbl_name=None, max_parts=thrift_spec[3][4], user_name=None, group_names=None, validTxnList=None,): self.db_name = db_name self.tbl_name = tbl_name self.max_parts = max_parts self.user_name = user_name self.group_names = group_names + self.validTxnList = validTxnList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -28675,13 +28858,18 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.group_names = [] - (_etype1294, _size1291) = iprot.readListBegin() - for _i1295 in xrange(_size1291): - _elem1296 = iprot.readString() - self.group_names.append(_elem1296) + (_etype1301, _size1298) = iprot.readListBegin() + for _i1302 in xrange(_size1298): + _elem1303 = iprot.readString() + self.group_names.append(_elem1303) iprot.readListEnd() else: iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.validTxnList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -28711,10 +28899,14 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1297 in self.group_names: - oprot.writeString(iter1297) + for iter1304 in self.group_names: + oprot.writeString(iter1304) oprot.writeListEnd() oprot.writeFieldEnd() + if self.validTxnList is not None: + oprot.writeFieldBegin('validTxnList', TType.STRING, 6) + oprot.writeString(self.validTxnList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -28729,6 +28921,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.max_parts) value = (value * 31) ^ hash(self.user_name) value = (value * 31) ^ hash(self.group_names) + value = (value * 31) 
^ hash(self.validTxnList) return value def __repr__(self): @@ -28773,11 +28966,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1301, _size1298) = iprot.readListBegin() - for _i1302 in xrange(_size1298): - _elem1303 = Partition() - _elem1303.read(iprot) - self.success.append(_elem1303) + (_etype1308, _size1305) = iprot.readListBegin() + for _i1309 in xrange(_size1305): + _elem1310 = Partition() + _elem1310.read(iprot) + self.success.append(_elem1310) iprot.readListEnd() else: iprot.skip(ftype) @@ -28806,8 +28999,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1304 in self.success: - iter1304.write(oprot) + for iter1311 in self.success: + iter1311.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -28849,6 +29042,7 @@ class get_partitions_pspec_args: - db_name - tbl_name - max_parts + - validTxnList """ thrift_spec = ( @@ -28856,12 +29050,14 @@ class get_partitions_pspec_args: (1, TType.STRING, 'db_name', None, None, ), # 1 (2, TType.STRING, 'tbl_name', None, None, ), # 2 (3, TType.I32, 'max_parts', None, -1, ), # 3 + (4, TType.STRING, 'validTxnList', None, None, ), # 4 ) - def __init__(self, db_name=None, tbl_name=None, max_parts=thrift_spec[3][4],): + def __init__(self, db_name=None, tbl_name=None, max_parts=thrift_spec[3][4], validTxnList=None,): self.db_name = db_name self.tbl_name = tbl_name self.max_parts = max_parts + self.validTxnList = validTxnList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -28887,6 +29083,11 @@ def read(self, iprot): self.max_parts = iprot.readI32() else: iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.validTxnList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -28909,6 +29110,10 @@ def write(self, oprot): oprot.writeFieldBegin('max_parts', TType.I32, 3) oprot.writeI32(self.max_parts) oprot.writeFieldEnd() + if self.validTxnList is not None: + oprot.writeFieldBegin('validTxnList', TType.STRING, 4) + oprot.writeString(self.validTxnList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -28921,6 +29126,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.db_name) value = (value * 31) ^ hash(self.tbl_name) value = (value * 31) ^ hash(self.max_parts) + value = (value * 31) ^ hash(self.validTxnList) return value def __repr__(self): @@ -28965,11 +29171,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1308, _size1305) = iprot.readListBegin() - for _i1309 in xrange(_size1305): - _elem1310 = PartitionSpec() - _elem1310.read(iprot) - self.success.append(_elem1310) + (_etype1315, _size1312) = iprot.readListBegin() + for _i1316 in xrange(_size1312): + _elem1317 = PartitionSpec() + _elem1317.read(iprot) + self.success.append(_elem1317) iprot.readListEnd() else: iprot.skip(ftype) @@ -28998,8 +29204,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1311 in self.success: - iter1311.write(oprot) + for iter1318 in self.success: + iter1318.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -29041,6 +29247,7 @@ class 
get_partition_names_args: - db_name - tbl_name - max_parts + - validTxnList """ thrift_spec = ( @@ -29048,12 +29255,14 @@ class get_partition_names_args: (1, TType.STRING, 'db_name', None, None, ), # 1 (2, TType.STRING, 'tbl_name', None, None, ), # 2 (3, TType.I16, 'max_parts', None, -1, ), # 3 + (4, TType.STRING, 'validTxnList', None, None, ), # 4 ) - def __init__(self, db_name=None, tbl_name=None, max_parts=thrift_spec[3][4],): + def __init__(self, db_name=None, tbl_name=None, max_parts=thrift_spec[3][4], validTxnList=None,): self.db_name = db_name self.tbl_name = tbl_name self.max_parts = max_parts + self.validTxnList = validTxnList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -29079,6 +29288,11 @@ def read(self, iprot): self.max_parts = iprot.readI16() else: iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.validTxnList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -29101,6 +29315,10 @@ def write(self, oprot): oprot.writeFieldBegin('max_parts', TType.I16, 3) oprot.writeI16(self.max_parts) oprot.writeFieldEnd() + if self.validTxnList is not None: + oprot.writeFieldBegin('validTxnList', TType.STRING, 4) + oprot.writeString(self.validTxnList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -29113,6 +29331,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.db_name) value = (value * 31) ^ hash(self.tbl_name) value = (value * 31) ^ hash(self.max_parts) + value = (value * 31) ^ hash(self.validTxnList) return value def __repr__(self): @@ -29157,10 +29376,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1315, _size1312) = iprot.readListBegin() - for _i1316 in xrange(_size1312): - _elem1317 = iprot.readString() - self.success.append(_elem1317) + (_etype1322, _size1319) = iprot.readListBegin() + for _i1323 in xrange(_size1319): + _elem1324 = iprot.readString() + self.success.append(_elem1324) iprot.readListEnd() else: iprot.skip(ftype) @@ -29189,8 +29408,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1318 in self.success: - oprot.writeString(iter1318) + for iter1325 in self.success: + oprot.writeString(iter1325) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -29392,6 +29611,7 @@ class get_partitions_ps_args: - tbl_name - part_vals - max_parts + - validTxnList """ thrift_spec = ( @@ -29400,13 +29620,15 @@ class get_partitions_ps_args: (2, TType.STRING, 'tbl_name', None, None, ), # 2 (3, TType.LIST, 'part_vals', (TType.STRING,None), None, ), # 3 (4, TType.I16, 'max_parts', None, -1, ), # 4 + (5, TType.STRING, 'validTxnList', None, None, ), # 5 ) - def __init__(self, db_name=None, tbl_name=None, part_vals=None, max_parts=thrift_spec[4][4],): + def __init__(self, db_name=None, tbl_name=None, part_vals=None, max_parts=thrift_spec[4][4], validTxnList=None,): self.db_name = db_name self.tbl_name = tbl_name self.part_vals = part_vals self.max_parts = max_parts + self.validTxnList = validTxnList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -29430,10 +29652,10 @@ def read(self, iprot): elif 
fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1322, _size1319) = iprot.readListBegin() - for _i1323 in xrange(_size1319): - _elem1324 = iprot.readString() - self.part_vals.append(_elem1324) + (_etype1329, _size1326) = iprot.readListBegin() + for _i1330 in xrange(_size1326): + _elem1331 = iprot.readString() + self.part_vals.append(_elem1331) iprot.readListEnd() else: iprot.skip(ftype) @@ -29442,6 +29664,11 @@ def read(self, iprot): self.max_parts = iprot.readI16() else: iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.validTxnList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -29463,14 +29690,18 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1325 in self.part_vals: - oprot.writeString(iter1325) + for iter1332 in self.part_vals: + oprot.writeString(iter1332) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: oprot.writeFieldBegin('max_parts', TType.I16, 4) oprot.writeI16(self.max_parts) oprot.writeFieldEnd() + if self.validTxnList is not None: + oprot.writeFieldBegin('validTxnList', TType.STRING, 5) + oprot.writeString(self.validTxnList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -29484,6 +29715,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.tbl_name) value = (value * 31) ^ hash(self.part_vals) value = (value * 31) ^ hash(self.max_parts) + value = (value * 31) ^ hash(self.validTxnList) return value def __repr__(self): @@ -29528,11 +29760,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1329, _size1326) = iprot.readListBegin() - for _i1330 in xrange(_size1326): - _elem1331 = Partition() - _elem1331.read(iprot) - self.success.append(_elem1331) + (_etype1336, _size1333) = iprot.readListBegin() + for _i1337 in xrange(_size1333): + _elem1338 = Partition() + _elem1338.read(iprot) + self.success.append(_elem1338) iprot.readListEnd() else: iprot.skip(ftype) @@ -29561,8 +29793,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1332 in self.success: - iter1332.write(oprot) + for iter1339 in self.success: + iter1339.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -29607,6 +29839,7 @@ class get_partitions_ps_with_auth_args: - max_parts - user_name - group_names + - validTxnList """ thrift_spec = ( @@ -29617,15 +29850,17 @@ class get_partitions_ps_with_auth_args: (4, TType.I16, 'max_parts', None, -1, ), # 4 (5, TType.STRING, 'user_name', None, None, ), # 5 (6, TType.LIST, 'group_names', (TType.STRING,None), None, ), # 6 + (7, TType.STRING, 'validTxnList', None, None, ), # 7 ) - def __init__(self, db_name=None, tbl_name=None, part_vals=None, max_parts=thrift_spec[4][4], user_name=None, group_names=None,): + def __init__(self, db_name=None, tbl_name=None, part_vals=None, max_parts=thrift_spec[4][4], user_name=None, group_names=None, validTxnList=None,): self.db_name = db_name self.tbl_name = tbl_name self.part_vals = part_vals self.max_parts = max_parts self.user_name = user_name self.group_names = group_names + self.validTxnList = validTxnList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary 
is not None: @@ -29649,10 +29884,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1336, _size1333) = iprot.readListBegin() - for _i1337 in xrange(_size1333): - _elem1338 = iprot.readString() - self.part_vals.append(_elem1338) + (_etype1343, _size1340) = iprot.readListBegin() + for _i1344 in xrange(_size1340): + _elem1345 = iprot.readString() + self.part_vals.append(_elem1345) iprot.readListEnd() else: iprot.skip(ftype) @@ -29669,13 +29904,18 @@ def read(self, iprot): elif fid == 6: if ftype == TType.LIST: self.group_names = [] - (_etype1342, _size1339) = iprot.readListBegin() - for _i1343 in xrange(_size1339): - _elem1344 = iprot.readString() - self.group_names.append(_elem1344) + (_etype1349, _size1346) = iprot.readListBegin() + for _i1350 in xrange(_size1346): + _elem1351 = iprot.readString() + self.group_names.append(_elem1351) iprot.readListEnd() else: iprot.skip(ftype) + elif fid == 7: + if ftype == TType.STRING: + self.validTxnList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -29697,8 +29937,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1345 in self.part_vals: - oprot.writeString(iter1345) + for iter1352 in self.part_vals: + oprot.writeString(iter1352) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -29712,10 +29952,14 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 6) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1346 in self.group_names: - oprot.writeString(iter1346) + for iter1353 in self.group_names: + oprot.writeString(iter1353) oprot.writeListEnd() oprot.writeFieldEnd() + if self.validTxnList is not None: + oprot.writeFieldBegin('validTxnList', TType.STRING, 7) + oprot.writeString(self.validTxnList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -29731,6 +29975,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.max_parts) value = (value * 31) ^ hash(self.user_name) value = (value * 31) ^ hash(self.group_names) + value = (value * 31) ^ hash(self.validTxnList) return value def __repr__(self): @@ -29775,11 +30020,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1350, _size1347) = iprot.readListBegin() - for _i1351 in xrange(_size1347): - _elem1352 = Partition() - _elem1352.read(iprot) - self.success.append(_elem1352) + (_etype1357, _size1354) = iprot.readListBegin() + for _i1358 in xrange(_size1354): + _elem1359 = Partition() + _elem1359.read(iprot) + self.success.append(_elem1359) iprot.readListEnd() else: iprot.skip(ftype) @@ -29808,8 +30053,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1353 in self.success: - iter1353.write(oprot) + for iter1360 in self.success: + iter1360.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -29852,6 +30097,7 @@ class get_partition_names_ps_args: - tbl_name - part_vals - max_parts + - validTxnList """ thrift_spec = ( @@ -29860,13 +30106,15 @@ class get_partition_names_ps_args: (2, TType.STRING, 'tbl_name', None, None, ), # 2 (3, TType.LIST, 'part_vals', (TType.STRING,None), None, ), # 3 (4, TType.I16, 'max_parts', None, -1, ), # 4 + (5, TType.STRING, 'validTxnList', None, None, ), # 
5 ) - def __init__(self, db_name=None, tbl_name=None, part_vals=None, max_parts=thrift_spec[4][4],): + def __init__(self, db_name=None, tbl_name=None, part_vals=None, max_parts=thrift_spec[4][4], validTxnList=None,): self.db_name = db_name self.tbl_name = tbl_name self.part_vals = part_vals self.max_parts = max_parts + self.validTxnList = validTxnList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -29890,10 +30138,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1357, _size1354) = iprot.readListBegin() - for _i1358 in xrange(_size1354): - _elem1359 = iprot.readString() - self.part_vals.append(_elem1359) + (_etype1364, _size1361) = iprot.readListBegin() + for _i1365 in xrange(_size1361): + _elem1366 = iprot.readString() + self.part_vals.append(_elem1366) iprot.readListEnd() else: iprot.skip(ftype) @@ -29902,6 +30150,11 @@ def read(self, iprot): self.max_parts = iprot.readI16() else: iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.validTxnList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -29923,14 +30176,18 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1360 in self.part_vals: - oprot.writeString(iter1360) + for iter1367 in self.part_vals: + oprot.writeString(iter1367) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: oprot.writeFieldBegin('max_parts', TType.I16, 4) oprot.writeI16(self.max_parts) oprot.writeFieldEnd() + if self.validTxnList is not None: + oprot.writeFieldBegin('validTxnList', TType.STRING, 5) + oprot.writeString(self.validTxnList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -29944,6 +30201,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.tbl_name) value = (value * 31) ^ hash(self.part_vals) value = (value * 31) ^ hash(self.max_parts) + value = (value * 31) ^ hash(self.validTxnList) return value def __repr__(self): @@ -29988,10 +30246,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1364, _size1361) = iprot.readListBegin() - for _i1365 in xrange(_size1361): - _elem1366 = iprot.readString() - self.success.append(_elem1366) + (_etype1371, _size1368) = iprot.readListBegin() + for _i1372 in xrange(_size1368): + _elem1373 = iprot.readString() + self.success.append(_elem1373) iprot.readListEnd() else: iprot.skip(ftype) @@ -30020,8 +30278,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1367 in self.success: - oprot.writeString(iter1367) + for iter1374 in self.success: + oprot.writeString(iter1374) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -30064,6 +30322,7 @@ class get_partitions_by_filter_args: - tbl_name - filter - max_parts + - validTxnList """ thrift_spec = ( @@ -30072,13 +30331,15 @@ class get_partitions_by_filter_args: (2, TType.STRING, 'tbl_name', None, None, ), # 2 (3, TType.STRING, 'filter', None, None, ), # 3 (4, TType.I16, 'max_parts', None, -1, ), # 4 + (5, TType.STRING, 'validTxnList', None, None, ), # 5 ) - def __init__(self, db_name=None, tbl_name=None, filter=None, max_parts=thrift_spec[4][4],): + def 
__init__(self, db_name=None, tbl_name=None, filter=None, max_parts=thrift_spec[4][4], validTxnList=None,): self.db_name = db_name self.tbl_name = tbl_name self.filter = filter self.max_parts = max_parts + self.validTxnList = validTxnList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -30109,6 +30370,11 @@ def read(self, iprot): self.max_parts = iprot.readI16() else: iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.validTxnList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -30135,6 +30401,10 @@ def write(self, oprot): oprot.writeFieldBegin('max_parts', TType.I16, 4) oprot.writeI16(self.max_parts) oprot.writeFieldEnd() + if self.validTxnList is not None: + oprot.writeFieldBegin('validTxnList', TType.STRING, 5) + oprot.writeString(self.validTxnList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -30148,6 +30418,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.tbl_name) value = (value * 31) ^ hash(self.filter) value = (value * 31) ^ hash(self.max_parts) + value = (value * 31) ^ hash(self.validTxnList) return value def __repr__(self): @@ -30192,11 +30463,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1371, _size1368) = iprot.readListBegin() - for _i1372 in xrange(_size1368): - _elem1373 = Partition() - _elem1373.read(iprot) - self.success.append(_elem1373) + (_etype1378, _size1375) = iprot.readListBegin() + for _i1379 in xrange(_size1375): + _elem1380 = Partition() + _elem1380.read(iprot) + self.success.append(_elem1380) iprot.readListEnd() else: iprot.skip(ftype) @@ -30225,8 +30496,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1374 in self.success: - iter1374.write(oprot) + for iter1381 in self.success: + iter1381.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -30269,6 +30540,7 @@ class get_part_specs_by_filter_args: - tbl_name - filter - max_parts + - validTxnList """ thrift_spec = ( @@ -30277,13 +30549,15 @@ class get_part_specs_by_filter_args: (2, TType.STRING, 'tbl_name', None, None, ), # 2 (3, TType.STRING, 'filter', None, None, ), # 3 (4, TType.I32, 'max_parts', None, -1, ), # 4 + (5, TType.STRING, 'validTxnList', None, None, ), # 5 ) - def __init__(self, db_name=None, tbl_name=None, filter=None, max_parts=thrift_spec[4][4],): + def __init__(self, db_name=None, tbl_name=None, filter=None, max_parts=thrift_spec[4][4], validTxnList=None,): self.db_name = db_name self.tbl_name = tbl_name self.filter = filter self.max_parts = max_parts + self.validTxnList = validTxnList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -30314,6 +30588,11 @@ def read(self, iprot): self.max_parts = iprot.readI32() else: iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.validTxnList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -30340,6 +30619,10 @@ def write(self, oprot): oprot.writeFieldBegin('max_parts', TType.I32, 4) oprot.writeI32(self.max_parts) oprot.writeFieldEnd() + if self.validTxnList is not None: + 
oprot.writeFieldBegin('validTxnList', TType.STRING, 5) + oprot.writeString(self.validTxnList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -30353,6 +30636,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.tbl_name) value = (value * 31) ^ hash(self.filter) value = (value * 31) ^ hash(self.max_parts) + value = (value * 31) ^ hash(self.validTxnList) return value def __repr__(self): @@ -30397,11 +30681,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1378, _size1375) = iprot.readListBegin() - for _i1379 in xrange(_size1375): - _elem1380 = PartitionSpec() - _elem1380.read(iprot) - self.success.append(_elem1380) + (_etype1385, _size1382) = iprot.readListBegin() + for _i1386 in xrange(_size1382): + _elem1387 = PartitionSpec() + _elem1387.read(iprot) + self.success.append(_elem1387) iprot.readListEnd() else: iprot.skip(ftype) @@ -30430,8 +30714,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1381 in self.success: - iter1381.write(oprot) + for iter1388 in self.success: + iter1388.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -30632,6 +30916,7 @@ class get_num_partitions_by_filter_args: - db_name - tbl_name - filter + - validTxnList """ thrift_spec = ( @@ -30639,12 +30924,14 @@ class get_num_partitions_by_filter_args: (1, TType.STRING, 'db_name', None, None, ), # 1 (2, TType.STRING, 'tbl_name', None, None, ), # 2 (3, TType.STRING, 'filter', None, None, ), # 3 + (4, TType.STRING, 'validTxnList', None, None, ), # 4 ) - def __init__(self, db_name=None, tbl_name=None, filter=None,): + def __init__(self, db_name=None, tbl_name=None, filter=None, validTxnList=None,): self.db_name = db_name self.tbl_name = tbl_name self.filter = filter + self.validTxnList = validTxnList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -30670,6 +30957,11 @@ def read(self, iprot): self.filter = iprot.readString() else: iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.validTxnList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -30692,6 +30984,10 @@ def write(self, oprot): oprot.writeFieldBegin('filter', TType.STRING, 3) oprot.writeString(self.filter) oprot.writeFieldEnd() + if self.validTxnList is not None: + oprot.writeFieldBegin('validTxnList', TType.STRING, 4) + oprot.writeString(self.validTxnList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -30704,6 +31000,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.db_name) value = (value * 31) ^ hash(self.tbl_name) value = (value * 31) ^ hash(self.filter) + value = (value * 31) ^ hash(self.validTxnList) return value def __repr__(self): @@ -30815,6 +31112,7 @@ class get_partitions_by_names_args: - db_name - tbl_name - names + - validTxnList """ thrift_spec = ( @@ -30822,12 +31120,14 @@ class get_partitions_by_names_args: (1, TType.STRING, 'db_name', None, None, ), # 1 (2, TType.STRING, 'tbl_name', None, None, ), # 2 (3, TType.LIST, 'names', (TType.STRING,None), None, ), # 3 + (4, TType.STRING, 'validTxnList', None, None, ), # 4 ) - def __init__(self, db_name=None, tbl_name=None, names=None,): + def __init__(self, db_name=None, tbl_name=None, names=None, validTxnList=None,): 
self.db_name = db_name self.tbl_name = tbl_name self.names = names + self.validTxnList = validTxnList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -30851,13 +31151,18 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.names = [] - (_etype1385, _size1382) = iprot.readListBegin() - for _i1386 in xrange(_size1382): - _elem1387 = iprot.readString() - self.names.append(_elem1387) + (_etype1392, _size1389) = iprot.readListBegin() + for _i1393 in xrange(_size1389): + _elem1394 = iprot.readString() + self.names.append(_elem1394) iprot.readListEnd() else: iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.validTxnList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -30879,10 +31184,14 @@ def write(self, oprot): if self.names is not None: oprot.writeFieldBegin('names', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.names)) - for iter1388 in self.names: - oprot.writeString(iter1388) + for iter1395 in self.names: + oprot.writeString(iter1395) oprot.writeListEnd() oprot.writeFieldEnd() + if self.validTxnList is not None: + oprot.writeFieldBegin('validTxnList', TType.STRING, 4) + oprot.writeString(self.validTxnList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -30895,6 +31204,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.db_name) value = (value * 31) ^ hash(self.tbl_name) value = (value * 31) ^ hash(self.names) + value = (value * 31) ^ hash(self.validTxnList) return value def __repr__(self): @@ -30939,11 +31249,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1392, _size1389) = iprot.readListBegin() - for _i1393 in xrange(_size1389): - _elem1394 = Partition() - _elem1394.read(iprot) - self.success.append(_elem1394) + (_etype1399, _size1396) = iprot.readListBegin() + for _i1400 in xrange(_size1396): + _elem1401 = Partition() + _elem1401.read(iprot) + self.success.append(_elem1401) iprot.readListEnd() else: iprot.skip(ftype) @@ -30972,8 +31282,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1395 in self.success: - iter1395.write(oprot) + for iter1402 in self.success: + iter1402.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -31382,11 +31692,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.new_parts = [] - (_etype1399, _size1396) = iprot.readListBegin() - for _i1400 in xrange(_size1396): - _elem1401 = Partition() - _elem1401.read(iprot) - self.new_parts.append(_elem1401) + (_etype1406, _size1403) = iprot.readListBegin() + for _i1407 in xrange(_size1403): + _elem1408 = Partition() + _elem1408.read(iprot) + self.new_parts.append(_elem1408) iprot.readListEnd() else: iprot.skip(ftype) @@ -31411,8 +31721,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter1402 in self.new_parts: - iter1402.write(oprot) + for iter1409 in self.new_parts: + iter1409.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -31565,11 +31875,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.new_parts = [] - (_etype1406, _size1403) = 
iprot.readListBegin() - for _i1407 in xrange(_size1403): - _elem1408 = Partition() - _elem1408.read(iprot) - self.new_parts.append(_elem1408) + (_etype1413, _size1410) = iprot.readListBegin() + for _i1414 in xrange(_size1410): + _elem1415 = Partition() + _elem1415.read(iprot) + self.new_parts.append(_elem1415) iprot.readListEnd() else: iprot.skip(ftype) @@ -31600,8 +31910,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter1409 in self.new_parts: - iter1409.write(oprot) + for iter1416 in self.new_parts: + iter1416.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.environment_context is not None: @@ -32104,10 +32414,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1413, _size1410) = iprot.readListBegin() - for _i1414 in xrange(_size1410): - _elem1415 = iprot.readString() - self.part_vals.append(_elem1415) + (_etype1420, _size1417) = iprot.readListBegin() + for _i1421 in xrange(_size1417): + _elem1422 = iprot.readString() + self.part_vals.append(_elem1422) iprot.readListEnd() else: iprot.skip(ftype) @@ -32138,8 +32448,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1416 in self.part_vals: - oprot.writeString(iter1416) + for iter1423 in self.part_vals: + oprot.writeString(iter1423) oprot.writeListEnd() oprot.writeFieldEnd() if self.new_part is not None: @@ -32440,10 +32750,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.part_vals = [] - (_etype1420, _size1417) = iprot.readListBegin() - for _i1421 in xrange(_size1417): - _elem1422 = iprot.readString() - self.part_vals.append(_elem1422) + (_etype1427, _size1424) = iprot.readListBegin() + for _i1428 in xrange(_size1424): + _elem1429 = iprot.readString() + self.part_vals.append(_elem1429) iprot.readListEnd() else: iprot.skip(ftype) @@ -32465,8 +32775,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1423 in self.part_vals: - oprot.writeString(iter1423) + for iter1430 in self.part_vals: + oprot.writeString(iter1430) oprot.writeListEnd() oprot.writeFieldEnd() if self.throw_exception is not None: @@ -32824,10 +33134,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1427, _size1424) = iprot.readListBegin() - for _i1428 in xrange(_size1424): - _elem1429 = iprot.readString() - self.success.append(_elem1429) + (_etype1434, _size1431) = iprot.readListBegin() + for _i1435 in xrange(_size1431): + _elem1436 = iprot.readString() + self.success.append(_elem1436) iprot.readListEnd() else: iprot.skip(ftype) @@ -32850,8 +33160,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1430 in self.success: - oprot.writeString(iter1430) + for iter1437 in self.success: + oprot.writeString(iter1437) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -32975,11 +33285,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.MAP: self.success = {} - (_ktype1432, _vtype1433, _size1431 ) = iprot.readMapBegin() - for _i1435 in xrange(_size1431): - _key1436 = iprot.readString() - _val1437 = iprot.readString() - self.success[_key1436] = _val1437 
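# ---------------------------------------------------------------------------
# Note on the _elemNNN/_iterNNN/_sizeNNN renames running through these hunks:
# they are purely mechanical. The Thrift generator numbers its temporary
# variables with a single running counter per file, and the code generated for
# this patch's additions earlier in each file consumes seven counter slots, so
# every later temporary shifts by exactly +7 (e.g. _elem1373 -> _elem1380).
# No behavior changes in these renumbering hunks.
# ---------------------------------------------------------------------------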
+ (_ktype1439, _vtype1440, _size1438 ) = iprot.readMapBegin() + for _i1442 in xrange(_size1438): + _key1443 = iprot.readString() + _val1444 = iprot.readString() + self.success[_key1443] = _val1444 iprot.readMapEnd() else: iprot.skip(ftype) @@ -33002,9 +33312,9 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.MAP, 0) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.success)) - for kiter1438,viter1439 in self.success.items(): - oprot.writeString(kiter1438) - oprot.writeString(viter1439) + for kiter1445,viter1446 in self.success.items(): + oprot.writeString(kiter1445) + oprot.writeString(viter1446) oprot.writeMapEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -33080,11 +33390,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.MAP: self.part_vals = {} - (_ktype1441, _vtype1442, _size1440 ) = iprot.readMapBegin() - for _i1444 in xrange(_size1440): - _key1445 = iprot.readString() - _val1446 = iprot.readString() - self.part_vals[_key1445] = _val1446 + (_ktype1448, _vtype1449, _size1447 ) = iprot.readMapBegin() + for _i1451 in xrange(_size1447): + _key1452 = iprot.readString() + _val1453 = iprot.readString() + self.part_vals[_key1452] = _val1453 iprot.readMapEnd() else: iprot.skip(ftype) @@ -33114,9 +33424,9 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals)) - for kiter1447,viter1448 in self.part_vals.items(): - oprot.writeString(kiter1447) - oprot.writeString(viter1448) + for kiter1454,viter1455 in self.part_vals.items(): + oprot.writeString(kiter1454) + oprot.writeString(viter1455) oprot.writeMapEnd() oprot.writeFieldEnd() if self.eventType is not None: @@ -33330,11 +33640,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.MAP: self.part_vals = {} - (_ktype1450, _vtype1451, _size1449 ) = iprot.readMapBegin() - for _i1453 in xrange(_size1449): - _key1454 = iprot.readString() - _val1455 = iprot.readString() - self.part_vals[_key1454] = _val1455 + (_ktype1457, _vtype1458, _size1456 ) = iprot.readMapBegin() + for _i1460 in xrange(_size1456): + _key1461 = iprot.readString() + _val1462 = iprot.readString() + self.part_vals[_key1461] = _val1462 iprot.readMapEnd() else: iprot.skip(ftype) @@ -33364,9 +33674,9 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals)) - for kiter1456,viter1457 in self.part_vals.items(): - oprot.writeString(kiter1456) - oprot.writeString(viter1457) + for kiter1463,viter1464 in self.part_vals.items(): + oprot.writeString(kiter1463) + oprot.writeString(viter1464) oprot.writeMapEnd() oprot.writeFieldEnd() if self.eventType is not None: @@ -35253,6 +35563,7 @@ class get_table_column_statistics_args: - db_name - tbl_name - col_name + - validWriteIdList """ thrift_spec = ( @@ -35260,12 +35571,14 @@ class get_table_column_statistics_args: (1, TType.STRING, 'db_name', None, None, ), # 1 (2, TType.STRING, 'tbl_name', None, None, ), # 2 (3, TType.STRING, 'col_name', None, None, ), # 3 + (4, TType.STRING, 'validWriteIdList', None, None, ), # 4 ) - def __init__(self, db_name=None, tbl_name=None, col_name=None,): + def __init__(self, db_name=None, tbl_name=None, col_name=None, validWriteIdList=None,): self.db_name = db_name self.tbl_name = tbl_name self.col_name = col_name + self.validWriteIdList = validWriteIdList def read(self, iprot): if iprot.__class__ == 
TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -35291,6 +35604,11 @@ def read(self, iprot): self.col_name = iprot.readString() else: iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -35313,6 +35631,10 @@ def write(self, oprot): oprot.writeFieldBegin('col_name', TType.STRING, 3) oprot.writeString(self.col_name) oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 4) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -35325,6 +35647,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.db_name) value = (value * 31) ^ hash(self.tbl_name) value = (value * 31) ^ hash(self.col_name) + value = (value * 31) ^ hash(self.validWriteIdList) return value def __repr__(self): @@ -35466,6 +35789,7 @@ class get_partition_column_statistics_args: - tbl_name - part_name - col_name + - validWriteIdList """ thrift_spec = ( @@ -35474,13 +35798,15 @@ class get_partition_column_statistics_args: (2, TType.STRING, 'tbl_name', None, None, ), # 2 (3, TType.STRING, 'part_name', None, None, ), # 3 (4, TType.STRING, 'col_name', None, None, ), # 4 + (5, TType.STRING, 'validWriteIdList', None, None, ), # 5 ) - def __init__(self, db_name=None, tbl_name=None, part_name=None, col_name=None,): + def __init__(self, db_name=None, tbl_name=None, part_name=None, col_name=None, validWriteIdList=None,): self.db_name = db_name self.tbl_name = tbl_name self.part_name = part_name self.col_name = col_name + self.validWriteIdList = validWriteIdList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -35511,6 +35837,11 @@ def read(self, iprot): self.col_name = iprot.readString() else: iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -35537,6 +35868,10 @@ def write(self, oprot): oprot.writeFieldBegin('col_name', TType.STRING, 4) oprot.writeString(self.col_name) oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 5) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -35550,6 +35885,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.tbl_name) value = (value * 31) ^ hash(self.part_name) value = (value * 31) ^ hash(self.col_name) + value = (value * 31) ^ hash(self.validWriteIdList) return value def __repr__(self): @@ -37392,10 +37728,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1461, _size1458) = iprot.readListBegin() - for _i1462 in xrange(_size1458): - _elem1463 = iprot.readString() - self.success.append(_elem1463) + (_etype1468, _size1465) = iprot.readListBegin() + for _i1469 in xrange(_size1465): + _elem1470 = iprot.readString() + self.success.append(_elem1470) iprot.readListEnd() else: iprot.skip(ftype) @@ -37418,8 +37754,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - 
for iter1464 in self.success: - oprot.writeString(iter1464) + for iter1471 in self.success: + oprot.writeString(iter1471) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -38107,10 +38443,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1468, _size1465) = iprot.readListBegin() - for _i1469 in xrange(_size1465): - _elem1470 = iprot.readString() - self.success.append(_elem1470) + (_etype1475, _size1472) = iprot.readListBegin() + for _i1476 in xrange(_size1472): + _elem1477 = iprot.readString() + self.success.append(_elem1477) iprot.readListEnd() else: iprot.skip(ftype) @@ -38133,8 +38469,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1471 in self.success: - oprot.writeString(iter1471) + for iter1478 in self.success: + oprot.writeString(iter1478) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -38648,11 +38984,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1475, _size1472) = iprot.readListBegin() - for _i1476 in xrange(_size1472): - _elem1477 = Role() - _elem1477.read(iprot) - self.success.append(_elem1477) + (_etype1482, _size1479) = iprot.readListBegin() + for _i1483 in xrange(_size1479): + _elem1484 = Role() + _elem1484.read(iprot) + self.success.append(_elem1484) iprot.readListEnd() else: iprot.skip(ftype) @@ -38675,8 +39011,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1478 in self.success: - iter1478.write(oprot) + for iter1485 in self.success: + iter1485.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -39185,10 +39521,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.group_names = [] - (_etype1482, _size1479) = iprot.readListBegin() - for _i1483 in xrange(_size1479): - _elem1484 = iprot.readString() - self.group_names.append(_elem1484) + (_etype1489, _size1486) = iprot.readListBegin() + for _i1490 in xrange(_size1486): + _elem1491 = iprot.readString() + self.group_names.append(_elem1491) iprot.readListEnd() else: iprot.skip(ftype) @@ -39213,8 +39549,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1485 in self.group_names: - oprot.writeString(iter1485) + for iter1492 in self.group_names: + oprot.writeString(iter1492) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -39441,11 +39777,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1489, _size1486) = iprot.readListBegin() - for _i1490 in xrange(_size1486): - _elem1491 = HiveObjectPrivilege() - _elem1491.read(iprot) - self.success.append(_elem1491) + (_etype1496, _size1493) = iprot.readListBegin() + for _i1497 in xrange(_size1493): + _elem1498 = HiveObjectPrivilege() + _elem1498.read(iprot) + self.success.append(_elem1498) iprot.readListEnd() else: iprot.skip(ftype) @@ -39468,8 +39804,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1492 in self.success: - iter1492.write(oprot) + for iter1499 in self.success: + iter1499.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not 
None: @@ -40139,10 +40475,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.group_names = [] - (_etype1496, _size1493) = iprot.readListBegin() - for _i1497 in xrange(_size1493): - _elem1498 = iprot.readString() - self.group_names.append(_elem1498) + (_etype1503, _size1500) = iprot.readListBegin() + for _i1504 in xrange(_size1500): + _elem1505 = iprot.readString() + self.group_names.append(_elem1505) iprot.readListEnd() else: iprot.skip(ftype) @@ -40163,8 +40499,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1499 in self.group_names: - oprot.writeString(iter1499) + for iter1506 in self.group_names: + oprot.writeString(iter1506) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -40219,10 +40555,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1503, _size1500) = iprot.readListBegin() - for _i1504 in xrange(_size1500): - _elem1505 = iprot.readString() - self.success.append(_elem1505) + (_etype1510, _size1507) = iprot.readListBegin() + for _i1511 in xrange(_size1507): + _elem1512 = iprot.readString() + self.success.append(_elem1512) iprot.readListEnd() else: iprot.skip(ftype) @@ -40245,8 +40581,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1506 in self.success: - oprot.writeString(iter1506) + for iter1513 in self.success: + oprot.writeString(iter1513) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -41178,10 +41514,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1510, _size1507) = iprot.readListBegin() - for _i1511 in xrange(_size1507): - _elem1512 = iprot.readString() - self.success.append(_elem1512) + (_etype1517, _size1514) = iprot.readListBegin() + for _i1518 in xrange(_size1514): + _elem1519 = iprot.readString() + self.success.append(_elem1519) iprot.readListEnd() else: iprot.skip(ftype) @@ -41198,8 +41534,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1513 in self.success: - oprot.writeString(iter1513) + for iter1520 in self.success: + oprot.writeString(iter1520) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -41726,10 +42062,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1517, _size1514) = iprot.readListBegin() - for _i1518 in xrange(_size1514): - _elem1519 = iprot.readString() - self.success.append(_elem1519) + (_etype1524, _size1521) = iprot.readListBegin() + for _i1525 in xrange(_size1521): + _elem1526 = iprot.readString() + self.success.append(_elem1526) iprot.readListEnd() else: iprot.skip(ftype) @@ -41746,8 +42082,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1520 in self.success: - oprot.writeString(iter1520) + for iter1527 in self.success: + oprot.writeString(iter1527) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -44760,10 +45096,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1524, _size1521) = iprot.readListBegin() - for _i1525 in xrange(_size1521): - _elem1526 = iprot.readString() - 
self.success.append(_elem1526) + (_etype1531, _size1528) = iprot.readListBegin() + for _i1532 in xrange(_size1528): + _elem1533 = iprot.readString() + self.success.append(_elem1533) iprot.readListEnd() else: iprot.skip(ftype) @@ -44780,8 +45116,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1527 in self.success: - oprot.writeString(iter1527) + for iter1534 in self.success: + oprot.writeString(iter1534) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -51091,11 +51427,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1531, _size1528) = iprot.readListBegin() - for _i1532 in xrange(_size1528): - _elem1533 = SchemaVersion() - _elem1533.read(iprot) - self.success.append(_elem1533) + (_etype1538, _size1535) = iprot.readListBegin() + for _i1539 in xrange(_size1535): + _elem1540 = SchemaVersion() + _elem1540.read(iprot) + self.success.append(_elem1540) iprot.readListEnd() else: iprot.skip(ftype) @@ -51124,8 +51460,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1534 in self.success: - iter1534.write(oprot) + for iter1541 in self.success: + iter1541.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -52600,11 +52936,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1538, _size1535) = iprot.readListBegin() - for _i1539 in xrange(_size1535): - _elem1540 = RuntimeStat() - _elem1540.read(iprot) - self.success.append(_elem1540) + (_etype1545, _size1542) = iprot.readListBegin() + for _i1546 in xrange(_size1542): + _elem1547 = RuntimeStat() + _elem1547.read(iprot) + self.success.append(_elem1547) iprot.readListEnd() else: iprot.skip(ftype) @@ -52627,8 +52963,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1541 in self.success: - iter1541.write(oprot) + for iter1548 in self.success: + iter1548.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py index ffee182198..499094ea48 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py @@ -9696,6 +9696,7 @@ class PartitionsByExprRequest: - defaultPartitionName - maxParts - catName + - validWriteIdList """ thrift_spec = ( @@ -9706,15 +9707,17 @@ class PartitionsByExprRequest: (4, TType.STRING, 'defaultPartitionName', None, None, ), # 4 (5, TType.I16, 'maxParts', None, -1, ), # 5 (6, TType.STRING, 'catName', None, None, ), # 6 + (7, TType.STRING, 'validWriteIdList', None, None, ), # 7 ) - def __init__(self, dbName=None, tblName=None, expr=None, defaultPartitionName=None, maxParts=thrift_spec[5][4], catName=None,): + def __init__(self, dbName=None, tblName=None, expr=None, defaultPartitionName=None, maxParts=thrift_spec[5][4], catName=None, validWriteIdList=None,): self.dbName = dbName self.tblName = tblName self.expr = expr self.defaultPartitionName = defaultPartitionName self.maxParts = maxParts self.catName = catName + 
self.validWriteIdList = validWriteIdList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -9755,6 +9758,11 @@ def read(self, iprot): self.catName = iprot.readString() else: iprot.skip(ftype) + elif fid == 7: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -9789,6 +9797,10 @@ def write(self, oprot): oprot.writeFieldBegin('catName', TType.STRING, 6) oprot.writeString(self.catName) oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 7) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -9810,6 +9822,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.defaultPartitionName) value = (value * 31) ^ hash(self.maxParts) value = (value * 31) ^ hash(self.catName) + value = (value * 31) ^ hash(self.validWriteIdList) return value def __repr__(self): @@ -10981,6 +10994,7 @@ class PartitionValuesRequest: - ascending - maxParts - catName + - validWriteIdList """ thrift_spec = ( @@ -10994,9 +11008,10 @@ class PartitionValuesRequest: (7, TType.BOOL, 'ascending', None, True, ), # 7 (8, TType.I64, 'maxParts', None, -1, ), # 8 (9, TType.STRING, 'catName', None, None, ), # 9 + (10, TType.STRING, 'validWriteIdList', None, None, ), # 10 ) - def __init__(self, dbName=None, tblName=None, partitionKeys=None, applyDistinct=thrift_spec[4][4], filter=None, partitionOrder=None, ascending=thrift_spec[7][4], maxParts=thrift_spec[8][4], catName=None,): + def __init__(self, dbName=None, tblName=None, partitionKeys=None, applyDistinct=thrift_spec[4][4], filter=None, partitionOrder=None, ascending=thrift_spec[7][4], maxParts=thrift_spec[8][4], catName=None, validWriteIdList=None,): self.dbName = dbName self.tblName = tblName self.partitionKeys = partitionKeys @@ -11006,6 +11021,7 @@ def __init__(self, dbName=None, tblName=None, partitionKeys=None, applyDistinct= self.ascending = ascending self.maxParts = maxParts self.catName = catName + self.validWriteIdList = validWriteIdList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -11073,6 +11089,11 @@ def read(self, iprot): self.catName = iprot.readString() else: iprot.skip(ftype) + elif fid == 10: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -11125,6 +11146,10 @@ def write(self, oprot): oprot.writeFieldBegin('catName', TType.STRING, 9) oprot.writeString(self.catName) oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 10) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -11149,6 +11174,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.ascending) value = (value * 31) ^ hash(self.maxParts) value = (value * 31) ^ hash(self.catName) + value = (value * 31) ^ hash(self.validWriteIdList) return value def __repr__(self): @@ -11322,6 +11348,7 @@ class GetPartitionsByNamesRequest: - get_col_stats - processorCapabilities - processorIdentifier + - validWriteIdList """ thrift_spec = ( @@ 
-11332,15 +11359,17 @@ class GetPartitionsByNamesRequest: (4, TType.BOOL, 'get_col_stats', None, None, ), # 4 (5, TType.LIST, 'processorCapabilities', (TType.STRING,None), None, ), # 5 (6, TType.STRING, 'processorIdentifier', None, None, ), # 6 + (7, TType.STRING, 'validWriteIdList', None, None, ), # 7 ) - def __init__(self, db_name=None, tbl_name=None, names=None, get_col_stats=None, processorCapabilities=None, processorIdentifier=None,): + def __init__(self, db_name=None, tbl_name=None, names=None, get_col_stats=None, processorCapabilities=None, processorIdentifier=None, validWriteIdList=None,): self.db_name = db_name self.tbl_name = tbl_name self.names = names self.get_col_stats = get_col_stats self.processorCapabilities = processorCapabilities self.processorIdentifier = processorIdentifier + self.validWriteIdList = validWriteIdList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -11391,6 +11420,11 @@ def read(self, iprot): self.processorIdentifier = iprot.readString() else: iprot.skip(ftype) + elif fid == 7: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -11431,6 +11465,10 @@ def write(self, oprot): oprot.writeFieldBegin('processorIdentifier', TType.STRING, 6) oprot.writeString(self.processorIdentifier) oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 7) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -11450,6 +11488,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.get_col_stats) value = (value * 31) ^ hash(self.processorCapabilities) value = (value * 31) ^ hash(self.processorIdentifier) + value = (value * 31) ^ hash(self.validWriteIdList) return value def __repr__(self): @@ -13457,6 +13496,164 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) +class TableWriteId: + """ + Attributes: + - fullTableName + - writeId + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'fullTableName', None, None, ), # 1 + (2, TType.I64, 'writeId', None, None, ), # 2 + ) + + def __init__(self, fullTableName=None, writeId=None,): + self.fullTableName = fullTableName + self.writeId = writeId + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.fullTableName = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I64: + self.writeId = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('TableWriteId') + if self.fullTableName is not None: + oprot.writeFieldBegin('fullTableName', 
TType.STRING, 1) + oprot.writeString(self.fullTableName) + oprot.writeFieldEnd() + if self.writeId is not None: + oprot.writeFieldBegin('writeId', TType.I64, 2) + oprot.writeI64(self.writeId) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.fullTableName is None: + raise TProtocol.TProtocolException(message='Required field fullTableName is unset!') + if self.writeId is None: + raise TProtocol.TProtocolException(message='Required field writeId is unset!') + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.fullTableName) + value = (value * 31) ^ hash(self.writeId) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class GetTxnTableWriteIdsResponse: + """ + Attributes: + - tableWriteIds + """ + + thrift_spec = ( + None, # 0 + (1, TType.LIST, 'tableWriteIds', (TType.STRUCT,(TableWriteId, TableWriteId.thrift_spec)), None, ), # 1 + ) + + def __init__(self, tableWriteIds=None,): + self.tableWriteIds = tableWriteIds + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.tableWriteIds = [] + (_etype610, _size607) = iprot.readListBegin() + for _i611 in xrange(_size607): + _elem612 = TableWriteId() + _elem612.read(iprot) + self.tableWriteIds.append(_elem612) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('GetTxnTableWriteIdsResponse') + if self.tableWriteIds is not None: + oprot.writeFieldBegin('tableWriteIds', TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.tableWriteIds)) + for iter613 in self.tableWriteIds: + iter613.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.tableWriteIds is None: + raise TProtocol.TProtocolException(message='Required field tableWriteIds is unset!') + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.tableWriteIds) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + class GetValidWriteIdsResponse: """ Attributes: @@ -13483,11 +13680,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.tblValidWriteIds = [] - (_etype610, _size607) = iprot.readListBegin() - for _i611 in xrange(_size607): - _elem612 
= TableValidWriteIds() - _elem612.read(iprot) - self.tblValidWriteIds.append(_elem612) + (_etype617, _size614) = iprot.readListBegin() + for _i618 in xrange(_size614): + _elem619 = TableValidWriteIds() + _elem619.read(iprot) + self.tblValidWriteIds.append(_elem619) iprot.readListEnd() else: iprot.skip(ftype) @@ -13504,8 +13701,8 @@ def write(self, oprot): if self.tblValidWriteIds is not None: oprot.writeFieldBegin('tblValidWriteIds', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.tblValidWriteIds)) - for iter613 in self.tblValidWriteIds: - iter613.write(oprot) + for iter620 in self.tblValidWriteIds: + iter620.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -13663,10 +13860,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.txnIds = [] - (_etype617, _size614) = iprot.readListBegin() - for _i618 in xrange(_size614): - _elem619 = iprot.readI64() - self.txnIds.append(_elem619) + (_etype624, _size621) = iprot.readListBegin() + for _i625 in xrange(_size621): + _elem626 = iprot.readI64() + self.txnIds.append(_elem626) iprot.readListEnd() else: iprot.skip(ftype) @@ -13678,11 +13875,11 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.srcTxnToWriteIdList = [] - (_etype623, _size620) = iprot.readListBegin() - for _i624 in xrange(_size620): - _elem625 = TxnToWriteId() - _elem625.read(iprot) - self.srcTxnToWriteIdList.append(_elem625) + (_etype630, _size627) = iprot.readListBegin() + for _i631 in xrange(_size627): + _elem632 = TxnToWriteId() + _elem632.read(iprot) + self.srcTxnToWriteIdList.append(_elem632) iprot.readListEnd() else: iprot.skip(ftype) @@ -13707,8 +13904,8 @@ def write(self, oprot): if self.txnIds is not None: oprot.writeFieldBegin('txnIds', TType.LIST, 3) oprot.writeListBegin(TType.I64, len(self.txnIds)) - for iter626 in self.txnIds: - oprot.writeI64(iter626) + for iter633 in self.txnIds: + oprot.writeI64(iter633) oprot.writeListEnd() oprot.writeFieldEnd() if self.replPolicy is not None: @@ -13718,8 +13915,8 @@ def write(self, oprot): if self.srcTxnToWriteIdList is not None: oprot.writeFieldBegin('srcTxnToWriteIdList', TType.LIST, 5) oprot.writeListBegin(TType.STRUCT, len(self.srcTxnToWriteIdList)) - for iter627 in self.srcTxnToWriteIdList: - iter627.write(oprot) + for iter634 in self.srcTxnToWriteIdList: + iter634.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -13779,11 +13976,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.txnToWriteIds = [] - (_etype631, _size628) = iprot.readListBegin() - for _i632 in xrange(_size628): - _elem633 = TxnToWriteId() - _elem633.read(iprot) - self.txnToWriteIds.append(_elem633) + (_etype638, _size635) = iprot.readListBegin() + for _i639 in xrange(_size635): + _elem640 = TxnToWriteId() + _elem640.read(iprot) + self.txnToWriteIds.append(_elem640) iprot.readListEnd() else: iprot.skip(ftype) @@ -13800,8 +13997,8 @@ def write(self, oprot): if self.txnToWriteIds is not None: oprot.writeFieldBegin('txnToWriteIds', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.txnToWriteIds)) - for iter634 in self.txnToWriteIds: - iter634.write(oprot) + for iter641 in self.txnToWriteIds: + iter641.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -14029,11 +14226,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.component = [] - (_etype638, _size635) = iprot.readListBegin() - for _i639 in xrange(_size635): - _elem640 = LockComponent() - _elem640.read(iprot) - 
self.component.append(_elem640) + (_etype645, _size642) = iprot.readListBegin() + for _i646 in xrange(_size642): + _elem647 = LockComponent() + _elem647.read(iprot) + self.component.append(_elem647) iprot.readListEnd() else: iprot.skip(ftype) @@ -14070,8 +14267,8 @@ def write(self, oprot): if self.component is not None: oprot.writeFieldBegin('component', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.component)) - for iter641 in self.component: - iter641.write(oprot) + for iter648 in self.component: + iter648.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.txnid is not None: @@ -14769,11 +14966,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.locks = [] - (_etype645, _size642) = iprot.readListBegin() - for _i646 in xrange(_size642): - _elem647 = ShowLocksResponseElement() - _elem647.read(iprot) - self.locks.append(_elem647) + (_etype652, _size649) = iprot.readListBegin() + for _i653 in xrange(_size649): + _elem654 = ShowLocksResponseElement() + _elem654.read(iprot) + self.locks.append(_elem654) iprot.readListEnd() else: iprot.skip(ftype) @@ -14790,8 +14987,8 @@ def write(self, oprot): if self.locks is not None: oprot.writeFieldBegin('locks', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.locks)) - for iter648 in self.locks: - iter648.write(oprot) + for iter655 in self.locks: + iter655.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -15006,20 +15203,20 @@ def read(self, iprot): if fid == 1: if ftype == TType.SET: self.aborted = set() - (_etype652, _size649) = iprot.readSetBegin() - for _i653 in xrange(_size649): - _elem654 = iprot.readI64() - self.aborted.add(_elem654) + (_etype659, _size656) = iprot.readSetBegin() + for _i660 in xrange(_size656): + _elem661 = iprot.readI64() + self.aborted.add(_elem661) iprot.readSetEnd() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.SET: self.nosuch = set() - (_etype658, _size655) = iprot.readSetBegin() - for _i659 in xrange(_size655): - _elem660 = iprot.readI64() - self.nosuch.add(_elem660) + (_etype665, _size662) = iprot.readSetBegin() + for _i666 in xrange(_size662): + _elem667 = iprot.readI64() + self.nosuch.add(_elem667) iprot.readSetEnd() else: iprot.skip(ftype) @@ -15036,15 +15233,15 @@ def write(self, oprot): if self.aborted is not None: oprot.writeFieldBegin('aborted', TType.SET, 1) oprot.writeSetBegin(TType.I64, len(self.aborted)) - for iter661 in self.aborted: - oprot.writeI64(iter661) + for iter668 in self.aborted: + oprot.writeI64(iter668) oprot.writeSetEnd() oprot.writeFieldEnd() if self.nosuch is not None: oprot.writeFieldBegin('nosuch', TType.SET, 2) oprot.writeSetBegin(TType.I64, len(self.nosuch)) - for iter662 in self.nosuch: - oprot.writeI64(iter662) + for iter669 in self.nosuch: + oprot.writeI64(iter669) oprot.writeSetEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -15141,11 +15338,11 @@ def read(self, iprot): elif fid == 6: if ftype == TType.MAP: self.properties = {} - (_ktype664, _vtype665, _size663 ) = iprot.readMapBegin() - for _i667 in xrange(_size663): - _key668 = iprot.readString() - _val669 = iprot.readString() - self.properties[_key668] = _val669 + (_ktype671, _vtype672, _size670 ) = iprot.readMapBegin() + for _i674 in xrange(_size670): + _key675 = iprot.readString() + _val676 = iprot.readString() + self.properties[_key675] = _val676 iprot.readMapEnd() else: iprot.skip(ftype) @@ -15182,9 +15379,9 @@ def write(self, oprot): if self.properties is not None: oprot.writeFieldBegin('properties', TType.MAP, 6) 
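# ---------------------------------------------------------------------------
# A minimal sketch, not part of the generated code: the TableWriteId and
# GetTxnTableWriteIdsResponse structs introduced above serialize like any
# other generated struct. Assumes the Thrift Python runtime and the generated
# hive_metastore package are importable.
from thrift.protocol import TBinaryProtocol
from thrift.transport import TTransport
from hive_metastore.ttypes import TableWriteId

def roundtrip(obj, cls):
    # Serialize into an in-memory buffer, then decode into a fresh instance.
    buf = TTransport.TMemoryBuffer()
    obj.write(TBinaryProtocol.TBinaryProtocol(buf))
    out = cls()
    out.read(TBinaryProtocol.TBinaryProtocol(TTransport.TMemoryBuffer(buf.getvalue())))
    return out

orig = TableWriteId(fullTableName='default.acid_tbl', writeId=7)
copy = roundtrip(orig, TableWriteId)
copy.validate()  # both fields are required; validate() raises if either is unset
assert copy == orig
# ---------------------------------------------------------------------------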
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.properties)) - for kiter670,viter671 in self.properties.items(): - oprot.writeString(kiter670) - oprot.writeString(viter671) + for kiter677,viter678 in self.properties.items(): + oprot.writeString(kiter677) + oprot.writeString(viter678) oprot.writeMapEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -15901,11 +16098,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.compacts = [] - (_etype675, _size672) = iprot.readListBegin() - for _i676 in xrange(_size672): - _elem677 = ShowCompactResponseElement() - _elem677.read(iprot) - self.compacts.append(_elem677) + (_etype682, _size679) = iprot.readListBegin() + for _i683 in xrange(_size679): + _elem684 = ShowCompactResponseElement() + _elem684.read(iprot) + self.compacts.append(_elem684) iprot.readListEnd() else: iprot.skip(ftype) @@ -15922,8 +16119,8 @@ def write(self, oprot): if self.compacts is not None: oprot.writeFieldBegin('compacts', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.compacts)) - for iter678 in self.compacts: - iter678.write(oprot) + for iter685 in self.compacts: + iter685.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -16012,10 +16209,10 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.partitionnames = [] - (_etype682, _size679) = iprot.readListBegin() - for _i683 in xrange(_size679): - _elem684 = iprot.readString() - self.partitionnames.append(_elem684) + (_etype689, _size686) = iprot.readListBegin() + for _i690 in xrange(_size686): + _elem691 = iprot.readString() + self.partitionnames.append(_elem691) iprot.readListEnd() else: iprot.skip(ftype) @@ -16053,8 +16250,8 @@ def write(self, oprot): if self.partitionnames is not None: oprot.writeFieldBegin('partitionnames', TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.partitionnames)) - for iter685 in self.partitionnames: - oprot.writeString(iter685) + for iter692 in self.partitionnames: + oprot.writeString(iter692) oprot.writeListEnd() oprot.writeFieldEnd() if self.operationType is not None: @@ -16273,10 +16470,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.eventTypeSkipList = [] - (_etype689, _size686) = iprot.readListBegin() - for _i690 in xrange(_size686): - _elem691 = iprot.readString() - self.eventTypeSkipList.append(_elem691) + (_etype696, _size693) = iprot.readListBegin() + for _i697 in xrange(_size693): + _elem698 = iprot.readString() + self.eventTypeSkipList.append(_elem698) iprot.readListEnd() else: iprot.skip(ftype) @@ -16301,8 +16498,8 @@ def write(self, oprot): if self.eventTypeSkipList is not None: oprot.writeFieldBegin('eventTypeSkipList', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.eventTypeSkipList)) - for iter692 in self.eventTypeSkipList: - oprot.writeString(iter692) + for iter699 in self.eventTypeSkipList: + oprot.writeString(iter699) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -16522,11 +16719,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.events = [] - (_etype696, _size693) = iprot.readListBegin() - for _i697 in xrange(_size693): - _elem698 = NotificationEvent() - _elem698.read(iprot) - self.events.append(_elem698) + (_etype703, _size700) = iprot.readListBegin() + for _i704 in xrange(_size700): + _elem705 = NotificationEvent() + _elem705.read(iprot) + self.events.append(_elem705) iprot.readListEnd() else: iprot.skip(ftype) @@ -16543,8 +16740,8 @@ def write(self, oprot): if self.events is not None: 
oprot.writeFieldBegin('events', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.events)) - for iter699 in self.events: - iter699.write(oprot) + for iter706 in self.events: + iter706.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -16867,30 +17064,30 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.filesAdded = [] - (_etype703, _size700) = iprot.readListBegin() - for _i704 in xrange(_size700): - _elem705 = iprot.readString() - self.filesAdded.append(_elem705) + (_etype710, _size707) = iprot.readListBegin() + for _i711 in xrange(_size707): + _elem712 = iprot.readString() + self.filesAdded.append(_elem712) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.LIST: self.filesAddedChecksum = [] - (_etype709, _size706) = iprot.readListBegin() - for _i710 in xrange(_size706): - _elem711 = iprot.readString() - self.filesAddedChecksum.append(_elem711) + (_etype716, _size713) = iprot.readListBegin() + for _i717 in xrange(_size713): + _elem718 = iprot.readString() + self.filesAddedChecksum.append(_elem718) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 4: if ftype == TType.LIST: self.subDirectoryList = [] - (_etype715, _size712) = iprot.readListBegin() - for _i716 in xrange(_size712): - _elem717 = iprot.readString() - self.subDirectoryList.append(_elem717) + (_etype722, _size719) = iprot.readListBegin() + for _i723 in xrange(_size719): + _elem724 = iprot.readString() + self.subDirectoryList.append(_elem724) iprot.readListEnd() else: iprot.skip(ftype) @@ -16911,22 +17108,22 @@ def write(self, oprot): if self.filesAdded is not None: oprot.writeFieldBegin('filesAdded', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.filesAdded)) - for iter718 in self.filesAdded: - oprot.writeString(iter718) + for iter725 in self.filesAdded: + oprot.writeString(iter725) oprot.writeListEnd() oprot.writeFieldEnd() if self.filesAddedChecksum is not None: oprot.writeFieldBegin('filesAddedChecksum', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.filesAddedChecksum)) - for iter719 in self.filesAddedChecksum: - oprot.writeString(iter719) + for iter726 in self.filesAddedChecksum: + oprot.writeString(iter726) oprot.writeListEnd() oprot.writeFieldEnd() if self.subDirectoryList is not None: oprot.writeFieldBegin('subDirectoryList', TType.LIST, 4) oprot.writeListBegin(TType.STRING, len(self.subDirectoryList)) - for iter720 in self.subDirectoryList: - oprot.writeString(iter720) + for iter727 in self.subDirectoryList: + oprot.writeString(iter727) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -17085,10 +17282,10 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.partitionVals = [] - (_etype724, _size721) = iprot.readListBegin() - for _i725 in xrange(_size721): - _elem726 = iprot.readString() - self.partitionVals.append(_elem726) + (_etype731, _size728) = iprot.readListBegin() + for _i732 in xrange(_size728): + _elem733 = iprot.readString() + self.partitionVals.append(_elem733) iprot.readListEnd() else: iprot.skip(ftype) @@ -17126,8 +17323,8 @@ def write(self, oprot): if self.partitionVals is not None: oprot.writeFieldBegin('partitionVals', TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.partitionVals)) - for iter727 in self.partitionVals: - oprot.writeString(iter727) + for iter734 in self.partitionVals: + oprot.writeString(iter734) oprot.writeListEnd() oprot.writeFieldEnd() if self.catName is not None: @@ -17279,10 +17476,10 @@ def read(self, iprot): elif fid == 
6: if ftype == TType.LIST: self.partitionVals = [] - (_etype731, _size728) = iprot.readListBegin() - for _i732 in xrange(_size728): - _elem733 = iprot.readString() - self.partitionVals.append(_elem733) + (_etype738, _size735) = iprot.readListBegin() + for _i739 in xrange(_size735): + _elem740 = iprot.readString() + self.partitionVals.append(_elem740) iprot.readListEnd() else: iprot.skip(ftype) @@ -17319,8 +17516,8 @@ def write(self, oprot): if self.partitionVals is not None: oprot.writeFieldBegin('partitionVals', TType.LIST, 6) oprot.writeListBegin(TType.STRING, len(self.partitionVals)) - for iter734 in self.partitionVals: - oprot.writeString(iter734) + for iter741 in self.partitionVals: + oprot.writeString(iter741) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -17514,12 +17711,12 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.metadata = {} - (_ktype736, _vtype737, _size735 ) = iprot.readMapBegin() - for _i739 in xrange(_size735): - _key740 = iprot.readI64() - _val741 = MetadataPpdResult() - _val741.read(iprot) - self.metadata[_key740] = _val741 + (_ktype743, _vtype744, _size742 ) = iprot.readMapBegin() + for _i746 in xrange(_size742): + _key747 = iprot.readI64() + _val748 = MetadataPpdResult() + _val748.read(iprot) + self.metadata[_key747] = _val748 iprot.readMapEnd() else: iprot.skip(ftype) @@ -17541,9 +17738,9 @@ def write(self, oprot): if self.metadata is not None: oprot.writeFieldBegin('metadata', TType.MAP, 1) oprot.writeMapBegin(TType.I64, TType.STRUCT, len(self.metadata)) - for kiter742,viter743 in self.metadata.items(): - oprot.writeI64(kiter742) - viter743.write(oprot) + for kiter749,viter750 in self.metadata.items(): + oprot.writeI64(kiter749) + viter750.write(oprot) oprot.writeMapEnd() oprot.writeFieldEnd() if self.isSupported is not None: @@ -17613,10 +17810,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.fileIds = [] - (_etype747, _size744) = iprot.readListBegin() - for _i748 in xrange(_size744): - _elem749 = iprot.readI64() - self.fileIds.append(_elem749) + (_etype754, _size751) = iprot.readListBegin() + for _i755 in xrange(_size751): + _elem756 = iprot.readI64() + self.fileIds.append(_elem756) iprot.readListEnd() else: iprot.skip(ftype) @@ -17648,8 +17845,8 @@ def write(self, oprot): if self.fileIds is not None: oprot.writeFieldBegin('fileIds', TType.LIST, 1) oprot.writeListBegin(TType.I64, len(self.fileIds)) - for iter750 in self.fileIds: - oprot.writeI64(iter750) + for iter757 in self.fileIds: + oprot.writeI64(iter757) oprot.writeListEnd() oprot.writeFieldEnd() if self.expr is not None: @@ -17723,11 +17920,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.metadata = {} - (_ktype752, _vtype753, _size751 ) = iprot.readMapBegin() - for _i755 in xrange(_size751): - _key756 = iprot.readI64() - _val757 = iprot.readString() - self.metadata[_key756] = _val757 + (_ktype759, _vtype760, _size758 ) = iprot.readMapBegin() + for _i762 in xrange(_size758): + _key763 = iprot.readI64() + _val764 = iprot.readString() + self.metadata[_key763] = _val764 iprot.readMapEnd() else: iprot.skip(ftype) @@ -17749,9 +17946,9 @@ def write(self, oprot): if self.metadata is not None: oprot.writeFieldBegin('metadata', TType.MAP, 1) oprot.writeMapBegin(TType.I64, TType.STRING, len(self.metadata)) - for kiter758,viter759 in self.metadata.items(): - oprot.writeI64(kiter758) - oprot.writeString(viter759) + for kiter765,viter766 in self.metadata.items(): + oprot.writeI64(kiter765) + oprot.writeString(viter766) 
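# ---------------------------------------------------------------------------
# Stepping back from the mechanical renames: a sketch, not part of the
# generated code, of how a caller threads the new snapshot fields through one
# of the request structs extended near the top of this file. The token value
# is illustrative only; real values are produced by the write-id list's
# writeToString() on the Hive side and are treated as opaque strings here.
# Assumes the generated hive_metastore package is importable.
from hive_metastore.ttypes import GetPartitionsByNamesRequest

req = GetPartitionsByNamesRequest(
    db_name='default',
    tbl_name='acid_tbl',
    names=['ds=2019-08-03'],
    validWriteIdList='default.acid_tbl:5:9223372036854775807::',  # illustrative token
)
# An older server that predates field 7 will skip validWriteIdList on read.
# ---------------------------------------------------------------------------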
oprot.writeMapEnd() oprot.writeFieldEnd() if self.isSupported is not None: @@ -17812,10 +18009,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.fileIds = [] - (_etype763, _size760) = iprot.readListBegin() - for _i764 in xrange(_size760): - _elem765 = iprot.readI64() - self.fileIds.append(_elem765) + (_etype770, _size767) = iprot.readListBegin() + for _i771 in xrange(_size767): + _elem772 = iprot.readI64() + self.fileIds.append(_elem772) iprot.readListEnd() else: iprot.skip(ftype) @@ -17832,8 +18029,8 @@ def write(self, oprot): if self.fileIds is not None: oprot.writeFieldBegin('fileIds', TType.LIST, 1) oprot.writeListBegin(TType.I64, len(self.fileIds)) - for iter766 in self.fileIds: - oprot.writeI64(iter766) + for iter773 in self.fileIds: + oprot.writeI64(iter773) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -17939,20 +18136,20 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.fileIds = [] - (_etype770, _size767) = iprot.readListBegin() - for _i771 in xrange(_size767): - _elem772 = iprot.readI64() - self.fileIds.append(_elem772) + (_etype777, _size774) = iprot.readListBegin() + for _i778 in xrange(_size774): + _elem779 = iprot.readI64() + self.fileIds.append(_elem779) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.LIST: self.metadata = [] - (_etype776, _size773) = iprot.readListBegin() - for _i777 in xrange(_size773): - _elem778 = iprot.readString() - self.metadata.append(_elem778) + (_etype783, _size780) = iprot.readListBegin() + for _i784 in xrange(_size780): + _elem785 = iprot.readString() + self.metadata.append(_elem785) iprot.readListEnd() else: iprot.skip(ftype) @@ -17974,15 +18171,15 @@ def write(self, oprot): if self.fileIds is not None: oprot.writeFieldBegin('fileIds', TType.LIST, 1) oprot.writeListBegin(TType.I64, len(self.fileIds)) - for iter779 in self.fileIds: - oprot.writeI64(iter779) + for iter786 in self.fileIds: + oprot.writeI64(iter786) oprot.writeListEnd() oprot.writeFieldEnd() if self.metadata is not None: oprot.writeFieldBegin('metadata', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.metadata)) - for iter780 in self.metadata: - oprot.writeString(iter780) + for iter787 in self.metadata: + oprot.writeString(iter787) oprot.writeListEnd() oprot.writeFieldEnd() if self.type is not None: @@ -18090,10 +18287,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.fileIds = [] - (_etype784, _size781) = iprot.readListBegin() - for _i785 in xrange(_size781): - _elem786 = iprot.readI64() - self.fileIds.append(_elem786) + (_etype791, _size788) = iprot.readListBegin() + for _i792 in xrange(_size788): + _elem793 = iprot.readI64() + self.fileIds.append(_elem793) iprot.readListEnd() else: iprot.skip(ftype) @@ -18110,8 +18307,8 @@ def write(self, oprot): if self.fileIds is not None: oprot.writeFieldBegin('fileIds', TType.LIST, 1) oprot.writeListBegin(TType.I64, len(self.fileIds)) - for iter787 in self.fileIds: - oprot.writeI64(iter787) + for iter794 in self.fileIds: + oprot.writeI64(iter794) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -18340,11 +18537,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.functions = [] - (_etype791, _size788) = iprot.readListBegin() - for _i792 in xrange(_size788): - _elem793 = Function() - _elem793.read(iprot) - self.functions.append(_elem793) + (_etype798, _size795) = iprot.readListBegin() + for _i799 in xrange(_size795): + _elem800 = Function() + _elem800.read(iprot) + 
self.functions.append(_elem800) iprot.readListEnd() else: iprot.skip(ftype) @@ -18361,8 +18558,8 @@ def write(self, oprot): if self.functions is not None: oprot.writeFieldBegin('functions', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.functions)) - for iter794 in self.functions: - iter794.write(oprot) + for iter801 in self.functions: + iter801.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -18414,10 +18611,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.values = [] - (_etype798, _size795) = iprot.readListBegin() - for _i799 in xrange(_size795): - _elem800 = iprot.readI32() - self.values.append(_elem800) + (_etype805, _size802) = iprot.readListBegin() + for _i806 in xrange(_size802): + _elem807 = iprot.readI32() + self.values.append(_elem807) iprot.readListEnd() else: iprot.skip(ftype) @@ -18434,8 +18631,8 @@ def write(self, oprot): if self.values is not None: oprot.writeFieldBegin('values', TType.LIST, 1) oprot.writeListBegin(TType.I32, len(self.values)) - for iter801 in self.values: - oprot.writeI32(iter801) + for iter808 in self.values: + oprot.writeI32(iter808) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -18542,10 +18739,10 @@ def read(self, iprot): elif fid == 8: if ftype == TType.LIST: self.processorCapabilities = [] - (_etype805, _size802) = iprot.readListBegin() - for _i806 in xrange(_size802): - _elem807 = iprot.readString() - self.processorCapabilities.append(_elem807) + (_etype812, _size809) = iprot.readListBegin() + for _i813 in xrange(_size809): + _elem814 = iprot.readString() + self.processorCapabilities.append(_elem814) iprot.readListEnd() else: iprot.skip(ftype) @@ -18591,8 +18788,8 @@ def write(self, oprot): if self.processorCapabilities is not None: oprot.writeFieldBegin('processorCapabilities', TType.LIST, 8) oprot.writeListBegin(TType.STRING, len(self.processorCapabilities)) - for iter808 in self.processorCapabilities: - oprot.writeString(iter808) + for iter815 in self.processorCapabilities: + oprot.writeString(iter815) oprot.writeListEnd() oprot.writeFieldEnd() if self.processorIdentifier is not None: @@ -18760,10 +18957,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.tblNames = [] - (_etype812, _size809) = iprot.readListBegin() - for _i813 in xrange(_size809): - _elem814 = iprot.readString() - self.tblNames.append(_elem814) + (_etype819, _size816) = iprot.readListBegin() + for _i820 in xrange(_size816): + _elem821 = iprot.readString() + self.tblNames.append(_elem821) iprot.readListEnd() else: iprot.skip(ftype) @@ -18781,10 +18978,10 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.processorCapabilities = [] - (_etype818, _size815) = iprot.readListBegin() - for _i819 in xrange(_size815): - _elem820 = iprot.readString() - self.processorCapabilities.append(_elem820) + (_etype825, _size822) = iprot.readListBegin() + for _i826 in xrange(_size822): + _elem827 = iprot.readString() + self.processorCapabilities.append(_elem827) iprot.readListEnd() else: iprot.skip(ftype) @@ -18810,8 +19007,8 @@ def write(self, oprot): if self.tblNames is not None: oprot.writeFieldBegin('tblNames', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.tblNames)) - for iter821 in self.tblNames: - oprot.writeString(iter821) + for iter828 in self.tblNames: + oprot.writeString(iter828) oprot.writeListEnd() oprot.writeFieldEnd() if self.capabilities is not None: @@ -18825,8 +19022,8 @@ def write(self, oprot): if self.processorCapabilities is not None: 
oprot.writeFieldBegin('processorCapabilities', TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.processorCapabilities)) - for iter822 in self.processorCapabilities: - oprot.writeString(iter822) + for iter829 in self.processorCapabilities: + oprot.writeString(iter829) oprot.writeListEnd() oprot.writeFieldEnd() if self.processorIdentifier is not None: @@ -18889,11 +19086,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.tables = [] - (_etype826, _size823) = iprot.readListBegin() - for _i827 in xrange(_size823): - _elem828 = Table() - _elem828.read(iprot) - self.tables.append(_elem828) + (_etype833, _size830) = iprot.readListBegin() + for _i834 in xrange(_size830): + _elem835 = Table() + _elem835.read(iprot) + self.tables.append(_elem835) iprot.readListEnd() else: iprot.skip(ftype) @@ -18910,8 +19107,8 @@ def write(self, oprot): if self.tables is not None: oprot.writeFieldBegin('tables', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.tables)) - for iter829 in self.tables: - iter829.write(oprot) + for iter836 in self.tables: + iter836.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -19008,10 +19205,10 @@ def read(self, iprot): elif fid == 6: if ftype == TType.LIST: self.processorCapabilities = [] - (_etype833, _size830) = iprot.readListBegin() - for _i834 in xrange(_size830): - _elem835 = iprot.readString() - self.processorCapabilities.append(_elem835) + (_etype840, _size837) = iprot.readListBegin() + for _i841 in xrange(_size837): + _elem842 = iprot.readString() + self.processorCapabilities.append(_elem842) iprot.readListEnd() else: iprot.skip(ftype) @@ -19053,8 +19250,8 @@ def write(self, oprot): if self.processorCapabilities is not None: oprot.writeFieldBegin('processorCapabilities', TType.LIST, 6) oprot.writeListBegin(TType.STRING, len(self.processorCapabilities)) - for iter836 in self.processorCapabilities: - oprot.writeString(iter836) + for iter843 in self.processorCapabilities: + oprot.writeString(iter843) oprot.writeListEnd() oprot.writeFieldEnd() if self.processorIdentifier is not None: @@ -19143,20 +19340,20 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.requiredReadCapabilities = [] - (_etype840, _size837) = iprot.readListBegin() - for _i841 in xrange(_size837): - _elem842 = iprot.readString() - self.requiredReadCapabilities.append(_elem842) + (_etype847, _size844) = iprot.readListBegin() + for _i848 in xrange(_size844): + _elem849 = iprot.readString() + self.requiredReadCapabilities.append(_elem849) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 4: if ftype == TType.LIST: self.requiredWriteCapabilities = [] - (_etype846, _size843) = iprot.readListBegin() - for _i847 in xrange(_size843): - _elem848 = iprot.readString() - self.requiredWriteCapabilities.append(_elem848) + (_etype853, _size850) = iprot.readListBegin() + for _i854 in xrange(_size850): + _elem855 = iprot.readString() + self.requiredWriteCapabilities.append(_elem855) iprot.readListEnd() else: iprot.skip(ftype) @@ -19181,15 +19378,15 @@ def write(self, oprot): if self.requiredReadCapabilities is not None: oprot.writeFieldBegin('requiredReadCapabilities', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.requiredReadCapabilities)) - for iter849 in self.requiredReadCapabilities: - oprot.writeString(iter849) + for iter856 in self.requiredReadCapabilities: + oprot.writeString(iter856) oprot.writeListEnd() oprot.writeFieldEnd() if self.requiredWriteCapabilities is not None: 
oprot.writeFieldBegin('requiredWriteCapabilities', TType.LIST, 4) oprot.writeListBegin(TType.STRING, len(self.requiredWriteCapabilities)) - for iter850 in self.requiredWriteCapabilities: - oprot.writeString(iter850) + for iter857 in self.requiredWriteCapabilities: + oprot.writeString(iter857) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -20491,44 +20688,44 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.pools = [] - (_etype854, _size851) = iprot.readListBegin() - for _i855 in xrange(_size851): - _elem856 = WMPool() - _elem856.read(iprot) - self.pools.append(_elem856) + (_etype861, _size858) = iprot.readListBegin() + for _i862 in xrange(_size858): + _elem863 = WMPool() + _elem863.read(iprot) + self.pools.append(_elem863) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.LIST: self.mappings = [] - (_etype860, _size857) = iprot.readListBegin() - for _i861 in xrange(_size857): - _elem862 = WMMapping() - _elem862.read(iprot) - self.mappings.append(_elem862) + (_etype867, _size864) = iprot.readListBegin() + for _i868 in xrange(_size864): + _elem869 = WMMapping() + _elem869.read(iprot) + self.mappings.append(_elem869) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 4: if ftype == TType.LIST: self.triggers = [] - (_etype866, _size863) = iprot.readListBegin() - for _i867 in xrange(_size863): - _elem868 = WMTrigger() - _elem868.read(iprot) - self.triggers.append(_elem868) + (_etype873, _size870) = iprot.readListBegin() + for _i874 in xrange(_size870): + _elem875 = WMTrigger() + _elem875.read(iprot) + self.triggers.append(_elem875) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 5: if ftype == TType.LIST: self.poolTriggers = [] - (_etype872, _size869) = iprot.readListBegin() - for _i873 in xrange(_size869): - _elem874 = WMPoolTrigger() - _elem874.read(iprot) - self.poolTriggers.append(_elem874) + (_etype879, _size876) = iprot.readListBegin() + for _i880 in xrange(_size876): + _elem881 = WMPoolTrigger() + _elem881.read(iprot) + self.poolTriggers.append(_elem881) iprot.readListEnd() else: iprot.skip(ftype) @@ -20549,29 +20746,29 @@ def write(self, oprot): if self.pools is not None: oprot.writeFieldBegin('pools', TType.LIST, 2) oprot.writeListBegin(TType.STRUCT, len(self.pools)) - for iter875 in self.pools: - iter875.write(oprot) + for iter882 in self.pools: + iter882.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.mappings is not None: oprot.writeFieldBegin('mappings', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.mappings)) - for iter876 in self.mappings: - iter876.write(oprot) + for iter883 in self.mappings: + iter883.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.triggers is not None: oprot.writeFieldBegin('triggers', TType.LIST, 4) oprot.writeListBegin(TType.STRUCT, len(self.triggers)) - for iter877 in self.triggers: - iter877.write(oprot) + for iter884 in self.triggers: + iter884.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.poolTriggers is not None: oprot.writeFieldBegin('poolTriggers', TType.LIST, 5) oprot.writeListBegin(TType.STRUCT, len(self.poolTriggers)) - for iter878 in self.poolTriggers: - iter878.write(oprot) + for iter885 in self.poolTriggers: + iter885.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -21096,11 +21293,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.resourcePlans = [] - (_etype882, _size879) = iprot.readListBegin() - for _i883 in xrange(_size879): - _elem884 = 
WMResourcePlan() - _elem884.read(iprot) - self.resourcePlans.append(_elem884) + (_etype889, _size886) = iprot.readListBegin() + for _i890 in xrange(_size886): + _elem891 = WMResourcePlan() + _elem891.read(iprot) + self.resourcePlans.append(_elem891) iprot.readListEnd() else: iprot.skip(ftype) @@ -21117,8 +21314,8 @@ def write(self, oprot): if self.resourcePlans is not None: oprot.writeFieldBegin('resourcePlans', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.resourcePlans)) - for iter885 in self.resourcePlans: - iter885.write(oprot) + for iter892 in self.resourcePlans: + iter892.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -21448,20 +21645,20 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.errors = [] - (_etype889, _size886) = iprot.readListBegin() - for _i890 in xrange(_size886): - _elem891 = iprot.readString() - self.errors.append(_elem891) + (_etype896, _size893) = iprot.readListBegin() + for _i897 in xrange(_size893): + _elem898 = iprot.readString() + self.errors.append(_elem898) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.LIST: self.warnings = [] - (_etype895, _size892) = iprot.readListBegin() - for _i896 in xrange(_size892): - _elem897 = iprot.readString() - self.warnings.append(_elem897) + (_etype902, _size899) = iprot.readListBegin() + for _i903 in xrange(_size899): + _elem904 = iprot.readString() + self.warnings.append(_elem904) iprot.readListEnd() else: iprot.skip(ftype) @@ -21478,15 +21675,15 @@ def write(self, oprot): if self.errors is not None: oprot.writeFieldBegin('errors', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.errors)) - for iter898 in self.errors: - oprot.writeString(iter898) + for iter905 in self.errors: + oprot.writeString(iter905) oprot.writeListEnd() oprot.writeFieldEnd() if self.warnings is not None: oprot.writeFieldBegin('warnings', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.warnings)) - for iter899 in self.warnings: - oprot.writeString(iter899) + for iter906 in self.warnings: + oprot.writeString(iter906) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -22102,11 +22299,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.triggers = [] - (_etype903, _size900) = iprot.readListBegin() - for _i904 in xrange(_size900): - _elem905 = WMTrigger() - _elem905.read(iprot) - self.triggers.append(_elem905) + (_etype910, _size907) = iprot.readListBegin() + for _i911 in xrange(_size907): + _elem912 = WMTrigger() + _elem912.read(iprot) + self.triggers.append(_elem912) iprot.readListEnd() else: iprot.skip(ftype) @@ -22123,8 +22320,8 @@ def write(self, oprot): if self.triggers is not None: oprot.writeFieldBegin('triggers', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.triggers)) - for iter906 in self.triggers: - iter906.write(oprot) + for iter913 in self.triggers: + iter913.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -23334,11 +23531,11 @@ def read(self, iprot): elif fid == 4: if ftype == TType.LIST: self.cols = [] - (_etype910, _size907) = iprot.readListBegin() - for _i911 in xrange(_size907): - _elem912 = FieldSchema() - _elem912.read(iprot) - self.cols.append(_elem912) + (_etype917, _size914) = iprot.readListBegin() + for _i918 in xrange(_size914): + _elem919 = FieldSchema() + _elem919.read(iprot) + self.cols.append(_elem919) iprot.readListEnd() else: iprot.skip(ftype) @@ -23398,8 +23595,8 @@ def write(self, oprot): if self.cols is not None: 
oprot.writeFieldBegin('cols', TType.LIST, 4) oprot.writeListBegin(TType.STRUCT, len(self.cols)) - for iter913 in self.cols: - iter913.write(oprot) + for iter920 in self.cols: + iter920.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.state is not None: @@ -23654,11 +23851,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.schemaVersions = [] - (_etype917, _size914) = iprot.readListBegin() - for _i918 in xrange(_size914): - _elem919 = SchemaVersionDescriptor() - _elem919.read(iprot) - self.schemaVersions.append(_elem919) + (_etype924, _size921) = iprot.readListBegin() + for _i925 in xrange(_size921): + _elem926 = SchemaVersionDescriptor() + _elem926.read(iprot) + self.schemaVersions.append(_elem926) iprot.readListEnd() else: iprot.skip(ftype) @@ -23675,8 +23872,8 @@ def write(self, oprot): if self.schemaVersions is not None: oprot.writeFieldBegin('schemaVersions', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.schemaVersions)) - for iter920 in self.schemaVersions: - iter920.write(oprot) + for iter927 in self.schemaVersions: + iter927.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -24167,76 +24364,76 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.primaryKeys = [] - (_etype924, _size921) = iprot.readListBegin() - for _i925 in xrange(_size921): - _elem926 = SQLPrimaryKey() - _elem926.read(iprot) - self.primaryKeys.append(_elem926) + (_etype931, _size928) = iprot.readListBegin() + for _i932 in xrange(_size928): + _elem933 = SQLPrimaryKey() + _elem933.read(iprot) + self.primaryKeys.append(_elem933) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 4: if ftype == TType.LIST: self.foreignKeys = [] - (_etype930, _size927) = iprot.readListBegin() - for _i931 in xrange(_size927): - _elem932 = SQLForeignKey() - _elem932.read(iprot) - self.foreignKeys.append(_elem932) + (_etype937, _size934) = iprot.readListBegin() + for _i938 in xrange(_size934): + _elem939 = SQLForeignKey() + _elem939.read(iprot) + self.foreignKeys.append(_elem939) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 5: if ftype == TType.LIST: self.uniqueConstraints = [] - (_etype936, _size933) = iprot.readListBegin() - for _i937 in xrange(_size933): - _elem938 = SQLUniqueConstraint() - _elem938.read(iprot) - self.uniqueConstraints.append(_elem938) + (_etype943, _size940) = iprot.readListBegin() + for _i944 in xrange(_size940): + _elem945 = SQLUniqueConstraint() + _elem945.read(iprot) + self.uniqueConstraints.append(_elem945) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 6: if ftype == TType.LIST: self.notNullConstraints = [] - (_etype942, _size939) = iprot.readListBegin() - for _i943 in xrange(_size939): - _elem944 = SQLNotNullConstraint() - _elem944.read(iprot) - self.notNullConstraints.append(_elem944) + (_etype949, _size946) = iprot.readListBegin() + for _i950 in xrange(_size946): + _elem951 = SQLNotNullConstraint() + _elem951.read(iprot) + self.notNullConstraints.append(_elem951) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 7: if ftype == TType.LIST: self.defaultConstraints = [] - (_etype948, _size945) = iprot.readListBegin() - for _i949 in xrange(_size945): - _elem950 = SQLDefaultConstraint() - _elem950.read(iprot) - self.defaultConstraints.append(_elem950) + (_etype955, _size952) = iprot.readListBegin() + for _i956 in xrange(_size952): + _elem957 = SQLDefaultConstraint() + _elem957.read(iprot) + self.defaultConstraints.append(_elem957) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 8: 
if ftype == TType.LIST: self.checkConstraints = [] - (_etype954, _size951) = iprot.readListBegin() - for _i955 in xrange(_size951): - _elem956 = SQLCheckConstraint() - _elem956.read(iprot) - self.checkConstraints.append(_elem956) + (_etype961, _size958) = iprot.readListBegin() + for _i962 in xrange(_size958): + _elem963 = SQLCheckConstraint() + _elem963.read(iprot) + self.checkConstraints.append(_elem963) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 9: if ftype == TType.LIST: self.processorCapabilities = [] - (_etype960, _size957) = iprot.readListBegin() - for _i961 in xrange(_size957): - _elem962 = iprot.readString() - self.processorCapabilities.append(_elem962) + (_etype967, _size964) = iprot.readListBegin() + for _i968 in xrange(_size964): + _elem969 = iprot.readString() + self.processorCapabilities.append(_elem969) iprot.readListEnd() else: iprot.skip(ftype) @@ -24266,50 +24463,50 @@ def write(self, oprot): if self.primaryKeys is not None: oprot.writeFieldBegin('primaryKeys', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.primaryKeys)) - for iter963 in self.primaryKeys: - iter963.write(oprot) + for iter970 in self.primaryKeys: + iter970.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.foreignKeys is not None: oprot.writeFieldBegin('foreignKeys', TType.LIST, 4) oprot.writeListBegin(TType.STRUCT, len(self.foreignKeys)) - for iter964 in self.foreignKeys: - iter964.write(oprot) + for iter971 in self.foreignKeys: + iter971.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.uniqueConstraints is not None: oprot.writeFieldBegin('uniqueConstraints', TType.LIST, 5) oprot.writeListBegin(TType.STRUCT, len(self.uniqueConstraints)) - for iter965 in self.uniqueConstraints: - iter965.write(oprot) + for iter972 in self.uniqueConstraints: + iter972.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.notNullConstraints is not None: oprot.writeFieldBegin('notNullConstraints', TType.LIST, 6) oprot.writeListBegin(TType.STRUCT, len(self.notNullConstraints)) - for iter966 in self.notNullConstraints: - iter966.write(oprot) + for iter973 in self.notNullConstraints: + iter973.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.defaultConstraints is not None: oprot.writeFieldBegin('defaultConstraints', TType.LIST, 7) oprot.writeListBegin(TType.STRUCT, len(self.defaultConstraints)) - for iter967 in self.defaultConstraints: - iter967.write(oprot) + for iter974 in self.defaultConstraints: + iter974.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.checkConstraints is not None: oprot.writeFieldBegin('checkConstraints', TType.LIST, 8) oprot.writeListBegin(TType.STRUCT, len(self.checkConstraints)) - for iter968 in self.checkConstraints: - iter968.write(oprot) + for iter975 in self.checkConstraints: + iter975.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.processorCapabilities is not None: oprot.writeFieldBegin('processorCapabilities', TType.LIST, 9) oprot.writeListBegin(TType.STRING, len(self.processorCapabilities)) - for iter969 in self.processorCapabilities: - oprot.writeString(iter969) + for iter976 in self.processorCapabilities: + oprot.writeString(iter976) oprot.writeListEnd() oprot.writeFieldEnd() if self.processorIdentifier is not None: @@ -24409,11 +24606,11 @@ def read(self, iprot): elif fid == 4: if ftype == TType.LIST: self.partitions = [] - (_etype973, _size970) = iprot.readListBegin() - for _i974 in xrange(_size970): - _elem975 = Partition() - _elem975.read(iprot) - self.partitions.append(_elem975) + 
(_etype980, _size977) = iprot.readListBegin() + for _i981 in xrange(_size977): + _elem982 = Partition() + _elem982.read(iprot) + self.partitions.append(_elem982) iprot.readListEnd() else: iprot.skip(ftype) @@ -24458,8 +24655,8 @@ def write(self, oprot): if self.partitions is not None: oprot.writeFieldBegin('partitions', TType.LIST, 4) oprot.writeListBegin(TType.STRUCT, len(self.partitions)) - for iter976 in self.partitions: - iter976.write(oprot) + for iter983 in self.partitions: + iter983.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.environmentContext is not None: @@ -24611,10 +24808,10 @@ def read(self, iprot): elif fid == 4: if ftype == TType.LIST: self.partVals = [] - (_etype980, _size977) = iprot.readListBegin() - for _i981 in xrange(_size977): - _elem982 = iprot.readString() - self.partVals.append(_elem982) + (_etype987, _size984) = iprot.readListBegin() + for _i988 in xrange(_size984): + _elem989 = iprot.readString() + self.partVals.append(_elem989) iprot.readListEnd() else: iprot.skip(ftype) @@ -24654,8 +24851,8 @@ def write(self, oprot): if self.partVals is not None: oprot.writeFieldBegin('partVals', TType.LIST, 4) oprot.writeListBegin(TType.STRING, len(self.partVals)) - for iter983 in self.partVals: - oprot.writeString(iter983) + for iter990 in self.partVals: + oprot.writeString(iter990) oprot.writeListEnd() oprot.writeFieldEnd() if self.newPart is not None: @@ -24977,10 +25174,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.fieldList = [] - (_etype987, _size984) = iprot.readListBegin() - for _i988 in xrange(_size984): - _elem989 = iprot.readString() - self.fieldList.append(_elem989) + (_etype994, _size991) = iprot.readListBegin() + for _i995 in xrange(_size991): + _elem996 = iprot.readString() + self.fieldList.append(_elem996) iprot.readListEnd() else: iprot.skip(ftype) @@ -25007,8 +25204,8 @@ def write(self, oprot): if self.fieldList is not None: oprot.writeFieldBegin('fieldList', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.fieldList)) - for iter990 in self.fieldList: - oprot.writeString(iter990) + for iter997 in self.fieldList: + oprot.writeString(iter997) oprot.writeListEnd() oprot.writeFieldEnd() if self.includeParamKeyPattern is not None: @@ -25084,10 +25281,10 @@ def read(self, iprot): elif fid == 8: if ftype == TType.LIST: self.filters = [] - (_etype994, _size991) = iprot.readListBegin() - for _i995 in xrange(_size991): - _elem996 = iprot.readString() - self.filters.append(_elem996) + (_etype1001, _size998) = iprot.readListBegin() + for _i1002 in xrange(_size998): + _elem1003 = iprot.readString() + self.filters.append(_elem1003) iprot.readListEnd() else: iprot.skip(ftype) @@ -25108,8 +25305,8 @@ def write(self, oprot): if self.filters is not None: oprot.writeFieldBegin('filters', TType.LIST, 8) oprot.writeListBegin(TType.STRING, len(self.filters)) - for iter997 in self.filters: - oprot.writeString(iter997) + for iter1004 in self.filters: + oprot.writeString(iter1004) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -25162,11 +25359,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.partitionSpec = [] - (_etype1001, _size998) = iprot.readListBegin() - for _i1002 in xrange(_size998): - _elem1003 = PartitionSpec() - _elem1003.read(iprot) - self.partitionSpec.append(_elem1003) + (_etype1008, _size1005) = iprot.readListBegin() + for _i1009 in xrange(_size1005): + _elem1010 = PartitionSpec() + _elem1010.read(iprot) + self.partitionSpec.append(_elem1010) iprot.readListEnd() else: 
iprot.skip(ftype) @@ -25183,8 +25380,8 @@ def write(self, oprot): if self.partitionSpec is not None: oprot.writeFieldBegin('partitionSpec', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.partitionSpec)) - for iter1004 in self.partitionSpec: - iter1004.write(oprot) + for iter1011 in self.partitionSpec: + iter1011.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -25223,6 +25420,7 @@ class GetPartitionsRequest: - filterSpec - processorCapabilities - processorIdentifier + - validWriteIdList """ thrift_spec = ( @@ -25237,9 +25435,10 @@ class GetPartitionsRequest: (8, TType.STRUCT, 'filterSpec', (GetPartitionsFilterSpec, GetPartitionsFilterSpec.thrift_spec), None, ), # 8 (9, TType.LIST, 'processorCapabilities', (TType.STRING,None), None, ), # 9 (10, TType.STRING, 'processorIdentifier', None, None, ), # 10 + (11, TType.STRING, 'validWriteIdList', None, None, ), # 11 ) - def __init__(self, catName=None, dbName=None, tblName=None, withAuth=None, user=None, groupNames=None, projectionSpec=None, filterSpec=None, processorCapabilities=None, processorIdentifier=None,): + def __init__(self, catName=None, dbName=None, tblName=None, withAuth=None, user=None, groupNames=None, projectionSpec=None, filterSpec=None, processorCapabilities=None, processorIdentifier=None, validWriteIdList=None,): self.catName = catName self.dbName = dbName self.tblName = tblName @@ -25250,6 +25449,7 @@ def __init__(self, catName=None, dbName=None, tblName=None, withAuth=None, user= self.filterSpec = filterSpec self.processorCapabilities = processorCapabilities self.processorIdentifier = processorIdentifier + self.validWriteIdList = validWriteIdList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -25288,10 +25488,10 @@ def read(self, iprot): elif fid == 6: if ftype == TType.LIST: self.groupNames = [] - (_etype1008, _size1005) = iprot.readListBegin() - for _i1009 in xrange(_size1005): - _elem1010 = iprot.readString() - self.groupNames.append(_elem1010) + (_etype1015, _size1012) = iprot.readListBegin() + for _i1016 in xrange(_size1012): + _elem1017 = iprot.readString() + self.groupNames.append(_elem1017) iprot.readListEnd() else: iprot.skip(ftype) @@ -25310,10 +25510,10 @@ def read(self, iprot): elif fid == 9: if ftype == TType.LIST: self.processorCapabilities = [] - (_etype1014, _size1011) = iprot.readListBegin() - for _i1015 in xrange(_size1011): - _elem1016 = iprot.readString() - self.processorCapabilities.append(_elem1016) + (_etype1021, _size1018) = iprot.readListBegin() + for _i1022 in xrange(_size1018): + _elem1023 = iprot.readString() + self.processorCapabilities.append(_elem1023) iprot.readListEnd() else: iprot.skip(ftype) @@ -25322,6 +25522,11 @@ def read(self, iprot): self.processorIdentifier = iprot.readString() else: iprot.skip(ftype) + elif fid == 11: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -25355,8 +25560,8 @@ def write(self, oprot): if self.groupNames is not None: oprot.writeFieldBegin('groupNames', TType.LIST, 6) oprot.writeListBegin(TType.STRING, len(self.groupNames)) - for iter1017 in self.groupNames: - oprot.writeString(iter1017) + for iter1024 in self.groupNames: + oprot.writeString(iter1024) oprot.writeListEnd() oprot.writeFieldEnd() if self.projectionSpec is not None: @@ -25370,14 +25575,18 @@ def 
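
Review note: the substantive Python change in this file is the new optional string field validWriteIdList (field id 11) on GetPartitionsRequest, wired through thrift_spec, __init__, read above, and write and __hash__ below. A hedged usage sketch, assuming the regenerated module keeps the constructor shown in this hunk; the import path and the example write-id string are illustrative, not from the patch:

    # Illustrative only: module path and values are assumptions.
    from hive_metastore.ttypes import GetPartitionsRequest

    req = GetPartitionsRequest(
        dbName='default',
        tblName='acid_tbl',
        # Serialized snapshot of valid write ids for the caller's transaction,
        # letting the metastore answer with partition metadata consistent
        # with what that transaction is allowed to see.
        validWriteIdList='default.acid_tbl:7:9223372036854775807::',
    )
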
write(self, oprot): if self.processorCapabilities is not None: oprot.writeFieldBegin('processorCapabilities', TType.LIST, 9) oprot.writeListBegin(TType.STRING, len(self.processorCapabilities)) - for iter1018 in self.processorCapabilities: - oprot.writeString(iter1018) + for iter1025 in self.processorCapabilities: + oprot.writeString(iter1025) oprot.writeListEnd() oprot.writeFieldEnd() if self.processorIdentifier is not None: oprot.writeFieldBegin('processorIdentifier', TType.STRING, 10) oprot.writeString(self.processorIdentifier) oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 11) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -25397,6 +25606,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.filterSpec) value = (value * 31) ^ hash(self.processorCapabilities) value = (value * 31) ^ hash(self.processorIdentifier) + value = (value * 31) ^ hash(self.validWriteIdList) return value def __repr__(self): diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb index c0442ff7c0..745f1d72c6 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb @@ -2180,6 +2180,7 @@ class PartitionsByExprRequest DEFAULTPARTITIONNAME = 4 MAXPARTS = 5 CATNAME = 6 + VALIDWRITEIDLIST = 7 FIELDS = { DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, @@ -2187,7 +2188,8 @@ class PartitionsByExprRequest EXPR => {:type => ::Thrift::Types::STRING, :name => 'expr', :binary => true}, DEFAULTPARTITIONNAME => {:type => ::Thrift::Types::STRING, :name => 'defaultPartitionName', :optional => true}, MAXPARTS => {:type => ::Thrift::Types::I16, :name => 'maxParts', :default => -1, :optional => true}, - CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true}, + VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true} } def struct_fields; FIELDS; end @@ -2456,6 +2458,7 @@ class PartitionValuesRequest ASCENDING = 7 MAXPARTS = 8 CATNAME = 9 + VALIDWRITEIDLIST = 10 FIELDS = { DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, @@ -2466,7 +2469,8 @@ class PartitionValuesRequest PARTITIONORDER => {:type => ::Thrift::Types::LIST, :name => 'partitionOrder', :element => {:type => ::Thrift::Types::STRUCT, :class => ::FieldSchema}, :optional => true}, ASCENDING => {:type => ::Thrift::Types::BOOL, :name => 'ascending', :default => true, :optional => true}, MAXPARTS => {:type => ::Thrift::Types::I64, :name => 'maxParts', :default => -1, :optional => true}, - CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true}, + VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true} } def struct_fields; FIELDS; end @@ -2522,6 +2526,7 @@ class GetPartitionsByNamesRequest GET_COL_STATS = 4 PROCESSORCAPABILITIES = 5 PROCESSORIDENTIFIER = 6 + VALIDWRITEIDLIST = 7 FIELDS = { DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, @@ -2529,7 +2534,8 @@ class GetPartitionsByNamesRequest NAMES => {:type => 
::Thrift::Types::LIST, :name => 'names', :element => {:type => ::Thrift::Types::STRING}, :optional => true},
       GET_COL_STATS => {:type => ::Thrift::Types::BOOL, :name => 'get_col_stats', :optional => true},
       PROCESSORCAPABILITIES => {:type => ::Thrift::Types::LIST, :name => 'processorCapabilities', :element => {:type => ::Thrift::Types::STRING}, :optional => true},
-      PROCESSORIDENTIFIER => {:type => ::Thrift::Types::STRING, :name => 'processorIdentifier', :optional => true}
+      PROCESSORIDENTIFIER => {:type => ::Thrift::Types::STRING, :name => 'processorIdentifier', :optional => true},
+      VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true}
     }

     def struct_fields; FIELDS; end
@@ -2975,6 +2981,43 @@ class TableValidWriteIds
   ::Thrift::Struct.generate_accessors self
 end

+class TableWriteId
+  include ::Thrift::Struct, ::Thrift::Struct_Union
+  FULLTABLENAME = 1
+  WRITEID = 2
+
+  FIELDS = {
+    FULLTABLENAME => {:type => ::Thrift::Types::STRING, :name => 'fullTableName'},
+    WRITEID => {:type => ::Thrift::Types::I64, :name => 'writeId'}
+  }
+
+  def struct_fields; FIELDS; end
+
+  def validate
+    raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field fullTableName is unset!') unless @fullTableName
+    raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field writeId is unset!') unless @writeId
+  end
+
+  ::Thrift::Struct.generate_accessors self
+end
+
+class GetTxnTableWriteIdsResponse
+  include ::Thrift::Struct, ::Thrift::Struct_Union
+  TABLEWRITEIDS = 1
+
+  FIELDS = {
+    TABLEWRITEIDS => {:type => ::Thrift::Types::LIST, :name => 'tableWriteIds', :element => {:type => ::Thrift::Types::STRUCT, :class => ::TableWriteId}}
+  }
+
+  def struct_fields; FIELDS; end
+
+  def validate
+    raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tableWriteIds is unset!') unless @tableWriteIds
+  end
+
+  ::Thrift::Struct.generate_accessors self
+end
+
 class GetValidWriteIdsResponse
   include ::Thrift::Struct, ::Thrift::Struct_Union
   TBLVALIDWRITEIDS = 1
@@ -5645,6 +5688,7 @@ class GetPartitionsRequest
   FILTERSPEC = 8
   PROCESSORCAPABILITIES = 9
   PROCESSORIDENTIFIER = 10
+  VALIDWRITEIDLIST = 11

   FIELDS = {
     CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true},
@@ -5656,7 +5700,8 @@ class GetPartitionsRequest
     PROJECTIONSPEC => {:type => ::Thrift::Types::STRUCT, :name => 'projectionSpec', :class => ::GetPartitionsProjectionSpec},
     FILTERSPEC => {:type => ::Thrift::Types::STRUCT, :name => 'filterSpec', :class => ::GetPartitionsFilterSpec},
     PROCESSORCAPABILITIES => {:type => ::Thrift::Types::LIST, :name => 'processorCapabilities', :element => {:type => ::Thrift::Types::STRING}, :optional => true},
-    PROCESSORIDENTIFIER => {:type => ::Thrift::Types::STRING, :name => 'processorIdentifier', :optional => true}
+    PROCESSORIDENTIFIER => {:type => ::Thrift::Types::STRING, :name => 'processorIdentifier', :optional => true},
+    VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true}
   }

   def struct_fields; FIELDS; end
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
index 3151fd82b8..3a87db907d 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
@@ -294,13
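
Review note: TableWriteId and GetTxnTableWriteIdsResponse are the only brand-new structs in this file. Each TableWriteId pairs a fully qualified table name with the write id allocated to that table in a transaction, and the response wraps a required list of them. A sketch of the equivalent client-side construction, assuming the Python generator emits matching classes; the import path is illustrative:

    # Illustrative only: assumes the Python bindings regenerate the same two
    # structs with the required fields shown in the Ruby output above.
    from hive_metastore.ttypes import TableWriteId, GetTxnTableWriteIdsResponse

    resp = GetTxnTableWriteIdsResponse(
        tableWriteIds=[TableWriteId(fullTableName='default.acid_tbl', writeId=42)]
    )
    # Both TableWriteId fields are required; the Ruby validate hooks above
    # raise a ProtocolException if either is unset.
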
+294,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_type_all failed: unknown result') end - def get_fields(db_name, table_name) - send_get_fields(db_name, table_name) + def get_fields(db_name, table_name, validWriteIdList) + send_get_fields(db_name, table_name, validWriteIdList) return recv_get_fields() end - def send_get_fields(db_name, table_name) - send_message('get_fields', Get_fields_args, :db_name => db_name, :table_name => table_name) + def send_get_fields(db_name, table_name, validWriteIdList) + send_message('get_fields', Get_fields_args, :db_name => db_name, :table_name => table_name, :validWriteIdList => validWriteIdList) end def recv_get_fields() @@ -312,13 +312,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_fields failed: unknown result') end - def get_fields_with_environment_context(db_name, table_name, environment_context) - send_get_fields_with_environment_context(db_name, table_name, environment_context) + def get_fields_with_environment_context(db_name, table_name, environment_context, validWriteIdList) + send_get_fields_with_environment_context(db_name, table_name, environment_context, validWriteIdList) return recv_get_fields_with_environment_context() end - def send_get_fields_with_environment_context(db_name, table_name, environment_context) - send_message('get_fields_with_environment_context', Get_fields_with_environment_context_args, :db_name => db_name, :table_name => table_name, :environment_context => environment_context) + def send_get_fields_with_environment_context(db_name, table_name, environment_context, validWriteIdList) + send_message('get_fields_with_environment_context', Get_fields_with_environment_context_args, :db_name => db_name, :table_name => table_name, :environment_context => environment_context, :validWriteIdList => validWriteIdList) end def recv_get_fields_with_environment_context() @@ -330,13 +330,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_fields_with_environment_context failed: unknown result') end - def get_schema(db_name, table_name) - send_get_schema(db_name, table_name) + def get_schema(db_name, table_name, validWriteIdList) + send_get_schema(db_name, table_name, validWriteIdList) return recv_get_schema() end - def send_get_schema(db_name, table_name) - send_message('get_schema', Get_schema_args, :db_name => db_name, :table_name => table_name) + def send_get_schema(db_name, table_name, validWriteIdList) + send_message('get_schema', Get_schema_args, :db_name => db_name, :table_name => table_name, :validWriteIdList => validWriteIdList) end def recv_get_schema() @@ -348,13 +348,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_schema failed: unknown result') end - def get_schema_with_environment_context(db_name, table_name, environment_context) - send_get_schema_with_environment_context(db_name, table_name, environment_context) + def get_schema_with_environment_context(db_name, table_name, environment_context, validWriteIdList) + send_get_schema_with_environment_context(db_name, table_name, environment_context, validWriteIdList) return recv_get_schema_with_environment_context() end - def send_get_schema_with_environment_context(db_name, table_name, environment_context) - send_message('get_schema_with_environment_context', 
Get_schema_with_environment_context_args, :db_name => db_name, :table_name => table_name, :environment_context => environment_context) + def send_get_schema_with_environment_context(db_name, table_name, environment_context, validWriteIdList) + send_message('get_schema_with_environment_context', Get_schema_with_environment_context_args, :db_name => db_name, :table_name => table_name, :environment_context => environment_context, :validWriteIdList => validWriteIdList) end def recv_get_schema_with_environment_context() @@ -709,13 +709,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_all_tables failed: unknown result') end - def get_table(dbname, tbl_name) - send_get_table(dbname, tbl_name) + def get_table(dbname, tbl_name, validWriteIdList) + send_get_table(dbname, tbl_name, validWriteIdList) return recv_get_table() end - def send_get_table(dbname, tbl_name) - send_message('get_table', Get_table_args, :dbname => dbname, :tbl_name => tbl_name) + def send_get_table(dbname, tbl_name, validWriteIdList) + send_message('get_table', Get_table_args, :dbname => dbname, :tbl_name => tbl_name, :validWriteIdList => validWriteIdList) end def recv_get_table() @@ -1157,13 +1157,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'drop_partitions_req failed: unknown result') end - def get_partition(db_name, tbl_name, part_vals) - send_get_partition(db_name, tbl_name, part_vals) + def get_partition(db_name, tbl_name, part_vals, validTxnList) + send_get_partition(db_name, tbl_name, part_vals, validTxnList) return recv_get_partition() end - def send_get_partition(db_name, tbl_name, part_vals) - send_message('get_partition', Get_partition_args, :db_name => db_name, :tbl_name => tbl_name, :part_vals => part_vals) + def send_get_partition(db_name, tbl_name, part_vals, validTxnList) + send_message('get_partition', Get_partition_args, :db_name => db_name, :tbl_name => tbl_name, :part_vals => part_vals, :validTxnList => validTxnList) end def recv_get_partition() @@ -1212,13 +1212,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'exchange_partitions failed: unknown result') end - def get_partition_with_auth(db_name, tbl_name, part_vals, user_name, group_names) - send_get_partition_with_auth(db_name, tbl_name, part_vals, user_name, group_names) + def get_partition_with_auth(db_name, tbl_name, part_vals, user_name, group_names, validTxnList) + send_get_partition_with_auth(db_name, tbl_name, part_vals, user_name, group_names, validTxnList) return recv_get_partition_with_auth() end - def send_get_partition_with_auth(db_name, tbl_name, part_vals, user_name, group_names) - send_message('get_partition_with_auth', Get_partition_with_auth_args, :db_name => db_name, :tbl_name => tbl_name, :part_vals => part_vals, :user_name => user_name, :group_names => group_names) + def send_get_partition_with_auth(db_name, tbl_name, part_vals, user_name, group_names, validTxnList) + send_message('get_partition_with_auth', Get_partition_with_auth_args, :db_name => db_name, :tbl_name => tbl_name, :part_vals => part_vals, :user_name => user_name, :group_names => group_names, :validTxnList => validTxnList) end def recv_get_partition_with_auth() @@ -1229,13 +1229,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_partition_with_auth failed: unknown 
result') end - def get_partition_by_name(db_name, tbl_name, part_name) - send_get_partition_by_name(db_name, tbl_name, part_name) + def get_partition_by_name(db_name, tbl_name, part_name, validTxnList) + send_get_partition_by_name(db_name, tbl_name, part_name, validTxnList) return recv_get_partition_by_name() end - def send_get_partition_by_name(db_name, tbl_name, part_name) - send_message('get_partition_by_name', Get_partition_by_name_args, :db_name => db_name, :tbl_name => tbl_name, :part_name => part_name) + def send_get_partition_by_name(db_name, tbl_name, part_name, validTxnList) + send_message('get_partition_by_name', Get_partition_by_name_args, :db_name => db_name, :tbl_name => tbl_name, :part_name => part_name, :validTxnList => validTxnList) end def recv_get_partition_by_name() @@ -1246,13 +1246,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_partition_by_name failed: unknown result') end - def get_partitions(db_name, tbl_name, max_parts) - send_get_partitions(db_name, tbl_name, max_parts) + def get_partitions(db_name, tbl_name, max_parts, validTxnList) + send_get_partitions(db_name, tbl_name, max_parts, validTxnList) return recv_get_partitions() end - def send_get_partitions(db_name, tbl_name, max_parts) - send_message('get_partitions', Get_partitions_args, :db_name => db_name, :tbl_name => tbl_name, :max_parts => max_parts) + def send_get_partitions(db_name, tbl_name, max_parts, validTxnList) + send_message('get_partitions', Get_partitions_args, :db_name => db_name, :tbl_name => tbl_name, :max_parts => max_parts, :validTxnList => validTxnList) end def recv_get_partitions() @@ -1263,13 +1263,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_partitions failed: unknown result') end - def get_partitions_with_auth(db_name, tbl_name, max_parts, user_name, group_names) - send_get_partitions_with_auth(db_name, tbl_name, max_parts, user_name, group_names) + def get_partitions_with_auth(db_name, tbl_name, max_parts, user_name, group_names, validTxnList) + send_get_partitions_with_auth(db_name, tbl_name, max_parts, user_name, group_names, validTxnList) return recv_get_partitions_with_auth() end - def send_get_partitions_with_auth(db_name, tbl_name, max_parts, user_name, group_names) - send_message('get_partitions_with_auth', Get_partitions_with_auth_args, :db_name => db_name, :tbl_name => tbl_name, :max_parts => max_parts, :user_name => user_name, :group_names => group_names) + def send_get_partitions_with_auth(db_name, tbl_name, max_parts, user_name, group_names, validTxnList) + send_message('get_partitions_with_auth', Get_partitions_with_auth_args, :db_name => db_name, :tbl_name => tbl_name, :max_parts => max_parts, :user_name => user_name, :group_names => group_names, :validTxnList => validTxnList) end def recv_get_partitions_with_auth() @@ -1280,13 +1280,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_partitions_with_auth failed: unknown result') end - def get_partitions_pspec(db_name, tbl_name, max_parts) - send_get_partitions_pspec(db_name, tbl_name, max_parts) + def get_partitions_pspec(db_name, tbl_name, max_parts, validTxnList) + send_get_partitions_pspec(db_name, tbl_name, max_parts, validTxnList) return recv_get_partitions_pspec() end - def send_get_partitions_pspec(db_name, tbl_name, max_parts) - send_message('get_partitions_pspec', 
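
Review note: every partition-fetching client method in this block gains a trailing validTxnList argument, while the schema, table, and column-statistics methods gain validWriteIdList, so a reader's transaction snapshot travels with each metastore call. A hedged sketch of one such call through a regenerated Python client, assuming it mirrors the Ruby signatures in this file; names and the snapshot string are illustrative:

    # Illustrative only: assumes the regenerated Python ThriftHiveMetastore.Client
    # mirrors the Ruby signatures shown here.
    from hive_metastore import ThriftHiveMetastore

    def fetch_partitions(client, valid_txn_list):
        # valid_txn_list is the caller's serialized ValidTxnList snapshot;
        # max_parts of -1 requests all partitions, as elsewhere in this API.
        return client.get_partitions('default', 'acid_tbl', -1, valid_txn_list)
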
Get_partitions_pspec_args, :db_name => db_name, :tbl_name => tbl_name, :max_parts => max_parts) + def send_get_partitions_pspec(db_name, tbl_name, max_parts, validTxnList) + send_message('get_partitions_pspec', Get_partitions_pspec_args, :db_name => db_name, :tbl_name => tbl_name, :max_parts => max_parts, :validTxnList => validTxnList) end def recv_get_partitions_pspec() @@ -1297,13 +1297,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_partitions_pspec failed: unknown result') end - def get_partition_names(db_name, tbl_name, max_parts) - send_get_partition_names(db_name, tbl_name, max_parts) + def get_partition_names(db_name, tbl_name, max_parts, validTxnList) + send_get_partition_names(db_name, tbl_name, max_parts, validTxnList) return recv_get_partition_names() end - def send_get_partition_names(db_name, tbl_name, max_parts) - send_message('get_partition_names', Get_partition_names_args, :db_name => db_name, :tbl_name => tbl_name, :max_parts => max_parts) + def send_get_partition_names(db_name, tbl_name, max_parts, validTxnList) + send_message('get_partition_names', Get_partition_names_args, :db_name => db_name, :tbl_name => tbl_name, :max_parts => max_parts, :validTxnList => validTxnList) end def recv_get_partition_names() @@ -1331,13 +1331,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_partition_values failed: unknown result') end - def get_partitions_ps(db_name, tbl_name, part_vals, max_parts) - send_get_partitions_ps(db_name, tbl_name, part_vals, max_parts) + def get_partitions_ps(db_name, tbl_name, part_vals, max_parts, validTxnList) + send_get_partitions_ps(db_name, tbl_name, part_vals, max_parts, validTxnList) return recv_get_partitions_ps() end - def send_get_partitions_ps(db_name, tbl_name, part_vals, max_parts) - send_message('get_partitions_ps', Get_partitions_ps_args, :db_name => db_name, :tbl_name => tbl_name, :part_vals => part_vals, :max_parts => max_parts) + def send_get_partitions_ps(db_name, tbl_name, part_vals, max_parts, validTxnList) + send_message('get_partitions_ps', Get_partitions_ps_args, :db_name => db_name, :tbl_name => tbl_name, :part_vals => part_vals, :max_parts => max_parts, :validTxnList => validTxnList) end def recv_get_partitions_ps() @@ -1348,13 +1348,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_partitions_ps failed: unknown result') end - def get_partitions_ps_with_auth(db_name, tbl_name, part_vals, max_parts, user_name, group_names) - send_get_partitions_ps_with_auth(db_name, tbl_name, part_vals, max_parts, user_name, group_names) + def get_partitions_ps_with_auth(db_name, tbl_name, part_vals, max_parts, user_name, group_names, validTxnList) + send_get_partitions_ps_with_auth(db_name, tbl_name, part_vals, max_parts, user_name, group_names, validTxnList) return recv_get_partitions_ps_with_auth() end - def send_get_partitions_ps_with_auth(db_name, tbl_name, part_vals, max_parts, user_name, group_names) - send_message('get_partitions_ps_with_auth', Get_partitions_ps_with_auth_args, :db_name => db_name, :tbl_name => tbl_name, :part_vals => part_vals, :max_parts => max_parts, :user_name => user_name, :group_names => group_names) + def send_get_partitions_ps_with_auth(db_name, tbl_name, part_vals, max_parts, user_name, group_names, validTxnList) + send_message('get_partitions_ps_with_auth', 
Get_partitions_ps_with_auth_args, :db_name => db_name, :tbl_name => tbl_name, :part_vals => part_vals, :max_parts => max_parts, :user_name => user_name, :group_names => group_names, :validTxnList => validTxnList) end def recv_get_partitions_ps_with_auth() @@ -1365,13 +1365,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_partitions_ps_with_auth failed: unknown result') end - def get_partition_names_ps(db_name, tbl_name, part_vals, max_parts) - send_get_partition_names_ps(db_name, tbl_name, part_vals, max_parts) + def get_partition_names_ps(db_name, tbl_name, part_vals, max_parts, validTxnList) + send_get_partition_names_ps(db_name, tbl_name, part_vals, max_parts, validTxnList) return recv_get_partition_names_ps() end - def send_get_partition_names_ps(db_name, tbl_name, part_vals, max_parts) - send_message('get_partition_names_ps', Get_partition_names_ps_args, :db_name => db_name, :tbl_name => tbl_name, :part_vals => part_vals, :max_parts => max_parts) + def send_get_partition_names_ps(db_name, tbl_name, part_vals, max_parts, validTxnList) + send_message('get_partition_names_ps', Get_partition_names_ps_args, :db_name => db_name, :tbl_name => tbl_name, :part_vals => part_vals, :max_parts => max_parts, :validTxnList => validTxnList) end def recv_get_partition_names_ps() @@ -1382,13 +1382,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_partition_names_ps failed: unknown result') end - def get_partitions_by_filter(db_name, tbl_name, filter, max_parts) - send_get_partitions_by_filter(db_name, tbl_name, filter, max_parts) + def get_partitions_by_filter(db_name, tbl_name, filter, max_parts, validTxnList) + send_get_partitions_by_filter(db_name, tbl_name, filter, max_parts, validTxnList) return recv_get_partitions_by_filter() end - def send_get_partitions_by_filter(db_name, tbl_name, filter, max_parts) - send_message('get_partitions_by_filter', Get_partitions_by_filter_args, :db_name => db_name, :tbl_name => tbl_name, :filter => filter, :max_parts => max_parts) + def send_get_partitions_by_filter(db_name, tbl_name, filter, max_parts, validTxnList) + send_message('get_partitions_by_filter', Get_partitions_by_filter_args, :db_name => db_name, :tbl_name => tbl_name, :filter => filter, :max_parts => max_parts, :validTxnList => validTxnList) end def recv_get_partitions_by_filter() @@ -1399,13 +1399,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_partitions_by_filter failed: unknown result') end - def get_part_specs_by_filter(db_name, tbl_name, filter, max_parts) - send_get_part_specs_by_filter(db_name, tbl_name, filter, max_parts) + def get_part_specs_by_filter(db_name, tbl_name, filter, max_parts, validTxnList) + send_get_part_specs_by_filter(db_name, tbl_name, filter, max_parts, validTxnList) return recv_get_part_specs_by_filter() end - def send_get_part_specs_by_filter(db_name, tbl_name, filter, max_parts) - send_message('get_part_specs_by_filter', Get_part_specs_by_filter_args, :db_name => db_name, :tbl_name => tbl_name, :filter => filter, :max_parts => max_parts) + def send_get_part_specs_by_filter(db_name, tbl_name, filter, max_parts, validTxnList) + send_message('get_part_specs_by_filter', Get_part_specs_by_filter_args, :db_name => db_name, :tbl_name => tbl_name, :filter => filter, :max_parts => max_parts, :validTxnList => validTxnList) end def 
recv_get_part_specs_by_filter() @@ -1433,13 +1433,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_partitions_by_expr failed: unknown result') end - def get_num_partitions_by_filter(db_name, tbl_name, filter) - send_get_num_partitions_by_filter(db_name, tbl_name, filter) + def get_num_partitions_by_filter(db_name, tbl_name, filter, validTxnList) + send_get_num_partitions_by_filter(db_name, tbl_name, filter, validTxnList) return recv_get_num_partitions_by_filter() end - def send_get_num_partitions_by_filter(db_name, tbl_name, filter) - send_message('get_num_partitions_by_filter', Get_num_partitions_by_filter_args, :db_name => db_name, :tbl_name => tbl_name, :filter => filter) + def send_get_num_partitions_by_filter(db_name, tbl_name, filter, validTxnList) + send_message('get_num_partitions_by_filter', Get_num_partitions_by_filter_args, :db_name => db_name, :tbl_name => tbl_name, :filter => filter, :validTxnList => validTxnList) end def recv_get_num_partitions_by_filter() @@ -1450,13 +1450,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_num_partitions_by_filter failed: unknown result') end - def get_partitions_by_names(db_name, tbl_name, names) - send_get_partitions_by_names(db_name, tbl_name, names) + def get_partitions_by_names(db_name, tbl_name, names, validTxnList) + send_get_partitions_by_names(db_name, tbl_name, names, validTxnList) return recv_get_partitions_by_names() end - def send_get_partitions_by_names(db_name, tbl_name, names) - send_message('get_partitions_by_names', Get_partitions_by_names_args, :db_name => db_name, :tbl_name => tbl_name, :names => names) + def send_get_partitions_by_names(db_name, tbl_name, names, validTxnList) + send_message('get_partitions_by_names', Get_partitions_by_names_args, :db_name => db_name, :tbl_name => tbl_name, :names => names, :validTxnList => validTxnList) end def recv_get_partitions_by_names() @@ -1881,13 +1881,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'update_partition_column_statistics_req failed: unknown result') end - def get_table_column_statistics(db_name, tbl_name, col_name) - send_get_table_column_statistics(db_name, tbl_name, col_name) + def get_table_column_statistics(db_name, tbl_name, col_name, validWriteIdList) + send_get_table_column_statistics(db_name, tbl_name, col_name, validWriteIdList) return recv_get_table_column_statistics() end - def send_get_table_column_statistics(db_name, tbl_name, col_name) - send_message('get_table_column_statistics', Get_table_column_statistics_args, :db_name => db_name, :tbl_name => tbl_name, :col_name => col_name) + def send_get_table_column_statistics(db_name, tbl_name, col_name, validWriteIdList) + send_message('get_table_column_statistics', Get_table_column_statistics_args, :db_name => db_name, :tbl_name => tbl_name, :col_name => col_name, :validWriteIdList => validWriteIdList) end def recv_get_table_column_statistics() @@ -1900,13 +1900,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_table_column_statistics failed: unknown result') end - def get_partition_column_statistics(db_name, tbl_name, part_name, col_name) - send_get_partition_column_statistics(db_name, tbl_name, part_name, col_name) + def get_partition_column_statistics(db_name, tbl_name, part_name, col_name, 
validWriteIdList) + send_get_partition_column_statistics(db_name, tbl_name, part_name, col_name, validWriteIdList) return recv_get_partition_column_statistics() end - def send_get_partition_column_statistics(db_name, tbl_name, part_name, col_name) - send_message('get_partition_column_statistics', Get_partition_column_statistics_args, :db_name => db_name, :tbl_name => tbl_name, :part_name => part_name, :col_name => col_name) + def send_get_partition_column_statistics(db_name, tbl_name, part_name, col_name, validWriteIdList) + send_message('get_partition_column_statistics', Get_partition_column_statistics_args, :db_name => db_name, :tbl_name => tbl_name, :part_name => part_name, :col_name => col_name, :validWriteIdList => validWriteIdList) end def recv_get_partition_column_statistics() @@ -3993,7 +3993,7 @@ module ThriftHiveMetastore args = read_args(iprot, Get_fields_args) result = Get_fields_result.new() begin - result.success = @handler.get_fields(args.db_name, args.table_name) + result.success = @handler.get_fields(args.db_name, args.table_name, args.validWriteIdList) rescue ::MetaException => o1 result.o1 = o1 rescue ::UnknownTableException => o2 @@ -4008,7 +4008,7 @@ module ThriftHiveMetastore args = read_args(iprot, Get_fields_with_environment_context_args) result = Get_fields_with_environment_context_result.new() begin - result.success = @handler.get_fields_with_environment_context(args.db_name, args.table_name, args.environment_context) + result.success = @handler.get_fields_with_environment_context(args.db_name, args.table_name, args.environment_context, args.validWriteIdList) rescue ::MetaException => o1 result.o1 = o1 rescue ::UnknownTableException => o2 @@ -4023,7 +4023,7 @@ module ThriftHiveMetastore args = read_args(iprot, Get_schema_args) result = Get_schema_result.new() begin - result.success = @handler.get_schema(args.db_name, args.table_name) + result.success = @handler.get_schema(args.db_name, args.table_name, args.validWriteIdList) rescue ::MetaException => o1 result.o1 = o1 rescue ::UnknownTableException => o2 @@ -4038,7 +4038,7 @@ module ThriftHiveMetastore args = read_args(iprot, Get_schema_with_environment_context_args) result = Get_schema_with_environment_context_result.new() begin - result.success = @handler.get_schema_with_environment_context(args.db_name, args.table_name, args.environment_context) + result.success = @handler.get_schema_with_environment_context(args.db_name, args.table_name, args.environment_context, args.validWriteIdList) rescue ::MetaException => o1 result.o1 = o1 rescue ::UnknownTableException => o2 @@ -4326,7 +4326,7 @@ module ThriftHiveMetastore args = read_args(iprot, Get_table_args) result = Get_table_result.new() begin - result.success = @handler.get_table(args.dbname, args.tbl_name) + result.success = @handler.get_table(args.dbname, args.tbl_name, args.validWriteIdList) rescue ::MetaException => o1 result.o1 = o1 rescue ::NoSuchObjectException => o2 @@ -4682,7 +4682,7 @@ module ThriftHiveMetastore args = read_args(iprot, Get_partition_args) result = Get_partition_result.new() begin - result.success = @handler.get_partition(args.db_name, args.tbl_name, args.part_vals) + result.success = @handler.get_partition(args.db_name, args.tbl_name, args.part_vals, args.validTxnList) rescue ::MetaException => o1 result.o1 = o1 rescue ::NoSuchObjectException => o2 @@ -4729,7 +4729,7 @@ module ThriftHiveMetastore args = read_args(iprot, Get_partition_with_auth_args) result = Get_partition_with_auth_result.new() begin - result.success = 
@handler.get_partition_with_auth(args.db_name, args.tbl_name, args.part_vals, args.user_name, args.group_names) + result.success = @handler.get_partition_with_auth(args.db_name, args.tbl_name, args.part_vals, args.user_name, args.group_names, args.validTxnList) rescue ::MetaException => o1 result.o1 = o1 rescue ::NoSuchObjectException => o2 @@ -4742,7 +4742,7 @@ module ThriftHiveMetastore args = read_args(iprot, Get_partition_by_name_args) result = Get_partition_by_name_result.new() begin - result.success = @handler.get_partition_by_name(args.db_name, args.tbl_name, args.part_name) + result.success = @handler.get_partition_by_name(args.db_name, args.tbl_name, args.part_name, args.validTxnList) rescue ::MetaException => o1 result.o1 = o1 rescue ::NoSuchObjectException => o2 @@ -4755,7 +4755,7 @@ module ThriftHiveMetastore args = read_args(iprot, Get_partitions_args) result = Get_partitions_result.new() begin - result.success = @handler.get_partitions(args.db_name, args.tbl_name, args.max_parts) + result.success = @handler.get_partitions(args.db_name, args.tbl_name, args.max_parts, args.validTxnList) rescue ::NoSuchObjectException => o1 result.o1 = o1 rescue ::MetaException => o2 @@ -4768,7 +4768,7 @@ module ThriftHiveMetastore args = read_args(iprot, Get_partitions_with_auth_args) result = Get_partitions_with_auth_result.new() begin - result.success = @handler.get_partitions_with_auth(args.db_name, args.tbl_name, args.max_parts, args.user_name, args.group_names) + result.success = @handler.get_partitions_with_auth(args.db_name, args.tbl_name, args.max_parts, args.user_name, args.group_names, args.validTxnList) rescue ::NoSuchObjectException => o1 result.o1 = o1 rescue ::MetaException => o2 @@ -4781,7 +4781,7 @@ module ThriftHiveMetastore args = read_args(iprot, Get_partitions_pspec_args) result = Get_partitions_pspec_result.new() begin - result.success = @handler.get_partitions_pspec(args.db_name, args.tbl_name, args.max_parts) + result.success = @handler.get_partitions_pspec(args.db_name, args.tbl_name, args.max_parts, args.validTxnList) rescue ::NoSuchObjectException => o1 result.o1 = o1 rescue ::MetaException => o2 @@ -4794,7 +4794,7 @@ module ThriftHiveMetastore args = read_args(iprot, Get_partition_names_args) result = Get_partition_names_result.new() begin - result.success = @handler.get_partition_names(args.db_name, args.tbl_name, args.max_parts) + result.success = @handler.get_partition_names(args.db_name, args.tbl_name, args.max_parts, args.validTxnList) rescue ::NoSuchObjectException => o1 result.o1 = o1 rescue ::MetaException => o2 @@ -4820,7 +4820,7 @@ module ThriftHiveMetastore args = read_args(iprot, Get_partitions_ps_args) result = Get_partitions_ps_result.new() begin - result.success = @handler.get_partitions_ps(args.db_name, args.tbl_name, args.part_vals, args.max_parts) + result.success = @handler.get_partitions_ps(args.db_name, args.tbl_name, args.part_vals, args.max_parts, args.validTxnList) rescue ::MetaException => o1 result.o1 = o1 rescue ::NoSuchObjectException => o2 @@ -4833,7 +4833,7 @@ module ThriftHiveMetastore args = read_args(iprot, Get_partitions_ps_with_auth_args) result = Get_partitions_ps_with_auth_result.new() begin - result.success = @handler.get_partitions_ps_with_auth(args.db_name, args.tbl_name, args.part_vals, args.max_parts, args.user_name, args.group_names) + result.success = @handler.get_partitions_ps_with_auth(args.db_name, args.tbl_name, args.part_vals, args.max_parts, args.user_name, args.group_names, args.validTxnList) rescue 
::NoSuchObjectException => o1 result.o1 = o1 rescue ::MetaException => o2 @@ -4846,7 +4846,7 @@ module ThriftHiveMetastore args = read_args(iprot, Get_partition_names_ps_args) result = Get_partition_names_ps_result.new() begin - result.success = @handler.get_partition_names_ps(args.db_name, args.tbl_name, args.part_vals, args.max_parts) + result.success = @handler.get_partition_names_ps(args.db_name, args.tbl_name, args.part_vals, args.max_parts, args.validTxnList) rescue ::MetaException => o1 result.o1 = o1 rescue ::NoSuchObjectException => o2 @@ -4859,7 +4859,7 @@ module ThriftHiveMetastore args = read_args(iprot, Get_partitions_by_filter_args) result = Get_partitions_by_filter_result.new() begin - result.success = @handler.get_partitions_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts) + result.success = @handler.get_partitions_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts, args.validTxnList) rescue ::MetaException => o1 result.o1 = o1 rescue ::NoSuchObjectException => o2 @@ -4872,7 +4872,7 @@ module ThriftHiveMetastore args = read_args(iprot, Get_part_specs_by_filter_args) result = Get_part_specs_by_filter_result.new() begin - result.success = @handler.get_part_specs_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts) + result.success = @handler.get_part_specs_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts, args.validTxnList) rescue ::MetaException => o1 result.o1 = o1 rescue ::NoSuchObjectException => o2 @@ -4898,7 +4898,7 @@ module ThriftHiveMetastore args = read_args(iprot, Get_num_partitions_by_filter_args) result = Get_num_partitions_by_filter_result.new() begin - result.success = @handler.get_num_partitions_by_filter(args.db_name, args.tbl_name, args.filter) + result.success = @handler.get_num_partitions_by_filter(args.db_name, args.tbl_name, args.filter, args.validTxnList) rescue ::MetaException => o1 result.o1 = o1 rescue ::NoSuchObjectException => o2 @@ -4911,7 +4911,7 @@ module ThriftHiveMetastore args = read_args(iprot, Get_partitions_by_names_args) result = Get_partitions_by_names_result.new() begin - result.success = @handler.get_partitions_by_names(args.db_name, args.tbl_name, args.names) + result.success = @handler.get_partitions_by_names(args.db_name, args.tbl_name, args.names, args.validTxnList) rescue ::MetaException => o1 result.o1 = o1 rescue ::NoSuchObjectException => o2 @@ -5260,7 +5260,7 @@ module ThriftHiveMetastore args = read_args(iprot, Get_table_column_statistics_args) result = Get_table_column_statistics_result.new() begin - result.success = @handler.get_table_column_statistics(args.db_name, args.tbl_name, args.col_name) + result.success = @handler.get_table_column_statistics(args.db_name, args.tbl_name, args.col_name, args.validWriteIdList) rescue ::NoSuchObjectException => o1 result.o1 = o1 rescue ::MetaException => o2 @@ -5277,7 +5277,7 @@ module ThriftHiveMetastore args = read_args(iprot, Get_partition_column_statistics_args) result = Get_partition_column_statistics_result.new() begin - result.success = @handler.get_partition_column_statistics(args.db_name, args.tbl_name, args.part_name, args.col_name) + result.success = @handler.get_partition_column_statistics(args.db_name, args.tbl_name, args.part_name, args.col_name, args.validWriteIdList) rescue ::NoSuchObjectException => o1 result.o1 = o1 rescue ::MetaException => o2 @@ -7190,10 +7190,12 @@ module ThriftHiveMetastore include ::Thrift::Struct, ::Thrift::Struct_Union DB_NAME = 1 TABLE_NAME = 2 + VALIDWRITEIDLIST = 3 FIELDS = 
{ DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, - TABLE_NAME => {:type => ::Thrift::Types::STRING, :name => 'table_name'} + TABLE_NAME => {:type => ::Thrift::Types::STRING, :name => 'table_name'}, + VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList'} } def struct_fields; FIELDS; end @@ -7231,11 +7233,13 @@ module ThriftHiveMetastore DB_NAME = 1 TABLE_NAME = 2 ENVIRONMENT_CONTEXT = 3 + VALIDWRITEIDLIST = 4 FIELDS = { DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, TABLE_NAME => {:type => ::Thrift::Types::STRING, :name => 'table_name'}, - ENVIRONMENT_CONTEXT => {:type => ::Thrift::Types::STRUCT, :name => 'environment_context', :class => ::EnvironmentContext} + ENVIRONMENT_CONTEXT => {:type => ::Thrift::Types::STRUCT, :name => 'environment_context', :class => ::EnvironmentContext}, + VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList'} } def struct_fields; FIELDS; end @@ -7272,10 +7276,12 @@ module ThriftHiveMetastore include ::Thrift::Struct, ::Thrift::Struct_Union DB_NAME = 1 TABLE_NAME = 2 + VALIDWRITEIDLIST = 3 FIELDS = { DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, - TABLE_NAME => {:type => ::Thrift::Types::STRING, :name => 'table_name'} + TABLE_NAME => {:type => ::Thrift::Types::STRING, :name => 'table_name'}, + VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList'} } def struct_fields; FIELDS; end @@ -7313,11 +7319,13 @@ module ThriftHiveMetastore DB_NAME = 1 TABLE_NAME = 2 ENVIRONMENT_CONTEXT = 3 + VALIDWRITEIDLIST = 4 FIELDS = { DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, TABLE_NAME => {:type => ::Thrift::Types::STRING, :name => 'table_name'}, - ENVIRONMENT_CONTEXT => {:type => ::Thrift::Types::STRUCT, :name => 'environment_context', :class => ::EnvironmentContext} + ENVIRONMENT_CONTEXT => {:type => ::Thrift::Types::STRUCT, :name => 'environment_context', :class => ::EnvironmentContext}, + VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList'} } def struct_fields; FIELDS; end @@ -8119,10 +8127,12 @@ module ThriftHiveMetastore include ::Thrift::Struct, ::Thrift::Struct_Union DBNAME = 1 TBL_NAME = 2 + VALIDWRITEIDLIST = 3 FIELDS = { DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbname'}, - TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'} + TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'}, + VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList'} } def struct_fields; FIELDS; end @@ -9150,11 +9160,13 @@ module ThriftHiveMetastore DB_NAME = 1 TBL_NAME = 2 PART_VALS = 3 + VALIDTXNLIST = 4 FIELDS = { DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'}, - PART_VALS => {:type => ::Thrift::Types::LIST, :name => 'part_vals', :element => {:type => ::Thrift::Types::STRING}} + PART_VALS => {:type => ::Thrift::Types::LIST, :name => 'part_vals', :element => {:type => ::Thrift::Types::STRING}}, + VALIDTXNLIST => {:type => ::Thrift::Types::STRING, :name => 'validTxnList'} } def struct_fields; FIELDS; end @@ -9288,13 +9300,15 @@ module ThriftHiveMetastore PART_VALS = 3 USER_NAME = 4 GROUP_NAMES = 5 + VALIDTXNLIST = 6 FIELDS = { DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'}, PART_VALS => {:type => ::Thrift::Types::LIST, :name => 'part_vals', :element => 
{:type => ::Thrift::Types::STRING}}, USER_NAME => {:type => ::Thrift::Types::STRING, :name => 'user_name'}, - GROUP_NAMES => {:type => ::Thrift::Types::LIST, :name => 'group_names', :element => {:type => ::Thrift::Types::STRING}} + GROUP_NAMES => {:type => ::Thrift::Types::LIST, :name => 'group_names', :element => {:type => ::Thrift::Types::STRING}}, + VALIDTXNLIST => {:type => ::Thrift::Types::STRING, :name => 'validTxnList'} } def struct_fields; FIELDS; end @@ -9330,11 +9344,13 @@ module ThriftHiveMetastore DB_NAME = 1 TBL_NAME = 2 PART_NAME = 3 + VALIDTXNLIST = 4 FIELDS = { DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'}, - PART_NAME => {:type => ::Thrift::Types::STRING, :name => 'part_name'} + PART_NAME => {:type => ::Thrift::Types::STRING, :name => 'part_name'}, + VALIDTXNLIST => {:type => ::Thrift::Types::STRING, :name => 'validTxnList'} } def struct_fields; FIELDS; end @@ -9370,11 +9386,13 @@ module ThriftHiveMetastore DB_NAME = 1 TBL_NAME = 2 MAX_PARTS = 3 + VALIDTXNLIST = 4 FIELDS = { DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'}, - MAX_PARTS => {:type => ::Thrift::Types::I16, :name => 'max_parts', :default => -1} + MAX_PARTS => {:type => ::Thrift::Types::I16, :name => 'max_parts', :default => -1}, + VALIDTXNLIST => {:type => ::Thrift::Types::STRING, :name => 'validTxnList'} } def struct_fields; FIELDS; end @@ -9412,13 +9430,15 @@ module ThriftHiveMetastore MAX_PARTS = 3 USER_NAME = 4 GROUP_NAMES = 5 + VALIDTXNLIST = 6 FIELDS = { DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'}, MAX_PARTS => {:type => ::Thrift::Types::I16, :name => 'max_parts', :default => -1}, USER_NAME => {:type => ::Thrift::Types::STRING, :name => 'user_name'}, - GROUP_NAMES => {:type => ::Thrift::Types::LIST, :name => 'group_names', :element => {:type => ::Thrift::Types::STRING}} + GROUP_NAMES => {:type => ::Thrift::Types::LIST, :name => 'group_names', :element => {:type => ::Thrift::Types::STRING}}, + VALIDTXNLIST => {:type => ::Thrift::Types::STRING, :name => 'validTxnList'} } def struct_fields; FIELDS; end @@ -9454,11 +9474,13 @@ module ThriftHiveMetastore DB_NAME = 1 TBL_NAME = 2 MAX_PARTS = 3 + VALIDTXNLIST = 4 FIELDS = { DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'}, - MAX_PARTS => {:type => ::Thrift::Types::I32, :name => 'max_parts', :default => -1} + MAX_PARTS => {:type => ::Thrift::Types::I32, :name => 'max_parts', :default => -1}, + VALIDTXNLIST => {:type => ::Thrift::Types::STRING, :name => 'validTxnList'} } def struct_fields; FIELDS; end @@ -9494,11 +9516,13 @@ module ThriftHiveMetastore DB_NAME = 1 TBL_NAME = 2 MAX_PARTS = 3 + VALIDTXNLIST = 4 FIELDS = { DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'}, - MAX_PARTS => {:type => ::Thrift::Types::I16, :name => 'max_parts', :default => -1} + MAX_PARTS => {:type => ::Thrift::Types::I16, :name => 'max_parts', :default => -1}, + VALIDTXNLIST => {:type => ::Thrift::Types::STRING, :name => 'validTxnList'} } def struct_fields; FIELDS; end @@ -9571,12 +9595,14 @@ module ThriftHiveMetastore TBL_NAME = 2 PART_VALS = 3 MAX_PARTS = 4 + VALIDTXNLIST = 5 FIELDS = { DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, 
TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'}, PART_VALS => {:type => ::Thrift::Types::LIST, :name => 'part_vals', :element => {:type => ::Thrift::Types::STRING}}, - MAX_PARTS => {:type => ::Thrift::Types::I16, :name => 'max_parts', :default => -1} + MAX_PARTS => {:type => ::Thrift::Types::I16, :name => 'max_parts', :default => -1}, + VALIDTXNLIST => {:type => ::Thrift::Types::STRING, :name => 'validTxnList'} } def struct_fields; FIELDS; end @@ -9615,6 +9641,7 @@ module ThriftHiveMetastore MAX_PARTS = 4 USER_NAME = 5 GROUP_NAMES = 6 + VALIDTXNLIST = 7 FIELDS = { DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, @@ -9622,7 +9649,8 @@ module ThriftHiveMetastore PART_VALS => {:type => ::Thrift::Types::LIST, :name => 'part_vals', :element => {:type => ::Thrift::Types::STRING}}, MAX_PARTS => {:type => ::Thrift::Types::I16, :name => 'max_parts', :default => -1}, USER_NAME => {:type => ::Thrift::Types::STRING, :name => 'user_name'}, - GROUP_NAMES => {:type => ::Thrift::Types::LIST, :name => 'group_names', :element => {:type => ::Thrift::Types::STRING}} + GROUP_NAMES => {:type => ::Thrift::Types::LIST, :name => 'group_names', :element => {:type => ::Thrift::Types::STRING}}, + VALIDTXNLIST => {:type => ::Thrift::Types::STRING, :name => 'validTxnList'} } def struct_fields; FIELDS; end @@ -9659,12 +9687,14 @@ module ThriftHiveMetastore TBL_NAME = 2 PART_VALS = 3 MAX_PARTS = 4 + VALIDTXNLIST = 5 FIELDS = { DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'}, PART_VALS => {:type => ::Thrift::Types::LIST, :name => 'part_vals', :element => {:type => ::Thrift::Types::STRING}}, - MAX_PARTS => {:type => ::Thrift::Types::I16, :name => 'max_parts', :default => -1} + MAX_PARTS => {:type => ::Thrift::Types::I16, :name => 'max_parts', :default => -1}, + VALIDTXNLIST => {:type => ::Thrift::Types::STRING, :name => 'validTxnList'} } def struct_fields; FIELDS; end @@ -9701,12 +9731,14 @@ module ThriftHiveMetastore TBL_NAME = 2 FILTER = 3 MAX_PARTS = 4 + VALIDTXNLIST = 5 FIELDS = { DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'}, FILTER => {:type => ::Thrift::Types::STRING, :name => 'filter'}, - MAX_PARTS => {:type => ::Thrift::Types::I16, :name => 'max_parts', :default => -1} + MAX_PARTS => {:type => ::Thrift::Types::I16, :name => 'max_parts', :default => -1}, + VALIDTXNLIST => {:type => ::Thrift::Types::STRING, :name => 'validTxnList'} } def struct_fields; FIELDS; end @@ -9743,12 +9775,14 @@ module ThriftHiveMetastore TBL_NAME = 2 FILTER = 3 MAX_PARTS = 4 + VALIDTXNLIST = 5 FIELDS = { DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'}, FILTER => {:type => ::Thrift::Types::STRING, :name => 'filter'}, - MAX_PARTS => {:type => ::Thrift::Types::I32, :name => 'max_parts', :default => -1} + MAX_PARTS => {:type => ::Thrift::Types::I32, :name => 'max_parts', :default => -1}, + VALIDTXNLIST => {:type => ::Thrift::Types::STRING, :name => 'validTxnList'} } def struct_fields; FIELDS; end @@ -9820,11 +9854,13 @@ module ThriftHiveMetastore DB_NAME = 1 TBL_NAME = 2 FILTER = 3 + VALIDTXNLIST = 4 FIELDS = { DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'}, - FILTER => {:type => ::Thrift::Types::STRING, :name => 'filter'} + FILTER => {:type => 
::Thrift::Types::STRING, :name => 'filter'}, + VALIDTXNLIST => {:type => ::Thrift::Types::STRING, :name => 'validTxnList'} } def struct_fields; FIELDS; end @@ -9860,11 +9896,13 @@ module ThriftHiveMetastore DB_NAME = 1 TBL_NAME = 2 NAMES = 3 + VALIDTXNLIST = 4 FIELDS = { DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'}, - NAMES => {:type => ::Thrift::Types::LIST, :name => 'names', :element => {:type => ::Thrift::Types::STRING}} + NAMES => {:type => ::Thrift::Types::LIST, :name => 'names', :element => {:type => ::Thrift::Types::STRING}}, + VALIDTXNLIST => {:type => ::Thrift::Types::STRING, :name => 'validTxnList'} } def struct_fields; FIELDS; end @@ -10824,11 +10862,13 @@ module ThriftHiveMetastore DB_NAME = 1 TBL_NAME = 2 COL_NAME = 3 + VALIDWRITEIDLIST = 4 FIELDS = { DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'}, - COL_NAME => {:type => ::Thrift::Types::STRING, :name => 'col_name'} + COL_NAME => {:type => ::Thrift::Types::STRING, :name => 'col_name'}, + VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList'} } def struct_fields; FIELDS; end @@ -10869,12 +10909,14 @@ module ThriftHiveMetastore TBL_NAME = 2 PART_NAME = 3 COL_NAME = 4 + VALIDWRITEIDLIST = 5 FIELDS = { DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'}, PART_NAME => {:type => ::Thrift::Types::STRING, :name => 'part_name'}, - COL_NAME => {:type => ::Thrift::Types::STRING, :name => 'col_name'} + COL_NAME => {:type => ::Thrift::Types::STRING, :name => 'col_name'}, + VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList'} } def struct_fields; FIELDS; end diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java index d3429c1003..0374582b2e 100644 --- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java +++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java @@ -19,9 +19,7 @@ package org.apache.hadoop.hive.metastore; import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; -import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.CAT_NAME; import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog; -import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.parseDbName; import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.prependCatalogToDbName; import java.io.IOException; @@ -58,7 +56,9 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.common.StatsSetupConst; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidTxnWriteIdList; import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.metastore.api.*; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; @@ -145,6 +145,8 @@ private static String[] processorCapabilities; private static String processorIdentifier; + private ValidTxnWriteIdList txnWriteIdList; + //copied from ErrorMsg.java 
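Client-side, the patch pins a per-transaction ValidTxnWriteIdList on HiveMetaStoreClient and threads a per-table write-id string into each read RPC. The sketch below shows how a caller could drive that API end to end; it is illustrative only: the class name SnapshotReadSketch, the method name, the msc handle, and the catalog/db/table literals are assumptions, while setValidWriteIdList, clearValidWriteIdList, and the five-argument getTable come from this diff (the setter and clear methods appear in later hunks of this same file).

import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.thrift.TException;

public final class SnapshotReadSketch {
  // Pin the open transaction's serialized ValidTxnWriteIdList on the client,
  // read under that snapshot, then clear it so later calls are unpinned.
  static Table readUnderSnapshot(IMetaStoreClient msc, String txnWriteIdListStr)
      throws TException {
    msc.setValidWriteIdList(txnWriteIdListStr);
    try {
      // checkTransactional=true makes the client attach this table's
      // write-id list to the get_table request.
      return msc.getTable("hive", "default", "t", true, false);
    } finally {
      msc.clearValidWriteIdList();
    }
  }
}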
private static final String REPL_EVENTS_MISSING_IN_METASTORE = "Notification events are missing in the meta store."; @@ -1719,7 +1721,7 @@ public boolean dropType(String type) throws NoSuchObjectException, MetaException int max_parts) throws TException { // TODO should we add capabilities here as well as it returns Partition objects List parts = client.get_partitions(prependCatalogToDbName(catName, db_name, conf), - tbl_name, shrinkMaxtoShort(max_parts)); + tbl_name, shrinkMaxtoShort(max_parts), getValidWriteIdList(TableName.getDbTable(db_name, tbl_name))); return deepCopyPartitions( FilterUtils.filterPartitionsIfEnabled(isClientFilterEnabled, filterHook, parts)); } @@ -1733,7 +1735,8 @@ public PartitionSpecProxy listPartitionSpecs(String dbName, String tableName, in public PartitionSpecProxy listPartitionSpecs(String catName, String dbName, String tableName, int maxParts) throws TException { List partitionSpecs = - client.get_partitions_pspec(prependCatalogToDbName(catName, dbName, conf), tableName, maxParts); + client.get_partitions_pspec(prependCatalogToDbName(catName, dbName, conf), tableName, maxParts, + getValidWriteIdList(TableName.getDbTable(dbName, tableName))); partitionSpecs = FilterUtils.filterPartitionSpecsIfEnabled(isClientFilterEnabled, filterHook, partitionSpecs); return PartitionSpecProxy.Factory.get(partitionSpecs); } @@ -1750,7 +1753,7 @@ public PartitionSpecProxy listPartitionSpecs(String catName, String dbName, Stri List part_vals, int max_parts) throws TException { // TODO should we add capabilities here as well as it returns Partition objects List parts = client.get_partitions_ps(prependCatalogToDbName(catName, db_name, conf), - tbl_name, part_vals, shrinkMaxtoShort(max_parts)); + tbl_name, part_vals, shrinkMaxtoShort(max_parts), getValidWriteIdList(TableName.getDbTable(db_name, tbl_name))); return deepCopyPartitions(FilterUtils.filterPartitionsIfEnabled(isClientFilterEnabled, filterHook, parts)); } @@ -1769,7 +1772,7 @@ public PartitionSpecProxy listPartitionSpecs(String catName, String dbName, Stri List groupNames) throws TException { // TODO should we add capabilities here as well as it returns Partition objects List parts = client.get_partitions_with_auth(prependCatalogToDbName(catName, - dbName, conf), tableName, shrinkMaxtoShort(maxParts), userName, groupNames); + dbName, conf), tableName, shrinkMaxtoShort(maxParts), userName, groupNames, getValidWriteIdList(TableName.getDbTable(dbName, tableName))); return deepCopyPartitions(FilterUtils.filterPartitionsIfEnabled(isClientFilterEnabled, filterHook, parts)); } @@ -1790,7 +1793,7 @@ public PartitionSpecProxy listPartitionSpecs(String catName, String dbName, Stri throws TException { // TODO should we add capabilities here as well as it returns Partition objects List parts = client.get_partitions_ps_with_auth(prependCatalogToDbName(catName, - dbName, conf), tableName, partialPvals, shrinkMaxtoShort(maxParts), userName, groupNames); + dbName, conf), tableName, partialPvals, shrinkMaxtoShort(maxParts), userName, groupNames, getValidWriteIdList(TableName.getDbTable(dbName, tableName))); return deepCopyPartitions(FilterUtils.filterPartitionsIfEnabled(isClientFilterEnabled, filterHook, parts)); } @@ -1805,7 +1808,7 @@ public PartitionSpecProxy listPartitionSpecs(String catName, String dbName, Stri String filter, int max_parts) throws TException { // TODO should we add capabilities here as well as it returns Partition objects List parts = client.get_partitions_by_filter(prependCatalogToDbName( - catName, db_name, conf), 
tbl_name, filter, shrinkMaxtoShort(max_parts)); + catName, db_name, conf), tbl_name, filter, shrinkMaxtoShort(max_parts), getValidWriteIdList(TableName.getDbTable(db_name, tbl_name))); return deepCopyPartitions(FilterUtils.filterPartitionsIfEnabled(isClientFilterEnabled, filterHook, parts)); } @@ -1822,7 +1825,7 @@ public PartitionSpecProxy listPartitionSpecsByFilter(String catName, String db_n int max_parts) throws TException { List partitionSpecs = client.get_part_specs_by_filter(prependCatalogToDbName(catName, db_name, conf), tbl_name, filter, - max_parts); + max_parts, getValidWriteIdList(TableName.getDbTable(db_name, tbl_name))); return PartitionSpecProxy.Factory.get( FilterUtils.filterPartitionSpecsIfEnabled(isClientFilterEnabled, filterHook, partitionSpecs)); } @@ -1887,7 +1890,7 @@ public Partition getPartition(String db_name, String tbl_name, List part @Override public Partition getPartition(String catName, String dbName, String tblName, List partVals) throws TException { - Partition p = client.get_partition(prependCatalogToDbName(catName, dbName, conf), tblName, partVals); + Partition p = client.get_partition(prependCatalogToDbName(catName, dbName, conf), tblName, partVals, getValidWriteIdList(TableName.getDbTable(dbName, tblName))); return deepCopy(FilterUtils.filterPartitionIfEnabled(isClientFilterEnabled, filterHook, p)); } @@ -1956,7 +1959,7 @@ public Partition getPartitionWithAuthInfo(String catName, String dbName, String List pvals, String userName, List groupNames) throws TException { Partition p = client.get_partition_with_auth(prependCatalogToDbName(catName, dbName, conf), tableName, - pvals, userName, groupNames); + pvals, userName, groupNames, getValidWriteIdList(TableName.getDbTable(dbName, tableName))); return deepCopy(FilterUtils.filterPartitionIfEnabled(isClientFilterEnabled, filterHook, p)); } @@ -1967,26 +1970,21 @@ public Table getTable(String dbname, String name) throws TException { @Override public Table getTable(String dbname, String name, boolean getColumnStats) throws TException { - return getTable(getDefaultCatalog(conf), dbname, name, getColumnStats); + return getTable(getDefaultCatalog(conf), dbname, name, true, getColumnStats); } @Override public Table getTable(String catName, String dbName, String tableName) throws TException { - return getTable(catName, dbName, tableName, false); + return getTable(catName, dbName, tableName, true, false); } - public Table getTable(String catName, String dbName, String tableName, boolean getColumnStats) throws TException { - GetTableRequest req = new GetTableRequest(dbName, tableName); - req.setCatName(catName); - req.setCapabilities(version); - req.setGetColumnStats(getColumnStats); - if (processorCapabilities != null) - req.setProcessorCapabilities(new ArrayList(Arrays.asList(processorCapabilities))); - if (processorIdentifier != null) - req.setProcessorIdentifier(processorIdentifier); - - Table t = client.get_table_req(req).getTable(); - return deepCopy(FilterUtils.filterTableIfEnabled(isClientFilterEnabled, filterHook, t)); + @Override + public Table getTable(String catName, String dbName, String tableName, boolean checkTransactional, boolean getColumnStats) throws TException { + String validWriteIdList = null; + if (checkTransactional) { + validWriteIdList = getValidWriteIdList(TableName.getDbTable(dbName, tableName)); + } + return getTable(catName, dbName, tableName, validWriteIdList, getColumnStats); } @Override @@ -2001,7 +1999,9 @@ public Table getTable(String catName, String dbName, String tableName, String 
va GetTableRequest req = new GetTableRequest(dbName, tableName); req.setCatName(catName); req.setCapabilities(version); - req.setValidWriteIdList(validWriteIdList); + if (validWriteIdList != null) { + req.setValidWriteIdList(validWriteIdList); + } req.setGetColumnStats(getColumnStats); if (processorCapabilities != null) req.setProcessorCapabilities(new ArrayList(Arrays.asList(processorCapabilities))); @@ -2228,7 +2228,7 @@ public boolean tableExists(String catName, String dbName, String tableName) thro int maxParts) throws TException { List partNames = client.get_partition_names( - prependCatalogToDbName(catName, dbName, conf), tableName, shrinkMaxtoShort(maxParts)); + prependCatalogToDbName(catName, dbName, conf), tableName, shrinkMaxtoShort(maxParts), getValidWriteIdList(TableName.getDbTable(dbName, tableName))); return FilterUtils.filterPartitionNamesIfEnabled( isClientFilterEnabled, filterHook, catName, dbName, tableName, partNames); } @@ -2243,7 +2243,7 @@ public boolean tableExists(String catName, String dbName, String tableName) thro public List listPartitionNames(String catName, String db_name, String tbl_name, List part_vals, int max_parts) throws TException { List partNames = client.get_partition_names_ps(prependCatalogToDbName(catName, db_name, conf), tbl_name, - part_vals, shrinkMaxtoShort(max_parts)); + part_vals, shrinkMaxtoShort(max_parts), getValidWriteIdList(TableName.getDbTable(db_name, tbl_name))); return FilterUtils.filterPartitionNamesIfEnabled( isClientFilterEnabled, filterHook, catName, db_name, tbl_name, partNames); } @@ -2258,7 +2258,7 @@ public int getNumPartitionsByFilter(String db_name, String tbl_name, public int getNumPartitionsByFilter(String catName, String dbName, String tableName, String filter) throws TException { return client.get_num_partitions_by_filter(prependCatalogToDbName(catName, dbName, conf), tableName, - filter); + filter, getValidWriteIdList(TableName.getDbTable(dbName, tableName))); } @Override @@ -2354,7 +2354,7 @@ public void alterDatabase(String catName, String dbName, Database newDb) throws @Override public List getFields(String catName, String db, String tableName) throws TException { - List fields = client.get_fields(prependCatalogToDbName(catName, db, conf), tableName); + List fields = client.get_fields(prependCatalogToDbName(catName, db, conf), tableName, getValidWriteIdList(TableName.getDbTable(db, tableName))); return deepCopyFieldSchemas(fields); } @@ -2554,7 +2554,7 @@ public boolean deleteTableColumnStatistics(String catName, String dbName, String } List fields = client.get_schema_with_environment_context(prependCatalogToDbName( - catName, db, conf), tableName, envCxt); + catName, db, conf), tableName, envCxt, getValidWriteIdList(TableName.getDbTable(db, tableName))); return deepCopyFieldSchemas(fields); } @@ -2573,7 +2573,7 @@ public Partition getPartition(String db, String tableName, String partName) thro public Partition getPartition(String catName, String dbName, String tblName, String name) throws TException { Partition p = client.get_partition_by_name(prependCatalogToDbName(catName, dbName, conf), tblName, - name); + name, getValidWriteIdList(TableName.getDbTable(dbName, tblName))); return deepCopy(FilterUtils.filterPartitionIfEnabled(isClientFilterEnabled, filterHook, p)); } @@ -3904,6 +3904,14 @@ public SerDeInfo getSerDe(String serDeName) throws TException { return client.get_serde(new GetSerdeRequest(serDeName)); } + private String getValidWriteIdList(String fullTableName) { + if (txnWriteIdList == null) { + return 
null; + } + ValidWriteIdList writeIdList = txnWriteIdList.getTableValidWriteIdList(fullTableName); + return writeIdList!=null?writeIdList.toString():null; + } + private short shrinkMaxtoShort(int max) { if (max < 0) { return -1; @@ -3987,6 +3995,16 @@ public String getServerVersion() throws TException { return client.getVersion(); } + @Override + public void setValidWriteIdList(String txnWriteIdListStr) { + this.txnWriteIdList = (txnWriteIdListStr==null?null:new ValidTxnWriteIdList(txnWriteIdListStr)); + } + + @Override + public void clearValidWriteIdList() { + this.txnWriteIdList = null; + } + /** * Builder for requiredFields bitmask to be sent via GetTablesExtRequest */ diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java index 4561b41593..8d89b9e08e 100644 --- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java +++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java @@ -29,6 +29,7 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidTxnWriteIdList; import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.common.classification.RetrySemantics; import org.apache.hadoop.hive.metastore.annotation.NoReconnect; @@ -680,6 +681,21 @@ Table getTable(String dbName, String tableName, boolean getColumnStats) throws M */ Table getTable(String catName, String dbName, String tableName) throws MetaException, TException; + /** + * Get a table object. + * @param catName catalog the table is in. + * @param dbName database the table is in. + * @param tableName table name. + * @param checkTransactional + * checks whether the metadata table stats are valid for (i.e., compliant with + * the snapshot isolation of) the current transaction. + * @param getColumnStats get the column stats, if available, when true + * @return table object. + * @throws MetaException Something went wrong, usually in the RDBMS. + * @throws TException general thrift error. + */ + Table getTable(String catName, String dbName, String tableName, boolean checkTransactional, boolean getColumnStats) throws MetaException, TException; + /** * Get a table object. * @param catName catalog the table is in.
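Restated on its own, the helper added above performs a single resolution from the transaction-level snapshot to one table's write-id list. A minimal sketch under the types this diff already imports; the class and method names here are hypothetical:

import org.apache.hadoop.hive.common.TableName;
import org.apache.hadoop.hive.common.ValidTxnWriteIdList;
import org.apache.hadoop.hive.common.ValidWriteIdList;

public final class WriteIdLookupSketch {
  // Resolve one table's ValidWriteIdList out of the transaction-level snapshot
  // and serialize it for the Thrift call; null means "no snapshot to enforce",
  // either because none is pinned or because the table is not covered by it.
  static String forTable(ValidTxnWriteIdList snapshot, String db, String tbl) {
    if (snapshot == null) {
      return null;
    }
    ValidWriteIdList ids = snapshot.getTableValidWriteIdList(TableName.getDbTable(db, tbl));
    return ids == null ? null : ids.toString();
  }
}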
@@ -3893,4 +3909,8 @@ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerNam * @return String representation of the version number of Metastore server (eg: 3.1.0-SNAPSHOT) */ String getServerVersion() throws TException; + + void setValidWriteIdList(String txnWriteIdList); + + void clearValidWriteIdList(); } diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java index 927324e29e..0926dafe3a 100644 --- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java +++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java @@ -349,9 +349,6 @@ public static ConfVars getMetaConf(String name) { CATALOG_DEFAULT("metastore.catalog.default", "metastore.catalog.default", "hive", "The default catalog to use when a catalog is not specified. Default is 'hive' (the " + "default catalog)."), - CATALOGS_TO_CACHE("metastore.cached.rawstore.catalogs", "metastore.cached.rawstore.catalogs", - "hive", "Comma separated list of catalogs to cache in the CachedStore. Default is 'hive' " + - "(the default catalog). Empty string means all catalogs will be cached."), CLIENT_CONNECT_RETRY_DELAY("metastore.client.connect.retry.delay", "hive.metastore.client.connect.retry.delay", 1, TimeUnit.SECONDS, "Number of seconds for the client to wait between consecutive connection attempts"), @@ -990,8 +987,6 @@ public static ConfVars getMetaConf(String name) { "Time interval describing how often the reaper runs"), TOKEN_SIGNATURE("metastore.token.signature", "hive.metastore.token.signature", "", "The delegation token service name to match when selecting a token from the current user's tokens."), - METASTORE_CACHE_CAN_USE_EVENT("metastore.cache.can.use.event", "hive.metastore.cache.can.use.event", false, - "Can notification events from notification log table be used for updating the metastore cache."), TRANSACTIONAL_EVENT_LISTENERS("metastore.transactional.event.listeners", "hive.metastore.transactional.event.listeners", "", "A comma separated list of Java classes that implement the org.apache.riven.MetaStoreEventListener" + diff --git a/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift b/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift index 7e55970c0f..10d52a2349 100644 --- a/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift +++ b/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift @@ -747,8 +747,9 @@ struct PartitionsByExprRequest { 2: required string tblName, 3: required binary expr, 4: optional string defaultPartitionName, - 5: optional i16 maxParts=-1 - 6: optional string catName + 5: optional i16 maxParts=-1, + 6: optional string catName, + 7: optional string validWriteIdList } struct TableStatsResult { @@ -833,7 +834,8 @@ struct PartitionValuesRequest { 6: optional list partitionOrder; 7: optional bool ascending = true; 8: optional i64 maxParts = -1; - 9: optional string catName + 9: optional string catName, + 10: optional string validWriteIdList } struct PartitionValuesRow { @@ -850,7 +852,8 @@ struct GetPartitionsByNamesRequest { 3: optional list names, 4: optional bool get_col_stats, 5: optional list processorCapabilities, - 6: optional string processorIdentifier + 6: optional string processorIdentifier, + 7: 
optional string validWriteIdList } struct GetPartitionsByNamesResult { @@ -1009,6 +1012,16 @@ struct TableValidWriteIds { 5: required binary abortedBits, // Bit array to identify the aborted write ids in invalidWriteIds list } +struct TableWriteId { + 1: required string fullTableName, // Full table name of format <db_name>.<table_name> + 2: required i64 writeId, // current write id of the table +} + // Current Write ID for changed tables of the txn +struct GetTxnTableWriteIdsResponse { + 1: required list tableWriteIds, +} + // Valid Write ID list for all the input tables wrt to current txn struct GetValidWriteIdsResponse { 1: required list tblValidWriteIds, @@ -1840,7 +1853,8 @@ struct GetPartitionsRequest { 7: GetPartitionsProjectionSpec projectionSpec 8: GetPartitionsFilterSpec filterSpec, // TODO not yet implemented. Must be present but ignored 9: optional list processorCapabilities, - 10: optional string processorIdentifier + 10: optional string processorIdentifier, + 11: optional string validWriteIdList } // Exceptions. @@ -1935,12 +1949,12 @@ service ThriftHiveMetastore extends fb303.FacebookService throws(1:MetaException o2) // Gets a list of FieldSchemas describing the columns of a particular table - list get_fields(1: string db_name, 2: string table_name) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3), - list get_fields_with_environment_context(1: string db_name, 2: string table_name, 3:EnvironmentContext environment_context) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3) + list get_fields(1: string db_name, 2: string table_name, 3: string validWriteIdList) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3), + list get_fields_with_environment_context(1: string db_name, 2: string table_name, 3: EnvironmentContext environment_context, 4: string validWriteIdList) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3) // Gets a list of FieldSchemas describing both the columns and the partition keys of a particular table - list get_schema(1: string db_name, 2: string table_name) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3) - list get_schema_with_environment_context(1: string db_name, 2: string table_name, 3:EnvironmentContext environment_context) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3) + list get_schema(1: string db_name, 2: string table_name, 3: string validWriteIdList) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3) + list get_schema_with_environment_context(1: string db_name, 2: string table_name, 3:EnvironmentContext environment_context, 4:string validWriteIdList) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3) // create a Hive table. Following fields must be set // tableName @@ -1999,7 +2013,7 @@ service ThriftHiveMetastore extends fb303.FacebookService throws (1: MetaException o1) list get_all_tables(1: string db_name) throws (1: MetaException o1) - Table get_table(1:string dbname, 2:string tbl_name) + Table get_table(1:string dbname, 2:string tbl_name, 3: string validWriteIdList) throws (1:MetaException o1, 2:NoSuchObjectException o2) list
get_table_objects_by_name(1:string dbname, 2:list tbl_names) list get_tables_ext(1: GetTablesExtRequest req) throws (1: MetaException o1) @@ -2100,7 +2114,7 @@ service ThriftHiveMetastore extends fb303.FacebookService DropPartitionsResult drop_partitions_req(1: DropPartitionsRequest req) throws(1:NoSuchObjectException o1, 2:MetaException o2) - Partition get_partition(1:string db_name, 2:string tbl_name, 3:list part_vals) + Partition get_partition(1:string db_name, 2:string tbl_name, 3:list part_vals, 4:string validTxnList) throws(1:MetaException o1, 2:NoSuchObjectException o2) Partition exchange_partition(1:map partitionSpecs, 2:string source_db, 3:string source_table_name, 4:string dest_db, 5:string dest_table_name) @@ -2113,22 +2127,22 @@ service ThriftHiveMetastore extends fb303.FacebookService 4:InvalidInputException o4) Partition get_partition_with_auth(1:string db_name, 2:string tbl_name, 3:list part_vals, - 4: string user_name, 5: list group_names) throws(1:MetaException o1, 2:NoSuchObjectException o2) + 4: string user_name, 5: list group_names, 6: string validTxnList) throws(1:MetaException o1, 2:NoSuchObjectException o2) - Partition get_partition_by_name(1:string db_name 2:string tbl_name, 3:string part_name) + Partition get_partition_by_name(1:string db_name 2:string tbl_name, 3:string part_name, 4:string validTxnList) throws(1:MetaException o1, 2:NoSuchObjectException o2) // returns all the partitions for this table in reverse chronological order. // If max parts is given then it will return only that many. - list get_partitions(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1) + list get_partitions(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1, 4:string validTxnList) throws(1:NoSuchObjectException o1, 2:MetaException o2) list get_partitions_with_auth(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1, - 4: string user_name, 5: list group_names) throws(1:NoSuchObjectException o1, 2:MetaException o2) + 4: string user_name, 5: list group_names, 6: string validTxnList) throws(1:NoSuchObjectException o1, 2:MetaException o2) - list get_partitions_pspec(1:string db_name, 2:string tbl_name, 3:i32 max_parts=-1) + list get_partitions_pspec(1:string db_name, 2:string tbl_name, 3:i32 max_parts=-1, 4:string validTxnList) throws(1:NoSuchObjectException o1, 2:MetaException o2) - list get_partition_names(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1) + list get_partition_names(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1, 4:string validTxnList) throws(1:NoSuchObjectException o1, 2:MetaException o2) PartitionValuesResponse get_partition_values(1:PartitionValuesRequest request) @@ -2141,23 +2155,23 @@ service ThriftHiveMetastore extends fb303.FacebookService // number of partition columns - the unspecified values are considered the same // as "". 
list get_partitions_ps(1:string db_name 2:string tbl_name - 3:list part_vals, 4:i16 max_parts=-1) + 3:list part_vals, 4:i16 max_parts=-1, 5:string validTxnList) throws(1:MetaException o1, 2:NoSuchObjectException o2) list get_partitions_ps_with_auth(1:string db_name, 2:string tbl_name, 3:list part_vals, 4:i16 max_parts=-1, - 5: string user_name, 6: list group_names) throws(1:NoSuchObjectException o1, 2:MetaException o2) + 5: string user_name, 6: list group_names, 7: string validTxnList) throws(1:NoSuchObjectException o1, 2:MetaException o2) list get_partition_names_ps(1:string db_name, - 2:string tbl_name, 3:list part_vals, 4:i16 max_parts=-1) + 2:string tbl_name, 3:list part_vals, 4:i16 max_parts=-1, 5:string validTxnList) throws(1:MetaException o1, 2:NoSuchObjectException o2) // get the partitions matching the given partition filter list get_partitions_by_filter(1:string db_name 2:string tbl_name - 3:string filter, 4:i16 max_parts=-1) + 3:string filter, 4:i16 max_parts=-1, 5:string validTxnList) throws(1:MetaException o1, 2:NoSuchObjectException o2) // List partitions as PartitionSpec instances. list get_part_specs_by_filter(1:string db_name 2:string tbl_name - 3:string filter, 4:i32 max_parts=-1) + 3:string filter, 4:i32 max_parts=-1, 5:string validTxnList) throws(1:MetaException o1, 2:NoSuchObjectException o2) // get the partitions matching the given partition filter @@ -2167,11 +2181,11 @@ service ThriftHiveMetastore extends fb303.FacebookService throws(1:MetaException o1, 2:NoSuchObjectException o2) // get the partitions matching the given partition filter - i32 get_num_partitions_by_filter(1:string db_name 2:string tbl_name 3:string filter) + i32 get_num_partitions_by_filter(1:string db_name 2:string tbl_name 3:string filter 4:string validTxnList) throws(1:MetaException o1, 2:NoSuchObjectException o2) // get partitions give a list of partition names - list get_partitions_by_names(1:string db_name 2:string tbl_name 3:list names) + list get_partitions_by_names(1:string db_name 2:string tbl_name 3:list names 4:string validTxnList) throws(1:MetaException o1, 2:NoSuchObjectException o2) GetPartitionsByNamesResult get_partitions_by_names_req(1:GetPartitionsByNamesRequest req) throws(1:MetaException o1, 2:NoSuchObjectException o2) @@ -2271,10 +2285,10 @@ service ThriftHiveMetastore extends fb303.FacebookService // such statistics exists. 
If the required statistics doesn't exist, get APIs throw NoSuchObjectException // For instance, if get_table_column_statistics is called on a partitioned table for which only // partition level column stats exist, get_table_column_statistics will throw NoSuchObjectException - ColumnStatistics get_table_column_statistics(1:string db_name, 2:string tbl_name, 3:string col_name) throws + ColumnStatistics get_table_column_statistics(1:string db_name, 2:string tbl_name, 3:string col_name, 4:string validWriteIdList) throws (1:NoSuchObjectException o1, 2:MetaException o2, 3:InvalidInputException o3, 4:InvalidObjectException o4) ColumnStatistics get_partition_column_statistics(1:string db_name, 2:string tbl_name, 3:string part_name, - 4:string col_name) throws (1:NoSuchObjectException o1, 2:MetaException o2, + 4:string col_name, 5:string validWriteIdList) throws (1:NoSuchObjectException o1, 2:MetaException o2, 3:InvalidInputException o3, 4:InvalidObjectException o4) TableStatsResult get_table_statistics_req(1:TableStatsRequest request) throws (1:NoSuchObjectException o1, 2:MetaException o2) diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java index 62345014f2..a1b89e8fe2 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java @@ -293,7 +293,7 @@ public void alterTable(RawStore msdb, Warehouse wh, String catName, String dbnam String newTblLocPath = dataWasMoved ? destPath.toUri().getPath() : null; // also the location field in partition - parts = msdb.getPartitions(catName, dbname, name, -1); + parts = msdb.getPartitions(catName, dbname, name, -1, null); Map columnStatsNeedUpdated = new HashMap<>(); for (Partition part : parts) { String oldPartLoc = part.getSd().getLocation(); @@ -359,7 +359,7 @@ public void alterTable(RawStore msdb, Warehouse wh, String catName, String dbnam if (isPartitionedTable) { //Currently only column related changes can be cascaded in alter table if(!MetaStoreServerUtils.areSameColumns(oldt.getSd().getCols(), newt.getSd().getCols())) { - parts = msdb.getPartitions(catName, dbname, name, -1); + parts = msdb.getPartitions(catName, dbname, name, -1, null); for (Partition part : parts) { Partition oldPart = new Partition(part); List oldCols = part.getSd().getCols(); @@ -512,6 +512,7 @@ public Partition alterPartition(RawStore msdb, Warehouse wh, String catName, Str //alter partition if (part_vals == null || part_vals.size() == 0) { try { + msdb.openTransaction(); Table tbl = msdb.getTable(catName, dbname, name, null); @@ -519,7 +520,7 @@ public Partition alterPartition(RawStore msdb, Warehouse wh, String catName, Str throw new InvalidObjectException( "Unable to alter partition because table or database does not exist."); } - oldPart = msdb.getPartition(catName, dbname, name, new_part.getValues()); + oldPart = msdb.getPartition(catName, dbname, name, new_part.getValues(), null); if (MetaStoreServerUtils.requireCalStats(oldPart, new_part, tbl, environmentContext)) { // if stats are same, no need to update if (MetaStoreServerUtils.isFastStatsSame(oldPart, new_part)) { @@ -571,6 +572,7 @@ public Partition alterPartition(RawStore msdb, Warehouse wh, String catName, Str boolean dataWasMoved = false; Database db; try { + 
msdb.openTransaction(); Table tbl = msdb.getTable(DEFAULT_CATALOG_NAME, dbname, name, null); if (tbl == null) { @@ -578,7 +580,7 @@ public Partition alterPartition(RawStore msdb, Warehouse wh, String catName, Str "Unable to alter partition because table or database does not exist."); } try { - oldPart = msdb.getPartition(catName, dbname, name, part_vals); + oldPart = msdb.getPartition(catName, dbname, name, part_vals, null); } catch (NoSuchObjectException e) { // this means there is no existing partition throw new InvalidObjectException( @@ -587,7 +589,7 @@ public Partition alterPartition(RawStore msdb, Warehouse wh, String catName, Str Partition check_part; try { - check_part = msdb.getPartition(catName, dbname, name, new_part.getValues()); + check_part = msdb.getPartition(catName, dbname, name, new_part.getValues(), null); } catch(NoSuchObjectException e) { // this means there is no existing partition check_part = null; @@ -745,6 +747,7 @@ public Partition alterPartition(RawStore msdb, Warehouse wh, String catName, Str boolean success = false; try { + msdb.openTransaction(); // Note: should we pass in write ID here? We only update stats on parts so probably not. @@ -766,7 +769,7 @@ public Partition alterPartition(RawStore msdb, Warehouse wh, String catName, Str .currentTimeMillis() / 1000)); } - Partition oldTmpPart = msdb.getPartition(catName, dbname, name, tmpPart.getValues()); + Partition oldTmpPart = msdb.getPartition(catName, dbname, name, tmpPart.getValues(), null); oldParts.add(oldTmpPart); partValsList.add(tmpPart.getValues()); @@ -961,7 +964,7 @@ private Path constructRenamedPath(Path defaultNewPath, Path currentPath) { // NOTE: this doesn't check stats being compliant, but the alterTable call below does. // The worst we can do is delete the stats. // Collect column stats which need to be rewritten and remove old stats. - colStats = msdb.getTableColumnStatistics(catName, dbName, tableName, oldColNames); + colStats = msdb.getTableColumnStatistics(catName, dbName, tableName, oldColNames, null); if (colStats == null) { updateColumnStats = false; } else { @@ -1050,7 +1053,7 @@ public static ColumnStatistics updateOrGetPartitionColumnStats( List oldPartNames = Lists.newArrayList(oldPartName); // TODO: doesn't take txn stats into account. This method can only remove stats. List partsColStats = msdb.getPartitionColumnStatistics(catName, dbname, tblname, - oldPartNames, oldColNames); + oldPartNames, oldColNames, null); assert (partsColStats.size() <= 1); // for out para, this value is initialized by caller. 
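HiveAlterHandler's alter paths above now call msdb.openTransaction() before their first metadata read and pass an explicit null write-id list, since these internal reads are not snapshot-checked. A condensed sketch of the resulting bracket, with a hypothetical wrapper method and variable names; the RawStore calls match the signatures this patch uses:

import java.util.List;
import org.apache.hadoop.hive.metastore.RawStore;
import org.apache.hadoop.hive.metastore.api.Partition;

public final class AlterBracketSketch {
  // Shape of the open/commit/rollback bracket the patch tightens in alterPartition:
  // the read of the old partition now happens inside the same store transaction
  // as the write that follows it.
  static void alterUnderTxn(RawStore msdb, String cat, String db, String tbl,
      List<String> partVals) throws Exception {
    boolean success = false;
    try {
      msdb.openTransaction();                                   // opened before the read now
      Partition oldPart =
          msdb.getPartition(cat, db, tbl, partVals, null);      // null: no snapshot check here
      // ... validate and mutate oldPart, then write the new partition back ...
      success = msdb.commitTransaction();
    } finally {
      if (!success) {
        msdb.rollbackTransaction();
      }
    }
  }
}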
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java index 3397f93bf6..f2dca3cf0f 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java @@ -586,19 +586,6 @@ public void init() throws MetaException { listeners.add(new HMSMetricsListener(conf)); } - boolean canCachedStoreCanUseEvent = false; - for (MetaStoreEventListener listener : transactionalListeners) { - if (listener.doesAddEventsToNotificationLogTable()) { - canCachedStoreCanUseEvent = true; - break; - } - } - if (conf.getBoolean(ConfVars.METASTORE_CACHE_CAN_USE_EVENT.getVarname(), false) && - !canCachedStoreCanUseEvent) { - throw new MetaException("CahcedStore can not use events for invalidation as there is no " + - " TransactionalMetaStoreEventListener to add events to notification table"); - } - endFunctionListeners = MetaStoreServerUtils.getMetaStoreListeners( MetaStoreEndFunctionListener.class, conf, MetastoreConf.getVar(conf, ConfVars.END_FUNCTION_LISTENERS)); @@ -2744,7 +2731,7 @@ private boolean drop_table_core(final RawStore ms, final String catName, final S isReplicated = isDbReplicationTarget(db); // drop any partitions - tbl = get_table_core(catName, dbname, name); + tbl = get_table_core(catName, dbname, name, null); if (tbl == null) { throw new NoSuchObjectException(name + " doesn't exist"); } @@ -2893,7 +2880,7 @@ private void deletePartitionData(List partPaths, boolean ifPurge, Database List partPaths = new ArrayList<>(); while (true) { Map partitionLocations = ms.getPartitionLocations(catName, dbName, tableName, - tableDnsPath, batchSize); + tableDnsPath, batchSize, null); if (partitionLocations == null || partitionLocations.isEmpty()) { // No more partitions left to drop. Return with the collected path list to delete. 
return partPaths; @@ -3013,7 +3000,7 @@ private void alterTableStatsForTruncate(RawStore ms, String catName, String dbNa String validWriteIds, long writeId) throws Exception { if (partNames == null) { if (0 != table.getPartitionKeysSize()) { - for (Partition partition : ms.getPartitions(catName, dbName, tableName, Integer.MAX_VALUE)) { + for (Partition partition : ms.getPartitions(catName, dbName, tableName, Integer.MAX_VALUE, null)) { alterPartitionForTruncate(ms, catName, dbName, tableName, table, partition, validWriteIds, writeId); } @@ -3044,7 +3031,7 @@ private void alterTableStatsForTruncate(RawStore ms, String catName, String dbNa environmentContext, this, validWriteIds); } } else { - for (Partition partition : ms.getPartitionsByNames(catName, dbName, tableName, partNames)) { + for (Partition partition : ms.getPartitionsByNames(catName, dbName, tableName, partNames, null)) { alterPartitionForTruncate(ms, catName, dbName, tableName, table, partition, validWriteIds, writeId); } @@ -3061,14 +3048,14 @@ private void alterTableStatsForTruncate(RawStore ms, String catName, String dbNa List locations = new ArrayList<>(); if (partNames == null) { if (0 != table.getPartitionKeysSize()) { - for (Partition partition : ms.getPartitions(catName, dbName, tableName, Integer.MAX_VALUE)) { + for (Partition partition : ms.getPartitions(catName, dbName, tableName, Integer.MAX_VALUE, null)) { locations.add(new Path(partition.getSd().getLocation())); } } else { locations.add(new Path(table.getSd().getLocation())); } } else { - for (Partition partition : ms.getPartitionsByNames(catName, dbName, tableName, partNames)) { + for (Partition partition : ms.getPartitionsByNames(catName, dbName, tableName, partNames, null)) { locations.add(new Path(partition.getSd().getLocation())); } } @@ -3100,7 +3087,7 @@ private void truncateTableInternal(String dbName, String tableName, List String validWriteIds, long writeId) throws MetaException, NoSuchObjectException { try { String[] parsedDbName = parseDbName(dbName, conf); - Table tbl = get_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName); + Table tbl = get_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, null); boolean isAutopurge = (tbl.isSetParameters() && "true".equalsIgnoreCase(tbl.getParameters().get("auto.purge"))); Database db = get_database_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME]); @@ -3156,7 +3143,7 @@ private boolean isExternalTablePurge(Table table) { @Override @Deprecated - public Table get_table(final String dbname, final String name) throws MetaException, + public Table get_table(final String dbname, final String name, String validWriteIdList) throws MetaException, NoSuchObjectException { String[] parsedDbName = parseDbName(dbname, conf); return getTableInternal( @@ -3311,15 +3298,6 @@ private Table getTableInternal(String catName, String dbname, String name, return t; } - @Override - public Table get_table_core( - final String catName, - final String dbname, - final String name) - throws MetaException, NoSuchObjectException { - return get_table_core(catName, dbname, name, null); - } - @Override public Table get_table_core( final String catName, @@ -3556,7 +3534,7 @@ private Partition append_partition_common(RawStore ms, String catName, String db Partition old_part; try { old_part = ms.getPartition(part.getCatName(), part.getDbName(), part - .getTableName(), part.getValues()); + .getTableName(), part.getValues(), null); } catch (NoSuchObjectException e) { // this means there is no existing partition 
old_part = null; @@ -4175,7 +4153,7 @@ private boolean startAddPartition( MetaStoreServerUtils.validatePartitionNameCharacters(part.getValues(), partitionValidationPattern); boolean doesExist = ms.doesPartitionExist(part.getCatName(), - part.getDbName(), part.getTableName(), partitionKeys, part.getValues()); + part.getDbName(), part.getTableName(), partitionKeys, part.getValues(), null); if (doesExist && !ifNotExists) { throw new AlreadyExistsException("Partition already exists: " + part); } @@ -4428,6 +4406,7 @@ public Partition exchange_partition(Map partitionSpecs, boolean success = false; boolean pathCreated = false; RawStore ms = getMS(); + ms.openTransaction(); Table destinationTable = @@ -4462,7 +4441,7 @@ public Partition exchange_partition(Map partitionSpecs, } // Passed the unparsed DB name here, as get_partitions_ps expects to parse it List partitionsToExchange = get_partitions_ps(sourceDbName, sourceTableName, - partVals, (short)-1); + partVals, (short)-1, null); if (partitionsToExchange == null || partitionsToExchange.isEmpty()) { throw new MetaException("No partition is found with the values " + partitionSpecs + " for the table " + sourceTableName); @@ -4487,7 +4466,7 @@ public Partition exchange_partition(Map partitionSpecs, // Check if any of the partitions already exists in destTable. List destPartitionNames = ms.listPartitionNames(parsedDestDbName[CAT_NAME], - parsedDestDbName[DB_NAME], destTableName, (short) -1); + parsedDestDbName[DB_NAME], destTableName, (short) -1, null); if (destPartitionNames != null && !destPartitionNames.isEmpty()) { for (Partition partition : partitionsToExchange) { String partToExchangeName = @@ -4616,7 +4595,7 @@ private boolean drop_partition_common(RawStore ms, String catName, String db_nam try { ms.openTransaction(); - part = ms.getPartition(catName, db_name, tbl_name, part_vals); + part = ms.getPartition(catName, db_name, tbl_name, part_vals, null); tbl = get_table_core(catName, db_name, tbl_name, null); tableDataShouldBeDeleted = checkTableDataShouldBeDeleted(tbl, deleteData); firePreEvent(new PreDropPartitionEvent(tbl, part, deleteData, this)); @@ -4743,6 +4722,7 @@ public DropPartitionsResult drop_partitions_req( ? request.getEnvironmentContext() : null; boolean success = false; + ms.openTransaction(); Table tbl = null; List parts = null; @@ -4753,7 +4733,7 @@ public DropPartitionsResult drop_partitions_req( try { // We need Partition-s for firing events and for result; DN needs MPartition-s to drop. // Great... Maybe we could bypass fetching MPartitions by issuing direct SQL deletes. 
- tbl = get_table_core(catName, dbName, tblName); + tbl = get_table_core(catName, dbName, tblName, null); isExternal(tbl); mustPurge = isMustPurge(envContext, tbl); int minCount = 0; @@ -4766,7 +4746,7 @@ public DropPartitionsResult drop_partitions_req( ++minCount; // At least one partition per expression, if not ifExists List result = new ArrayList<>(); boolean hasUnknown = ms.getPartitionsByExpr( - catName, dbName, tblName, expr.getExpr(), null, (short)-1, result); + catName, dbName, tblName, expr.getExpr(), null, (short)-1, result, null); if (hasUnknown) { // Expr is built by DDLSA, it should only contain part cols and simple ops throw new MetaException("Unexpected unknown partitions to drop"); @@ -4787,7 +4767,7 @@ public DropPartitionsResult drop_partitions_req( } else if (spec.isSetNames()) { partNames = spec.getNames(); minCount = partNames.size(); - parts = ms.getPartitionsByNames(catName, dbName, tblName, partNames); + parts = ms.getPartitionsByNames(catName, dbName, tblName, partNames, null); } else { throw new MetaException("Partition spec is not set"); } @@ -4840,6 +4820,7 @@ public DropPartitionsResult drop_partitions_req( } success = ms.commitTransaction(); + DropPartitionsResult result = new DropPartitionsResult(); if (needResult) { result.setPartitions(parts); @@ -4931,7 +4912,7 @@ public boolean drop_partition_with_environment_context(final String db_name, @Override public Partition get_partition(final String db_name, final String tbl_name, - final List part_vals) throws MetaException, NoSuchObjectException { + final List part_vals, String validWriteIdList) throws MetaException, NoSuchObjectException { String[] parsedDbName = parseDbName(db_name, conf); startPartitionFunction("get_partition", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, part_vals); @@ -4940,8 +4921,8 @@ public Partition get_partition(final String db_name, final String tbl_name, Exception ex = null; try { authorizeTableForPartitionMetadata(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); - fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); - ret = getMS().getPartition(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, part_vals); + fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, validWriteIdList); + ret = getMS().getPartition(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, part_vals, validWriteIdList); ret = FilterUtils.filterPartitionIfEnabled(isServerFilterEnabled, filterHook, ret); } catch (Exception e) { ex = e; @@ -4956,12 +4937,12 @@ public Partition get_partition(final String db_name, final String tbl_name, * Fire a pre-event for read table operation, if there are any * pre-event listeners registered */ - private void fireReadTablePreEvent(String catName, String dbName, String tblName) + private void fireReadTablePreEvent(String catName, String dbName, String tblName, String validWriteIdList) throws MetaException, NoSuchObjectException { if(preListeners.size() > 0) { // do this only if there is a pre event listener registered (avoid unnecessary // metastore api call) - Table t = getMS().getTable(catName, dbName, tblName); + Table t = getMS().getTable(catName, dbName, tblName, validWriteIdList); if (t == null) { throw new NoSuchObjectException(TableName.getQualified(catName, dbName, tblName) + " table not found"); @@ -4973,19 +4954,19 @@ private void fireReadTablePreEvent(String catName, String dbName, String tblName @Override public Partition get_partition_with_auth(final String db_name, final String 
tbl_name, final List part_vals, - final String user_name, final List group_names) + final String user_name, final List group_names, String validWriteIdList) throws TException { String[] parsedDbName = parseDbName(db_name, conf); startPartitionFunction("get_partition_with_auth", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, part_vals); - fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); + fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, validWriteIdList); Partition ret = null; Exception ex = null; try { authorizeTableForPartitionMetadata(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); ret = getMS().getPartitionWithAuth(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], - tbl_name, part_vals, user_name, group_names); + tbl_name, part_vals, user_name, group_names, validWriteIdList); ret = FilterUtils.filterPartitionIfEnabled(isServerFilterEnabled, filterHook, ret); } catch (InvalidObjectException e) { ex = e; @@ -5001,20 +4982,20 @@ public Partition get_partition_with_auth(final String db_name, @Override public List get_partitions(final String db_name, final String tbl_name, - final short max_parts) throws NoSuchObjectException, MetaException { + final short max_parts, String validWriteIdList) throws NoSuchObjectException, MetaException { String[] parsedDbName = parseDbName(db_name, conf); startTableFunction("get_partitions", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); - fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); + fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, validWriteIdList); List ret = null; Exception ex = null; try { checkLimitNumberOfPartitionsByFilter(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], - tbl_name, NO_FILTER_STRING, max_parts); + tbl_name, NO_FILTER_STRING, max_parts, validWriteIdList); authorizeTableForPartitionMetadata(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); ret = getMS().getPartitions(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, - max_parts); + max_parts, validWriteIdList); ret = FilterUtils.filterPartitionsIfEnabled(isServerFilterEnabled, filterHook, ret); } catch (Exception e) { ex = e; @@ -5029,7 +5010,7 @@ public Partition get_partition_with_auth(final String db_name, @Override public List get_partitions_with_auth(final String dbName, final String tblName, final short maxParts, final String userName, - final List groupNames) throws TException { + final List groupNames, String validWriteIdList) throws TException { String[] parsedDbName = parseDbName(dbName, conf); startTableFunction("get_partitions_with_auth", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tblName); @@ -5037,12 +5018,12 @@ public Partition get_partition_with_auth(final String db_name, Exception ex = null; try { checkLimitNumberOfPartitionsByFilter(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], - tblName, NO_FILTER_STRING, maxParts); + tblName, NO_FILTER_STRING, maxParts, validWriteIdList); authorizeTableForPartitionMetadata(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tblName); ret = getMS().getPartitionsWithAuth(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tblName, - maxParts, userName, groupNames); + maxParts, userName, groupNames, validWriteIdList); ret = FilterUtils.filterPartitionsIfEnabled(isServerFilterEnabled, filterHook, ret); } catch (InvalidObjectException e) { ex = e; @@ -5059,19 +5040,19 @@ public Partition get_partition_with_auth(final String db_name, private void 
checkLimitNumberOfPartitionsByFilter(String catName, String dbName, String tblName, String filterString, - int maxParts) throws TException { + int maxParts, String validWriteIdList) throws TException { if (isPartitionLimitEnabled()) { checkLimitNumberOfPartitions(tblName, get_num_partitions_by_filter(prependCatalogToDbName( - catName, dbName, conf), tblName, filterString), maxParts); + catName, dbName, conf), tblName, filterString, validWriteIdList), maxParts); } } private void checkLimitNumberOfPartitionsByExpr(String catName, String dbName, String tblName, - byte[] filterExpr, int maxParts) + byte[] filterExpr, int maxParts, String validWriteIdList) throws TException { if (isPartitionLimitEnabled()) { checkLimitNumberOfPartitions(tblName, get_num_partitions_by_expr(catName, dbName, tblName, - filterExpr), maxParts); + filterExpr, validWriteIdList), maxParts); } } @@ -5093,7 +5074,7 @@ private void checkLimitNumberOfPartitions(String tblName, int numPartitions, int } @Override - public List get_partitions_pspec(final String db_name, final String tbl_name, final int max_parts) + public List get_partitions_pspec(final String db_name, final String tbl_name, final int max_parts, String validWriteIdList) throws NoSuchObjectException, MetaException { String[] parsedDbName = parseDbName(db_name, conf); @@ -5103,9 +5084,9 @@ private void checkLimitNumberOfPartitions(String tblName, int numPartitions, int List partitionSpecs = null; try { - Table table = get_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName); + Table table = get_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, validWriteIdList); // get_partitions will parse out the catalog and db names itself - List partitions = get_partitions(db_name, tableName, (short) max_parts); + List partitions = get_partitions(db_name, tableName, (short) max_parts, validWriteIdList); if (is_partition_spec_grouping_enabled(table)) { partitionSpecs = MetaStoreServerUtils @@ -5146,10 +5127,10 @@ public GetPartitionsResponse get_partitions_with_specs(GetPartitionsRequest requ GetPartitionsResponse response = null; Exception ex = null; try { - Table table = get_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName); + Table table = get_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, request.getValidWriteIdList()); List partitions = getMS() .getPartitionSpecsByFilterAndProjection(table, request.getProjectionSpec(), - request.getFilterSpec()); + request.getFilterSpec(), request.getValidWriteIdList()); List processorCapabilities = request.getProcessorCapabilities(); String processorId = request.getProcessorIdentifier(); if (processorCapabilities == null || processorCapabilities.size() == 0 || @@ -5182,16 +5163,16 @@ private static boolean is_partition_spec_grouping_enabled(Table table) { @Override public List get_partition_names(final String db_name, final String tbl_name, - final short max_parts) throws NoSuchObjectException, MetaException { + final short max_parts, String validWriteIdList) throws NoSuchObjectException, MetaException { String[] parsedDbName = parseDbName(db_name, conf); startTableFunction("get_partition_names", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); - fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); + fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, validWriteIdList); List ret = null; Exception ex = null; try { authorizeTableForPartitionMetadata(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], 
tbl_name); ret = getMS().listPartitionNames(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, - max_parts); + max_parts, validWriteIdList); ret = FilterUtils.filterPartitionNamesIfEnabled(isServerFilterEnabled, filterHook, parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, ret); } catch (MetaException e) { @@ -5222,7 +5203,7 @@ public PartitionValuesResponse get_partition_values(PartitionValuesRequest reque partCols.add(request.getPartitionKeys().get(0)); return getMS().listPartitionValues(catName, dbName, tblName, request.getPartitionKeys(), request.isApplyDistinct(), request.getFilter(), request.isAscending(), - request.getPartitionOrder(), request.getMaxParts()); + request.getPartitionOrder(), request.getMaxParts(), request.getValidWriteIdList()); } catch (NoSuchObjectException e) { LOG.error(String.format("Unable to get partition for %s.%s.%s", catName, dbName, tblName), e); throw new MetaException(e.getMessage()); @@ -5521,7 +5502,7 @@ private void alter_table_core(String catName, String dbname, String name, Table boolean success = false; Exception ex = null; try { - Table oldt = get_table_core(catName, dbname, name); + Table oldt = get_table_core(catName, dbname, name, null); firePreEvent(new PreAlterTableEvent(oldt, newTable, this)); alterHandler.alterTable(getMS(), wh, catName, dbname, name, newTable, envContext, this, validWriteIdList); @@ -5677,14 +5658,14 @@ private void alter_table_core(String catName, String dbname, String name, Table } @Override - public List get_fields(String db, String tableName) + public List get_fields(String db, String tableName, String validWriteIdList) throws MetaException, UnknownTableException, UnknownDBException { - return get_fields_with_environment_context(db, tableName, null); + return get_fields_with_environment_context(db, tableName, null, validWriteIdList); } @Override public List get_fields_with_environment_context(String db, String tableName, - final EnvironmentContext envContext) + final EnvironmentContext envContext, String validWriteIdList) throws MetaException, UnknownTableException, UnknownDBException { startFunction("get_fields_with_environment_context", ": db=" + db + "tbl=" + tableName); String[] names = tableName.split("\\."); @@ -5697,7 +5678,7 @@ private void alter_table_core(String catName, String dbname, String name, Table ClassLoader orgHiveLoader = null; try { try { - tbl = get_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], base_table_name); + tbl = get_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], base_table_name, validWriteIdList); firePreEvent(new PreReadTableEvent(tbl, this)); } catch (NoSuchObjectException e) { throw new UnknownTableException(e.getMessage()); @@ -5757,9 +5738,9 @@ private StorageSchemaReader getStorageSchemaReader() throws MetaException { * @throws UnknownDBException */ @Override - public List get_schema(String db, String tableName) + public List get_schema(String db, String tableName, String validWriteIdList) throws MetaException, UnknownTableException, UnknownDBException { - return get_schema_with_environment_context(db,tableName, null); + return get_schema_with_environment_context(db,tableName, null, validWriteIdList); } @@ -5780,7 +5761,7 @@ private StorageSchemaReader getStorageSchemaReader() throws MetaException { */ @Override public List get_schema_with_environment_context(String db, String tableName, - final EnvironmentContext envContext) + final EnvironmentContext envContext, String validWriteIdList) throws MetaException, UnknownTableException, 
UnknownDBException { startFunction("get_schema_with_environment_context", ": db=" + db + "tbl=" + tableName); boolean success = false; @@ -5792,12 +5773,12 @@ private StorageSchemaReader getStorageSchemaReader() throws MetaException { Table tbl; try { - tbl = get_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], base_table_name); + tbl = get_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], base_table_name, validWriteIdList); } catch (NoSuchObjectException e) { throw new UnknownTableException(e.getMessage()); } // Pass unparsed db name here - List fieldSchemas = get_fields_with_environment_context(db, base_table_name,envContext); + List fieldSchemas = get_fields_with_environment_context(db, base_table_name,envContext, validWriteIdList); if (tbl == null || fieldSchemas == null) { throw new UnknownTableException(tableName + " doesn't exist"); @@ -5908,15 +5889,15 @@ public String get_config_value(String name, String defaultValue) private Partition get_partition_by_name_core(final RawStore ms, final String catName, final String db_name, final String tbl_name, - final String part_name) throws TException { - fireReadTablePreEvent(catName, db_name, tbl_name); + final String part_name, String validWriteIdList) throws TException { + fireReadTablePreEvent(catName, db_name, tbl_name, validWriteIdList); List partVals; try { partVals = getPartValsFromName(ms, catName, db_name, tbl_name, part_name); } catch (InvalidObjectException e) { throw new NoSuchObjectException(e.getMessage()); } - Partition p = ms.getPartition(catName, db_name, tbl_name, partVals); + Partition p = ms.getPartition(catName, db_name, tbl_name, partVals, validWriteIdList); p = FilterUtils.filterPartitionIfEnabled(isServerFilterEnabled, filterHook, p); if (p == null) { @@ -5928,7 +5909,7 @@ private Partition get_partition_by_name_core(final RawStore ms, final String cat @Override public Partition get_partition_by_name(final String db_name, final String tbl_name, - final String part_name) throws TException { + final String part_name, String validWriteIdList) throws TException { String[] parsedDbName = parseDbName(db_name, conf); startFunction("get_partition_by_name", ": tbl=" + @@ -5938,7 +5919,7 @@ public Partition get_partition_by_name(final String db_name, final String tbl_na Exception ex = null; try { ret = get_partition_by_name_core(getMS(), parsedDbName[CAT_NAME], - parsedDbName[DB_NAME], tbl_name, part_name); + parsedDbName[DB_NAME], tbl_name, part_name, validWriteIdList); ret = FilterUtils.filterPartitionIfEnabled(isServerFilterEnabled, filterHook, ret); } catch (Exception e) { ex = e; @@ -6035,7 +6016,7 @@ public boolean drop_partition_by_name_with_environment_context(final String db_n @Override public List get_partitions_ps(final String db_name, final String tbl_name, final List part_vals, - final short max_parts) throws TException { + final short max_parts, String validWriteIdList) throws TException { String[] parsedDbName = parseDbName(db_name, conf); startPartitionFunction("get_partitions_ps", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, part_vals); @@ -6046,7 +6027,7 @@ public boolean drop_partition_by_name_with_environment_context(final String db_n authorizeTableForPartitionMetadata(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); // Don't send the parsedDbName, as this method will parse itself. 
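The read-path hunks above and below all make the same two-step change: the entry point gains a trailing String validWriteIdList, forwards it to fireReadTablePreEvent so pre-read listeners see the table at the caller's snapshot, and forwards it again to the RawStore call. A condensed sketch of that shape, with the metastore types reduced to hypothetical stand-ins:

    import java.util.List;

    interface StoreSketch {
      List<String> getPartitions(String cat, String db, String tbl, short max,
                                 String validWriteIdList);
    }

    class HandlerSketch {
      private final StoreSketch ms;
      HandlerSketch(StoreSketch ms) { this.ms = ms; }

      private void fireReadTablePreEvent(String cat, String db, String tbl,
                                         String validWriteIdList) {
        // The real method loads the table at this snapshot and notifies any
        // registered pre-read listeners; elided in this sketch.
      }

      List<String> getPartitionsAtSnapshot(String cat, String db, String tbl,
                                           short max, String validWriteIdList) {
        fireReadTablePreEvent(cat, db, tbl, validWriteIdList);        // step 1
        return ms.getPartitions(cat, db, tbl, max, validWriteIdList); // step 2
      }
    }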
ret = get_partitions_ps_with_auth(db_name, tbl_name, part_vals, - max_parts, null, null); + max_parts, null, null, validWriteIdList); ret = FilterUtils.filterPartitionsIfEnabled(isServerFilterEnabled, filterHook, ret); } catch (Exception e) { ex = e; @@ -6062,17 +6043,17 @@ public boolean drop_partition_by_name_with_environment_context(final String db_n public List get_partitions_ps_with_auth(final String db_name, final String tbl_name, final List part_vals, final short max_parts, final String userName, - final List groupNames) throws TException { + final List groupNames, String validWriteIdList) throws TException { String[] parsedDbName = parseDbName(db_name, conf); startPartitionFunction("get_partitions_ps_with_auth", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, part_vals); - fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); + fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, validWriteIdList); List ret = null; Exception ex = null; try { authorizeTableForPartitionMetadata(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); ret = getMS().listPartitionsPsWithAuth(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], - tbl_name, part_vals, max_parts, userName, groupNames); + tbl_name, part_vals, max_parts, userName, groupNames, validWriteIdList); ret = FilterUtils.filterPartitionsIfEnabled(isServerFilterEnabled, filterHook, ret); } catch (InvalidObjectException e) { ex = e; @@ -6088,18 +6069,18 @@ public boolean drop_partition_by_name_with_environment_context(final String db_n @Override public List get_partition_names_ps(final String db_name, - final String tbl_name, final List part_vals, final short max_parts) + final String tbl_name, final List part_vals, final short max_parts, String validWriteIdList) throws TException { String[] parsedDbName = parseDbName(db_name, conf); startPartitionFunction("get_partitions_names_ps", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, part_vals); - fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); + fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, validWriteIdList); List ret = null; Exception ex = null; try { authorizeTableForPartitionMetadata(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); ret = getMS().listPartitionNamesPs(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, - part_vals, max_parts); + part_vals, max_parts, validWriteIdList); ret = FilterUtils.filterPartitionNamesIfEnabled(isServerFilterEnabled, filterHook, parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, ret); } catch (Exception e) { @@ -6153,7 +6134,7 @@ private String lowerCaseConvertPartName(String partName) throws MetaException { @Deprecated @Override public ColumnStatistics get_table_column_statistics(String dbName, String tableName, - String colName) throws TException { + String colName, String validWriteIdList) throws TException { String[] parsedDbName = parseDbName(dbName, conf); parsedDbName[CAT_NAME] = parsedDbName[CAT_NAME].toLowerCase(); parsedDbName[DB_NAME] = parsedDbName[DB_NAME].toLowerCase(); @@ -6210,7 +6191,7 @@ public TableStatsResult get_table_statistics_req(TableStatsRequest request) thro @Deprecated @Override public ColumnStatistics get_partition_column_statistics(String dbName, String tableName, - String partName, String colName) throws TException { + String partName, String colName, String validWriteIdList) throws TException { // Note: this method appears to be unused within Hive. 
// It doesn't take txn stats into account. dbName = dbName.toLowerCase(); @@ -6226,7 +6207,7 @@ public ColumnStatistics get_partition_column_statistics(String dbName, String ta try { List list = getMS().getPartitionColumnStatistics( parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, - Lists.newArrayList(convertedPartName), Lists.newArrayList(colName)); + Lists.newArrayList(convertedPartName), Lists.newArrayList(colName), validWriteIdList); if (list.isEmpty()) { return null; } @@ -6314,6 +6295,7 @@ private boolean updateTableColumnStatsInternal(ColumnStatistics colStats, colStats.getStatsDesc().getTableName())); Map parameters = null; + getMS().openTransaction(); boolean committed = false; try { @@ -6326,13 +6308,13 @@ private boolean updateTableColumnStatsInternal(ColumnStatistics colStats, MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventType.UPDATE_TABLE_COLUMN_STAT, new UpdateTableColumnStatEvent(colStats, tableObj, parameters, - writeId, this)); + writeId, validWriteIds, this)); } if (!listeners.isEmpty()) { MetaStoreListenerNotifier.notifyEvent(listeners, EventType.UPDATE_TABLE_COLUMN_STAT, new UpdateTableColumnStatEvent(colStats, tableObj, parameters, - writeId,this)); + writeId, validWriteIds, this)); } } committed = getMS().commitTransaction(); @@ -6380,6 +6362,7 @@ private boolean updatePartitonColStatsInternal(Table tbl, ColumnStatistics colSt Map parameters; List partVals; boolean committed = false; + getMS().openTransaction(); try { if (tbl == null) { @@ -6392,13 +6375,13 @@ private boolean updatePartitonColStatsInternal(Table tbl, ColumnStatistics colSt MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventType.UPDATE_PARTITION_COLUMN_STAT, new UpdatePartitionColumnStatEvent(colStats, partVals, parameters, tbl, - writeId, this)); + writeId, validWriteIds, this)); } if (!listeners.isEmpty()) { MetaStoreListenerNotifier.notifyEvent(listeners, EventType.UPDATE_PARTITION_COLUMN_STAT, new UpdatePartitionColumnStatEvent(colStats, partVals, parameters, tbl, - writeId, this)); + writeId, validWriteIds, this)); } } committed = getMS().commitTransaction(); @@ -6454,7 +6437,7 @@ public boolean delete_partition_column_statistics(String dbName, String tableNam getMS().openTransaction(); try { List partVals = getPartValsFromName(getMS(), parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, convertedPartName); - Table table = getMS().getTable(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName); + Table table = getMS().getTable(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, null); // This API looks unused; if it were used we'd need to update stats state and write ID. // We cannot just randomly nuke some txn stats. if (TxnUtils.isTransactionalTable(table)) { @@ -6504,9 +6487,10 @@ public boolean delete_table_column_statistics(String dbName, String tableName, S boolean ret = false, committed = false; + getMS().openTransaction(); try { - Table table = getMS().getTable(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName); + Table table = getMS().getTable(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, null); // This API looks unused; if it were used we'd need to update stats state and write ID. // We cannot just randomly nuke some txn stats. 
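In the two column-stats hunks above, updateTableColumnStatsInternal and updatePartitonColStatsInternal now open the store transaction themselves and hand the update event both the writer's writeId and its validWriteIds, so a transactional listener such as DbNotificationListener records the snapshot in the notification log within the same transaction as the stats write. The ordering being enforced, sketched with the store reduced to a small interface:

    interface TxnStoreSketch {
      void openTransaction();
      boolean commitTransaction();
      void rollbackTransaction();
    }

    class StatsUpdateSketch {
      // Returns true only if the stats write and the transactional listener
      // notifications committed together.
      static boolean update(TxnStoreSketch ms, Runnable writeStats,
                            Runnable notifyTransactionalListeners) {
        ms.openTransaction();
        boolean committed = false;
        try {
          writeStats.run();
          notifyTransactionalListeners.run(); // rides in the same store txn
          committed = ms.commitTransaction();
        } finally {
          if (!committed) {
            ms.rollbackTransaction();
          }
        }
        return committed;
      }
    }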
if (TxnUtils.isTransactionalTable(table)) { @@ -6540,22 +6524,22 @@ public boolean delete_table_column_statistics(String dbName, String tableName, S @Override public List get_partitions_by_filter(final String dbName, final String tblName, - final String filter, final short maxParts) + final String filter, final short maxParts, String validWriteIdList) throws TException { String[] parsedDbName = parseDbName(dbName, conf); startTableFunction("get_partitions_by_filter", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tblName); - fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tblName); + fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tblName, validWriteIdList); List ret = null; Exception ex = null; try { checkLimitNumberOfPartitionsByFilter(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], - tblName, filter, maxParts); + tblName, filter, maxParts, validWriteIdList); authorizeTableForPartitionMetadata(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tblName); ret = getMS().getPartitionsByFilter(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tblName, - filter, maxParts); + filter, maxParts, validWriteIdList); ret = FilterUtils.filterPartitionsIfEnabled(isServerFilterEnabled, filterHook, ret); } catch (Exception e) { ex = e; @@ -6568,7 +6552,7 @@ public boolean delete_table_column_statistics(String dbName, String tableName, S @Override public List get_part_specs_by_filter(final String dbName, final String tblName, - final String filter, final int maxParts) + final String filter, final int maxParts, String validWriteIdList) throws TException { String[] parsedDbName = parseDbName(dbName, conf); @@ -6576,9 +6560,9 @@ public boolean delete_table_column_statistics(String dbName, String tableName, S List partitionSpecs = null; try { - Table table = get_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tblName); + Table table = get_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tblName, validWriteIdList); // Don't pass the parsed db name, as get_partitions_by_filter will parse it itself - List partitions = get_partitions_by_filter(dbName, tblName, filter, (short) maxParts); + List partitions = get_partitions_by_filter(dbName, tblName, filter, (short) maxParts, validWriteIdList); if (is_partition_spec_grouping_enabled(table)) { partitionSpecs = MetaStoreServerUtils @@ -6607,14 +6591,14 @@ public PartitionsByExprResult get_partitions_by_expr( String dbName = req.getDbName(), tblName = req.getTblName(); String catName = req.isSetCatName() ? 
req.getCatName() : getDefaultCatalog(conf); startTableFunction("get_partitions_by_expr", catName, dbName, tblName); - fireReadTablePreEvent(catName, dbName, tblName); + fireReadTablePreEvent(catName, dbName, tblName, req.getValidWriteIdList()); PartitionsByExprResult ret = null; Exception ex = null; try { - checkLimitNumberOfPartitionsByExpr(catName, dbName, tblName, req.getExpr(), UNLIMITED_MAX_PARTITIONS); + checkLimitNumberOfPartitionsByExpr(catName, dbName, tblName, req.getExpr(), UNLIMITED_MAX_PARTITIONS, req.getValidWriteIdList()); List partitions = new LinkedList<>(); boolean hasUnknownPartitions = getMS().getPartitionsByExpr(catName, dbName, tblName, - req.getExpr(), req.getDefaultPartitionName(), req.getMaxParts(), partitions); + req.getExpr(), req.getDefaultPartitionName(), req.getMaxParts(), partitions, req.getValidWriteIdList()); ret = new PartitionsByExprResult(partitions, hasUnknownPartitions); } catch (Exception e) { ex = e; @@ -6640,7 +6624,7 @@ private void rethrowException(Exception e) throws TException { @Override public int get_num_partitions_by_filter(final String dbName, - final String tblName, final String filter) + final String tblName, final String filter, String validWriteIdList) throws TException { String[] parsedDbName = parseDbName(dbName, conf); if (parsedDbName[DB_NAME] == null || tblName == null) { @@ -6653,7 +6637,7 @@ public int get_num_partitions_by_filter(final String dbName, Exception ex = null; try { ret = getMS().getNumPartitionsByFilter(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], - tblName, filter); + tblName, filter, validWriteIdList); } catch (Exception e) { ex = e; rethrowException(e); @@ -6664,12 +6648,12 @@ public int get_num_partitions_by_filter(final String dbName, } private int get_num_partitions_by_expr(final String catName, final String dbName, - final String tblName, final byte[] expr) + final String tblName, final byte[] expr, String validWriteIdList) throws TException { int ret = -1; Exception ex = null; try { - ret = getMS().getNumPartitionsByExpr(catName, dbName, tblName, expr); + ret = getMS().getNumPartitionsByExpr(catName, dbName, tblName, expr, validWriteIdList); } catch (Exception e) { ex = e; rethrowException(e); @@ -6681,8 +6665,8 @@ private int get_num_partitions_by_expr(final String catName, final String dbName @Override public List get_partitions_by_names(final String dbName, final String tblName, - final List partNames) throws TException { - return get_partitions_by_names(dbName, tblName, partNames, false); + final List partNames, String validWriteIdList) throws TException { + return get_partitions_by_names(dbName, tblName, partNames, false, validWriteIdList); } @Override @@ -6691,18 +6675,18 @@ public GetPartitionsByNamesResult get_partitions_by_names_req(GetPartitionsByNam List partitions = get_partitions_by_names(gpbnr.getDb_name(), gpbnr.getTbl_name(), gpbnr.getNames(), gpbnr.isSetGet_col_stats() && gpbnr.isGet_col_stats(), gpbnr.getProcessorCapabilities(), - gpbnr.getProcessorIdentifier()); + gpbnr.getProcessorIdentifier(), gpbnr.getValidWriteIdList()); return new GetPartitionsByNamesResult(partitions); } public List get_partitions_by_names(final String dbName, final String tblName, - final List partNames, boolean getColStats) throws TException { - return get_partitions_by_names(dbName, tblName, partNames, getColStats, null, null); + final List partNames, boolean getColStats, String validWriteIdList) throws TException { + return get_partitions_by_names(dbName, tblName, partNames, getColStats, null, null, 
validWriteIdList); } public List get_partitions_by_names(final String dbName, final String tblName, final List partNames, boolean getColStats, List processorCapabilities, - String processorId) throws TException { + String processorId, String validWriteIdList) throws TException { String[] dbNameParts = parseDbName(dbName, conf); String parsedCatName = dbNameParts[CAT_NAME]; @@ -6717,9 +6701,9 @@ public GetPartitionsByNamesResult get_partitions_by_names_req(GetPartitionsByNam getMS().openTransaction(); authorizeTableForPartitionMetadata(parsedCatName, parsedDbName, tblName); - fireReadTablePreEvent(parsedCatName, parsedDbName, tblName); + fireReadTablePreEvent(parsedCatName, parsedDbName, tblName, validWriteIdList); - ret = getMS().getPartitionsByNames(parsedCatName, parsedDbName, tblName, partNames); + ret = getMS().getPartitionsByNames(parsedCatName, parsedDbName, tblName, partNames, validWriteIdList); ret = FilterUtils.filterPartitionsIfEnabled(isServerFilterEnabled, filterHook, ret); table = getTable(parsedCatName, parsedDbName, tblName); @@ -6732,7 +6716,7 @@ public GetPartitionsByNamesResult get_partitions_by_names_req(GetPartitionsByNam List partColStatsList = getMS().getPartitionColumnStatistics(parsedCatName, parsedDbName, tblName, Collections.singletonList(partName), - StatsSetupConst.getColumnsHavingStats(part.getParameters())); + StatsSetupConst.getColumnsHavingStats(part.getParameters()), validWriteIdList); if (partColStatsList != null && !partColStatsList.isEmpty()) { ColumnStatistics partColStats = partColStatsList.get(0); if (partColStats != null) { @@ -6797,7 +6781,7 @@ private String getPartName(HiveObjectRef hiveObject) throws MetaException { String catName = hiveObject.isSetCatName() ? hiveObject.getCatName() : getDefaultCatalog(conf); Table table = get_table_core(catName, hiveObject.getDbName(), hiveObject - .getObjectName()); + .getObjectName(), null); partName = Warehouse .makePartName(table.getPartitionKeys(), partValue); } catch (NoSuchObjectException e) { @@ -7235,7 +7219,7 @@ private PrincipalPrivilegeSet get_user_privilege_set(final String userName, if (dbName == null) { return getMS().listPrincipalPartitionColumnGrantsAll(principalName, principalType); } - Table tbl = get_table_core(catName, dbName, tableName); + Table tbl = get_table_core(catName, dbName, tableName, null); String partName = Warehouse.makePartName(tbl.getPartitionKeys(), partValues); if (principalName == null) { return getMS().listPartitionColumnGrantsAll(catName, dbName, tableName, partName, columnName); @@ -7280,7 +7264,7 @@ private PrincipalPrivilegeSet get_user_privilege_set(final String userName, if (dbName == null) { return getMS().listPrincipalPartitionGrantsAll(principalName, principalType); } - Table tbl = get_table_core(catName, dbName, tableName); + Table tbl = get_table_core(catName, dbName, tableName, null); String partName = Warehouse.makePartName(tbl.getPartitionKeys(), partValues); if (principalName == null) { return getMS().listPartitionGrantsAll(catName, dbName, tableName, partName); @@ -7551,6 +7535,7 @@ public void markPartitionForEvent(final String db_name, final String tbl_name, boolean success = false; try { String[] parsedDbName = parseDbName(db_name, conf); + ms.openTransaction(); startPartitionFunction("markPartitionForEvent", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, partName); @@ -7570,6 +7555,7 @@ public void markPartitionForEvent(final String db_name, final String tbl_name, } success = ms.commitTransaction(); + for (MetaStoreEventListener listener : 
listeners) { listener.onLoadPartitionDone(new LoadPartitionDoneEvent(true, tbl, partName, this)); } @@ -8086,7 +8072,7 @@ private Table getTblObject(String db, String table) throws MetaException, NoSuch private Partition getPartitionObj(String db, String table, List partitionVals, Table tableObj) throws MetaException, NoSuchObjectException { if (tableObj.isSetPartitionKeys() && !tableObj.getPartitionKeys().isEmpty()) { - return get_partition(db, table, partitionVals); + return get_partition(db, table, partitionVals, null); } return null; } @@ -8291,6 +8277,7 @@ private boolean updatePartColumnStatsWithMerge(String catName, String dbName, St List colNames, Map newStatsMap, SetPartitionsStatsRequest request) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException { RawStore ms = getMS(); + ms.openTransaction(); boolean isCommitted = false, result = false; try { @@ -8309,7 +8296,7 @@ private boolean updatePartColumnStatsWithMerge(String catName, String dbName, St } // another single call to get all the partition objects - List partitions = ms.getPartitionsByNames(catName, dbName, tableName, partitionNames); + List partitions = ms.getPartitionsByNames(catName, dbName, tableName, partitionNames, null); Map mapToPart = new HashMap<>(); for (int index = 0; index < partitionNames.size(); index++) { mapToPart.put(partitionNames.get(index), partitions.get(index)); @@ -8370,6 +8357,7 @@ private boolean updateTableColumnStatsWithMerge(String catName, String dbName, S NoSuchObjectException, InvalidObjectException, InvalidInputException { ColumnStatistics firstColStats = request.getColStats().get(0); RawStore ms = getMS(); + ms.openTransaction(); boolean isCommitted = false, result = false; try { @@ -8635,7 +8623,7 @@ public CacheFileMetadataResult cache_file_metadata( ms.openTransaction(); boolean success = false; try { - Table tbl = ms.getTable(DEFAULT_CATALOG_NAME, dbName, tblName); + Table tbl = ms.getTable(DEFAULT_CATALOG_NAME, dbName, tblName, null); if (tbl == null) { throw new NoSuchObjectException(dbName + "." 
+ tblName + " not found"); } @@ -8660,7 +8648,7 @@ public CacheFileMetadataResult cache_file_metadata( if (partName != null) { partNames = Lists.newArrayList(partName); } else if (isAllPart) { - partNames = ms.listPartitionNames(DEFAULT_CATALOG_NAME, dbName, tblName, (short)-1); + partNames = ms.listPartitionNames(DEFAULT_CATALOG_NAME, dbName, tblName, (short)-1, null); } else { throw new MetaException("Table is partitioned"); } @@ -8673,7 +8661,7 @@ public CacheFileMetadataResult cache_file_metadata( int currentBatchSize = Math.min(batchSize, partNames.size() - index); List nameBatch = partNames.subList(index, index + currentBatchSize); index += currentBatchSize; - List parts = ms.getPartitionsByNames(DEFAULT_CATALOG_NAME, dbName, tblName, nameBatch); + List parts = ms.getPartitionsByNames(DEFAULT_CATALOG_NAME, dbName, tblName, nameBatch, null); for (Partition part : parts) { if (!part.isSetSd() || !part.getSd().isSetLocation()) { throw new MetaException("Partition does not have storage location;" + diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java index cee357216f..1707883ab7 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java @@ -28,7 +28,6 @@ import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore; import org.apache.hadoop.hive.metastore.txn.TxnStore; -import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; /** * An interface wrapper for HMSHandler. This interface contains methods that need to be @@ -83,16 +82,12 @@ Database get_database_core(final String catName, final String name) * @param catName catalog name * @param dbname database name * @param name table name + * @param validWriteIdList valid writeId to read * @return Table object * @throws NoSuchObjectException If the table does not exist. * @throws MetaException If another error occurs. 
*/ - Table get_table_core(final String catName, final String dbname, final String name) - throws MetaException, NoSuchObjectException; - - Table get_table_core(final String catName, final String dbname, - final String name, - final String writeIdList) + Table get_table_core(final String catName, final String dbname, final String name, final String writeIdList) throws MetaException, NoSuchObjectException; /** diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java index 12bdb6420e..4fcf54135c 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java @@ -465,6 +465,7 @@ public boolean openTransaction() { @Override @SuppressWarnings("nls") public boolean commitTransaction() { + pm.flush(); if (TXN_STATUS.ROLLBACK == transactionStatus) { debugLog("Commit transaction: rollback"); return false; @@ -1236,13 +1237,6 @@ private static String getFullyQualifiedTableName(String dbName, String tblName) + "\"" + tblName + "\""; } - @Override - public Table - getTable(String catName, String dbName, String tableName) - throws MetaException { - return getTable(catName, dbName, tableName, null); - } - @Override public Table getTable(String catName, String dbName, String tableName, String writeIdList) throws MetaException { @@ -1807,6 +1801,7 @@ private Table convertToTable(MTable mtbl) throws MetaException { t.setRewriteEnabled(mtbl.isRewriteEnabled()); t.setCatName(mtbl.getDatabase().getCatalogName()); t.setWriteId(mtbl.getWriteId()); + t.setTemporary(false); return t; } @@ -2175,7 +2170,7 @@ private boolean isValidPartition( MetaStoreServerUtils.validatePartitionNameCharacters(part.getValues(), partitionValidationPattern); boolean doesExist = doesPartitionExist(part.getCatName(), - part.getDbName(), part.getTableName(), partitionKeys, part.getValues()); + part.getDbName(), part.getTableName(), partitionKeys, part.getValues(), null); if (doesExist && !ifNotExists) { throw new MetaException("Partition already exists: " + part); } @@ -2298,12 +2293,6 @@ public boolean addPartition(Partition part) throws InvalidObjectException, return success; } - @Override - public Partition getPartition(String catName, String dbName, String tableName, - List part_vals) throws NoSuchObjectException, MetaException { - return getPartition(catName, dbName, tableName, part_vals, null); - } - @Override public Partition getPartition(String catName, String dbName, String tableName, List part_vals, @@ -2646,13 +2635,13 @@ private boolean dropPartitionCommon(MPartition part) throws NoSuchObjectExceptio @Override public List getPartitions(String catName, String dbName, String tableName, - int maxParts) throws MetaException, NoSuchObjectException { + int maxParts, String validWriteIdList) throws MetaException, NoSuchObjectException { return getPartitionsInternal(catName, dbName, tableName, maxParts, true, true); } @Override public Map getPartitionLocations(String catName, String dbName, String tblName, - String baseLocationToNotShow, int max) { + String baseLocationToNotShow, int max, String validWriteIdList) { catName = normalizeIdentifier(catName); dbName = normalizeIdentifier(dbName); tblName = normalizeIdentifier(tblName); @@ -2719,7 +2708,7 @@ private boolean dropPartitionCommon(MPartition part) throws 
NoSuchObjectExceptio @Override public List getPartitionsWithAuth(String catName, String dbName, String tblName, - short max, String userName, List groupNames) + short max, String userName, List groupNames, String validWriteIdList) throws MetaException, InvalidObjectException { boolean success = false; QueryWrapper queryWrapper = new QueryWrapper(); @@ -2752,7 +2741,7 @@ private boolean dropPartitionCommon(MPartition part) throws NoSuchObjectExceptio @Override public Partition getPartitionWithAuth(String catName, String dbName, String tblName, - List partVals, String user_name, List group_names) + List partVals, String user_name, List group_names, String validWriteIdList) throws NoSuchObjectException, MetaException, InvalidObjectException { boolean success = false; try { @@ -2815,7 +2804,7 @@ public Partition getPartitionWithAuth(String catName, String dbName, String tblN // TODO:pc implement max @Override public List listPartitionNames(String catName, String dbName, String tableName, - short max) throws MetaException { + short max, String validWriteIdList) throws MetaException { List pns = null; boolean success = false; try { @@ -2875,7 +2864,7 @@ public PartitionValuesResponse listPartitionValues(String catName, String dbName String tableName, List cols, boolean applyDistinct, String filter, boolean ascending, List order, - long maxParts) throws MetaException { + long maxParts, String validWriteIdList) throws MetaException { catName = normalizeIdentifier(catName); dbName = dbName.toLowerCase().trim(); @@ -2919,7 +2908,7 @@ private PartitionValuesResponse extractPartitionNamesByFilter( } if (partitionNames == null) { - partitions = getPartitionsByFilter(catName, dbName, tableName, filter, (short) maxParts); + partitions = getPartitionsByFilter(catName, dbName, tableName, filter, (short) maxParts, null); } if (partitions != null) { @@ -3149,7 +3138,7 @@ private Collection getPartitionPsQueryResults(String catName, String dbName, Str @Override public List listPartitionsPsWithAuth(String catName, String db_name, String tbl_name, - List part_vals, short max_parts, String userName, List groupNames) + List part_vals, short max_parts, String userName, List groupNames, String validWriteIdList) throws MetaException, InvalidObjectException, NoSuchObjectException { List partitions = new ArrayList<>(); boolean success = false; @@ -3183,7 +3172,7 @@ private Collection getPartitionPsQueryResults(String catName, String dbName, Str @Override public List listPartitionNamesPs(String catName, String dbName, String tableName, - List part_vals, short max_parts) throws MetaException, NoSuchObjectException { + List part_vals, short max_parts, String validWriteIdList) throws MetaException, NoSuchObjectException { List partitionNames = new ArrayList<>(); boolean success = false; QueryWrapper queryWrapper = new QueryWrapper(); @@ -3286,7 +3275,7 @@ private Collection getPartitionPsQueryResults(String catName, String dbName, Str @Override public List getPartitionsByNames(String catName, String dbName, String tblName, - List partNames) throws MetaException, NoSuchObjectException { + List partNames, String validWriteIdList) throws MetaException, NoSuchObjectException { return getPartitionsByNamesInternal(catName, dbName, tblName, partNames, true, true); } @@ -3310,7 +3299,7 @@ private Collection getPartitionPsQueryResults(String catName, String dbName, Str @Override public boolean getPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr, - String defaultPartitionName, short maxParts, List result) 
throws TException { + String defaultPartitionName, short maxParts, List result, String validWriteIdList) throws TException { return getPartitionsByExprInternal( catName, dbName, tblName, expr, defaultPartitionName, maxParts, result, true, true); } @@ -3562,7 +3551,7 @@ private String getJDOFilterStrForPartitionVals(Table table, List vals, @Override public List getPartitionsByFilter(String catName, String dbName, String tblName, - String filter, short maxParts) throws MetaException, NoSuchObjectException { + String filter, short maxParts, String validWriteIdList) throws MetaException, NoSuchObjectException { return getPartitionsByFilterInternal(catName, dbName, tblName, filter, maxParts, true, true); } @@ -3820,7 +3809,7 @@ protected String describeResult() { @Override public int getNumPartitionsByFilter(String catName, String dbName, String tblName, - String filter) throws MetaException, NoSuchObjectException { + String filter, String validWriteIdList) throws MetaException, NoSuchObjectException { final ExpressionTree exprTree = org.apache.commons.lang.StringUtils.isNotEmpty(filter) ? PartFilterExprUtil.getFilterParser(filter).tree : ExpressionTree.EMPTY_TREE; @@ -3851,7 +3840,7 @@ protected Integer getJdoResult( @Override public int getNumPartitionsByExpr(String catName, String dbName, String tblName, - byte[] expr) throws MetaException, NoSuchObjectException { + byte[] expr, String validWriteIdList) throws MetaException, NoSuchObjectException { final ExpressionTree exprTree = PartFilterExprUtil.makeExpressionTree(expressionProxy, expr, null); final byte[] tempExpr = expr; // Need to be final to pass it to an inner class @@ -3928,7 +3917,7 @@ protected boolean canUseDirectSql(GetHelper> ctx) throws MetaExc @Override public List getPartitionSpecsByFilterAndProjection(final Table table, GetPartitionsProjectionSpec partitionsProjectSpec, - final GetPartitionsFilterSpec filterSpec) throws MetaException, NoSuchObjectException { + final GetPartitionsFilterSpec filterSpec, String validWriteIdList) throws MetaException, NoSuchObjectException { List fieldList = null; String inputIncludePattern = null; String inputExcludePattern = null; @@ -8797,17 +8786,6 @@ public void validateTableCols(Table table, List colNames) throws MetaExc } } - @Override - public ColumnStatistics getTableColumnStatistics( - String catName, - String dbName, - String tableName, - List colNames) throws MetaException, NoSuchObjectException { - // Note: this will get stats without verifying ACID. - return getTableColumnStatisticsInternal( - catName, dbName, tableName, colNames, true, true); - } - @Override public ColumnStatistics getTableColumnStatistics( String catName, @@ -8871,14 +8849,6 @@ protected ColumnStatistics getJdoResult( }.run(true); } - @Override - public List getPartitionColumnStatistics(String catName, String dbName, String tableName, - List partNames, List colNames) throws MetaException, NoSuchObjectException { - // Note: this will get stats without verifying ACID. 
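The deletions in this hunk remove ObjectStore's column-statistics overloads that were flagged as reading stats without verifying ACID state. Callers are left with only the writeIdList-bearing entry point, so every stats read now names its snapshot, even when that snapshot is null. Its shape, as a sketch with generics restored:

    ColumnStatistics getTableColumnStatistics(String catName, String dbName,
        String tableName, List<String> colNames, String writeIdList)
        throws MetaException, NoSuchObjectException;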
- return getPartitionColumnStatisticsInternal( - catName, dbName, tableName, partNames, colNames, true, true); - } - @Override public List getPartitionColumnStatistics( String catName, String dbName, String tableName, @@ -8976,7 +8946,7 @@ public AggrStats get_aggr_stats_for(String catName, String dbName, String tblNam return null; } - Table table = getTable(catName, dbName, tblName); + Table table = getTable(catName, dbName, tblName, writeIdList); boolean isTxn = TxnUtils.isTransactionalTable(table.getParameters()); if (isTxn && !areTxnStatsSupported) { return null; @@ -8987,7 +8957,7 @@ public AggrStats get_aggr_stats_for(String catName, String dbName, String tblNam GetPartitionsProjectionSpec ps = new GetPartitionsProjectionSpec(); ps.setIncludeParamKeyPattern(StatsSetupConst.COLUMN_STATS_ACCURATE + '%'); ps.setFieldList(Lists.newArrayList("writeId", "parameters", "values")); - List parts = getPartitionSpecsByFilterAndProjection(table, ps, fs); + List parts = getPartitionSpecsByFilterAndProjection(table, ps, fs, writeIdList); // Loop through the given "partNames" list // checking isolation-level-compliance of each partition column stats. @@ -9001,13 +8971,7 @@ public AggrStats get_aggr_stats_for(String catName, String dbName, String tblNam } } } - return get_aggr_stats_for(catName, dbName, tblName, partNames, colNames); - } - @Override - public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, - final List partNames, final List colNames) - throws MetaException, NoSuchObjectException { final boolean useDensityFunctionForNDVEstimation = MetastoreConf.getBoolVar(getConf(), ConfVars.STATS_NDV_DENSITY_FUNCTION); final double ndvTuner = MetastoreConf.getDoubleVar(getConf(), ConfVars.STATS_NDV_TUNER); @@ -9662,7 +9626,7 @@ public void setMetaStoreSchemaVersion(String schemaVersion, String comment) thro @Override public boolean doesPartitionExist(String catName, String dbName, String tableName, - List partKeys, List partVals) + List partKeys, List partVals, String validWriteIdList) throws MetaException { String name = Warehouse.makePartName(partKeys, partVals); return this.getMPartition(catName, dbName, tableName, name) != null; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java index c5e1a10869..22cae21ef5 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java @@ -198,17 +198,6 @@ void createTable(Table tbl) throws InvalidObjectException, boolean dropTable(String catalogName, String dbName, String tableName) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException; - /** - * Get a table object. - * @param catalogName catalog the table is in. - * @param dbName database the table is in. - * @param tableName table name. - * @return table object, or null if no such table exists (wow it would be nice if we either - * consistently returned null or consistently threw NoSuchObjectException). - * @throws MetaException something went wrong in the RDBMS - */ - Table getTable(String catalogName, String dbName, String tableName) throws MetaException; - /** * Get a table object. * @param catalogName catalog the table is in. 
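Two details worth pulling out of the get_aggr_stats_for hunk above. First, the table itself is now fetched at the caller's writeIdList. Second, the compliance pass over partition stats avoids materializing whole Partition objects: it projects only the three fields the check needs. The projection setup as it appears in the patch, with generics restored:

    GetPartitionsProjectionSpec ps = new GetPartitionsProjectionSpec();
    ps.setIncludeParamKeyPattern(StatsSetupConst.COLUMN_STATS_ACCURATE + '%');
    ps.setFieldList(Lists.newArrayList("writeId", "parameters", "values"));
    List<Partition> parts = getPartitionSpecsByFilterAndProjection(table, ps, fs, writeIdList);

Each projected partition's writeId and COLUMN_STATS_ACCURATE parameter are then checked against the caller's snapshot before its stats are allowed into the aggregation.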
@@ -262,18 +251,6 @@ boolean addPartitions(String catName, String dbName, String tblName, PartitionSpecProxy partitionSpec, boolean ifNotExists) throws InvalidObjectException, MetaException; - /** - * Get a partition. - * @param catName catalog name. - * @param dbName database name. - * @param tableName table name. - * @param part_vals partition values for this table. - * @return the partition. - * @throws MetaException error reading from RDBMS. - * @throws NoSuchObjectException no partition matching this specification exists. - */ - Partition getPartition(String catName, String dbName, String tableName, - List part_vals) throws MetaException, NoSuchObjectException; /** * Get a partition. * @param catName catalog name. @@ -297,12 +274,13 @@ Partition getPartition(String catName, String dbName, String tableName, * @param tableName table name. * @param partKeys list of partition keys used to generate the partition name. * @param part_vals list of partition values. + * @param validWriteIdList valid writeId to read * @return true if the partition exists, false otherwise. * @throws MetaException failure reading RDBMS * @throws NoSuchObjectException this is never thrown. */ boolean doesPartitionExist(String catName, String dbName, String tableName, - List partKeys, List part_vals) + List partKeys, List part_vals, String validWriteIdList) throws MetaException, NoSuchObjectException; /** @@ -327,12 +305,13 @@ boolean dropPartition(String catName, String dbName, String tableName, * @param dbName database name. * @param tableName table name * @param max maximum number of partitions, or -1 to get all partitions. + * @param validWriteIdList valid writeId to read * @return list of partitions * @throws MetaException error access the RDBMS. * @throws NoSuchObjectException no such table exists */ List getPartitions(String catName, String dbName, - String tableName, int max) throws MetaException, NoSuchObjectException; + String tableName, int max, String validWriteIdList) throws MetaException, NoSuchObjectException; /** * Get the location for every partition of a given table. If a partition location is a child of @@ -343,11 +322,12 @@ boolean dropPartition(String catName, String dbName, String tableName, * @param tblName table name. * @param baseLocationToNotShow Partition locations which are child of this path are omitted, and * null value returned instead. + * @param validWriteIdList valid writeId to read * @param max The maximum number of partition locations returned, or -1 for all * @return The map of the partitionName, location pairs */ Map getPartitionLocations(String catName, String dbName, String tblName, - String baseLocationToNotShow, int max); + String baseLocationToNotShow, int max, String validWriteIdList); /** * Alter a table. @@ -476,11 +456,12 @@ void updateCreationMetadata(String catName, String dbname, String tablename, Cre * @param db_name database name. * @param tbl_name table name. * @param max_parts maximum number of partitions to retrieve, -1 for all. + * @param validWriteIdList valid writeId to read * @return list of partition names. * @throws MetaException there was an error accessing the RDBMS */ List listPartitionNames(String catName, String db_name, - String tbl_name, short max_parts) throws MetaException; + String tbl_name, short max_parts, String validWriteIdList) throws MetaException; /** * Get a list of partition values as one big struct. 
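The RawStore hunks above widen each partition-reading signature with the same trailing parameter and the same one-line javadoc, "@param validWriteIdList valid writeId to read". Note that ObjectStore's doesPartitionExist, earlier in this patch, accepts the parameter without using it: a store that queries the RDBMS directly already reads committed data, and presumably only a caching RawStore implementation needs to consult the snapshot. A hypothetical stub showing that accept-and-ignore case:

    import java.util.Collections;
    import java.util.List;

    class PassThroughStoreSketch {
      // Matches the widened RawStore shape; validWriteIdList is deliberately
      // unused because this store reads straight from the RDBMS.
      public List<String> listPartitionNames(String catName, String dbName,
          String tblName, short max, String validWriteIdList) {
        return Collections.emptyList(); // stand-in for the real query
      }
    }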
@@ -492,13 +473,14 @@ void updateCreationMetadata(String catName, String dbname, String tablename, Cre * @param filter filter to apply to the partition names * @param ascending whether to put in ascending order * @param order whether to order + * @param validWriteIdList valid writeId to read * @param maxParts maximum number of parts to return, or -1 for all * @return struct with all of the partition value information * @throws MetaException error access the RDBMS */ PartitionValuesResponse listPartitionValues(String catName, String db_name, String tbl_name, List cols, boolean applyDistinct, String filter, boolean ascending, - List order, long maxParts) throws MetaException; + List order, long maxParts, String validWriteIdList) throws MetaException; /** * Alter a partition. @@ -544,12 +526,13 @@ Partition alterPartition(String catName, String db_name, String tbl_name, List getPartitionsByFilter( - String catName, String dbName, String tblName, String filter, short maxParts) + String catName, String dbName, String tblName, String filter, short maxParts, String validWriteIdList) throws MetaException, NoSuchObjectException; /** @@ -576,12 +559,13 @@ Partition alterPartition(String catName, String db_name, String tbl_name, List getPartitionSpecsByFilterAndProjection(Table table, - GetPartitionsProjectionSpec projectionSpec, GetPartitionsFilterSpec filterSpec) + GetPartitionsProjectionSpec projectionSpec, GetPartitionsFilterSpec filterSpec, String validWriteIdList) throws MetaException, NoSuchObjectException; /** * Get partitions using an already parsed expression. @@ -592,11 +576,12 @@ Partition alterPartition(String catName, String db_name, String tbl_name, List result) + byte[] expr, String defaultPartitionName, short maxParts, List result, String validWriteIdList) throws TException; /** @@ -605,11 +590,12 @@ boolean getPartitionsByExpr(String catName, String dbName, String tblName, * @param dbName database name. * @param tblName table name. * @param filter filter from Hive's SQL where clause + * @param validWriteIdList valid writeId to read * @return number of matching partitions. * @throws MetaException error accessing the RDBMS or executing the filter * @throws NoSuchObjectException no such table */ - int getNumPartitionsByFilter(String catName, String dbName, String tblName, String filter) + int getNumPartitionsByFilter(String catName, String dbName, String tblName, String filter, String validWriteIdList) throws MetaException, NoSuchObjectException; /** @@ -618,11 +604,12 @@ int getNumPartitionsByFilter(String catName, String dbName, String tblName, Stri * @param dbName database name. * @param tblName table name. * @param expr an already parsed Hive expression + * @param validWriteIdList valid writeId to read * @return number of matching partitions. * @throws MetaException error accessing the RDBMS or working with the expression. * @throws NoSuchObjectException no such table. */ - int getNumPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr) + int getNumPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr, String validWriteIdList) throws MetaException, NoSuchObjectException; /** @@ -632,12 +619,13 @@ int getNumPartitionsByExpr(String catName, String dbName, String tblName, byte[] * @param tblName table name. * @param partNames list of partition names. These are names not values, so they will include * both the key and the value. 
+ * @param validWriteIdList valid write id list for the read * @return list of matching partitions * @throws MetaException error accessing the RDBMS. * @throws NoSuchObjectException No such table. */ List getPartitionsByNames(String catName, String dbName, String tblName, - List partNames) + List partNames, String validWriteIdList) throws MetaException, NoSuchObjectException; Table markPartitionForEvent(String catName, String dbName, String tblName, Map partVals, PartitionEventType evtType) throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException; @@ -824,13 +812,14 @@ boolean refreshPrivileges(HiveObjectRef objToRefresh, String authorizer, Privile * @param partVals partition values * @param user_name user to get privilege information for. * @param group_names groups to get privilege information for. + * @param validWriteIdList valid write id list for the read * @return a partition * @throws MetaException error accessing the RDBMS. * @throws NoSuchObjectException no such partition exists * @throws InvalidObjectException error fetching privilege information */ Partition getPartitionWithAuth(String catName, String dbName, String tblName, - List partVals, String user_name, List group_names) + List partVals, String user_name, List group_names, String validWriteIdList) throws MetaException, NoSuchObjectException, InvalidObjectException; /** @@ -842,13 +831,14 @@ Partition getPartitionWithAuth(String catName, String dbName, String tblName, * @param maxParts maximum number of partitions to fetch, -1 for all partitions. * @param userName user to get privilege information for. * @param groupNames groups to get privilege information for. + * @param validWriteIdList valid write id list for the read * @return list of partitions. * @throws MetaException error accessing the RDBMS. * @throws NoSuchObjectException no such table exists * @throws InvalidObjectException error fetching privilege information. */ List getPartitionsWithAuth(String catName, String dbName, - String tblName, short maxParts, String userName, List groupNames) + String tblName, short maxParts, String userName, List groupNames, String validWriteIdList) throws MetaException, NoSuchObjectException, InvalidObjectException; /** @@ -863,12 +853,13 @@ Partition getPartitionWithAuth(String catName, String dbName, String tblName, * Entries can be empty if you only want to specify latter partitions. * @param max_parts * The maximum number of partitions to return + * @param validWriteIdList valid write id list for the read * @return A list of partition names that match the partial spec. * @throws MetaException error accessing the RDBMS * @throws NoSuchObjectException No such table exists */ List listPartitionNamesPs(String catName, String db_name, String tbl_name, - List part_vals, short max_parts) + List part_vals, short max_parts, String validWriteIdList) throws MetaException, NoSuchObjectException; /** @@ -888,13 +879,14 @@ Partition getPartitionWithAuth(String catName, String dbName, String tblName, * The user name for the partition for authentication privileges * @param groupNames * The groupNames for the partition for authentication privileges + * @param validWriteIdList valid write id list for the read * @return A list of partitions that match the partial spec.
* @throws MetaException error access RDBMS * @throws NoSuchObjectException No such table exists * @throws InvalidObjectException error access privilege information */ List listPartitionsPsWithAuth(String catName, String db_name, String tbl_name, - List part_vals, short max_parts, String userName, List groupNames) + List part_vals, short max_parts, String userName, List groupNames, String validWriteIdList) throws MetaException, InvalidObjectException, NoSuchObjectException; /** Persists the given column statistics object to the metastore @@ -921,21 +913,6 @@ Partition getPartitionWithAuth(String catName, String dbName, String tblName, List partVals, String validWriteIds, long writeId) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException; - /** - * Returns the relevant column statistics for a given column in a given table in a given database - * if such statistics exist. - * @param catName catalog name. - * @param dbName name of the database, defaults to current database - * @param tableName name of the table - * @param colName names of the columns for which statistics is requested - * @return Relevant column statistics for the column for the given table - * @throws NoSuchObjectException No such table - * @throws MetaException error accessing the RDBMS - * - */ - ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tableName, - List colName) throws MetaException, NoSuchObjectException; - /** * Returns the relevant column statistics for a given column in a given table in a given database * if such statistics exist. @@ -954,21 +931,6 @@ ColumnStatistics getTableColumnStatistics( List colName, String writeIdList) throws MetaException, NoSuchObjectException; - /** - * Get statistics for a partition for a set of columns. - * @param catName catalog name. - * @param dbName database name. - * @param tblName table name. - * @param partNames list of partition names. These are names so must be key1=val1[/key2=val2...] - * @param colNames list of columns to get stats for - * @return list of statistics objects - * @throws MetaException error accessing the RDBMS - * @throws NoSuchObjectException no such partition. - */ - List getPartitionColumnStatistics( - String catName, String dbName, String tblName, List partNames, List colNames) - throws MetaException, NoSuchObjectException; - /** * Get statistics for a partition for a set of columns. * @param catName catalog name. @@ -1215,21 +1177,6 @@ void dropFunction(String catName, String dbName, String funcName) */ List getFunctions(String catName, String dbName, String pattern) throws MetaException; - /** - * Get aggregated stats for a table or partition(s). - * @param catName catalog name. - * @param dbName database name. - * @param tblName table name. - * @param partNames list of partition names. These are the names of the partitions, not - * values. - * @param colNames list of column names - * @return aggregated stats - * @throws MetaException error accessing RDBMS - * @throws NoSuchObjectException no such table or partition - */ - AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, - List partNames, List colNames) throws MetaException, NoSuchObjectException; - /** * Get aggregated stats for a table or partition(s). * @param catName catalog name. 
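
Before the next file, a note on where that write-id String comes from: the CachedStore prewarm changes later in this patch obtain a per-table snapshot from the TxnStore and serialize it. The sketch below condenses those calls into one helper; the names txn, dbName, and tblName are assumed to be in scope, and the retry and error handling from the patch are omitted.

import java.util.Arrays;
import org.apache.hadoop.hive.common.TableName;
import org.apache.hadoop.hive.common.ValidTxnList;
import org.apache.hadoop.hive.common.ValidWriteIdList;
import org.apache.hadoop.hive.metastore.api.GetValidWriteIdsRequest;
import org.apache.hadoop.hive.metastore.txn.TxnCommonUtils;
import org.apache.hadoop.hive.metastore.txn.TxnStore;

class WriteIdSnapshot {
  static ValidWriteIdList snapshotFor(TxnStore txn, String dbName, String tblName) throws Exception {
    // Reader view of currently open transactions (0 = not inside a transaction).
    ValidTxnList txnList = TxnCommonUtils.createValidReadTxnList(txn.getOpenTxns(), 0);
    GetValidWriteIdsRequest rqst =
        new GetValidWriteIdsRequest(Arrays.asList(TableName.getDbTable(dbName, tblName)));
    rqst.setValidTxnList(txnList.toString());
    // One table was requested, so its write ids are the single response entry;
    // writeIds.toString() is what the new String parameters carry.
    return TxnCommonUtils.createValidReaderWriteIdList(
        txn.getValidWriteIds(rqst).getTblValidWriteIds().get(0));
  }
}
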
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java index b1a92ef03c..987d8aae25 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java @@ -476,7 +476,7 @@ private void validateTableStructure(IHMSHandler hmsHandler, Table table) try { RawStore rawStore = hmsHandler.getMS(); String catName = getTableCatalog(table); - List partitions = rawStore.getPartitions(catName, table.getDbName(), table.getTableName(), -1); + List partitions = rawStore.getPartitions(catName, table.getDbName(), table.getTableName(), -1, null); return partitions; } catch (Exception err) { String msg = "Error getting partitions for " + Warehouse.getQualifiedName(table); diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java index 511e6c1f64..d67f8dc42f 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java @@ -20,7 +20,7 @@ import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collection; +import java.util.BitSet; import java.util.EmptyStackException; import java.util.HashMap; import java.util.LinkedList; @@ -42,6 +42,9 @@ import org.apache.hadoop.hive.common.DatabaseName; import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.common.TableName; +import org.apache.hadoop.hive.common.ValidReaderWriteIdList; +import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.metastore.Deadline; import org.apache.hadoop.hive.metastore.FileMetadataHandler; import org.apache.hadoop.hive.metastore.ObjectStore; @@ -51,21 +54,25 @@ import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.HiveAlterHandler; +import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler; import org.apache.hadoop.hive.metastore.api.*; import org.apache.hadoop.hive.metastore.cache.SharedCache.StatsType; import org.apache.hadoop.hive.metastore.columnstats.aggr.ColumnStatsAggregator; import org.apache.hadoop.hive.metastore.columnstats.aggr.ColumnStatsAggregatorFactory; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; -import org.apache.hadoop.hive.metastore.messaging.AlterDatabaseMessage; -import org.apache.hadoop.hive.metastore.messaging.CreateDatabaseMessage; import org.apache.hadoop.hive.metastore.messaging.CreateTableMessage; import org.apache.hadoop.hive.metastore.messaging.DropTableMessage; import org.apache.hadoop.hive.metastore.messaging.AlterTableMessage; +import org.apache.hadoop.hive.metastore.messaging.CommitTxnMessage; +import org.apache.hadoop.hive.metastore.messaging.AbortTxnMessage; import org.apache.hadoop.hive.metastore.messaging.AddPartitionMessage; +import 
org.apache.hadoop.hive.metastore.messaging.AllocWriteIdMessage; import org.apache.hadoop.hive.metastore.messaging.AlterPartitionMessage; import org.apache.hadoop.hive.metastore.messaging.DropPartitionMessage; import org.apache.hadoop.hive.metastore.messaging.UpdateTableColumnStatMessage; +import org.apache.hadoop.hive.metastore.metrics.Metrics; +import org.apache.hadoop.hive.metastore.metrics.MetricsConstants; import org.apache.hadoop.hive.metastore.messaging.DeleteTableColumnStatMessage; import org.apache.hadoop.hive.metastore.messaging.UpdatePartitionColumnStatMessage; import org.apache.hadoop.hive.metastore.messaging.DeletePartitionColumnStatMessage; @@ -73,6 +80,9 @@ import org.apache.hadoop.hive.metastore.messaging.MessageDeserializer; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; import org.apache.hadoop.hive.metastore.messaging.MessageFactory; +import org.apache.hadoop.hive.metastore.messaging.OpenTxnMessage; +import org.apache.hadoop.hive.metastore.txn.TxnCommonUtils; +import org.apache.hadoop.hive.metastore.txn.TxnStore; import org.apache.hadoop.hive.metastore.txn.TxnUtils; import org.apache.hadoop.hive.metastore.utils.FileUtils; import org.apache.hadoop.hive.metastore.utils.JavaUtils; @@ -84,10 +94,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import com.codahale.metrics.Counter; import com.google.common.annotations.VisibleForTesting; -import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; -import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog; import static org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier; // TODO filter->expr @@ -118,15 +127,30 @@ private Configuration conf; private static boolean areTxnStatsSupported; private PartitionExpressionProxy expressionProxy = null; + private static String startUpdateServiceLock = "L"; private static String lock = "L"; private static boolean sharedCacheInited = false; private static SharedCache sharedCache = new SharedCache(); - private static boolean canUseEvents = false; private static long lastEventId; + private static Map> txnIdToWriteId = new HashMap<>(); + private static boolean counterInited = false; + private static Counter cacheHit; + private static Counter cacheMiss; private static final Logger LOG = LoggerFactory.getLogger(CachedStore.class.getName()); @Override public void setConf(Configuration conf) { + if (MetastoreConf.getVar(conf, ConfVars.TRANSACTIONAL_EVENT_LISTENERS)==null || + MetastoreConf.getVar(conf, ConfVars.TRANSACTIONAL_EVENT_LISTENERS).isEmpty()) { + throw new RuntimeException("CachedStore cannot use events for invalidation as there is no " + + "TransactionalMetaStoreEventListener to add events to the notification table"); + } + if (!counterInited && MetastoreConf.getBoolVar(conf, ConfVars.METRICS_ENABLED)) { + Metrics.initialize(conf); + cacheHit = Metrics.getOrCreateCounter(MetricsConstants.METADATA_CACHE_HIT); + cacheMiss = Metrics.getOrCreateCounter(MetricsConstants.METADATA_CACHE_MISS); + counterInited = true; + } setConfInternal(conf); initBlackListWhiteList(conf); initSharedCache(conf); @@ -148,7 +172,7 @@ void setConfForTestExceptSharedCache(Configuration conf) { initBlackListWhiteList(conf); } - private static synchronized void triggerUpdateUsingEvent(RawStore rawStore) { + private static synchronized void triggerUpdateUsingEvent(RawStore rawStore, Configuration conf) { if (!isCachePrewarmed.get()) { LOG.error("cache update should be done only after prewarm"); throw new
RuntimeException("cache update should be done only after prewarm"); @@ -156,7 +180,7 @@ private static synchronized void triggerUpdateUsingEvent(RawStore rawStore) { long startTime = System.nanoTime(); long preEventId = lastEventId; try { - lastEventId = updateUsingNotificationEvents(rawStore, lastEventId); + lastEventId = updateUsingNotificationEvents(rawStore, lastEventId, conf); } catch (Exception e) { LOG.error(" cache update failed for start event id " + lastEventId + " with error ", e); throw new RuntimeException(e.getMessage()); @@ -167,19 +191,12 @@ private static synchronized void triggerUpdateUsingEvent(RawStore rawStore) { } } - private static synchronized void triggerPreWarm(RawStore rawStore) { + private static synchronized void triggerPreWarm(RawStore rawStore, Configuration conf) { lastEventId = rawStore.getCurrentNotificationEventId().getEventId(); - prewarm(rawStore); + prewarm(rawStore, conf); } private void setConfInternal(Configuration conf) { - if (MetastoreConf.getBoolVar(conf, ConfVars.METASTORE_CACHE_CAN_USE_EVENT)) { - canUseEvents = true; - } else { - canUseEvents = false; - } - LOG.info("canUseEvents is set to " + canUseEvents + " in cached Store"); - String rawStoreClassName = MetastoreConf.getVar(conf, ConfVars.CACHED_RAW_STORE_IMPL, ObjectStore.class.getName()); if (rawStore == null) { try { @@ -251,17 +268,14 @@ private static void updateStatsForAlterTable(RawStore rawStore, Table tblBefore, } } - @VisibleForTesting public static long updateUsingNotificationEvents(RawStore rawStore, long lastEventId) + @VisibleForTesting public static long updateUsingNotificationEvents(RawStore rawStore, long lastEventId, Configuration conf) throws Exception { + Deadline.registerIfNot(1000000); LOG.debug("updating cache using notification events starting from event id " + lastEventId); NotificationEventRequest rqst = new NotificationEventRequest(lastEventId); //Add the events which are not related to metadata update rqst.addToEventTypeSkipList(MessageBuilder.INSERT_EVENT); - rqst.addToEventTypeSkipList(MessageBuilder.OPEN_TXN_EVENT); - rqst.addToEventTypeSkipList(MessageBuilder.COMMIT_TXN_EVENT); - rqst.addToEventTypeSkipList(MessageBuilder.ABORT_TXN_EVENT); - rqst.addToEventTypeSkipList(MessageBuilder.ALLOC_WRITE_ID_EVENT); rqst.addToEventTypeSkipList(MessageBuilder.ACID_WRITE_EVENT); rqst.addToEventTypeSkipList(MessageBuilder.CREATE_FUNCTION_EVENT); rqst.addToEventTypeSkipList(MessageBuilder.DROP_FUNCTION_EVENT); @@ -327,7 +341,7 @@ private static void updateStatsForAlterTable(RawStore rawStore, Table tblBefore, break; case MessageBuilder.CREATE_TABLE_EVENT: CreateTableMessage createTableMessage = deserializer.getCreateTableMessage(message); - sharedCache.addTableToCache(catalogName, dbName, tableName, createTableMessage.getTableObj()); + sharedCache.addTableToCache(catalogName, dbName, tableName, createTableMessage.getTableObj(), newTableWriteIds(dbName, tableName)); break; case MessageBuilder.ALTER_TABLE_EVENT: AlterTableMessage alterTableMessage = deserializer.getAlterTableMessage(message); @@ -340,14 +354,15 @@ private static void updateStatsForAlterTable(RawStore rawStore, Table tblBefore, DropTableMessage dropTableMessage = deserializer.getDropTableMessage(message); int batchSize = MetastoreConf.getIntVar(rawStore.getConf(), ConfVars.BATCH_RETRIEVE_OBJECTS_MAX); String tableDnsPath = null; - Path tablePath = new Path(dropTableMessage.getTableObj().getSd().getLocation()); + Path tablePath = dropTableMessage.getTableObj().getSd().getLocation()!=null? 
+ new Path(dropTableMessage.getTableObj().getSd().getLocation()):null; if (tablePath != null) { tableDnsPath = new Warehouse(rawStore.getConf()).getDnsPath(tablePath).toString(); } while (true) { - Map partitionLocations = - rawStore.getPartitionLocations(catalogName, dbName, tableName, tableDnsPath, batchSize); + Map partitionLocations = rawStore.getPartitionLocations(catalogName, dbName, tableName, + tableDnsPath, batchSize, null); if (partitionLocations == null || partitionLocations.isEmpty()) { break; } @@ -356,25 +371,34 @@ private static void updateStatsForAlterTable(RawStore rawStore, Table tblBefore, } sharedCache.removeTableFromCache(catalogName, dbName, tableName); break; - case MessageBuilder.CREATE_DATABASE_EVENT: - CreateDatabaseMessage createDatabaseMessage = deserializer.getCreateDatabaseMessage(message); - sharedCache.addDatabaseToCache(createDatabaseMessage.getDatabaseObject()); - break; - case MessageBuilder.ALTER_DATABASE_EVENT: - AlterDatabaseMessage alterDatabaseMessage = deserializer.getAlterDatabaseMessage(message); - sharedCache.alterDatabaseInCache(catalogName, dbName, alterDatabaseMessage.getDbObjAfter()); - break; - case MessageBuilder.DROP_DATABASE_EVENT: - sharedCache.removeDatabaseFromCache(catalogName, dbName); - break; - case MessageBuilder.CREATE_CATALOG_EVENT: - case MessageBuilder.DROP_CATALOG_EVENT: - case MessageBuilder.ALTER_CATALOG_EVENT: - // TODO : Need to add cache invalidation for catalog events - LOG.error("catalog Events are not supported for cache invalidation : " + event.getEventType()); - break; case MessageBuilder.UPDATE_TBL_COL_STAT_EVENT: UpdateTableColumnStatMessage msg = deserializer.getUpdateTableColumnStatMessage(message); + Table tbl = msg.getTableObject(); + Map newParams = new HashMap<>(tbl.getParameters()); + List colNames = new ArrayList<>(); + for (ColumnStatisticsObj statsObj : msg.getColumnStatistics().getStatsObj()) { + colNames.add(statsObj.getColName()); + } + StatsSetupConst.setColumnStatsState(newParams, colNames); + long writeId = msg.getWriteId(); + String validWriteIds = msg.getWriteIds(); + if (validWriteIds != null) { + if (!areTxnStatsSupported) { + StatsSetupConst.setBasicStatsState(newParams, StatsSetupConst.FALSE); + } else { + String errorMsg = ObjectStore.verifyStatsChangeCtx(TableName.getDbTable(dbName, tableName), + tbl.getParameters(), newParams, writeId, validWriteIds, true); + if (errorMsg != null) { + throw new MetaException(errorMsg); + } + if (!ObjectStore.isCurrentStatsValidForTheQuery(newParams, writeId, validWriteIds, true)) { + // Make sure we set the flag to invalid regardless of the current value. + StatsSetupConst.setBasicStatsState(newParams, StatsSetupConst.FALSE); + LOG.info("Removed COLUMN_STATS_ACCURATE from the parameters of the table " + + dbName + "." 
+ tableName); + } + } + } sharedCache.alterTableAndStatsInCache(catalogName, dbName, tableName, msg.getWriteId(), msg.getColumnStatistics().getStatsObj(), msg.getParameters()); break; @@ -384,6 +408,32 @@ private static void updateStatsForAlterTable(RawStore rawStore, Table tblBefore, break; case MessageBuilder.UPDATE_PART_COL_STAT_EVENT: UpdatePartitionColumnStatMessage msgPartUpdate = deserializer.getUpdatePartitionColumnStatMessage(message); + Partition partition = sharedCache.getPartitionFromCache(catalogName, dbName, tableName, msgPartUpdate.getPartVals()); + newParams = new HashMap<>(partition.getParameters()); + colNames = new ArrayList<>(); + for (ColumnStatisticsObj statsObj : msgPartUpdate.getColumnStatistics().getStatsObj()) { + colNames.add(statsObj.getColName()); + } + StatsSetupConst.setColumnStatsState(newParams, colNames); + writeId = msgPartUpdate.getWriteId(); + validWriteIds = msgPartUpdate.getWriteIds(); + if (validWriteIds != null) { + if (!areTxnStatsSupported) { + StatsSetupConst.setBasicStatsState(newParams, StatsSetupConst.FALSE); + } else { + String errorMsg = ObjectStore.verifyStatsChangeCtx(TableName.getDbTable(dbName, tableName), + partition.getParameters(), newParams, writeId, validWriteIds, true); + if (errorMsg != null) { + throw new MetaException(errorMsg); + } + if (!ObjectStore.isCurrentStatsValidForTheQuery(newParams, writeId, validWriteIds, true)) { + // Make sure we set the flag to invalid regardless of the current value. + StatsSetupConst.setBasicStatsState(newParams, StatsSetupConst.FALSE); + LOG.info("Removed COLUMN_STATS_ACCURATE from the parameters of the partition " + + dbName + "." + tableName + "." + msgPartUpdate.getPartVals()); + } + } + } sharedCache.alterPartitionAndStatsInCache(catalogName, dbName, tableName, msgPartUpdate.getWriteId(), msgPartUpdate.getPartVals(), msgPartUpdate.getParameters(), msgPartUpdate.getColumnStatistics().getStatsObj()); @@ -393,6 +443,48 @@ private static void updateStatsForAlterTable(RawStore rawStore, Table tblBefore, sharedCache.removePartitionColStatsFromCache(catalogName, dbName, tableName, msgPart.getPartValues(), msgPart.getColName()); break; + case MessageBuilder.OPEN_TXN_EVENT: + OpenTxnMessage openTxn = deserializer.getOpenTxnMessage(message); + for (long txnId : openTxn.getTxnIds()) { + txnIdToWriteId.put(txnId, new HashMap<>()); + } + break; + case MessageBuilder.ABORT_TXN_EVENT: + AbortTxnMessage abortTxn = deserializer.getAbortTxnMessage(message); + txnIdToWriteId.put(abortTxn.getTxnId(), new HashMap<>()); + break; + case MessageBuilder.ALLOC_WRITE_ID_EVENT: + AllocWriteIdMessage allocWriteId = deserializer.getAllocWriteIdMessage(message); + List txnToWriteIdList = allocWriteId.getTxnToWriteIdList(); + for (TxnToWriteId txnToWriteId : txnToWriteIdList) { + long txnId = txnToWriteId.getTxnId(); + if (txnIdToWriteId.containsKey(txnId)) { + Map m = txnIdToWriteId.get(txnId); + String fullTableName = TableName.getDbTable(dbName, tableName); + m.put(fullTableName, txnToWriteId.getWriteId()); + } + } + break; + case MessageBuilder.COMMIT_TXN_EVENT: + CommitTxnMessage commitTxn = deserializer.getCommitTxnMessage(message); + if (txnIdToWriteId.containsKey(commitTxn.getTxnId())) { + Map m = txnIdToWriteId.get(commitTxn.getTxnId()); + for (Map.Entry entry : m.entrySet()) { + String tblNameToFlag = entry.getKey(); + long writeIdToCommit = entry.getValue(); + TableName tname = TableName.fromString(tblNameToFlag, Warehouse.DEFAULT_CATALOG_NAME, Warehouse.DEFAULT_DATABASE_NAME); + 
sharedCache.commitWriteId(tname.getCat(), tname.getDb(), tname.getTable(), writeIdToCommit); + } + txnIdToWriteId.remove(commitTxn.getTxnId()); + } else { + GetTxnTableWriteIdsResponse getTxnTableWriteIdsResponse = HMSHandler.getMsThreadTxnHandler(conf) + .getTxnTableWriteIds(commitTxn.getTxnId()); + for (TableWriteId tableWriteId : getTxnTableWriteIdsResponse.getTableWriteIds()) { + TableName tname = TableName.fromString(tableWriteId.getFullTableName(), Warehouse.DEFAULT_CATALOG_NAME, Warehouse.DEFAULT_DATABASE_NAME); + sharedCache.commitWriteId(tname.getCat(), tname.getDb(), tname.getTable(), tableWriteId.getWriteId()); + } + } + break; default: LOG.error("Event is not supported for cache invalidation : " + event.getEventType()); } @@ -405,27 +497,23 @@ private static void updateStatsForAlterTable(RawStore rawStore, Table tblBefore, * This initializes the caches in SharedCache by getting the objects from Metastore DB via * ObjectStore and populating the respective caches */ - static void prewarm(RawStore rawStore) { + public static void prewarm(RawStore rawStore, Configuration conf) { if (isCachePrewarmed.get()) { return; } long startTime = System.nanoTime(); LOG.info("Prewarming CachedStore"); long sleepTime = 100; + TxnStore txn = TxnUtils.getTxnStore(conf); while (!isCachePrewarmed.get()) { // Prevents throwing exceptions in our raw store calls since we're not using RawStoreProxy Deadline.registerIfNot(1000000); - Collection catalogsToCache; + + List catNames = new ArrayList<>(); try { - catalogsToCache = catalogsToCache(rawStore); - LOG.info("Going to cache catalogs: " + org.apache.commons.lang.StringUtils.join(catalogsToCache, ", ")); - List catalogs = new ArrayList<>(catalogsToCache.size()); - for (String catName : catalogsToCache) { - catalogs.add(rawStore.getCatalog(catName)); - } - sharedCache.populateCatalogsInCache(catalogs); - } catch (MetaException | NoSuchObjectException e) { - LOG.warn("Failed to populate catalogs in cache, going to try again", e); + catNames = rawStore.getCatalogs(); + } catch (MetaException e) { + LOG.warn("Failed to get catalogs, going to try again", e); try { Thread.sleep(sleepTime); sleepTime = sleepTime * 2; @@ -435,12 +523,11 @@ static void prewarm(RawStore rawStore) { // try again continue; } - LOG.info("Finished prewarming catalogs, starting on databases"); + List databases = new ArrayList<>(); - for (String catName : catalogsToCache) { - try { + try { + for (String catName : catNames) { List dbNames = rawStore.getAllDatabases(catName); - LOG.info("Number of databases to prewarm in catalog {}: {}", catName, dbNames.size()); for (String dbName : dbNames) { try { databases.add(rawStore.getDatabase(catName, dbName)); @@ -449,11 +536,10 @@ static void prewarm(RawStore rawStore) { LOG.warn("Failed to cache database " + DatabaseName.getQualified(catName, dbName) + ", moving on", e); } } - } catch (MetaException e) { - LOG.warn("Failed to cache databases in catalog " + catName + ", moving on", e); } + } catch (MetaException e) { + LOG.warn("Failed to fetch databases, moving on", e); } - sharedCache.populateDatabasesInCache(databases); LOG.info("Databases cache is now prewarmed. 
Now adding tables, partitions and statistics to the cache"); int numberOfDatabasesCachedSoFar = 0; for (Database db : databases) { @@ -477,13 +563,21 @@ static void prewarm(RawStore rawStore) { continue; } Table table; + ValidWriteIdList writeIds; try { - table = rawStore.getTable(catName, dbName, tblName); + ValidTxnList currentTxnList = TxnCommonUtils.createValidReadTxnList(txn.getOpenTxns(), 0); + GetValidWriteIdsRequest rqst = new GetValidWriteIdsRequest(Arrays.asList(TableName.getDbTable(dbName, tblName))); + rqst.setValidTxnList(currentTxnList.toString()); + writeIds = TxnCommonUtils.createValidReaderWriteIdList(txn.getValidWriteIds(rqst).getTblValidWriteIds().get(0)); + table = rawStore.getTable(catName, dbName, tblName, null); } catch (MetaException e) { LOG.debug(ExceptionUtils.getStackTrace(e)); // It is possible the table is deleted during fetching tables of the database, // in that case, continue with the next table continue; + } catch (NoSuchTxnException e) { + LOG.warn("Cannot find transaction", e); + continue; } List colNames = MetaStoreUtils.getColumnNamesForTable(table); try { @@ -494,7 +588,7 @@ static void prewarm(RawStore rawStore) { AggrStats aggrStatsAllButDefaultPartition = null; if (!table.getPartitionKeys().isEmpty()) { Deadline.startTimer("getPartitions"); - partitions = rawStore.getPartitions(catName, dbName, tblName, -1); + partitions = rawStore.getPartitions(catName, dbName, tblName, -1, null); Deadline.stopTimer(); List partNames = new ArrayList<>(partitions.size()); for (Partition p : partitions) { @@ -504,12 +598,12 @@ static void prewarm(RawStore rawStore) { // Get partition column stats for this table Deadline.startTimer("getPartitionColumnStatistics"); partitionColStats = - rawStore.getPartitionColumnStatistics(catName, dbName, tblName, partNames, colNames); + rawStore.getPartitionColumnStatistics(catName, dbName, tblName, partNames, colNames, null); Deadline.stopTimer(); // Get aggregate stats for all partitions of a table and for all but default // partition Deadline.startTimer("getAggrPartitionColumnStatistics"); - aggrStatsAllPartitions = rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames); + aggrStatsAllPartitions = rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames, null); Deadline.stopTimer(); // Remove default partition from partition names and get aggregate // stats again @@ -526,18 +620,18 @@ static void prewarm(RawStore rawStore) { partNames.remove(defaultPartitionName); Deadline.startTimer("getAggrPartitionColumnStatistics"); aggrStatsAllButDefaultPartition = - rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames); + rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames, null); Deadline.stopTimer(); } } else { Deadline.startTimer("getTableColumnStatistics"); - tableColStats = rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames); + tableColStats = rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames, null); Deadline.stopTimer(); } // If the table could not cached due to memory limit, stop prewarm boolean isSuccess = sharedCache .populateTableInCache(table, tableColStats, partitions, partitionColStats, aggrStatsAllPartitions, - aggrStatsAllButDefaultPartition); + aggrStatsAllButDefaultPartition, writeIds); if (isSuccess) { LOG.trace("Cached Database: {}'s Table: {}.", dbName, tblName); } else { @@ -553,8 +647,9 @@ static void prewarm(RawStore rawStore) { } LOG.debug("Processed database: {}'s table: {}. 
Cached {} / {} tables so far.", dbName, tblName, ++numberOfTablesCachedSoFar, totalTablesToCache); - } catch (EmptyStackException e) { - // We've prewarmed this database, continue with the next one + } catch (Exception e) { + LOG.debug(ExceptionUtils.getStackTrace(e)); + // skip table, continue with the next one continue; } } @@ -572,11 +667,12 @@ static void prewarm(RawStore rawStore) { * a singleton. */ @VisibleForTesting - static void clearSharedCache() { + public static void clearSharedCache() { synchronized (lock) { sharedCacheInited = false; } sharedCache = new SharedCache(); + isCachePrewarmed.set(false); } static void completePrewarm(long startTime, boolean cachedAllMetadata) { @@ -625,15 +721,6 @@ private static void initBlackListWhiteList(Configuration conf) { MetastoreConf.getAsString(conf, MetastoreConf.ConfVars.CACHED_RAW_STORE_CACHED_OBJECTS_BLACKLIST)); } - private static Collection catalogsToCache(RawStore rs) throws MetaException { - Collection confValue = MetastoreConf.getStringCollection(rs.getConf(), ConfVars.CATALOGS_TO_CACHE); - if (confValue == null || confValue.isEmpty() || (confValue.size() == 1 && confValue.contains(""))) { - return rs.getCatalogs(); - } else { - return confValue; - } - } - @VisibleForTesting /** * This starts a background thread, which initially populates the SharedCache and later @@ -642,32 +729,34 @@ private static void initBlackListWhiteList(Configuration conf) { * @param conf * @param runOnlyOnce * @param shouldRunPrewarm - */ static synchronized void startCacheUpdateService(Configuration conf, boolean runOnlyOnce, + */ static void startCacheUpdateService(Configuration conf, boolean runOnlyOnce, boolean shouldRunPrewarm) { - if (cacheUpdateMaster == null) { - initBlackListWhiteList(conf); - if (!MetastoreConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST)) { - cacheRefreshPeriodMS = - MetastoreConf.getTimeVar(conf, ConfVars.CACHED_RAW_STORE_CACHE_UPDATE_FREQUENCY, TimeUnit.MILLISECONDS); - } - LOG.info("CachedStore: starting cache update service (run every {} ms)", cacheRefreshPeriodMS); - cacheUpdateMaster = Executors.newScheduledThreadPool(1, new ThreadFactory() { - @Override public Thread newThread(Runnable r) { - Thread t = Executors.defaultThreadFactory().newThread(r); - t.setName("CachedStore-CacheUpdateService: Thread-" + t.getId()); - t.setDaemon(true); - return t; + synchronized (startUpdateServiceLock) { + if (cacheUpdateMaster == null) { + initBlackListWhiteList(conf); + if (!MetastoreConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST)) { + cacheRefreshPeriodMS = + MetastoreConf.getTimeVar(conf, ConfVars.CACHED_RAW_STORE_CACHE_UPDATE_FREQUENCY, TimeUnit.MILLISECONDS); + } + LOG.info("CachedStore: starting cache update service (run every {} ms)", cacheRefreshPeriodMS); + cacheUpdateMaster = Executors.newScheduledThreadPool(1, new ThreadFactory() { + @Override public Thread newThread(Runnable r) { + Thread t = Executors.defaultThreadFactory().newThread(r); + t.setName("CachedStore-CacheUpdateService: Thread-" + t.getId()); + t.setDaemon(true); + return t; + } + }); + if (!runOnlyOnce) { + cacheUpdateMaster + .scheduleAtFixedRate(new CacheUpdateMasterWork(conf, shouldRunPrewarm), 0, cacheRefreshPeriodMS, + TimeUnit.MILLISECONDS); } - }); - if (!runOnlyOnce) { - cacheUpdateMaster - .scheduleAtFixedRate(new CacheUpdateMasterWork(conf, shouldRunPrewarm), 0, cacheRefreshPeriodMS, - TimeUnit.MILLISECONDS); } - } - if (runOnlyOnce) { - // Some tests control the execution of the background update thread - cacheUpdateMaster.schedule(new 
CacheUpdateMasterWork(conf, shouldRunPrewarm), 0, TimeUnit.MILLISECONDS); + if (runOnlyOnce) { + // Some tests control the execution of the background update thread + cacheUpdateMaster.schedule(new CacheUpdateMasterWork(conf, shouldRunPrewarm), 0, TimeUnit.MILLISECONDS); + } } } @@ -694,14 +783,16 @@ private static void initBlackListWhiteList(Configuration conf) { static class CacheUpdateMasterWork implements Runnable { private boolean shouldRunPrewarm = true; private final RawStore rawStore; + private Configuration conf; CacheUpdateMasterWork(Configuration conf, boolean shouldRunPrewarm) { this.shouldRunPrewarm = shouldRunPrewarm; + this.conf = new Configuration(conf); String rawStoreClassName = MetastoreConf.getVar(conf, ConfVars.CACHED_RAW_STORE_IMPL, ObjectStore.class.getName()); try { rawStore = JavaUtils.getClass(rawStoreClassName, RawStore.class).newInstance(); - rawStore.setConf(conf); + rawStore.setConf(this.conf); } catch (InstantiationException | IllegalAccessException | MetaException e) { // MetaException here really means ClassNotFound (see the utility method). // So, if any of these happen, that means we can never succeed. @@ -711,23 +802,15 @@ private static void initBlackListWhiteList(Configuration conf) { @Override public void run() { if (!shouldRunPrewarm) { - if (canUseEvents) { - try { - triggerUpdateUsingEvent(rawStore); - } catch (Exception e) { - LOG.error("failed to update cache using events ", e); - } - } else { - // TODO: prewarm and update can probably be merged. - try { - update(); - } catch (Exception e) { - LOG.error("periodical refresh fail ", e); - } + try { + triggerUpdateUsingEvent(rawStore, conf); + } catch (Exception e) { + LOG.error("failed to update cache using events ", e); } + sharedCache.incrementUpdateCount(); } else { try { - triggerPreWarm(rawStore); + triggerPreWarm(rawStore, conf); shouldRunPrewarm = false; } catch (Exception e) { LOG.error("Prewarm failure", e); @@ -735,226 +818,6 @@ private static void initBlackListWhiteList(Configuration conf) { } } } - - void update() { - Deadline.registerIfNot(1000000); - LOG.debug("CachedStore: updating cached objects. Shared cache has been update {} times so far.", - sharedCache.getUpdateCount()); - try { - for (String catName : catalogsToCache(rawStore)) { - List dbNames = rawStore.getAllDatabases(catName); - // Update the database in cache - updateDatabases(rawStore, catName, dbNames); - for (String dbName : dbNames) { - // Update the tables in cache - updateTables(rawStore, catName, dbName); - List tblNames; - try { - tblNames = rawStore.getAllTables(catName, dbName); - } catch (MetaException e) { - LOG.debug(ExceptionUtils.getStackTrace(e)); - // Continue with next database - continue; - } - for (String tblName : tblNames) { - if (!shouldCacheTable(catName, dbName, tblName)) { - continue; - } - // Update the table column stats for a table in cache - updateTableColStats(rawStore, catName, dbName, tblName); - // Update the partitions for a table in cache - updateTablePartitions(rawStore, catName, dbName, tblName); - // Update the partition col stats for a table in cache - updateTablePartitionColStats(rawStore, catName, dbName, tblName); - // Update aggregate partition column stats for a table in cache - updateTableAggregatePartitionColStats(rawStore, catName, dbName, tblName); - } - } - } - sharedCache.incrementUpdateCount(); - LOG.debug("CachedStore: updated cached objects. 
Shared cache update count is: {}", - sharedCache.getUpdateCount()); - } catch (MetaException e) { - LOG.error("Updating CachedStore: error happen when refresh; skipping this iteration", e); - } - } - - private void updateDatabases(RawStore rawStore, String catName, List dbNames) { - LOG.debug("CachedStore: updating cached database objects for catalog: {}", catName); - boolean success = false; - // Try MAX_RETRIES times, then move to next method - int maxTries = MAX_RETRIES; - while (!success && (maxTries-- > 0)) { - // Prepare the list of databases - List databases = new ArrayList<>(); - for (String dbName : dbNames) { - Database db; - try { - db = rawStore.getDatabase(catName, dbName); - databases.add(db); - } catch (NoSuchObjectException e) { - LOG.info("Updating CachedStore: database: " + catName + "." + dbName + " does not exist.", e); - } - } - success = sharedCache.refreshDatabasesInCache(databases); - LOG.debug("CachedStore: updated cached database objects for catalog: {}", catName); - } - } - - private void updateTables(RawStore rawStore, String catName, String dbName) { - LOG.debug("CachedStore: updating cached table objects for catalog: {}, database: {}", catName, dbName); - boolean success = false; - // Try MAX_RETRIES times, then move to next method - int maxTries = MAX_RETRIES; - while (!success && (maxTries-- > 0)) { - List
tables = new ArrayList<>(); - try { - List tblNames = rawStore.getAllTables(catName, dbName); - for (String tblName : tblNames) { - if (!shouldCacheTable(catName, dbName, tblName)) { - continue; - } - Table table = rawStore - .getTable(StringUtils.normalizeIdentifier(catName), StringUtils.normalizeIdentifier(dbName), - StringUtils.normalizeIdentifier(tblName)); - tables.add(table); - } - success = sharedCache.refreshTablesInCache(catName, dbName, tables); - LOG.debug("CachedStore: updated cached table objects for catalog: {}, database: {}", catName, dbName); - } catch (MetaException e) { - LOG.debug("Unable to refresh cached tables for database: " + dbName, e); - } - } - } - - private void updateTableColStats(RawStore rawStore, String catName, String dbName, String tblName) { - LOG.debug("CachedStore: updating cached table col stats objects for catalog: {}, database: {}", catName, dbName); - boolean committed = false; - rawStore.openTransaction(); - try { - Table table = rawStore.getTable(catName, dbName, tblName); - if (table != null && !table.isSetPartitionKeys()) { - List colNames = MetaStoreUtils.getColumnNamesForTable(table); - Deadline.startTimer("getTableColumnStatistics"); - ColumnStatistics tableColStats = rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames); - Deadline.stopTimer(); - if (tableColStats != null) { - sharedCache.refreshTableColStatsInCache(StringUtils.normalizeIdentifier(catName), - StringUtils.normalizeIdentifier(dbName), StringUtils.normalizeIdentifier(tblName), - tableColStats.getStatsObj()); - // Update the table to get consistent stats state. - sharedCache.alterTableInCache(catName, dbName, tblName, table); - } - } - committed = rawStore.commitTransaction(); - LOG.debug("CachedStore: updated cached table col stats objects for catalog: {}, database: {}", catName, dbName); - } catch (MetaException | NoSuchObjectException e) { - LOG.info("Unable to refresh table column stats for table: " + tblName, e); - } finally { - if (!committed) { - sharedCache.removeAllTableColStatsFromCache(catName, dbName, tblName); - rawStore.rollbackTransaction(); - } - } - } - - private void updateTablePartitions(RawStore rawStore, String catName, String dbName, String tblName) { - LOG.debug("CachedStore: updating cached partition objects for catalog: {}, database: {}, table: {}", catName, - dbName, tblName); - try { - Deadline.startTimer("getPartitions"); - List partitions = rawStore.getPartitions(catName, dbName, tblName, -1); - Deadline.stopTimer(); - sharedCache - .refreshPartitionsInCache(StringUtils.normalizeIdentifier(catName), StringUtils.normalizeIdentifier(dbName), - StringUtils.normalizeIdentifier(tblName), partitions); - LOG.debug("CachedStore: updated cached partition objects for catalog: {}, database: {}, table: {}", catName, - dbName, tblName); - } catch (MetaException | NoSuchObjectException e) { - LOG.info("Updating CachedStore: unable to read partitions of table: " + tblName, e); - } - } - - private void updateTablePartitionColStats(RawStore rawStore, String catName, String dbName, String tblName) { - LOG.debug("CachedStore: updating cached partition col stats objects for catalog: {}, database: {}, table: {}", - catName, dbName, tblName); - boolean committed = false; - rawStore.openTransaction(); - try { - Table table = rawStore.getTable(catName, dbName, tblName); - if (table != null) { - List colNames = MetaStoreUtils.getColumnNamesForTable(table); - List partNames = rawStore.listPartitionNames(catName, dbName, tblName, (short) -1); - // Get partition 
column stats for this table - Deadline.startTimer("getPartitionColumnStatistics"); - List partitionColStats = - rawStore.getPartitionColumnStatistics(catName, dbName, tblName, partNames, colNames); - Deadline.stopTimer(); - sharedCache.refreshPartitionColStatsInCache(catName, dbName, tblName, partitionColStats); - Deadline.startTimer("getPartitionsByNames"); - List parts = rawStore.getPartitionsByNames(catName, dbName, tblName, partNames); - Deadline.stopTimer(); - // Also save partitions for consistency as they have the stats state. - for (Partition part : parts) { - sharedCache.alterPartitionInCache(catName, dbName, tblName, part.getValues(), part); - } - } - committed = rawStore.commitTransaction(); - LOG.debug("CachedStore: updated cached partition col stats objects for catalog: {}, database: {}, table: {}", - catName, dbName, tblName); - } catch (MetaException | NoSuchObjectException e) { - LOG.info("Updating CachedStore: unable to read partitions of table: " + tblName, e); - } finally { - if (!committed) { - sharedCache.removeAllPartitionColStatsFromCache(catName, dbName, tblName); - rawStore.rollbackTransaction(); - } - } - } - - // Update cached aggregate stats for all partitions of a table and for all - // but default partition - private static void updateTableAggregatePartitionColStats(RawStore rawStore, String catName, String dbName, - String tblName) { - LOG.debug( - "CachedStore: updating cached aggregate partition col stats objects for catalog: {}, database: {}, table: {}", - catName, dbName, tblName); - try { - Table table = rawStore.getTable(catName, dbName, tblName); - if (table == null) { - return; - } - List partNames = rawStore.listPartitionNames(catName, dbName, tblName, (short) -1); - List colNames = MetaStoreUtils.getColumnNamesForTable(table); - if ((partNames != null) && (partNames.size() > 0)) { - Deadline.startTimer("getAggregareStatsForAllPartitions"); - AggrStats aggrStatsAllPartitions = rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames); - Deadline.stopTimer(); - // Remove default partition from partition names and get aggregate stats again - List partKeys = table.getPartitionKeys(); - String defaultPartitionValue = MetastoreConf.getVar(rawStore.getConf(), ConfVars.DEFAULTPARTITIONNAME); - List partCols = new ArrayList(); - List partVals = new ArrayList(); - for (FieldSchema fs : partKeys) { - partCols.add(fs.getName()); - partVals.add(defaultPartitionValue); - } - String defaultPartitionName = FileUtils.makePartName(partCols, partVals); - partNames.remove(defaultPartitionName); - Deadline.startTimer("getAggregareStatsForAllPartitionsExceptDefault"); - AggrStats aggrStatsAllButDefaultPartition = - rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames); - Deadline.stopTimer(); - sharedCache.refreshAggregateStatsInCache(StringUtils.normalizeIdentifier(catName), - StringUtils.normalizeIdentifier(dbName), StringUtils.normalizeIdentifier(tblName), aggrStatsAllPartitions, - aggrStatsAllButDefaultPartition, null); - LOG.debug("CachedStore: updated cached aggregate partition col stats objects for catalog:" - + " {}, database: {}, table: {}", catName, dbName, tblName); - } - } catch (MetaException | NoSuchObjectException e) { - LOG.info("Updating CachedStore: unable to read aggregate column stats of table: " + tblName, e); - } - } } @Override public Configuration getConf() { @@ -970,27 +833,7 @@ private static void updateTableAggregatePartitionColStats(RawStore rawStore, Str } @Override public boolean commitTransaction() { - 
if (!rawStore.commitTransaction()) { - return false; - } - - // In case of event based update, shared cache is not updated directly to avoid inconsistency. - // For example, if metastore B add a partition, then metastore A drop a partition later. However, on metastore A, - // it first get drop partition request, then from notification, create the partition. If there's no tombstone - // entry in partition cache to tell drop is after creation, we end up consumes the creation request. Though - // eventually there's drop partition notification, but during the interim, later event takes precedence. - // So we will not update the cache during raw store operation but wait during commit transaction to make sure that - // the event related to the current transactions are updated in the cache and thus we can support strong - // consistency in case there is only one metastore. - if (canUseEvents) { - try { - triggerUpdateUsingEvent(rawStore); - } catch (Exception e) { - //TODO : Not sure how to handle it as the commit is already done in the object store. - LOG.error("Failed to update cache", e); - } - } - return true; + return rawStore.commitTransaction(); } @Override public boolean isActiveTransaction() { @@ -1003,107 +846,47 @@ private static void updateTableAggregatePartitionColStats(RawStore rawStore, Str @Override public void createCatalog(Catalog cat) throws MetaException { rawStore.createCatalog(cat); - // in case of event based cache update, cache will not be updated for catalog. - if (!canUseEvents) { - sharedCache.addCatalogToCache(cat); - } } @Override public void alterCatalog(String catName, Catalog cat) throws MetaException, InvalidOperationException { rawStore.alterCatalog(catName, cat); - // in case of event based cache update, cache will not be updated for catalog. - if (!canUseEvents) { - sharedCache.alterCatalogInCache(StringUtils.normalizeIdentifier(catName), cat); - } } @Override public Catalog getCatalog(String catalogName) throws NoSuchObjectException, MetaException { - // in case of event based cache update, cache will not be updated for catalog. - if (!sharedCache.isCatalogCachePrewarmed() || canUseEvents) { - return rawStore.getCatalog(catalogName); - } - Catalog cat = sharedCache.getCatalogFromCache(normalizeIdentifier(catalogName)); - if (cat == null) { - throw new NoSuchObjectException(); - } - return cat; + return rawStore.getCatalog(catalogName); } @Override public List getCatalogs() throws MetaException { - // in case of event based cache update, cache will not be updated for catalog. - if (!sharedCache.isCatalogCachePrewarmed() || canUseEvents) { - return rawStore.getCatalogs(); - } - return sharedCache.listCachedCatalogs(); + return rawStore.getCatalogs(); } @Override public void dropCatalog(String catalogName) throws NoSuchObjectException, MetaException { rawStore.dropCatalog(catalogName); - - // in case of event based cache update, cache will not be updated for catalog. - if (!canUseEvents) { - catalogName = catalogName.toLowerCase(); - sharedCache.removeCatalogFromCache(catalogName); - } } @Override public void createDatabase(Database db) throws InvalidObjectException, MetaException { rawStore.createDatabase(db); - // in case of event based cache update, cache will be updated during commit. - if (!canUseEvents) { - sharedCache.addDatabaseToCache(db); - } } @Override public Database getDatabase(String catName, String dbName) throws NoSuchObjectException { - // in case of event based cache update, cache will be updated during commit. 
So within active transaction, read - // directly from rawStore to avoid reading stale data as the data updated during same transaction will not be - // updated in the cache. - if (!sharedCache.isDatabaseCachePrewarmed() || (canUseEvents && rawStore.isActiveTransaction())) { - return rawStore.getDatabase(catName, dbName); - } - dbName = dbName.toLowerCase(); - Database db = sharedCache - .getDatabaseFromCache(StringUtils.normalizeIdentifier(catName), StringUtils.normalizeIdentifier(dbName)); - if (db == null) { - throw new NoSuchObjectException(); - } - return db; + return rawStore.getDatabase(catName, dbName); } @Override public boolean dropDatabase(String catName, String dbName) throws NoSuchObjectException, MetaException { - boolean succ = rawStore.dropDatabase(catName, dbName); - if (succ && !canUseEvents) { - // in case of event based cache update, cache will be updated during commit. - sharedCache - .removeDatabaseFromCache(StringUtils.normalizeIdentifier(catName), StringUtils.normalizeIdentifier(dbName)); - } - return succ; + return rawStore.dropDatabase(catName, dbName); } @Override public boolean alterDatabase(String catName, String dbName, Database db) throws NoSuchObjectException, MetaException { - boolean succ = rawStore.alterDatabase(catName, dbName, db); - if (succ && !canUseEvents) { - // in case of event based cache update, cache will be updated during commit. - sharedCache - .alterDatabaseInCache(StringUtils.normalizeIdentifier(catName), StringUtils.normalizeIdentifier(dbName), db); - } - return succ; + return rawStore.alterDatabase(catName, dbName, db); } @Override public List getDatabases(String catName, String pattern) throws MetaException { - if (!sharedCache.isDatabaseCachePrewarmed() || (canUseEvents && rawStore.isActiveTransaction())) { - return rawStore.getDatabases(catName, pattern); - } - return sharedCache.listCachedDatabases(catName, pattern); + return rawStore.getDatabases(catName, pattern); } @Override public List getAllDatabases(String catName) throws MetaException { - if (!sharedCache.isDatabaseCachePrewarmed() || (canUseEvents && rawStore.isActiveTransaction())) { - return rawStore.getAllDatabases(catName); - } - return sharedCache.listCachedDatabases(catName); + return rawStore.getAllDatabases(catName); } @Override public boolean createType(Type type) { @@ -1138,38 +921,11 @@ private void validateTableType(Table tbl) { @Override public void createTable(Table tbl) throws InvalidObjectException, MetaException { rawStore.createTable(tbl); - // in case of event based cache update, cache will be updated during commit. - if (canUseEvents) { - return; - } - String catName = normalizeIdentifier(tbl.getCatName()); - String dbName = normalizeIdentifier(tbl.getDbName()); - String tblName = normalizeIdentifier(tbl.getTableName()); - if (!shouldCacheTable(catName, dbName, tblName)) { - return; - } - validateTableType(tbl); - sharedCache.addTableToCache(catName, dbName, tblName, tbl); } @Override public boolean dropTable(String catName, String dbName, String tblName) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException { - boolean succ = rawStore.dropTable(catName, dbName, tblName); - // in case of event based cache update, cache will be updated during commit. 
- if (succ && !canUseEvents) { - catName = normalizeIdentifier(catName); - dbName = normalizeIdentifier(dbName); - tblName = normalizeIdentifier(tblName); - if (!shouldCacheTable(catName, dbName, tblName)) { - return succ; - } - sharedCache.removeTableFromCache(catName, dbName, tblName); - } - return succ; - } - - @Override public Table getTable(String catName, String dbName, String tblName) throws MetaException { - return getTable(catName, dbName, tblName, null); + return rawStore.dropTable(catName, dbName, tblName); } @Override public Table getTable(String catName, String dbName, String tblName, String validWriteIds) @@ -1177,26 +933,38 @@ private void validateTableType(Table tbl) { catName = normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(catName, dbName, tblName) || (canUseEvents && rawStore.isActiveTransaction())) { + ValidWriteIdList writeIdsToRead = validWriteIds!=null?new ValidReaderWriteIdList(validWriteIds):null; + if (writeIdsToRead == null || !shouldCacheTable(catName, dbName, tblName)) { + if (writeIdsToRead == null) { + LOG.debug("writeIdsToRead==null, read " + catName + "." + dbName + "." + tblName + " from db"); + } return rawStore.getTable(catName, dbName, tblName, validWriteIds); } - Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName); + Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName, writeIdsToRead); if (tbl == null) { - // This table is not yet loaded in cache + // no valid entry in cache // If the prewarm thread is working on this table's database, // let's move this table to the top of tblNamesBeingPrewarmed stack, // so that it gets loaded to the cache faster and is available for subsequent requests tblsPendingPrewarm.prioritizeTableForPrewarm(tblName); + if (cacheMiss!=null) cacheMiss.inc(); Table t = rawStore.getTable(catName, dbName, tblName, validWriteIds); if (t != null) { - sharedCache.addTableToCache(catName, dbName, tblName, t); + LOG.debug("cache miss, read " + catName + "." + dbName + "." + tblName + " from db"); } return t; } + + if (!isTransactionalTable(tbl)) { + LOG.debug("read " + catName + "." + dbName + "." + tblName + " from db since it is not transactional"); + return rawStore.getTable(catName, dbName, tblName, validWriteIds); + } + + if (cacheHit!=null) cacheHit.inc(); + if (validWriteIds != null) { - tbl.setParameters( - adjustStatsParamsForGet(tbl.getParameters(), tbl.getParameters(), tbl.getWriteId(), validWriteIds)); + adjustStatsParamsForGet(tbl, validWriteIds); } tbl.unsetPrivileges(); @@ -1221,59 +989,17 @@ private void validateTableType(Table tbl) { } @Override public boolean addPartition(Partition part) throws InvalidObjectException, MetaException { - boolean succ = rawStore.addPartition(part); - // in case of event based cache update, cache will be updated during commit. - if (succ && !canUseEvents) { - String dbName = normalizeIdentifier(part.getDbName()); - String tblName = normalizeIdentifier(part.getTableName()); - String catName = part.isSetCatName() ? 
normalizeIdentifier(part.getCatName()) : DEFAULT_CATALOG_NAME; - if (!shouldCacheTable(catName, dbName, tblName)) { - return succ; - } - sharedCache.addPartitionToCache(catName, dbName, tblName, part); - } - return succ; + return rawStore.addPartition(part); } @Override public boolean addPartitions(String catName, String dbName, String tblName, List parts) throws InvalidObjectException, MetaException { - boolean succ = rawStore.addPartitions(catName, dbName, tblName, parts); - // in case of event based cache update, cache will be updated during commit. - if (succ && !canUseEvents) { - catName = normalizeIdentifier(catName); - dbName = normalizeIdentifier(dbName); - tblName = normalizeIdentifier(tblName); - if (!shouldCacheTable(catName, dbName, tblName)) { - return succ; - } - sharedCache.addPartitionsToCache(catName, dbName, tblName, parts); - } - return succ; + return rawStore.addPartitions(catName, dbName, tblName, parts); } @Override public boolean addPartitions(String catName, String dbName, String tblName, PartitionSpecProxy partitionSpec, boolean ifNotExists) throws InvalidObjectException, MetaException { - boolean succ = rawStore.addPartitions(catName, dbName, tblName, partitionSpec, ifNotExists); - // in case of event based cache update, cache will be updated during commit. - if (succ && !canUseEvents) { - catName = normalizeIdentifier(catName); - dbName = normalizeIdentifier(dbName); - tblName = normalizeIdentifier(tblName); - if (!shouldCacheTable(catName, dbName, tblName)) { - return succ; - } - PartitionSpecProxy.PartitionIterator iterator = partitionSpec.getPartitionIterator(); - while (iterator.hasNext()) { - Partition part = iterator.next(); - sharedCache.addPartitionToCache(catName, dbName, tblName, part); - } - } - return succ; - } - - @Override public Partition getPartition(String catName, String dbName, String tblName, List partVals) - throws MetaException, NoSuchObjectException { - return getPartition(catName, dbName, tblName, partVals, null); + return rawStore.addPartitions(catName, dbName, tblName, partitionSpec, ifNotExists); } @Override public Partition getPartition(String catName, String dbName, String tblName, List partVals, @@ -1281,131 +1007,104 @@ private void validateTableType(Table tbl) { catName = normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(catName, dbName, tblName) || (canUseEvents && rawStore.isActiveTransaction())) { + ValidWriteIdList writeIdsToRead = validWriteIds!=null?new ValidReaderWriteIdList(validWriteIds):null; + if (writeIdsToRead == null || !shouldCacheTable(catName, dbName, tblName)) { + return rawStore.getPartition(catName, dbName, tblName, partVals, validWriteIds); + } + Table table = sharedCache.getTableFromCache(catName, dbName, tblName, writeIdsToRead); + + if (table == null) { + // no valid entry in cache + if (cacheMiss!=null) cacheMiss.inc(); + return rawStore.getPartition(catName, dbName, tblName, partVals, validWriteIds); + } + + + if (!isTransactionalTable(table)) { return rawStore.getPartition(catName, dbName, tblName, partVals, validWriteIds); } + + if (cacheHit!=null) cacheHit.inc(); Partition part = sharedCache.getPartitionFromCache(catName, dbName, tblName, partVals); if (part == null) { // The table containing the partition is not yet loaded in cache return rawStore.getPartition(catName, dbName, tblName, partVals, validWriteIds); } if (validWriteIds != null) { - Table table = sharedCache.getTableFromCache(catName, 
   @Override public Partition getPartition(String catName, String dbName, String tblName, List<String> partVals,
@@ -1281,131 +1007,104 @@ private void validateTableType(Table tbl) {
     catName = normalizeIdentifier(catName);
     dbName = StringUtils.normalizeIdentifier(dbName);
     tblName = StringUtils.normalizeIdentifier(tblName);
-    if (!shouldCacheTable(catName, dbName, tblName) || (canUseEvents && rawStore.isActiveTransaction())) {
+    ValidWriteIdList writeIdsToRead = validWriteIds!=null?new ValidReaderWriteIdList(validWriteIds):null;
+    if (writeIdsToRead == null || !shouldCacheTable(catName, dbName, tblName)) {
+      return rawStore.getPartition(catName, dbName, tblName, partVals, validWriteIds);
+    }
+    Table table = sharedCache.getTableFromCache(catName, dbName, tblName, writeIdsToRead);
+
+    if (table == null) {
+      // no valid entry in cache
+      if (cacheMiss!=null) cacheMiss.inc();
+      return rawStore.getPartition(catName, dbName, tblName, partVals, validWriteIds);
+    }
+
+    if (!isTransactionalTable(table)) {
       return rawStore.getPartition(catName, dbName, tblName, partVals, validWriteIds);
     }
+
+    if (cacheHit!=null) cacheHit.inc();
     Partition part = sharedCache.getPartitionFromCache(catName, dbName, tblName, partVals);
     if (part == null) {
       // The table containing the partition is not yet loaded in cache
       return rawStore.getPartition(catName, dbName, tblName, partVals, validWriteIds);
     }
     if (validWriteIds != null) {
-      Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
-      if (table == null) {
-        // The table containing the partition is not yet loaded in cache
-        return rawStore.getPartition(catName, dbName, tblName, partVals, validWriteIds);
-      }
-      part.setParameters(
-          adjustStatsParamsForGet(table.getParameters(), part.getParameters(), part.getWriteId(), validWriteIds));
+      adjustStatsParamsForGet(table, validWriteIds);
     }

     return part;
   }

   @Override public boolean doesPartitionExist(String catName, String dbName, String tblName,
-      List<FieldSchema> partKeys, List<String> partVals) throws MetaException, NoSuchObjectException {
+      List<FieldSchema> partKeys, List<String> partVals, String validWriteIdList) throws MetaException, NoSuchObjectException {
     catName = normalizeIdentifier(catName);
     dbName = StringUtils.normalizeIdentifier(dbName);
     tblName = StringUtils.normalizeIdentifier(tblName);
-    if (!shouldCacheTable(catName, dbName, tblName) || (canUseEvents && rawStore.isActiveTransaction())) {
-      return rawStore.doesPartitionExist(catName, dbName, tblName, partKeys, partVals);
+    ValidWriteIdList writeIdsToRead = validWriteIdList!=null?new ValidReaderWriteIdList(validWriteIdList):null;
+    if (validWriteIdList == null || !shouldCacheTable(catName, dbName, tblName)) {
+      return rawStore.doesPartitionExist(catName, dbName, tblName, partKeys, partVals, validWriteIdList);
     }
-    Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName);
+    Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName, writeIdsToRead);
+
     if (tbl == null) {
-      // The table containing the partition is not yet loaded in cache
-      return rawStore.doesPartitionExist(catName, dbName, tblName, partKeys, partVals);
+      // no valid entry in cache
+      if (cacheMiss!=null) cacheMiss.inc();
+      return rawStore.doesPartitionExist(catName, dbName, tblName, partKeys, partVals, validWriteIdList);
+    }
+
+    if (!isTransactionalTable(tbl)) {
+      return rawStore.doesPartitionExist(catName, dbName, tblName, partKeys, partVals, validWriteIdList);
     }
+
     return sharedCache.existPartitionFromCache(catName, dbName, tblName, partVals);
   }

   @Override public boolean dropPartition(String catName, String dbName, String tblName, List<String> partVals)
       throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException {
-    boolean succ = rawStore.dropPartition(catName, dbName, tblName, partVals);
-    // in case of event based cache update, cache will be updated during commit.
-    if (succ && !canUseEvents) {
-      catName = normalizeIdentifier(catName);
-      dbName = normalizeIdentifier(dbName);
-      tblName = normalizeIdentifier(tblName);
-      if (!shouldCacheTable(catName, dbName, tblName)) {
-        return succ;
-      }
-      sharedCache.removePartitionFromCache(catName, dbName, tblName, partVals);
-    }
-    return succ;
+    return rawStore.dropPartition(catName, dbName, tblName, partVals);
   }

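A pattern worth naming: every mutation above now delegates straight to rawStore and no longer touches the cache at all. Instead of write-through maintenance, staleness is detected at read time by comparing the cached entry's write-id list against the reader's snapshot. A generic sketch of that design, with illustrative names:

// Sketch of the "stale-by-version" model the simplified mutations rely on:
// writers never update the cache; readers reject entries whose version lags
// the snapshot they need and fall through to the database.
final class VersionedEntry<V> {
  final V value;
  final long version; // stands in for the cached table's write-id high-water mark

  VersionedEntry(V value, long version) {
    this.value = value;
    this.version = version;
  }

  // A reader asking for snapshot `required` uses the entry only if the cached
  // version already covers it; otherwise the raw store is authoritative.
  boolean freshEnoughFor(long required) {
    return version >= required;
  }
}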
   @Override public void dropPartitions(String catName, String dbName, String tblName, List<String> partNames)
       throws MetaException, NoSuchObjectException {
     rawStore.dropPartitions(catName, dbName, tblName, partNames);
-    // in case of event based cache update, cache will be updated during commit.
-    if (canUseEvents) {
-      return;
-    }
-    catName = normalizeIdentifier(catName);
-    dbName = StringUtils.normalizeIdentifier(dbName);
-    tblName = StringUtils.normalizeIdentifier(tblName);
-    if (!shouldCacheTable(catName, dbName, tblName)) {
-      return;
-    }
-    List<List<String>> partVals = new ArrayList<>();
-    for (String partName : partNames) {
-      partVals.add(partNameToVals(partName));
-    }
-    sharedCache.removePartitionsFromCache(catName, dbName, tblName, partVals);
   }

-  @Override public List<Partition> getPartitions(String catName, String dbName, String tblName, int max)
+  @Override public List<Partition> getPartitions(String catName, String dbName, String tblName, int max, String validWriteIdList)
       throws MetaException, NoSuchObjectException {
     catName = normalizeIdentifier(catName);
     dbName = StringUtils.normalizeIdentifier(dbName);
     tblName = StringUtils.normalizeIdentifier(tblName);
-    if (!shouldCacheTable(catName, dbName, tblName) || (canUseEvents && rawStore.isActiveTransaction())) {
-      return rawStore.getPartitions(catName, dbName, tblName, max);
+    ValidWriteIdList writeIdsToRead = validWriteIdList!=null?new ValidReaderWriteIdList(validWriteIdList):null;
+    if (writeIdsToRead==null || !shouldCacheTable(catName, dbName, tblName)) {
+      return rawStore.getPartitions(catName, dbName, tblName, max, validWriteIdList);
     }
-    Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName);
+    Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName, writeIdsToRead);
+
     if (tbl == null) {
-      // The table containing the partitions is not yet loaded in cache
-      return rawStore.getPartitions(catName, dbName, tblName, max);
+      // no valid entry in cache
+      if (cacheMiss!=null) cacheMiss.inc();
+      return rawStore.getPartitions(catName, dbName, tblName, max, validWriteIdList);
+    }
+
+    if (!isTransactionalTable(tbl)) {
+      return rawStore.getPartitions(catName, dbName, tblName, max, validWriteIdList);
     }
+
+    if (cacheHit!=null) cacheHit.inc();
     List<Partition> parts = sharedCache.listCachedPartitions(catName, dbName, tblName, max);
     return parts;
   }

   @Override public Map<String, String> getPartitionLocations(String catName, String dbName, String tblName,
-      String baseLocationToNotShow, int max) {
-    return rawStore.getPartitionLocations(catName, dbName, tblName, baseLocationToNotShow, max);
+      String baseLocationToNotShow, int max, String validWriteIdList) {
+    return rawStore.getPartitionLocations(catName, dbName, tblName, baseLocationToNotShow, max, validWriteIdList);
   }

   @Override public Table alterTable(String catName, String dbName, String tblName, Table newTable,
       String validWriteIds) throws InvalidObjectException, MetaException {
-    newTable = rawStore.alterTable(catName, dbName, tblName, newTable, validWriteIds);
-    // in case of event based cache update, cache will be updated during commit.
-    if (canUseEvents) {
-      return newTable;
-    }
-    catName = normalizeIdentifier(catName);
-    dbName = normalizeIdentifier(dbName);
-    tblName = normalizeIdentifier(tblName);
-    String newTblName = normalizeIdentifier(newTable.getTableName());
-    if (!shouldCacheTable(catName, dbName, tblName) && !shouldCacheTable(catName, dbName, newTblName)) {
-      return newTable;
-    }
-    Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName);
-    if (tbl == null) {
-      // The table is not yet loaded in cache
-      return newTable;
-    }
-    if (shouldCacheTable(catName, dbName, tblName) && shouldCacheTable(catName, dbName, newTblName)) {
-      // If old table is in the cache and the new table can also be cached
-      sharedCache.alterTableInCache(catName, dbName, tblName, newTable);
-    } else if (!shouldCacheTable(catName, dbName, tblName) && shouldCacheTable(catName, dbName, newTblName)) {
-      // If old table is *not* in the cache but the new table can be cached
-      sharedCache.addTableToCache(catName, dbName, newTblName, newTable);
-    } else if (shouldCacheTable(catName, dbName, tblName) && !shouldCacheTable(catName, dbName, newTblName)) {
-      // If old table is in the cache but the new table *cannot* be cached
-      sharedCache.removeTableFromCache(catName, dbName, tblName);
-    }
-    return newTable;
+    return rawStore.alterTable(catName, dbName, tblName, newTable, validWriteIds);
   }

   @Override public void updateCreationMetadata(String catName, String dbname, String tablename, CreationMetadata cm)
@@ -1439,40 +1138,7 @@ private void validateTableType(Table tbl) {

   @Override public List<Table> getTableObjectsByName(String catName, String dbName, List<String> tblNames)
       throws MetaException, UnknownDBException {
-    if (canUseEvents && rawStore.isActiveTransaction()) {
-      return rawStore.getTableObjectsByName(catName, dbName, tblNames);
-    }
-    dbName = normalizeIdentifier(dbName);
-    catName = normalizeIdentifier(catName);
-    boolean missSomeInCache = false;
-    for (String tblName : tblNames) {
-      tblName = normalizeIdentifier(tblName);
-      if (!shouldCacheTable(catName, dbName, tblName)) {
-        missSomeInCache = true;
-        break;
-      }
-    }
-    if (!isCachePrewarmed.get() || missSomeInCache) {
-      return rawStore.getTableObjectsByName(catName, dbName, tblNames);
-    }
-    Database db = sharedCache.getDatabaseFromCache(catName, dbName);
-    if (db == null) {
-      throw new UnknownDBException("Could not find database " + dbName);
-    }
-    List<Table> tables = new ArrayList<>();
-    for (String tblName : tblNames) {
-      tblName = normalizeIdentifier(tblName);
-      Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName);
-      if (tbl == null) {
-        tbl = rawStore.getTable(catName, dbName, tblName);
-        sharedCache.addTableToCache(catName, dbName, tblName, tbl);
-      }
-      if (tbl != null) {
-        tables.add(tbl);
-      }
-      tables.add(tbl);
-    }
-    return tables;
+    return rawStore.getTableObjectsByName(catName, dbName, tblNames);
   }

   @Override public List<String> getAllTables(String catName, String dbName) throws MetaException {
@@ -1486,19 +1152,28 @@ private void validateTableType(Table tbl) {
     return rawStore.listTableNamesByFilter(catName, dbName, filter, maxTables);
   }

-  @Override public List<String> listPartitionNames(String catName, String dbName, String tblName, short maxParts)
+  @Override public List<String> listPartitionNames(String catName, String dbName, String tblName, short maxParts, String validWriteIdList)
       throws MetaException {
     catName = StringUtils.normalizeIdentifier(catName);
     dbName = StringUtils.normalizeIdentifier(dbName);
     tblName = StringUtils.normalizeIdentifier(tblName);
-    if (!shouldCacheTable(catName, dbName, tblName) || (canUseEvents && rawStore.isActiveTransaction())) {
-      return rawStore.listPartitionNames(catName, dbName, tblName, maxParts);
+    ValidWriteIdList writeIdsToRead = validWriteIdList!=null?new ValidReaderWriteIdList(validWriteIdList):null;
+    if (writeIdsToRead==null || !shouldCacheTable(catName, dbName, tblName)) {
+      return rawStore.listPartitionNames(catName, dbName, tblName, maxParts, validWriteIdList);
     }
-    Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName);
+    Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName, writeIdsToRead);
+
     if (tbl == null) {
-      // The table is not yet loaded in cache
-      return rawStore.listPartitionNames(catName, dbName, tblName, maxParts);
+      // no valid entry in cache
+      if (cacheMiss!=null) cacheMiss.inc();
+      return rawStore.listPartitionNames(catName, dbName, tblName, maxParts, validWriteIdList);
+    }
+
+    if (!isTransactionalTable(tbl)) {
+      return rawStore.listPartitionNames(catName, dbName, tblName, maxParts, validWriteIdList);
     }
+
+    if (cacheHit!=null) cacheHit.inc();
     List<String> partitionNames = new ArrayList<>();
     int count = 0;
     for (Partition part : sharedCache.listCachedPartitions(catName, dbName, tblName, maxParts)) {
@@ -1511,43 +1186,19 @@ private void validateTableType(Table tbl) {

   @Override public PartitionValuesResponse listPartitionValues(String catName, String dbName, String tblName,
       List<FieldSchema> cols, boolean applyDistinct, String filter, boolean ascending, List<FieldSchema> order,
-      long maxParts) throws MetaException {
+      long maxParts, String validWriteIdList) throws MetaException {
     throw new UnsupportedOperationException();
   }

   @Override public Partition alterPartition(String catName, String dbName, String tblName, List<String> partVals,
       Partition newPart, String validWriteIds) throws InvalidObjectException, MetaException {
-    newPart = rawStore.alterPartition(catName, dbName, tblName, partVals, newPart, validWriteIds);
-    // in case of event based cache update, cache will be updated during commit.
-    if (canUseEvents) {
-      return newPart;
-    }
-    catName = normalizeIdentifier(catName);
-    dbName = normalizeIdentifier(dbName);
-    tblName = normalizeIdentifier(tblName);
-    if (!shouldCacheTable(catName, dbName, tblName)) {
-      return newPart;
-    }
-    sharedCache.alterPartitionInCache(catName, dbName, tblName, partVals, newPart);
-    return newPart;
+    return rawStore.alterPartition(catName, dbName, tblName, partVals, newPart, validWriteIds);
   }

   @Override public List<Partition> alterPartitions(String catName, String dbName, String tblName,
       List<List<String>> partValsList, List<Partition> newParts, long writeId, String validWriteIds)
       throws InvalidObjectException, MetaException {
-    newParts = rawStore.alterPartitions(catName, dbName, tblName, partValsList, newParts, writeId, validWriteIds);
-    // in case of event based cache update, cache will be updated during commit.
-    if (canUseEvents) {
-      return newParts;
-    }
-    catName = normalizeIdentifier(catName);
-    dbName = normalizeIdentifier(dbName);
-    tblName = normalizeIdentifier(tblName);
-    if (!shouldCacheTable(catName, dbName, tblName)) {
-      return newParts;
-    }
-    sharedCache.alterPartitionsInCache(catName, dbName, tblName, partValsList, newParts);
-    return newParts;
+    return rawStore.alterPartitions(catName, dbName, tblName, partValsList, newParts, writeId, validWriteIds);
   }

   private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr, String defaultPartName, short maxParts,
@@ -1567,33 +1218,42 @@ private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr, Str

   @Override
   // TODO: implement using SharedCache
   public List<Partition> getPartitionsByFilter(String catName, String dbName, String tblName, String filter,
-      short maxParts) throws MetaException, NoSuchObjectException {
-    return rawStore.getPartitionsByFilter(catName, dbName, tblName, filter, maxParts);
+      short maxParts, String validWriteIdList) throws MetaException, NoSuchObjectException {
    return rawStore.getPartitionsByFilter(catName, dbName, tblName, filter, maxParts, validWriteIdList);
   }

   @Override
   /**
    * getPartitionSpecsByFilterAndProjection interface is currently non-cacheable.
    */
   public List<Partition> getPartitionSpecsByFilterAndProjection(Table table,
-      GetPartitionsProjectionSpec projectionSpec, GetPartitionsFilterSpec filterSpec)
+      GetPartitionsProjectionSpec projectionSpec, GetPartitionsFilterSpec filterSpec, String validWriteIdList)
       throws MetaException, NoSuchObjectException {
-    return rawStore.getPartitionSpecsByFilterAndProjection(table, projectionSpec, filterSpec);
+    return rawStore.getPartitionSpecsByFilterAndProjection(table, projectionSpec, filterSpec, validWriteIdList);
   }

   @Override public boolean getPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr,
-      String defaultPartitionName, short maxParts, List<Partition> result) throws TException {
+      String defaultPartitionName, short maxParts, List<Partition> result, String validWriteIdList) throws TException {
     catName = StringUtils.normalizeIdentifier(catName);
     dbName = StringUtils.normalizeIdentifier(dbName);
     tblName = StringUtils.normalizeIdentifier(tblName);
-    if (!shouldCacheTable(catName, dbName, tblName) || (canUseEvents && rawStore.isActiveTransaction())) {
-      return rawStore.getPartitionsByExpr(catName, dbName, tblName, expr, defaultPartitionName, maxParts, result);
+    ValidWriteIdList writeIdsToRead = validWriteIdList!=null?new ValidReaderWriteIdList(validWriteIdList):null;
+    if (writeIdsToRead==null || !shouldCacheTable(catName, dbName, tblName)) {
+      return rawStore.getPartitionsByExpr(catName, dbName, tblName, expr, defaultPartitionName, maxParts, result, validWriteIdList);
     }
     List<String> partNames = new LinkedList<>();
-    Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
+    Table table = sharedCache.getTableFromCache(catName, dbName, tblName, writeIdsToRead);
+
     if (table == null) {
-      // The table is not yet loaded in cache
-      return rawStore.getPartitionsByExpr(catName, dbName, tblName, expr, defaultPartitionName, maxParts, result);
+      // no valid entry in cache
+      if (cacheMiss!=null) cacheMiss.inc();
+      return rawStore.getPartitionsByExpr(catName, dbName, tblName, expr, defaultPartitionName, maxParts, result, validWriteIdList);
+    }
+
+    if (!isTransactionalTable(table)) {
+      return rawStore.getPartitionsByExpr(catName, dbName, tblName, expr, defaultPartitionName, maxParts, result, validWriteIdList);
     }
+
+    if (cacheHit!=null) cacheHit.inc();
     boolean hasUnknownPartitions =
         getPartitionNamesPrunedByExprNoTxn(table, expr, defaultPartitionName, maxParts, partNames, sharedCache);
     for (String partName : partNames) {
@@ -1604,26 +1264,34 @@ private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr, Str
     return hasUnknownPartitions;
   }

-  @Override public int getNumPartitionsByFilter(String catName, String dbName, String tblName, String filter)
+  @Override public int getNumPartitionsByFilter(String catName, String dbName, String tblName, String filter, String validWriteIdList)
       throws MetaException, NoSuchObjectException {
-    return rawStore.getNumPartitionsByFilter(catName, dbName, tblName, filter);
+    return rawStore.getNumPartitionsByFilter(catName, dbName, tblName, filter, validWriteIdList);
   }

-  @Override public int getNumPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr)
+  @Override public int getNumPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr, String validWriteIdList)
       throws MetaException, NoSuchObjectException {
     catName = normalizeIdentifier(catName);
     dbName = StringUtils.normalizeIdentifier(dbName);
     tblName = StringUtils.normalizeIdentifier(tblName);
-    if (!shouldCacheTable(catName, dbName, tblName) || (canUseEvents && rawStore.isActiveTransaction())) {
-      return rawStore.getNumPartitionsByExpr(catName, dbName, tblName, expr);
+    ValidWriteIdList writeIdsToRead = validWriteIdList!=null?new ValidReaderWriteIdList(validWriteIdList):null;
+    if (writeIdsToRead==null || !shouldCacheTable(catName, dbName, tblName)) {
+      return rawStore.getNumPartitionsByExpr(catName, dbName, tblName, expr, validWriteIdList);
     }
     String defaultPartName = MetastoreConf.getVar(getConf(), ConfVars.DEFAULTPARTITIONNAME);
     List<String> partNames = new LinkedList<>();
-    Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
+    Table table = sharedCache.getTableFromCache(catName, dbName, tblName, writeIdsToRead);
+
     if (table == null) {
-      // The table is not yet loaded in cache
-      return rawStore.getNumPartitionsByExpr(catName, dbName, tblName, expr);
+      // no valid entry in cache
+      if (cacheMiss!=null) cacheMiss.inc();
+      return rawStore.getNumPartitionsByExpr(catName, dbName, tblName, expr, validWriteIdList);
+    }
+
+    if (!isTransactionalTable(table)) {
+      return rawStore.getNumPartitionsByExpr(catName, dbName, tblName, expr, validWriteIdList);
     }
+
     getPartitionNamesPrunedByExprNoTxn(table, expr, defaultPartName, Short.MAX_VALUE, partNames, sharedCache);
     return partNames.size();
   }
@@ -1641,18 +1309,27 @@ private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr, Str
   }

   @Override public List<Partition> getPartitionsByNames(String catName, String dbName, String tblName,
-      List<String> partNames) throws MetaException, NoSuchObjectException {
+      List<String> partNames, String validWriteIdList) throws MetaException, NoSuchObjectException {
     catName = StringUtils.normalizeIdentifier(catName);
     dbName = StringUtils.normalizeIdentifier(dbName);
     tblName = StringUtils.normalizeIdentifier(tblName);
-    if (!shouldCacheTable(catName, dbName, tblName) || (canUseEvents && rawStore.isActiveTransaction())) {
-      return rawStore.getPartitionsByNames(catName, dbName, tblName, partNames);
+    ValidWriteIdList writeIdsToRead = validWriteIdList!=null?new ValidReaderWriteIdList(validWriteIdList):null;
+    if (writeIdsToRead==null || !shouldCacheTable(catName, dbName, tblName)) {
+      return rawStore.getPartitionsByNames(catName, dbName, tblName, partNames, validWriteIdList);
     }
-    Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
+    Table table = sharedCache.getTableFromCache(catName, dbName, tblName, writeIdsToRead);
+
     if (table == null) {
-      // The table is not yet loaded in cache
-      return rawStore.getPartitionsByNames(catName, dbName, tblName, partNames);
+      // no valid entry in cache
+      if (cacheMiss!=null) cacheMiss.inc();
+      return rawStore.getPartitionsByNames(catName, dbName, tblName, partNames, validWriteIdList);
    }
+
+    if (!isTransactionalTable(table)) {
+      return rawStore.getPartitionsByNames(catName, dbName, tblName, partNames, validWriteIdList);
+    }
+
+    if (cacheHit!=null) cacheHit.inc();
     List<Partition> partitions = new ArrayList<>();
     for (String partName : partNames) {
       Partition part = sharedCache.getPartitionFromCache(catName, dbName, tblName, partNameToVals(partName));
@@ -1793,18 +1470,27 @@ private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr, Str
   }

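getNumPartitionsByExpr above answers a count query from the cache by pruning the cached partition-name list and returning the pruned size, rather than asking the database to count. A trivial standalone sketch of that counting strategy; countMatching and the predicate are illustrative, while the real pruning is done by getPartitionNamesPrunedByExprNoTxn against the serialized filter expression:

import java.util.List;
import java.util.function.Predicate;

// Count partitions by filtering names client-side instead of issuing a
// COUNT query; cheap when the full name list is already cached.
final class PartitionPruning {
  static int countMatching(List<String> cachedPartNames, Predicate<String> exprMatches) {
    return (int) cachedPartNames.stream().filter(exprMatches).count();
  }
}

// e.g. countMatching(names, n -> n.startsWith("ds=2019")) for a hypothetical
// filter on the ds partition column.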
   @Override public Partition getPartitionWithAuth(String catName, String dbName, String tblName, List<String> partVals,
-      String userName, List<String> groupNames) throws MetaException, NoSuchObjectException, InvalidObjectException {
+      String userName, List<String> groupNames, String validWriteIdList) throws MetaException, NoSuchObjectException, InvalidObjectException {
     catName = StringUtils.normalizeIdentifier(catName);
     dbName = StringUtils.normalizeIdentifier(dbName);
     tblName = StringUtils.normalizeIdentifier(tblName);
-    if (!shouldCacheTable(catName, dbName, tblName) || (canUseEvents && rawStore.isActiveTransaction())) {
-      return rawStore.getPartitionWithAuth(catName, dbName, tblName, partVals, userName, groupNames);
+    ValidWriteIdList writeIdsToRead = validWriteIdList!=null?new ValidReaderWriteIdList(validWriteIdList):null;
+    if (writeIdsToRead==null || !shouldCacheTable(catName, dbName, tblName)) {
+      return rawStore.getPartitionWithAuth(catName, dbName, tblName, partVals, userName, groupNames, validWriteIdList);
     }
-    Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
+    Table table = sharedCache.getTableFromCache(catName, dbName, tblName, writeIdsToRead);
+
     if (table == null) {
-      // The table is not yet loaded in cache
-      return rawStore.getPartitionWithAuth(catName, dbName, tblName, partVals, userName, groupNames);
+      // no valid entry in cache
+      if (cacheMiss!=null) cacheMiss.inc();
+      return rawStore.getPartitionWithAuth(catName, dbName, tblName, partVals, userName, groupNames, validWriteIdList);
+    }
+
+    if (!isTransactionalTable(table)) {
+      return rawStore.getPartitionWithAuth(catName, dbName, tblName, partVals, userName, groupNames, validWriteIdList);
     }
+
+    if (cacheHit!=null) cacheHit.inc();
     Partition p = sharedCache.getPartitionFromCache(catName, dbName, tblName, partVals);
     if (p != null) {
       String partName = Warehouse.makePartName(table.getPartitionKeys(), partVals);
@@ -1817,18 +1503,27 @@ private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr, Str
   }

   @Override public List<Partition> getPartitionsWithAuth(String catName, String dbName, String tblName, short maxParts,
-      String userName, List<String> groupNames) throws MetaException, NoSuchObjectException, InvalidObjectException {
+      String userName, List<String> groupNames, String validWriteIdList) throws MetaException, NoSuchObjectException, InvalidObjectException {
     catName = StringUtils.normalizeIdentifier(catName);
     dbName = StringUtils.normalizeIdentifier(dbName);
     tblName = StringUtils.normalizeIdentifier(tblName);
-    if (!shouldCacheTable(catName, dbName, tblName) || (canUseEvents && rawStore.isActiveTransaction())) {
-      return rawStore.getPartitionsWithAuth(catName, dbName, tblName, maxParts, userName, groupNames);
+    ValidWriteIdList writeIdsToRead = validWriteIdList!=null?new ValidReaderWriteIdList(validWriteIdList):null;
+    if (writeIdsToRead==null || !shouldCacheTable(catName, dbName, tblName)) {
+      return rawStore.getPartitionsWithAuth(catName, dbName, tblName, maxParts, userName, groupNames, validWriteIdList);
     }
-    Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
+    Table table = sharedCache.getTableFromCache(catName, dbName, tblName, writeIdsToRead);
+
     if (table == null) {
-      // The table is not yet loaded in cache
-      return rawStore.getPartitionsWithAuth(catName, dbName, tblName, maxParts, userName, groupNames);
+      // no valid entry in cache
+      if (cacheMiss!=null) cacheMiss.inc();
+      return rawStore.getPartitionsWithAuth(catName, dbName, tblName, maxParts, userName, groupNames, validWriteIdList);
     }
+
+    if (!isTransactionalTable(table)) {
+      return rawStore.getPartitionsWithAuth(catName, dbName, tblName, maxParts, userName, groupNames, validWriteIdList);
+    }
+
+    if (cacheHit!=null) cacheHit.inc();
     List<Partition> partitions = new ArrayList<>();
     int count = 0;
     for (Partition part : sharedCache.listCachedPartitions(catName, dbName, tblName, maxParts)) {
@@ -1845,18 +1540,26 @@ private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr, Str
   }

   @Override public List<String> listPartitionNamesPs(String catName, String dbName, String tblName,
-      List<String> partSpecs, short maxParts) throws MetaException, NoSuchObjectException {
+      List<String> partSpecs, short maxParts, String validWriteIdList) throws MetaException, NoSuchObjectException {
     catName = StringUtils.normalizeIdentifier(catName);
     dbName = StringUtils.normalizeIdentifier(dbName);
     tblName = StringUtils.normalizeIdentifier(tblName);
-    if (!shouldCacheTable(catName, dbName, tblName) || (canUseEvents && rawStore.isActiveTransaction())) {
-      return rawStore.listPartitionNamesPs(catName, dbName, tblName, partSpecs, maxParts);
+    ValidWriteIdList writeIdsToRead = validWriteIdList!=null?new ValidReaderWriteIdList(validWriteIdList):null;
+    if (writeIdsToRead==null || !shouldCacheTable(catName, dbName, tblName)) {
+      return rawStore.listPartitionNamesPs(catName, dbName, tblName, partSpecs, maxParts, validWriteIdList);
     }
-    Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
+    Table table = sharedCache.getTableFromCache(catName, dbName, tblName, writeIdsToRead);
+
     if (table == null) {
-      // The table is not yet loaded in cache
-      return rawStore.listPartitionNamesPs(catName, dbName, tblName, partSpecs, maxParts);
+      // no valid entry in cache
+      if (cacheMiss!=null) cacheMiss.inc();
+      return rawStore.listPartitionNamesPs(catName, dbName, tblName, partSpecs, maxParts, validWriteIdList);
+    }
+
+    if (!isTransactionalTable(table)) {
+      return rawStore.listPartitionNamesPs(catName, dbName, tblName, partSpecs, maxParts, validWriteIdList);
     }
+
     String partNameMatcher = getPartNameMatcher(table, partSpecs);
     List<String> partitionNames = new ArrayList<>();
     List<Partition> allPartitions = sharedCache.listCachedPartitions(catName, dbName, tblName, maxParts);
@@ -1872,19 +1575,28 @@ private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr, Str
   }

   @Override public List<Partition> listPartitionsPsWithAuth(String catName, String dbName, String tblName,
-      List<String> partSpecs, short maxParts, String userName, List<String> groupNames)
+      List<String> partSpecs, short maxParts, String userName, List<String> groupNames, String validWriteIdList)
       throws MetaException, InvalidObjectException, NoSuchObjectException {
     catName = StringUtils.normalizeIdentifier(catName);
     dbName = StringUtils.normalizeIdentifier(dbName);
     tblName = StringUtils.normalizeIdentifier(tblName);
-    if (!shouldCacheTable(catName, dbName, tblName) || (canUseEvents && rawStore.isActiveTransaction())) {
-      return rawStore.listPartitionsPsWithAuth(catName, dbName, tblName, partSpecs, maxParts, userName, groupNames);
+    ValidWriteIdList writeIdsToRead = validWriteIdList!=null?new ValidReaderWriteIdList(validWriteIdList):null;
+    if (writeIdsToRead==null || !shouldCacheTable(catName, dbName, tblName)) {
+      return rawStore.listPartitionsPsWithAuth(catName, dbName, tblName, partSpecs, maxParts, userName, groupNames, validWriteIdList);
     }
-    Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
+    Table table = sharedCache.getTableFromCache(catName, dbName, tblName, writeIdsToRead);
+
     if (table == null) {
-      // The table is not yet loaded in cache
-      return rawStore.listPartitionsPsWithAuth(catName, dbName, tblName, partSpecs, maxParts, userName, groupNames);
+      // no valid entry in cache
+      if (cacheMiss!=null) cacheMiss.inc();
+      return rawStore.listPartitionsPsWithAuth(catName, dbName, tblName, partSpecs, maxParts, userName, groupNames, validWriteIdList);
     }
+
+    if (!isTransactionalTable(table)) {
+      return rawStore.listPartitionsPsWithAuth(catName, dbName, tblName, partSpecs, maxParts, userName, groupNames, validWriteIdList);
+    }
+
+    if (cacheHit!=null) cacheHit.inc();
     String partNameMatcher = getPartNameMatcher(table, partSpecs);
     List<Partition> partitions = new ArrayList<>();
     List<Partition> allPartitions = sharedCache.listCachedPartitions(catName, dbName, tblName, maxParts);
@@ -1924,21 +1636,21 @@ private String getPartNameMatcher(Table table, List<String> partSpecs) throws Me
   }

   // Note: ideally this should be above both CachedStore and ObjectStore.
-  private Map<String, String> adjustStatsParamsForGet(Map<String, String> tableParams, Map<String, String> params,
-      long statsWriteId, String validWriteIds) throws MetaException {
-    if (!TxnUtils.isTransactionalTable(tableParams)) {
-      return params; // Not a txn table.
-    }
-    if (areTxnStatsSupported && ((validWriteIds == null) || ObjectStore
-        .isCurrentStatsValidForTheQuery(params, statsWriteId, validWriteIds, false))) {
-      // Valid stats are supported for txn tables, and either no verification was requested by the
-      // caller, or the verification has succeeded.
-      return params;
+  private void adjustStatsParamsForGet(Table tbl, String validWriteIds) throws MetaException {
+    boolean isTxn = tbl != null && TxnUtils.isTransactionalTable(tbl);
+    if (isTxn && !areTxnStatsSupported) {
+      StatsSetupConst.setBasicStatsState(tbl.getParameters(), StatsSetupConst.FALSE);
+      LOG.info("Removed COLUMN_STATS_ACCURATE from Table's parameters.");
+    } else if (isTxn && tbl.getPartitionKeysSize() == 0) {
+      if (ObjectStore.isCurrentStatsValidForTheQuery(tbl.getParameters(), tbl.getWriteId(), validWriteIds, false)) {
+        tbl.setIsStatsCompliant(true);
+      } else {
+        tbl.setIsStatsCompliant(false);
+        // Do not make the following state persistent since it is query-specific (not global).
+        StatsSetupConst.setBasicStatsState(tbl.getParameters(), StatsSetupConst.FALSE);
+        LOG.info("Removed COLUMN_STATS_ACCURATE from Table's parameters.");
+      }
     }
-    // Clone the map to avoid affecting the cached value.
-    params = new HashMap<>(params);
-    StatsSetupConst.setBasicStatsState(params, StatsSetupConst.FALSE);
-    return params;
   }

   // Note: ideally this should be above both CachedStore and ObjectStore.
@@ -1959,60 +1671,9 @@ public static ColumnStatistics adjustColStatForGet(Map<String, String> tablePara
     return colStat;
   }

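The rewritten adjustStatsParamsForGet collapses to a small decision table: non-transactional tables are untouched; transactional tables lose the COLUMN_STATS_ACCURATE flag when txn stats are unsupported; and unpartitioned transactional tables get the isStatsCompliant flag set from the write-id validity check. A condensed standalone sketch, where statsValidForQuery stands in for ObjectStore.isCurrentStatsValidForTheQuery:

// Decision table mirroring the logic above; names are illustrative.
final class StatsCompliance {
  enum Outcome { UNTOUCHED, STATS_MARKED_INACCURATE, COMPLIANT, NON_COMPLIANT_MARKED_INACCURATE }

  static Outcome decide(boolean isTxnTable, boolean txnStatsSupported,
      boolean isPartitioned, boolean statsValidForQuery) {
    if (!isTxnTable) {
      return Outcome.UNTOUCHED;                 // non-transactional: no change
    }
    if (!txnStatsSupported) {
      return Outcome.STATS_MARKED_INACCURATE;   // clear COLUMN_STATS_ACCURATE
    }
    if (isPartitioned) {
      return Outcome.UNTOUCHED;                 // per-partition handling elsewhere
    }
    return statsValidForQuery
        ? Outcome.COMPLIANT                     // tbl.setIsStatsCompliant(true)
        : Outcome.NON_COMPLIANT_MARKED_INACCURATE; // flag false; change is query-local only
  }
}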
-  private static void updateTableColumnsStatsInternal(Configuration conf, ColumnStatistics colStats,
-      Map<String, String> newParams, String validWriteIds, long writeId) throws MetaException {
-    String catName = colStats.getStatsDesc().isSetCatName() ? normalizeIdentifier(
-        colStats.getStatsDesc().getCatName()) : getDefaultCatalog(conf);
-    String dbName = normalizeIdentifier(colStats.getStatsDesc().getDbName());
-    String tblName = normalizeIdentifier(colStats.getStatsDesc().getTableName());
-    if (!shouldCacheTable(catName, dbName, tblName)) {
-      return;
-    }
-    Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
-    if (table == null) {
-      // The table is not yet loaded in cache
-      return;
-    }
-
-    boolean isTxn = TxnUtils.isTransactionalTable(table.getParameters());
-    if (isTxn && validWriteIds != null) {
-      if (!areTxnStatsSupported) {
-        StatsSetupConst.setBasicStatsState(newParams, StatsSetupConst.FALSE);
-      } else {
-        String errorMsg = ObjectStore
-            .verifyStatsChangeCtx(TableName.getDbTable(dbName, tblName), table.getParameters(), newParams, writeId,
-                validWriteIds, true);
-        if (errorMsg != null) {
-          throw new MetaException(errorMsg);
-        }
-        if (!ObjectStore.isCurrentStatsValidForTheQuery(newParams, table.getWriteId(), validWriteIds, true)) {
-          // Make sure we set the flag to invalid regardless of the current value.
-          StatsSetupConst.setBasicStatsState(newParams, StatsSetupConst.FALSE);
-          LOG.info("Removed COLUMN_STATS_ACCURATE from the parameters of the table " + table.getDbName() + "." + table
-              .getTableName());
-        }
-      }
-    }
-
-    table.setWriteId(writeId);
-    table.setParameters(newParams);
-    sharedCache.alterTableInCache(catName, dbName, tblName, table);
-    sharedCache.updateTableColStatsInCache(catName, dbName, tblName, colStats.getStatsObj());
-  }
-
   @Override public Map<String, String> updateTableColumnStatistics(ColumnStatistics colStats, String validWriteIds,
       long writeId) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
-    Map<String, String> newParams = rawStore.updateTableColumnStatistics(colStats, validWriteIds, writeId);
-    // in case of event based cache update, cache will be updated during commit.
-    if (newParams != null && !canUseEvents) {
-      updateTableColumnsStatsInternal(conf, colStats, newParams, null, writeId);
-    }
-    return newParams;
-  }
-
-  @Override public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tblName,
-      List<String> colNames) throws MetaException, NoSuchObjectException {
-    return getTableColumnStatistics(catName, dbName, tblName, colNames, null);
+    return rawStore.updateTableColumnStatistics(colStats, validWriteIds, writeId);
   }

   @Override public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tblName,
@@ -2020,14 +1681,23 @@ private static void updateTableColumnsStatsInternal(Configuration conf, ColumnSt
     catName = StringUtils.normalizeIdentifier(catName);
     dbName = StringUtils.normalizeIdentifier(dbName);
     tblName = StringUtils.normalizeIdentifier(tblName);
-    if (!shouldCacheTable(catName, dbName, tblName)) {
+    ValidWriteIdList writeIdsToRead = validWriteIds!=null?new ValidReaderWriteIdList(validWriteIds):null;
+    if (validWriteIds==null || !shouldCacheTable(catName, dbName, tblName)) {
       return rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames, validWriteIds);
     }
-    Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
+    Table table = sharedCache.getTableFromCache(catName, dbName, tblName, writeIdsToRead);
+
     if (table == null) {
-      // The table is not yet loaded in cache
+      // no valid entry in cache
+      if (cacheMiss!=null) cacheMiss.inc();
       return rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames, validWriteIds);
     }
+
+    if (!isTransactionalTable(table)) {
+      return rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames, validWriteIds);
+    }
+
+    if (cacheHit!=null) cacheHit.inc();
     ColumnStatistics columnStatistics =
         sharedCache.getTableColStatsFromCache(catName, dbName, tblName, colNames, validWriteIds, areTxnStatsSupported);
     if (columnStatistics == null) {
@@ -2040,50 +1710,40 @@ private static void updateTableColumnsStatsInternal(Configuration conf, ColumnSt

   @Override public boolean deleteTableColumnStatistics(String catName, String dbName, String tblName, String colName)
       throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
-    boolean succ = rawStore.deleteTableColumnStatistics(catName, dbName, tblName, colName);
-    // in case of event based cache update, cache is updated during commit txn
-    if (succ && !canUseEvents) {
-      catName = normalizeIdentifier(catName);
-      dbName = normalizeIdentifier(dbName);
-      tblName = normalizeIdentifier(tblName);
-      if (!shouldCacheTable(catName, dbName, tblName)) {
-        return succ;
-      }
-      sharedCache.removeTableColStatsFromCache(catName, dbName, tblName, colName);
-    }
-    return succ;
+    return rawStore.deleteTableColumnStatistics(catName, dbName, tblName, colName);
   }

   @Override public Map<String, String> updatePartitionColumnStatistics(ColumnStatistics colStats, List<String> partVals,
       String validWriteIds, long writeId)
       throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
-    Map<String, String> newParams =
-        rawStore.updatePartitionColumnStatistics(colStats, partVals, validWriteIds, writeId);
-    // in case of event based cache update, cache is updated during commit txn
-    if (newParams != null && !canUseEvents) {
-      String catName = colStats.getStatsDesc().isSetCatName() ? normalizeIdentifier(
-          colStats.getStatsDesc().getCatName()) : DEFAULT_CATALOG_NAME;
-      String dbName = normalizeIdentifier(colStats.getStatsDesc().getDbName());
-      String tblName = normalizeIdentifier(colStats.getStatsDesc().getTableName());
-      if (!shouldCacheTable(catName, dbName, tblName)) {
-        return newParams;
-      }
-      Partition part = getPartition(catName, dbName, tblName, partVals);
-      part.setParameters(newParams);
-      sharedCache.alterPartitionInCache(catName, dbName, tblName, partVals, part);
-      sharedCache.updatePartitionColStatsInCache(catName, dbName, tblName, partVals, colStats.getStatsObj());
-    }
-    return newParams;
-  }
-
-  @Override public List<ColumnStatistics> getPartitionColumnStatistics(String catName, String dbName, String tblName,
-      List<String> partNames, List<String> colNames) throws MetaException, NoSuchObjectException {
-    return getPartitionColumnStatistics(catName, dbName, tblName, partNames, colNames, null);
+    return rawStore.updatePartitionColumnStatistics(colStats, partVals, validWriteIds, writeId);
   }

   @Override public List<ColumnStatistics> getPartitionColumnStatistics(String catName, String dbName, String tblName,
       List<String> partNames, List<String> colNames, String writeIdList)
       throws MetaException, NoSuchObjectException {
+    catName = StringUtils.normalizeIdentifier(catName);
+    dbName = StringUtils.normalizeIdentifier(dbName);
+    tblName = StringUtils.normalizeIdentifier(tblName);
+    ValidWriteIdList writeIdsToRead = writeIdList!=null?new ValidReaderWriteIdList(writeIdList):null;
+    if (writeIdsToRead==null || !shouldCacheTable(catName, dbName, tblName)) {
+      return rawStore.getPartitionColumnStatistics(catName, dbName, tblName, partNames, colNames, writeIdList);
+    }
+
+    Table table = sharedCache.getTableFromCache(catName, dbName, tblName, writeIdsToRead);
+
+    if (table == null) {
+      // no valid entry in cache
+      if (cacheMiss!=null) cacheMiss.inc();
+      return rawStore.getPartitionColumnStatistics(catName, dbName, tblName, partNames, colNames, writeIdList);
+    }
+
+    if (!isTransactionalTable(table)) {
+      return rawStore.getPartitionColumnStatistics(catName, dbName, tblName, partNames, colNames, writeIdList);
+    }
+
+    if (cacheHit!=null) cacheHit.inc();
+
     // If writeIdList is not null, that means stats are requested within a txn context. So set stats compliant to false,
     // if areTxnStatsSupported is false or the write id which has updated the stats is not compatible with writeIdList.
     // This is done within table lock as the number of partitions may be more than one and we need a consistent view
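The comment above stresses that per-partition stats must be read under one table lock so the set of answers forms a consistent view. A minimal standalone sketch of that idea: all lookups happen inside a single read-lock section, so a concurrent writer (which must take the write lock) cannot interleave between partitions. Names here are illustrative:

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Multi-key read under one read lock: no write can land between lookups.
final class ConsistentMultiGet<K, V> {
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
  private final Map<K, V> store;

  ConsistentMultiGet(Map<K, V> store) {
    this.store = store;
  }

  List<V> getAll(List<K> keys) {
    lock.readLock().lock();
    try {
      List<V> out = new ArrayList<>(keys.size());
      for (K k : keys) {
        out.add(store.get(k)); // every lookup sees the same snapshot
      }
      return out;
    } finally {
      lock.readLock().unlock();
    }
  }
}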
@@ -2100,23 +1760,7 @@ private static void updateTableColumnsStatsInternal(Configuration conf, ColumnSt

   @Override public boolean deletePartitionColumnStatistics(String catName, String dbName, String tblName,
       String partName, List<String> partVals, String colName)
       throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
-    boolean succ = rawStore.deletePartitionColumnStatistics(catName, dbName, tblName, partName, partVals, colName);
-    // in case of event based cache update, cache is updated during commit txn.
-    if (succ && !canUseEvents) {
-      catName = normalizeIdentifier(catName);
-      dbName = normalizeIdentifier(dbName);
-      tblName = normalizeIdentifier(tblName);
-      if (!shouldCacheTable(catName, dbName, tblName)) {
-        return succ;
-      }
-      sharedCache.removePartitionColStatsFromCache(catName, dbName, tblName, partVals, colName);
-    }
-    return succ;
-  }
-
-  @Override public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, List<String> partNames,
-      List<String> colNames) throws MetaException, NoSuchObjectException {
-    return get_aggr_stats_for(catName, dbName, tblName, partNames, colNames, null);
+    return rawStore.deletePartitionColumnStatistics(catName, dbName, tblName, partName, partVals, colName);
   }

   @Override public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, List<String> partNames,
@@ -2125,19 +1769,27 @@ private static void updateTableColumnsStatsInternal(Configuration conf, ColumnSt
     catName = normalizeIdentifier(catName);
     dbName = StringUtils.normalizeIdentifier(dbName);
     tblName = StringUtils.normalizeIdentifier(tblName);
+    ValidWriteIdList writeIdsToRead = writeIdList!=null?new ValidReaderWriteIdList(writeIdList):null;
     // TODO: we currently cannot do transactional checks for stats here
     // (incl. due to lack of sync w.r.t. the below rawStore call).
     // In case the cache is updated using events, aggregate is calculated locally and thus can be read from cache.
-    if (!shouldCacheTable(catName, dbName, tblName) || (writeIdList != null && !canUseEvents)) {
-      return rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames, writeIdList);
+    if (writeIdsToRead==null || !shouldCacheTable(catName, dbName, tblName) || writeIdList != null) {
+      return rawStore.get_aggr_stats_for(
+          catName, dbName, tblName, partNames, colNames, writeIdList);
     }
-    Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
+    Table table = sharedCache.getTableFromCache(catName, dbName, tblName, writeIdsToRead);
+
     if (table == null) {
-      // The table is not yet loaded in cache
+      // no valid entry in cache
+      if (cacheMiss!=null) cacheMiss.inc();
+      return rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames, writeIdList);
+    }
+
+    if (!isTransactionalTable(table)) {
       return rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames, writeIdList);
     }
-    List<String> allPartNames = rawStore.listPartitionNames(catName, dbName, tblName, (short) -1);
+    List<String> allPartNames = rawStore.listPartitionNames(catName, dbName, tblName, (short) -1, writeIdList);
     StatsType type = StatsType.PARTIAL;
     if (partNames.size() == allPartNames.size()) {
       colStats = sharedCache.getAggrStatsFromCache(catName, dbName, tblName, colNames, StatsType.ALL);
@@ -2159,7 +1811,9 @@ private static void updateTableColumnsStatsInternal(Configuration conf, ColumnSt
     LOG.debug("Didn't find aggr stats in cache. Merging them. tblName= {}, parts= {}, cols= {}", tblName, partNames, colNames);
     MergedColumnStatsForPartitions mergedColStats =
-        mergeColStatsForPartitions(catName, dbName, tblName, partNames, colNames, sharedCache, type, writeIdList);
+        mergeColStatsForPartitions(catName, dbName, tblName, partNames, colNames, sharedCache, type, writeIdList,
+            MetastoreConf.getBoolVar(getConf(), ConfVars.STATS_NDV_DENSITY_FUNCTION),
+            MetastoreConf.getDoubleVar(getConf(), ConfVars.STATS_NDV_TUNER));
     if (mergedColStats == null) {
       LOG.info("Aggregate stats of partition " + TableName.getQualified(catName, dbName, tblName) + "." + partNames
           + " for columns " + colNames + " is not present in cache. Getting it from raw store");
@@ -2168,12 +1822,10 @@ private static void updateTableColumnsStatsInternal(Configuration conf, ColumnSt
     return new AggrStats(mergedColStats.getColStats(), mergedColStats.getPartsFound());
   }

-  private MergedColumnStatsForPartitions mergeColStatsForPartitions(String catName, String dbName, String tblName,
-      List<String> partNames, List<String> colNames, SharedCache sharedCache, StatsType type, String writeIdList)
+  static MergedColumnStatsForPartitions mergeColStatsForPartitions(String catName, String dbName, String tblName,
+      List<String> partNames, List<String> colNames, SharedCache sharedCache, StatsType type, String writeIdList,
+      boolean useDensityFunctionForNDVEstimation, double ndvTuner)
       throws MetaException {
-    final boolean useDensityFunctionForNDVEstimation =
-        MetastoreConf.getBoolVar(getConf(), ConfVars.STATS_NDV_DENSITY_FUNCTION);
-    final double ndvTuner = MetastoreConf.getDoubleVar(getConf(), ConfVars.STATS_NDV_TUNER);
     Map<String, List<ColumnStatisticsObj>> colStatsMap = new HashMap<>();
     long partsFound = partNames.size();
     Map<List<String>, Long> partNameToWriteId = writeIdList != null ? new HashMap<>() : null;
@@ -2234,21 +1886,19 @@ private MergedColumnStatsForPartitions mergeColStatsForPartitions(String catName
         .aggrPartitionStats(colStatsMap, partNames, partsFound == partNames.size(), useDensityFunctionForNDVEstimation,
             ndvTuner);

-    if (canUseEvents) {
-      if (type == StatsType.ALL) {
-        sharedCache.refreshAggregateStatsInCache(StringUtils.normalizeIdentifier(catName),
-            StringUtils.normalizeIdentifier(dbName), StringUtils.normalizeIdentifier(tblName),
-            new AggrStats(colAggrStats, partsFound), null, partNameToWriteId);
-      } else if (type == StatsType.ALLBUTDEFAULT) {
-        sharedCache.refreshAggregateStatsInCache(StringUtils.normalizeIdentifier(catName),
-            StringUtils.normalizeIdentifier(dbName), StringUtils.normalizeIdentifier(tblName), null,
-            new AggrStats(colAggrStats, partsFound), partNameToWriteId);
-      }
+    if (type == StatsType.ALL) {
+      sharedCache.refreshAggregateStatsInCache(StringUtils.normalizeIdentifier(catName),
+          StringUtils.normalizeIdentifier(dbName), StringUtils.normalizeIdentifier(tblName),
+          new AggrStats(colAggrStats, partsFound), null, partNameToWriteId);
+    } else if (type == StatsType.ALLBUTDEFAULT) {
+      sharedCache.refreshAggregateStatsInCache(StringUtils.normalizeIdentifier(catName),
+          StringUtils.normalizeIdentifier(dbName), StringUtils.normalizeIdentifier(tblName), null,
+          new AggrStats(colAggrStats, partsFound), partNameToWriteId);
     }
     return new MergedColumnStatsForPartitions(colAggrStats, partsFound);
   }

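mergeColStatsForPartitions becomes static here, with the two NDV knobs hoisted out of the method and passed in from the caller (which reads them from MetastoreConf once). That removes the hidden Configuration dependency and makes the helper unit-testable in isolation. A minimal sketch of the refactor shape; the mergeNdv formula below is illustrative, not Hive's exact estimator:

// Before (sketch): instance method with a hidden config read, e.g.
//   double merge() { double tuner = conf.getDouble("ndv.tuner"); ... }
// After: a pure static function taking the knobs explicitly.
final class NdvMerge {
  // ndvTuner in [0,1] interpolates between a lower bound (max of the two
  // sides) and an upper bound (their sum) when merging per-partition NDVs.
  static double mergeNdv(double ndvA, double ndvB, double ndvTuner) {
    double lower = Math.max(ndvA, ndvB);
    double upper = ndvA + ndvB;
    return lower + (upper - lower) * ndvTuner;
  }
}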
-  class MergedColumnStatsForPartitions {
+  static class MergedColumnStatsForPartitions {
     List<ColumnStatisticsObj> colStats = new ArrayList<ColumnStatisticsObj>();
     long partsFound;
@@ -2496,23 +2146,8 @@ long getPartsFound() {
       List<SQLForeignKey> foreignKeys, List<SQLUniqueConstraint> uniqueConstraints,
       List<SQLNotNullConstraint> notNullConstraints, List<SQLDefaultConstraint> defaultConstraints,
       List<SQLCheckConstraint> checkConstraints) throws InvalidObjectException, MetaException {
-    // TODO constraintCache
-    List<String> constraintNames = rawStore
-        .createTableWithConstraints(tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints,
-            defaultConstraints, checkConstraints);
-    // in case of event based cache update, cache is updated during commit.
-    if (canUseEvents) {
-      return constraintNames;
-    }
-    String dbName = normalizeIdentifier(tbl.getDbName());
-    String tblName = normalizeIdentifier(tbl.getTableName());
-    String catName = tbl.isSetCatName() ? normalizeIdentifier(tbl.getCatName()) : DEFAULT_CATALOG_NAME;
-    if (!shouldCacheTable(catName, dbName, tblName)) {
-      return constraintNames;
-    }
-    sharedCache.addTableToCache(StringUtils.normalizeIdentifier(tbl.getCatName()),
-        StringUtils.normalizeIdentifier(tbl.getDbName()), StringUtils.normalizeIdentifier(tbl.getTableName()), tbl);
-    return constraintNames;
+    return rawStore.createTableWithConstraints(tbl, primaryKeys,
+        foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints);
   }

   @Override public void dropConstraint(String catName, String dbName, String tableName, String constraintName,
@@ -2799,11 +2434,6 @@ static boolean isBlacklistWhitelistEmpty(Configuration conf) {
         .isEmpty();
   }

-  @VisibleForTesting void resetCatalogCache() {
-    sharedCache.resetCatalogCache();
-    setCachePrewarmedState(false);
-  }
-
   @Override public void addRuntimeStat(RuntimeStat stat) throws MetaException {
     rawStore.addRuntimeStat(stat);
   }
@@ -2828,4 +2458,22 @@ static boolean isBlacklistWhitelistEmpty(Configuration conf) {
       throws MetaException, NoSuchObjectException {
     return rawStore.getPartitionColsWithStats(catName, dbName, tableName);
   }
+
+  public static boolean isTransactionalTable(org.apache.hadoop.hive.metastore.api.Table table) {
+    return table != null && table.getParameters() != null &&
+        isTablePropertyTransactional(table.getParameters());
+  }
+
+  public static boolean isTablePropertyTransactional(Map<String, String> parameters) {
+    String resultStr = parameters.get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL);
+    if (resultStr == null) {
+      resultStr = parameters.get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL.toUpperCase());
+    }
+    return resultStr != null && resultStr.equalsIgnoreCase("true");
+  }
+
+  public static ValidWriteIdList newTableWriteIds(String dbName, String tableName) {
+    String fullTableName = TableName.getDbTable(dbName, tableName);
+    return new ValidReaderWriteIdList(fullTableName, new long[]{1}, new BitSet(), 1);
+  }
 }
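A hypothetical caller for the three static helpers added above. The property key value "transactional" matches hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, and the lookup falls back to the upper-cased key; newTableWriteIds builds a ValidReaderWriteIdList with high-water mark 1 and write id 1 still open, i.e. a brand-new table with nothing committed yet:

import java.util.HashMap;
import java.util.Map;
// assumes: import org.apache.hadoop.hive.metastore.cache.CachedStore;

class TransactionalCheckExample {
  public static void main(String[] args) {
    Map<String, String> params = new HashMap<>();
    params.put("transactional", "TRUE"); // case-insensitive value match
    System.out.println(CachedStore.isTablePropertyTransactional(params)); // true

    // Snapshot for a freshly created table: nothing is committed yet, so a
    // cached entry built from this covers no committed writes.
    System.out.println(CachedStore.newTableWriteIds("db1", "t1"));
  }
}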
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java
index 45b1b0d0bf..13aa1c5998 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java
@@ -21,7 +21,6 @@
 import java.security.MessageDigest;
 import java.security.NoSuchAlgorithmException;
 import java.util.ArrayList;
-import java.util.Collection;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -51,26 +50,23 @@
 import org.apache.hadoop.hive.common.ValidWriteIdList;
 import org.apache.hadoop.hive.metastore.ObjectStore;
 import org.apache.hadoop.hive.metastore.StatObjectConverter;
-import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.AggrStats;
 import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
-import org.apache.hadoop.hive.metastore.api.Catalog;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
-import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.api.TableMeta;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
 import org.apache.hadoop.hive.metastore.txn.TxnUtils;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils;
 import org.apache.hadoop.hive.metastore.utils.StringUtils;
 import org.apache.hadoop.hive.ql.util.IncrementalObjectSizeEstimator;
 import org.apache.hadoop.hive.ql.util.IncrementalObjectSizeEstimator.ObjectEstimator;
+import org.apache.hive.common.util.TxnIdUtils;
 import org.eclipse.jetty.util.ConcurrentHashSet;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -82,16 +78,6 @@ public class SharedCache {
   private static ReentrantReadWriteLock cacheLock = new ReentrantReadWriteLock(true);
   private static final long MAX_DEFAULT_CACHE_SIZE = 1024 * 1024;

-  private boolean isCatalogCachePrewarmed = false;
-  private Map<String, Catalog> catalogCache = new TreeMap<>();
-  private HashSet<String> catalogsDeletedDuringPrewarm = new HashSet<>();
-  private AtomicBoolean isCatalogCacheDirty = new AtomicBoolean(false);
-
-  // For caching Database objects. Key is database name
-  private Map<String, Database> databaseCache = new TreeMap<>();
-  private boolean isDatabaseCachePrewarmed = false;
-  private HashSet<String> databasesDeletedDuringPrewarm = new HashSet<>();
-  private AtomicBoolean isDatabaseCacheDirty = new AtomicBoolean(false);

   // For caching TableWrapper objects. Key is aggregate of database name and table name
   private Cache<String, TableWrapper> tableCache = null;
@@ -195,11 +181,11 @@ public int weigh(String key, TableWrapper value) {
       }).removalListener(new RemovalListener<String, TableWrapper>() {
         @Override public void onRemoval(RemovalNotification<String, TableWrapper> notification) {
-          LOG.debug("Eviction happened for table " + notification.getKey());
-          LOG.debug("current table cache contains " + tableCache.size() + "entries");
           TableWrapper tblWrapper = notification.getValue();
           RemovalCause cause = notification.getCause();
-          if (cause.equals(RemovalCause.COLLECTED) || cause.equals(RemovalCause.EXPIRED)) {
+          if (cause.equals(RemovalCause.COLLECTED) || cause.equals(RemovalCause.EXPIRED) || cause.equals(RemovalCause.SIZE)) {
+            LOG.debug("Eviction happened for table " + notification.getKey());
+            LOG.debug("current table cache contains " + tableCache.size() + "entries");
             byte[] sdHash = tblWrapper.getSdHash();
             if (sdHash != null) {
               decrSd(sdHash);
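The removal-listener change above adds RemovalCause.SIZE to the causes that release the shared storage-descriptor refcount, so size-capped evictions no longer leak SD entries. A self-contained Guava illustration of cause-filtered cleanup (the key/value types and maximumSize here are arbitrary demo choices):

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.RemovalCause;
import com.google.common.cache.RemovalListener;

public class EvictionDemo {
  public static void main(String[] args) {
    RemovalListener<String, byte[]> listener = notification -> {
      RemovalCause cause = notification.getCause();
      // Explicit invalidation is handled elsewhere; only true evictions
      // (GC-collected, expired, or size-capped) should release shared state.
      if (cause == RemovalCause.COLLECTED || cause == RemovalCause.EXPIRED
          || cause == RemovalCause.SIZE) {
        System.out.println("evicted " + notification.getKey() + " due to " + cause);
      }
    };
    Cache<String, byte[]> cache =
        CacheBuilder.newBuilder().maximumSize(2).removalListener(listener).build();
    cache.put("a", new byte[8]);
    cache.put("b", new byte[8]);
    cache.put("c", new byte[8]); // exceeds maximumSize; an entry is evicted with cause SIZE
  }
}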
@@ -286,12 +272,16 @@ public int getObjectSize(Class<?> clazz, Object obj) {
     private Map<String, List<ColumnStatisticsObj>> aggrColStatsCache =
         new ConcurrentHashMap<String, List<ColumnStatisticsObj>>();
     private AtomicBoolean isAggrPartitionColStatsCacheDirty = new AtomicBoolean(false);
+    private ValidWriteIdList writeIds;
+    private boolean valid = false;

-    TableWrapper(Table t, byte[] sdHash, String location, Map<String, String> parameters) {
+    TableWrapper(Table t, byte[] sdHash, String location, Map<String, String> parameters, ValidWriteIdList writeIds) {
       this.t = t;
       this.sdHash = sdHash;
       this.location = location;
       this.parameters = parameters;
+      this.writeIds = writeIds;
+      this.valid = false;
       this.tableColStatsCacheSize = 0;
       this.partitionCacheSize = 0;
       this.partitionColStatsCacheSize = 0;
@@ -580,12 +570,11 @@ public void alterPartitionAndStats(List<String> partVals, SharedCache sharedCach
         Map<String, String> parameters, List<ColumnStatisticsObj> colStatsObjs) {
       try {
         tableLock.writeLock().lock();
-        PartitionWrapper partitionWrapper = partitionCache.get(CacheUtils.buildPartitionCacheKey(partVals));
-        if (partitionWrapper == null) {
+        Partition newPart = getPartition(partVals, sharedCache);
+        if (newPart == null) {
           LOG.info("Partition " + partVals + " is missing from cache. Cannot update the partition stats in cache.");
           return;
         }
-        Partition newPart = partitionWrapper.getPartition();
         newPart.setParameters(parameters);
         newPart.setWriteId(writeId);
         removePartition(partVals, sharedCache);
@@ -609,35 +598,6 @@ public void alterPartitions(List<List<String>> partValsList, List<Partition> new
       }
     }

-    public void refreshPartitions(List<Partition> partitions, SharedCache sharedCache) {
-      Map<String, PartitionWrapper> newPartitionCache = new HashMap<String, PartitionWrapper>();
-      try {
-        tableLock.writeLock().lock();
-        int size = 0;
-        for (Partition part : partitions) {
-          if (isPartitionCacheDirty.compareAndSet(true, false)) {
-            LOG.debug("Skipping partition cache update for table: " + getTable().getTableName()
-                + "; the partition list we have is dirty.");
-            return;
-          }
-          String key = CacheUtils.buildPartitionCacheKey(part.getValues());
-          PartitionWrapper wrapper = partitionCache.get(key);
-          if (wrapper != null) {
-            if (wrapper.getSdHash() != null) {
-              sharedCache.decrSd(wrapper.getSdHash());
-            }
-          }
-          wrapper = makePartitionWrapper(part, sharedCache);
-          newPartitionCache.put(key, wrapper);
-          size += getObjectSize(PartitionWrapper.class, wrapper);
-        }
-        partitionCache = newPartitionCache;
-        updateMemberSize(MemberName.PARTITION_CACHE, size, SizeMode.Snapshot);
-      } finally {
-        tableLock.writeLock().unlock();
-      }
-    }
-
     public boolean updateTableColStats(List<ColumnStatisticsObj> colStatsForTable) {
       try {
         tableLock.writeLock().lock();
@@ -665,29 +625,6 @@ public boolean updateTableColStats(List<ColumnStatisticsObj> colStatsForTable) {
       }
     }

-    public void refreshTableColStats(List<ColumnStatisticsObj> colStatsForTable) {
-      Map<String, ColumnStatisticsObj> newTableColStatsCache = new HashMap<String, ColumnStatisticsObj>();
-      try {
-        tableLock.writeLock().lock();
-        int statsSize = 0;
-        for (ColumnStatisticsObj colStatObj : colStatsForTable) {
-          if (isTableColStatsCacheDirty.compareAndSet(true, false)) {
-            LOG.debug("Skipping table col stats cache update for table: " + getTable().getTableName()
-                + "; the table col stats list we have is dirty.");
-            return;
-          }
-          String key = colStatObj.getColName();
-          // TODO: get rid of deepCopy after making sure callers don't use references
-          newTableColStatsCache.put(key, colStatObj.deepCopy());
-          statsSize += getObjectSize(ColumnStatisticsObj.class, colStatObj);
-        }
-        tableColStatsCache = newTableColStatsCache;
-        updateMemberSize(MemberName.TABLE_COL_STATS_CACHE, statsSize, SizeMode.Snapshot);
-      } finally {
-        tableLock.writeLock().unlock();
-      }
-    }
-
     public ColumnStatistics getCachedTableColStats(ColumnStatisticsDesc csd, List<String> colNames,
         String validWriteIds, boolean areTxnStatsSupported) throws MetaException {
       List<ColumnStatisticsObj> colStatObjs = new ArrayList<ColumnStatisticsObj>();
@@ -882,43 +819,6 @@ public void removeAllPartitionColStats() {
       }
     }

-    public void refreshPartitionColStats(List<ColumnStatistics> partitionColStats) {
-      Map<String, ColumnStatisticsObj> newPartitionColStatsCache = new HashMap<String, ColumnStatisticsObj>();
-      try {
-        tableLock.writeLock().lock();
-        String tableName = StringUtils.normalizeIdentifier(getTable().getTableName());
-        int statsSize = 0;
-        for (ColumnStatistics cs : partitionColStats) {
-          if (isPartitionColStatsCacheDirty.compareAndSet(true, false)) {
-            LOG.debug("Skipping partition column stats cache update for table: " + getTable().getTableName()
-                + "; the partition column stats list we have is dirty");
-            return;
-          }
-          List<String> partVal;
-          try {
-            partVal = Warehouse.makeValsFromName(cs.getStatsDesc().getPartName(), null);
-            List<ColumnStatisticsObj> colStatsObjs = cs.getStatsObj();
-            for (ColumnStatisticsObj colStatObj : colStatsObjs) {
-              if (isPartitionColStatsCacheDirty.compareAndSet(true, false)) {
-                LOG.debug("Skipping partition column stats cache update for table: " + getTable().getTableName()
-                    + "; the partition column list we have is dirty");
-                return;
-              }
-              String key = CacheUtils.buildPartitonColStatsCacheKey(partVal, colStatObj.getColName());
-              newPartitionColStatsCache.put(key, colStatObj.deepCopy());
-              statsSize += getObjectSize(ColumnStatisticsObj.class, colStatObj);
-            }
-          } catch (MetaException e) {
-            LOG.debug("Unable to cache partition column stats for table: " + tableName, e);
-          }
-        }
-        partitionColStatsCache = newPartitionColStatsCache;
-        updateMemberSize(MemberName.PARTITION_COL_STATS_CACHE, statsSize, SizeMode.Snapshot);
-      } finally {
-        tableLock.writeLock().unlock();
-      }
-    }
-
     public List<ColumnStatisticsObj> getAggrPartitionColStats(List<String> colNames, StatsType statsType) {
       List<ColumnStatisticsObj> colStats = new ArrayList<ColumnStatisticsObj>();
       try {
@@ -1082,6 +982,18 @@ private PartitionWrapper makePartitionWrapper(Partition part, SharedCache shared
       }
       return wrapper;
     }
+
+    ValidWriteIdList getWriteIds() {
+      return writeIds;
+    }
+
+    boolean isValid() {
+      return valid;
+    }
+
+    void setValid(boolean valid) {
+      this.valid = valid;
+    }
   }
public Database getDatabaseFromCache(String catName, String name) { - Database db = null; - try { - cacheLock.readLock().lock(); - String key = CacheUtils.buildDbKey(catName, name); - if (databaseCache.get(key) != null) { - db = databaseCache.get(key).deepCopy(); - } - } finally { - cacheLock.readLock().unlock(); - } - return db; - } - - public void populateDatabasesInCache(List databases) { - for (Database db : databases) { - Database dbCopy = db.deepCopy(); - // ObjectStore also stores db name in lowercase - dbCopy.setName(dbCopy.getName().toLowerCase()); - try { - cacheLock.writeLock().lock(); - // Since we allow write operations on cache while prewarm is happening: - // 1. Don't add databases that were deleted while we were preparing list for prewarm - // 2. Skip overwriting exisiting db object - // (which is present because it was added after prewarm started) - String key = CacheUtils.buildDbKey(dbCopy.getCatalogName().toLowerCase(), dbCopy.getName().toLowerCase()); - if (databasesDeletedDuringPrewarm.contains(key)) { - continue; - } - databaseCache.putIfAbsent(key, dbCopy); - databasesDeletedDuringPrewarm.clear(); - isDatabaseCachePrewarmed = true; - } finally { - cacheLock.writeLock().unlock(); - } - } - } - - public boolean isDatabaseCachePrewarmed() { - return isDatabaseCachePrewarmed; - } - - public void addDatabaseToCache(Database db) { - try { - cacheLock.writeLock().lock(); - Database dbCopy = db.deepCopy(); - // ObjectStore also stores db name in lowercase - dbCopy.setName(dbCopy.getName().toLowerCase()); - dbCopy.setCatalogName(dbCopy.getCatalogName().toLowerCase()); - databaseCache.put(CacheUtils.buildDbKey(dbCopy.getCatalogName(), dbCopy.getName()), dbCopy); - isDatabaseCacheDirty.set(true); - } finally { - cacheLock.writeLock().unlock(); - } - } - - public void removeDatabaseFromCache(String catName, String dbName) { - try { - cacheLock.writeLock().lock(); - // If db cache is not yet prewarmed, add this to a set which the prewarm thread can check - // so that the prewarm thread does not add it back - String key = CacheUtils.buildDbKey(catName, dbName); - if (!isDatabaseCachePrewarmed) { - databasesDeletedDuringPrewarm.add(key); - } - if (databaseCache.remove(key) != null) { - isDatabaseCacheDirty.set(true); - } - } finally { - cacheLock.writeLock().unlock(); - } - } - - public List listCachedDatabases(String catName) { - List results = new ArrayList<>(); - try { - cacheLock.readLock().lock(); - for (String pair : databaseCache.keySet()) { - String[] n = CacheUtils.splitDbName(pair); - if (catName.equals(n[0])) { - results.add(n[1]); - } - } - } finally { - cacheLock.readLock().unlock(); - } - return results; - } - - public List listCachedDatabases(String catName, String pattern) { - List results = new ArrayList<>(); - try { - cacheLock.readLock().lock(); - for (String pair : databaseCache.keySet()) { - String[] n = CacheUtils.splitDbName(pair); - if (catName.equals(n[0])) { - n[1] = StringUtils.normalizeIdentifier(n[1]); - if (CacheUtils.matches(n[1], pattern)) { - results.add(n[1]); - } - } - } - } finally { - cacheLock.readLock().unlock(); - } - return results; - } - - /** - * Replaces the old db object with the new one. This will add the new database to cache if it does - * not exist. 
- */ - public void alterDatabaseInCache(String catName, String dbName, Database newDb) { - try { - cacheLock.writeLock().lock(); - removeDatabaseFromCache(catName, dbName); - addDatabaseToCache(newDb.deepCopy()); - isDatabaseCacheDirty.set(true); - } finally { - cacheLock.writeLock().unlock(); - } - } - - public boolean refreshDatabasesInCache(List databases) { - if (isDatabaseCacheDirty.compareAndSet(true, false)) { - LOG.debug("Skipping database cache update; the database list we have is dirty."); - return false; - } - try { - cacheLock.writeLock().lock(); - databaseCache.clear(); - for (Database db : databases) { - addDatabaseToCache(db); - } - return true; - } finally { - cacheLock.writeLock().unlock(); - } - } - - public int getCachedDatabaseCount() { - try { - cacheLock.readLock().lock(); - return databaseCache.size(); - } finally { - cacheLock.readLock().unlock(); - } - } - + // This is called during prewarm public boolean populateTableInCache(Table table, ColumnStatistics tableColStats, List partitions, List partitionColStats, AggrStats aggrStatsAllPartitions, - AggrStats aggrStatsAllButDefaultPartition) { + AggrStats aggrStatsAllButDefaultPartition, ValidWriteIdList writeIds) { String catName = StringUtils.normalizeIdentifier(table.getCatName()); String dbName = StringUtils.normalizeIdentifier(table.getDbName()); String tableName = StringUtils.normalizeIdentifier(table.getTableName()); @@ -1399,7 +1074,7 @@ public boolean populateTableInCache(Table table, ColumnStatistics tableColStats, if (tablesDeletedDuringPrewarm.contains(CacheUtils.buildTableKey(catName, dbName, tableName))) { return false; } - TableWrapper tblWrapper = createTableWrapper(catName, dbName, tableName, table); + TableWrapper tblWrapper = createTableWrapper(catName, dbName, tableName, table, writeIds); - if (!table.isSetPartitionKeys() && (tableColStats != null)) { + if (table.getPartitionKeys().isEmpty() && (tableColStats != null)) { return false; @@ -1431,6 +1106,8 @@ public boolean populateTableInCache(Table table, ColumnStatistics tableColStats, tblWrapper.isTableColStatsCacheDirty.set(false); tblWrapper.isPartitionColStatsCacheDirty.set(false); tblWrapper.isAggrPartitionColStatsCacheDirty.set(false); + tblWrapper.writeIds = writeIds; + tblWrapper.valid = true; try { cacheLock.writeLock().lock(); // 2. Skip overwriting existing table object @@ -1452,13 +1129,20 @@ public void completeTableCachePrewarm() { } } - public Table getTableFromCache(String catName, String dbName, String tableName) { + public Table getTableFromCache(String catName, String dbName, String tableName, ValidWriteIdList validWriteIdList) { Table t = null; try { cacheLock.readLock().lock(); TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tableName)); if (tblWrapper != null) { - t = CacheUtils.assemble(tblWrapper, this); + // Serve from the cache only if the cached writeIds are at least as new as the requested writeIdList; otherwise fall back to the DB + if (validWriteIdList == null || (tblWrapper.isValid() && tblWrapper.getWriteIds() != null && TxnIdUtils.compare(tblWrapper.getWriteIds(), validWriteIdList) >= 0)) { + t = CacheUtils.assemble(tblWrapper, this); + LOG.debug("read " + catName + "." + dbName + "." + tableName + " from cache"); + } else { + LOG.debug("read " + catName + "." + dbName + "."
+ tableName + " from db as requested writeId " + validWriteIdList + + " is newer than cached writeId " + tblWrapper.getWriteIds()); + } } } finally { cacheLock.readLock().unlock(); @@ -1466,10 +1150,10 @@ public Table getTableFromCache(String catName, String dbName, String tableName) return t; } - public TableWrapper addTableToCache(String catName, String dbName, String tblName, Table tbl) { + public TableWrapper addTableToCache(String catName, String dbName, String tblName, Table tbl, ValidWriteIdList writeIds) { try { cacheLock.writeLock().lock(); - TableWrapper wrapper = createTableWrapper(catName, dbName, tblName, tbl); + TableWrapper wrapper = createTableWrapper(catName, dbName, tblName, tbl, writeIds); tableCache.put(CacheUtils.buildTableKey(catName, dbName, tblName), wrapper); isTableCacheDirty.set(true); return wrapper; @@ -1478,7 +1162,23 @@ public TableWrapper addTableToCache(String catName, String dbName, String tblNam } } - private TableWrapper createTableWrapper(String catName, String dbName, String tblName, Table tbl) { + public void commitWriteId(String catName, String dbName, String tblName, long writeId) { + try { + cacheLock.readLock().lock(); + TableWrapper tblWrapper = + tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); + if (tblWrapper != null) { + if (tblWrapper.getWriteIds() != null) { + tblWrapper.getWriteIds().commitWriteId(writeId); + } + tblWrapper.setValid(true); + } + } finally { + cacheLock.readLock().unlock(); + } + } + + private TableWrapper createTableWrapper(String catName, String dbName, String tblName, Table tbl, ValidWriteIdList writeIds) { TableWrapper wrapper; Table tblCopy = tbl.deepCopy(); tblCopy.setCatName(normalizeIdentifier(catName)); @@ -1494,9 +1194,9 @@ private TableWrapper createTableWrapper(String catName, String dbName, String tb StorageDescriptor sd = tbl.getSd(); increSd(sd, sdHash); tblCopy.setSd(null); - wrapper = new TableWrapper(tblCopy, sdHash, sd.getLocation(), sd.getParameters()); + wrapper = new TableWrapper(tblCopy, sdHash, sd.getLocation(), sd.getParameters(), writeIds); } else { - wrapper = new TableWrapper(tblCopy, null, null, null); + wrapper = new TableWrapper(tblCopy, null, null, null, writeIds); } return wrapper; } @@ -1532,6 +1232,7 @@ public void alterTableInCache(String catName, String dbName, String tblName, Tab cacheLock.writeLock().lock(); TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { + tblWrapper.valid = false; tblWrapper.updateTableObj(newTable, this); String newDbName = StringUtils.normalizeIdentifier(newTable.getDbName()); String newTblName = StringUtils.normalizeIdentifier(newTable.getTableName()); @@ -1552,6 +1253,7 @@ public void alterTableAndStatsInCache(String catName, String dbName, String tblN LOG.info("Table " + tblName + " is missing from cache. 
Cannot update table stats in cache"); return; } + tblWrapper.valid = false; Table newTable = tblWrapper.getTable(); newTable.setWriteId(writeId); newTable.setParameters(newParams); @@ -1596,75 +1298,6 @@ public void alterTableAndStatsInCache(String catName, String dbName, String tblN return tableNames; } - public List listCachedTableNames(String catName, String dbName, String pattern, int maxTables) { - List tableNames = new ArrayList<>(); - try { - cacheLock.readLock().lock(); - int count = 0; - for (TableWrapper wrapper : tableCache.asMap().values()) { - if (wrapper.sameDatabase(catName, dbName) && CacheUtils.matches(wrapper.getTable().getTableName(), pattern) && ( - maxTables == -1 || count < maxTables)) { - tableNames.add(StringUtils.normalizeIdentifier(wrapper.getTable().getTableName())); - count++; - } - } - } finally { - cacheLock.readLock().unlock(); - } - return tableNames; - } - - public List listCachedTableNames(String catName, String dbName, String pattern, TableType tableType, - int limit) { - List tableNames = new ArrayList<>(); - try { - cacheLock.readLock().lock(); - int count = 0; - for (TableWrapper wrapper : tableCache.asMap().values()) { - if (wrapper.sameDatabase(catName, dbName) && CacheUtils.matches(wrapper.getTable().getTableName(), pattern) - && wrapper.getTable().getTableType().equals(tableType.toString()) && (limit == -1 || count < limit)) { - tableNames.add(StringUtils.normalizeIdentifier(wrapper.getTable().getTableName())); - count++; - } - } - } finally { - cacheLock.readLock().unlock(); - } - return tableNames; - } - - public boolean refreshTablesInCache(String catName, String dbName, List
tables) { - if (isTableCacheDirty.compareAndSet(true, false)) { - LOG.debug("Skipping table cache update; the table list we have is dirty."); - return false; - } - Map newCacheForDB = new TreeMap<>(); - for (Table tbl : tables) { - String tblName = StringUtils.normalizeIdentifier(tbl.getTableName()); - TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); - if (tblWrapper != null) { - tblWrapper.updateTableObj(tbl, this); - } else { - tblWrapper = createTableWrapper(catName, dbName, tblName, tbl); - } - newCacheForDB.put(CacheUtils.buildTableKey(catName, dbName, tblName), tblWrapper); - } - try { - cacheLock.writeLock().lock(); - Iterator> entryIterator = tableCache.asMap().entrySet().iterator(); - while (entryIterator.hasNext()) { - String key = entryIterator.next().getKey(); - if (key.startsWith(CacheUtils.buildDbKeyWithDelimiterSuffix(catName, dbName))) { - entryIterator.remove(); - } - } - tableCache.putAll(newCacheForDB); - return true; - } finally { - cacheLock.writeLock().unlock(); - } - } - public ColumnStatistics getTableColStatsFromCache(String catName, String dbName, String tblName, List colNames, String validWriteIds, boolean areTxnStatsSupported) throws MetaException { try { @@ -1686,6 +1319,7 @@ public void removeTableColStatsFromCache(String catName, String dbName, String t cacheLock.readLock().lock(); TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { + tblWrapper.valid = false; tblWrapper.removeTableColStats(colName); } else { LOG.info("Table " + tblName + " is missing from cache."); @@ -1695,26 +1329,13 @@ public void removeTableColStatsFromCache(String catName, String dbName, String t } } - public void removeAllTableColStatsFromCache(String catName, String dbName, String tblName) { - try { - cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); - if (tblWrapper != null) { - tblWrapper.removeAllTableColStats(); - } else { - LOG.info("Table " + tblName + " is missing from cache."); - } - } finally { - cacheLock.readLock().unlock(); - } - } - public void updateTableColStatsInCache(String catName, String dbName, String tableName, List colStatsForTable) { try { cacheLock.readLock().lock(); TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tableName)); if (tblWrapper != null) { + tblWrapper.valid = false; tblWrapper.updateTableColStats(colStatsForTable); } else { LOG.info("Table " + tableName + " is missing from cache."); @@ -1724,21 +1345,6 @@ public void updateTableColStatsInCache(String catName, String dbName, String tab } } - public void refreshTableColStatsInCache(String catName, String dbName, String tableName, - List colStatsForTable) { - try { - cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tableName)); - if (tblWrapper != null) { - tblWrapper.refreshTableColStats(colStatsForTable); - } else { - LOG.info("Table " + tableName + " is missing from cache."); - } - } finally { - cacheLock.readLock().unlock(); - } - } - public int getCachedTableCount() { try { cacheLock.readLock().lock(); @@ -1748,36 +1354,13 @@ public int getCachedTableCount() { } } - public List getTableMeta(String catName, String dbNames, String tableNames, List tableTypes) { - List tableMetas = new ArrayList<>(); - try { - cacheLock.readLock().lock(); - for (String dbName : 
listCachedDatabases(catName)) { - if (CacheUtils.matches(dbName, dbNames)) { - for (Table table : listCachedTables(catName, dbName)) { - if (CacheUtils.matches(table.getTableName(), tableNames)) { - if (tableTypes == null || tableTypes.contains(table.getTableType())) { - TableMeta metaData = new TableMeta(dbName, table.getTableName(), table.getTableType()); - metaData.setCatName(catName); - metaData.setComments(table.getParameters().get("comment")); - tableMetas.add(metaData); - } - } - } - } - } - } finally { - cacheLock.readLock().unlock(); - } - return tableMetas; - } - public void addPartitionToCache(String catName, String dbName, String tblName, Partition part) { try { cacheLock.readLock().lock(); String tblKey = CacheUtils.buildTableKey(catName, dbName, tblName); TableWrapper tblWrapper = tableCache.getIfPresent(tblKey); if (tblWrapper != null) { + tblWrapper.valid = false; tblWrapper.cachePartition(part, this); } } finally { @@ -1790,6 +1373,7 @@ public void addPartitionsToCache(String catName, String dbName, String tblName, cacheLock.readLock().lock(); TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { + tblWrapper.valid = false; tblWrapper.cachePartitions(parts, this, false); } } finally { @@ -1831,6 +1415,7 @@ public Partition removePartitionFromCache(String catName, String dbName, String cacheLock.readLock().lock(); TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { + tblWrapper.valid = false; part = tblWrapper.removePartition(partVals, this); } else { LOG.warn("This is abnormal"); @@ -1846,6 +1431,7 @@ public void removePartitionsFromCache(String catName, String dbName, String tblN cacheLock.readLock().lock(); TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { + tblWrapper.valid = false; tblWrapper.removePartitions(partVals, this); } } finally { @@ -1873,6 +1459,7 @@ public void alterPartitionInCache(String catName, String dbName, String tblName, cacheLock.readLock().lock(); TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { + tblWrapper.valid = false; tblWrapper.alterPartition(partVals, newPart, this); } } finally { @@ -1886,6 +1473,7 @@ public void alterPartitionAndStatsInCache(String catName, String dbName, String cacheLock.readLock().lock(); TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { + tblWrapper.valid = false; tblWrapper.alterPartitionAndStats(partVals, this, writeId, parameters, colStatsObjs); } } finally { @@ -1899,6 +1487,7 @@ public void alterPartitionsInCache(String catName, String dbName, String tblName cacheLock.readLock().lock(); TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { + tblWrapper.valid = false; tblWrapper.alterPartitions(partValsList, newParts, this); } } finally { @@ -1906,24 +1495,13 @@ public void alterPartitionsInCache(String catName, String dbName, String tblName } } - public void refreshPartitionsInCache(String catName, String dbName, String tblName, List partitions) { - try { - cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); - if (tblWrapper != null) { - tblWrapper.refreshPartitions(partitions, 
this); - } - } finally { - cacheLock.readLock().unlock(); - } - } - public void removePartitionColStatsFromCache(String catName, String dbName, String tblName, List partVals, String colName) { try { cacheLock.readLock().lock(); TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { + tblWrapper.valid = false; tblWrapper.removePartitionColStats(partVals, colName); } } finally { @@ -1931,24 +1509,13 @@ public void removePartitionColStatsFromCache(String catName, String dbName, Stri } } - public void removeAllPartitionColStatsFromCache(String catName, String dbName, String tblName) { - try { - cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); - if (tblWrapper != null) { - tblWrapper.removeAllPartitionColStats(); - } - } finally { - cacheLock.readLock().unlock(); - } - } - public void updatePartitionColStatsInCache(String catName, String dbName, String tableName, List partVals, List colStatsObjs) { try { cacheLock.readLock().lock(); TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tableName)); if (tblWrapper != null) { + tblWrapper.valid = false; tblWrapper.updatePartitionColStats(partVals, colStatsObjs); } } finally { @@ -1988,19 +1555,6 @@ public ColumStatsWithWriteId getPartitionColStatsFromCache(String catName, Strin return colStatObjs; } - public void refreshPartitionColStatsInCache(String catName, String dbName, String tblName, - List partitionColStats) { - try { - cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); - if (tblWrapper != null) { - tblWrapper.refreshPartitionColStats(partitionColStats); - } - } finally { - cacheLock.readLock().unlock(); - } - } - public List getAggrStatsFromCache(String catName, String dbName, String tblName, List colNames, StatsType statsType) { try { @@ -2069,11 +1623,6 @@ public synchronized StorageDescriptor getSdFromCache(byte[] sdHash) { return sdWrapper.getSd(); } - @VisibleForTesting - Map getDatabaseCache() { - return databaseCache; - } - @VisibleForTesting void clearTableCache() { tableCache.invalidateAll(); @@ -2084,19 +1633,7 @@ void clearTableCache() { return sdCache; } - /** - * This resets the contents of the cataog cache so that we can re-fill it in another test. 
- */ - void resetCatalogCache() { - isCatalogCachePrewarmed = false; - catalogCache.clear(); - catalogsDeletedDuringPrewarm.clear(); - isCatalogCacheDirty.set(false); - } - void clearDirtyFlags() { - isCatalogCacheDirty.set(false); - isDatabaseCacheDirty.set(false); isTableCacheDirty.set(false); } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/InsertEvent.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/InsertEvent.java index 60ad7db60e..a0c2bef4f0 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/InsertEvent.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/InsertEvent.java @@ -71,7 +71,7 @@ public InsertEvent(String catName, String db, String table, List partVal this.tableObj = handler.get_table_req(req).getTable(); if (partVals != null) { this.ptnObj = handler.get_partition(MetaStoreUtils.prependNotNullCatToDbName(catName, db), - table, partVals); + table, partVals, null); } else { this.ptnObj = null; } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/UpdatePartitionColumnStatEvent.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/UpdatePartitionColumnStatEvent.java index ba61a08173..0e57835360 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/UpdatePartitionColumnStatEvent.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/UpdatePartitionColumnStatEvent.java @@ -39,6 +39,7 @@ private Map parameters; private List partVals; private Table tableObj; + private String writeIds; /** * @param statsObj Columns statistics Info. 
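Both stats-update events now travel with two related fields: writeId, the single id allocated to the mutating query, and writeIds, the string-serialized ValidWriteIdList snapshot that a consumer (a replica, or the CachedStore on the other side of a notification) needs in order to judge whether the stats are current. A hedged sketch of the pairing as it might look in a JSON message (class and field names and the sample serialization string are illustrative; Jackson is assumed, as in the JSON*Message classes elsewhere in this patch):

// Illustrative stand-in for the JSONUpdate*ColumnStatMessage pattern:
// carry the query's own writeId plus the serialized ValidWriteIdList.
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.ObjectMapper;

public class StatsUpdateMessageSketch {
  @JsonProperty
  private long writeId;      // write id allocated to the mutating query
  @JsonProperty
  private String writeIds;   // serialized ValidWriteIdList, e.g. "default.t:5:9223372036854775807::"

  StatsUpdateMessageSketch() {
    // default constructor, needed for Jackson
  }

  StatsUpdateMessageSketch(long writeId, String writeIds) {
    this.writeId = writeId;
    this.writeIds = writeIds;
  }

  public String getWriteIds() {
    return writeIds;
  }

  public static void main(String[] args) throws Exception {
    ObjectMapper mapper = new ObjectMapper();
    StatsUpdateMessageSketch msg = new StatsUpdateMessageSketch(5L, "default.t:5:9223372036854775807::");
    String json = mapper.writeValueAsString(msg);
    // A consumer deserializes the message and can rebuild the snapshot from
    // getWriteIds() (in Hive proper, e.g. via new ValidReaderWriteIdList(...)).
    StatsUpdateMessageSketch back = mapper.readValue(json, StatsUpdateMessageSketch.class);
    System.out.println(json + " -> writeId=" + back.writeId);
  }
}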
@@ -49,13 +50,14 @@ * @param handler handler that is firing the event */ public UpdatePartitionColumnStatEvent(ColumnStatistics statsObj, List partVals, Map parameters, - Table tableObj, long writeId, IHMSHandler handler) { + Table tableObj, long writeId, String writeIds, IHMSHandler handler) { super(true, handler); this.partColStats = statsObj; this.writeId = writeId; this.parameters = parameters; this.partVals = partVals; this.tableObj = tableObj; + this.writeIds = writeIds; } /** @@ -64,13 +66,14 @@ public UpdatePartitionColumnStatEvent(ColumnStatistics statsObj, List pa * @param handler handler that is firing the event */ public UpdatePartitionColumnStatEvent(ColumnStatistics statsObj, List partVals, - Table tableObj, IHMSHandler handler) { + Table tableObj, String writeIds, IHMSHandler handler) { super(true, handler); this.partColStats = statsObj; this.partVals = partVals; this.writeId = 0; this.parameters = null; this.tableObj = tableObj; + this.writeIds = writeIds; } public ColumnStatistics getPartColStats() { @@ -90,4 +93,8 @@ public long getWriteId() { } public Table getTableObj() { return tableObj; } + + public String getWriteIds() { + return writeIds; + } } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/UpdateTableColumnStatEvent.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/UpdateTableColumnStatEvent.java index 71300abf4e..48d1206786 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/UpdateTableColumnStatEvent.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/UpdateTableColumnStatEvent.java @@ -38,22 +38,25 @@ private long writeId; private Map parameters; private Table tableObj; + private String writeIds; /** * @param colStats Columns statistics Info. * @param tableObj table object * @param parameters table parameters to be updated after stats are updated. * @param writeId writeId for the query.
+ * @param writeIds writeIds for the query * @param handler handler that is firing the event */ public UpdateTableColumnStatEvent(ColumnStatistics colStats, Table tableObj, Map parameters, - long writeId, IHMSHandler handler) { + long writeId, String writeIds, IHMSHandler handler) { super(true, handler); this.colStats = colStats; this.writeId = writeId; this.parameters = parameters; this.tableObj = tableObj; + this.writeIds = writeIds; } /** @@ -76,6 +79,10 @@ public long getWriteId() { return writeId; } + public String getWriteIds() { + return writeIds; + } + public Map getTableParameters() { return parameters; } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/MessageBuilder.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/MessageBuilder.java index aa83da4ed5..4518d79b1c 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/MessageBuilder.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/MessageBuilder.java @@ -289,9 +289,9 @@ public AcidWriteMessage buildAcidWriteMessage(AcidWriteEvent acidWriteEvent, public JSONUpdateTableColumnStatMessage buildUpdateTableColumnStatMessage(ColumnStatistics colStats, Table tableObj, Map parameters, - long writeId) { + long writeId, String writeIds) { return new JSONUpdateTableColumnStatMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, now(), - colStats, tableObj, parameters, writeId); + colStats, tableObj, parameters, writeId, writeIds); } public JSONDeleteTableColumnStatMessage buildDeleteTableColumnStatMessage(String dbName, String colName) { @@ -300,9 +300,9 @@ public JSONDeleteTableColumnStatMessage buildDeleteTableColumnStatMessage(String public JSONUpdatePartitionColumnStatMessage buildUpdatePartitionColumnStatMessage(ColumnStatistics colStats, List partVals, Map parameters, - Table tableObj, long writeId) { + Table tableObj, long writeId, String writeIds) { return new JSONUpdatePartitionColumnStatMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, now(), colStats, partVals, - parameters, tableObj, writeId); + parameters, tableObj, writeId, writeIds); } public JSONDeletePartitionColumnStatMessage buildDeletePartitionColumnStatMessage(String dbName, String colName, diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/UpdatePartitionColumnStatMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/UpdatePartitionColumnStatMessage.java index e92a0dc9a3..f685bc4f8a 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/UpdatePartitionColumnStatMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/UpdatePartitionColumnStatMessage.java @@ -41,4 +41,6 @@ protected UpdatePartitionColumnStatMessage() { public abstract List getPartVals(); public abstract Table getTableObject() throws Exception; + + public abstract String getWriteIds(); } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/UpdateTableColumnStatMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/UpdateTableColumnStatMessage.java index e3f049c48c..e5f7ef8100 100644 --- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/UpdateTableColumnStatMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/UpdateTableColumnStatMessage.java @@ -38,4 +38,6 @@ protected UpdateTableColumnStatMessage() { public abstract Map getParameters(); public abstract Table getTableObject() throws Exception; + + public abstract String getWriteIds() throws Exception; } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONUpdatePartitionColumnStatMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONUpdatePartitionColumnStatMessage.java index fd7fe00419..2e4d9de9fd 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONUpdatePartitionColumnStatMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONUpdatePartitionColumnStatMessage.java @@ -52,6 +52,9 @@ @JsonProperty private String tableObjJson; + @JsonProperty + private String writeIds; + /** * Default constructor, needed for Jackson. */ @@ -61,7 +64,7 @@ public JSONUpdatePartitionColumnStatMessage() { public JSONUpdatePartitionColumnStatMessage(String server, String servicePrincipal, Long timestamp, ColumnStatistics colStats, List partVals, Map parameters, - Table tableObj, long writeId) { + Table tableObj, long writeId, String writeIds) { this.timestamp = timestamp; this.server = server; this.servicePrincipal = servicePrincipal; @@ -75,6 +78,7 @@ public JSONUpdatePartitionColumnStatMessage(String server, String servicePrincip throw new IllegalArgumentException("Could not serialize JSONUpdatePartitionColumnStatMessage : ", e); } this.parameters = parameters; + this.writeIds = writeIds; } @Override @@ -126,6 +130,11 @@ public Table getTableObject() throws Exception { return (Table) MessageBuilder.getTObj(tableObjJson, Table.class); } + @Override + public String getWriteIds() { + return writeIds; + } + @Override public String toString() { try { diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONUpdateTableColumnStatMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONUpdateTableColumnStatMessage.java index 275d204957..40636c8ad2 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONUpdateTableColumnStatMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONUpdateTableColumnStatMessage.java @@ -47,6 +47,9 @@ @JsonProperty private String tableObjJson; + @JsonProperty + private String writeIds; + /** * Default constructor, needed for Jackson. 
*/ @@ -55,7 +58,7 @@ public JSONUpdateTableColumnStatMessage() { public JSONUpdateTableColumnStatMessage(String server, String servicePrincipal, Long timestamp, ColumnStatistics colStats, Table tableObj, Map parameters, - long writeId) { + long writeId, String writeIds) { this.timestamp = timestamp; this.server = server; this.servicePrincipal = servicePrincipal; @@ -68,6 +71,7 @@ public JSONUpdateTableColumnStatMessage(String server, String servicePrincipal, throw new IllegalArgumentException("Could not serialize JSONUpdateTableColumnStatMessage : ", e); } this.parameters = parameters; + this.writeIds = writeIds; } @Override @@ -114,6 +118,11 @@ public Long getWriteId() { return parameters; } + @Override + public String getWriteIds() { + return writeIds; + } + @Override public String toString() { try { diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metrics/MetricsConstants.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metrics/MetricsConstants.java index 24c8c4cc3a..a8baf155c5 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metrics/MetricsConstants.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metrics/MetricsConstants.java @@ -45,4 +45,7 @@ public static final String TOTAL_DATABASES = "total_count_dbs"; public static final String TOTAL_TABLES = "total_count_tables"; public static final String TOTAL_PARTITIONS = "total_count_partitions"; + + public static final String METADATA_CACHE_HIT = "metadata_cache_hit"; + public static final String METADATA_CACHE_MISS = "metadata_cache_miss"; } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java index 83306bf653..2d5fc52d04 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java @@ -56,6 +56,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.common.ValidReadTxnList; import org.apache.hadoop.hive.common.ValidReaderWriteIdList; import org.apache.hadoop.hive.common.ValidTxnList; @@ -1380,10 +1381,24 @@ public void replTableWriteIdState(ReplTblWriteIdStateRequest rqst) throws MetaEx quoteString(dbName), quoteString(tblName)); rs = pStmt.executeQuery(); if (rs.next()) { - LOG.info("Idempotent flow: WriteId state <" + validWriteIdList + "> is already applied for the table: " - + dbName + "." + tblName); - rollbackDBConn(dbConn); - return; + // TODO: Ideally we should fail in this case; however, currently Hive.java will generate a write id + // in both the regular and the repl load flow, and we have not yet found a way to suppress it within the repl load scope. + // Once we can suppress new write id allocation in Hive.java inside repl load, we can bring this back. + LOG.info("Removing writeId state for table to keep the flow idempotent: " + + dbName + "." + tblName); + sql = "delete from TXN_TO_WRITE_ID where t2w_database = ?
and t2w_table = ?"; + closeStmt(pStmt); + pStmt = sqlGenerator.prepareStmtWithParameters(dbConn, sql, params); + LOG.debug("Going to execute delete <" + sql.replaceAll("\\?", "{}") + ">", + quoteString(dbName), quoteString(tblName)); + pStmt.executeUpdate(); + + sql = "delete from NEXT_WRITE_ID where nwi_database = ? and nwi_table = ?"; + closeStmt(pStmt); + pStmt = sqlGenerator.prepareStmtWithParameters(dbConn, sql, params); + LOG.debug("Going to execute delete <" + sql.replaceAll("\\?", "{}") + ">", + quoteString(dbName), quoteString(tblName)); + pStmt.executeUpdate(); } if (numAbortedWrites > 0) { @@ -1504,6 +1519,45 @@ private ValidTxnList getValidTxnList(Connection dbConn, String fullTableName, Lo } } + @Override + @RetrySemantics.ReadOnly + public GetTxnTableWriteIdsResponse getTxnTableWriteIds(long txnId) throws MetaException { + try { + PreparedStatement pst = null; + ResultSet rs = null; + Connection dbConn = null; + try { + /** + * This runs at READ_COMMITTED for exactly the same reason as {@link #getOpenTxnsInfo()} + */ + dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); + + List params = Arrays.asList(Long.toString(txnId)); + String s = "select t2w_database, t2w_table, t2w_writeid from TXN_TO_WRITE_ID where t2w_txnid = ?"; + pst = sqlGenerator.prepareStmtWithParameters(dbConn, s, params); + LOG.debug("Going to execute query <" + s.replaceAll("\\?", "{}") + ">", txnId); + rs = pst.executeQuery(); + List tableWriteIds = new ArrayList<>(); + while (rs.next()) { + tableWriteIds.add(new TableWriteId(TableName.getDbTable(rs.getString(1), rs.getString(2)), rs.getLong(3))); + } + return new GetTxnTableWriteIdsResponse(tableWriteIds); + } catch (SQLException e) { + LOG.debug("Going to rollback"); + rollbackDBConn(dbConn); + checkRetryable(dbConn, e, "getTxnTableWriteIds(" + txnId + ")"); + throw new MetaException("Unable to get table write ids for txn " + txnId + ": " + StringUtils.stringifyException(e)); + } finally { + closeStmt(pst); + close(rs); + closeDbConn(dbConn); + } + } catch (RetryException e) { + return getTxnTableWriteIds(txnId); + } + } + @Override @RetrySemantics.ReadOnly public GetValidWriteIdsResponse getValidWriteIds(GetValidWriteIdsRequest rqst) throws MetaException { @@ -1870,13 +1925,22 @@ public void seedWriteIdOnAcidConversion(InitializeTableWriteIdsRequest rqst) dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); handle = getMutexAPI().acquireLock(MUTEX_KEY.WriteIdAllocator.name()); - //since this is on conversion from non-acid to acid, NEXT_WRITE_ID should not have an entry - //for this table. It also has a unique index in case 'should not' is violated + + // It is unlikely (unless there was a lot of DDL on the source table before conversion), + // but NEXT_WRITE_ID may already contain an entry for this table; + // remove it so the insert below can seed a fresh value. + String s = "delete from NEXT_WRITE_ID where nwi_database = ?
and nwi_table = ?"; + List params = Arrays.asList(rqst.getDbName(), rqst.getTblName()); + pst = sqlGenerator.prepareStmtWithParameters(dbConn, s, params); + LOG.debug("Going to execute delete <" + s.replaceAll("\\?", "{}") + ">", + quoteString(rqst.getDbName()), quoteString(rqst.getTblName())); + pst.executeUpdate(); + closeStmt(pst); // First allocation of write id should add the table to the next_write_id meta table // The initial value for write id should be 1 and hence we add 1 with number of write ids // allocated here - String s = "insert into NEXT_WRITE_ID (nwi_database, nwi_table, nwi_next) values (?, ?, " + s = "insert into NEXT_WRITE_ID (nwi_database, nwi_table, nwi_next) values (?, ?, " + Long.toString(rqst.getSeeWriteId() + 1) + ")"; pst = sqlGenerator.prepareStmtWithParameters(dbConn, s, Arrays.asList(rqst.getDbName(), rqst.getTblName())); LOG.debug("Going to execute insert <" + s.replaceAll("\\?", "{}") + ">", diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java index e840758c9d..ae66663c5c 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java @@ -23,6 +23,7 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidTxnWriteIdList; import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.common.classification.RetrySemantics; import org.apache.hadoop.hive.metastore.api.*; @@ -170,6 +171,9 @@ long cleanupMaterializationRebuildLocks(ValidTxnList validTxnList, long timeout) GetValidWriteIdsResponse getValidWriteIds(GetValidWriteIdsRequest rqst) throws NoSuchTxnException, MetaException; + @RetrySemantics.ReadOnly + public GetTxnTableWriteIdsResponse getTxnTableWriteIds(long txnId) throws MetaException; + /** * Allocate a write ID for the given table and associate it with a transaction * @param rqst info on transaction and table to allocate write id diff --git a/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql b/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql index e8af9a1b11..c02c38abb0 100644 --- a/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql +++ b/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql @@ -1118,6 +1118,7 @@ CREATE TABLE TXN_TO_WRITE_ID ( CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_TXNID); CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_WRITEID); +CREATE INDEX TBL_TO_WRITE_ID_IDX2 ON TXN_TO_WRITE_ID (T2W_WRITEID); CREATE TABLE NEXT_WRITE_ID ( NWI_DATABASE varchar(128) NOT NULL, diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java index da3c42a1d5..b9681384f6 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java +++ 
b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java @@ -256,11 +256,6 @@ public boolean dropTable(String catName, String dbName, String tableName) return objectStore.dropTable(catName, dbName, tableName); } - @Override - public Table getTable(String catName, String dbName, String tableName) throws MetaException { - return objectStore.getTable(catName, dbName, tableName); - } - @Override public Table getTable(String catName, String dbName, String tableName, String writeIdList) throws MetaException { @@ -273,12 +268,6 @@ public boolean addPartition(Partition part) return objectStore.addPartition(part); } - @Override - public Partition getPartition(String catName, String dbName, String tableName, List partVals) - throws MetaException, NoSuchObjectException { - return objectStore.getPartition(catName, dbName, tableName, partVals); - } - @Override public Partition getPartition(String catName, String dbName, String tableName, List partVals, String writeIdList) @@ -294,15 +283,15 @@ public boolean dropPartition(String catName, String dbName, String tableName, Li } @Override - public List getPartitions(String catName, String dbName, String tableName, int max) + public List getPartitions(String catName, String dbName, String tableName, int max, String validWriteIdList) throws MetaException, NoSuchObjectException { - return objectStore.getPartitions(catName, dbName, tableName, max); + return objectStore.getPartitions(catName, dbName, tableName, max, validWriteIdList); } @Override public Map getPartitionLocations(String catName, String dbName, String tblName, - String baseLocationToNotShow, int max) { - return objectStore.getPartitionLocations(catName, dbName, tblName, baseLocationToNotShow, max); + String baseLocationToNotShow, int max, String validWriteIdList) { + return objectStore.getPartitionLocations(catName, dbName, tblName, baseLocationToNotShow, max, validWriteIdList); } @Override @@ -363,15 +352,15 @@ public void updateCreationMetadata(String catName, String dbname, String tablena } @Override - public List listPartitionNames(String catName, String dbName, String tblName, short maxParts) + public List listPartitionNames(String catName, String dbName, String tblName, short maxParts, String validWriteIdList) throws MetaException { - return objectStore.listPartitionNames(catName, dbName, tblName, maxParts); + return objectStore.listPartitionNames(catName, dbName, tblName, maxParts, validWriteIdList); } @Override public PartitionValuesResponse listPartitionValues(String catName, String db_name, String tbl_name, List cols, boolean applyDistinct, String filter, - boolean ascending, List order, long maxParts) throws MetaException { + boolean ascending, List order, long maxParts, String validWriteIdList) throws MetaException { return null; } @@ -391,40 +380,40 @@ public Partition alterPartition(String catName, String dbName, String tblName, L @Override public List getPartitionsByFilter(String catName, String dbName, String tblName, - String filter, short maxParts) throws MetaException, NoSuchObjectException { - return objectStore.getPartitionsByFilter(catName, dbName, tblName, filter, maxParts); + String filter, short maxParts, String validWriteIdList) throws MetaException, NoSuchObjectException { + return objectStore.getPartitionsByFilter(catName, dbName, tblName, filter, maxParts, validWriteIdList); } @Override public List getPartitionSpecsByFilterAndProjection(Table table, - GetPartitionsProjectionSpec projectionSpec, 
GetPartitionsFilterSpec filterSpec) + GetPartitionsProjectionSpec projectionSpec, GetPartitionsFilterSpec filterSpec, String validWriteIdList) throws MetaException, NoSuchObjectException { - return objectStore.getPartitionSpecsByFilterAndProjection(table, projectionSpec, filterSpec); + return objectStore.getPartitionSpecsByFilterAndProjection(table, projectionSpec, filterSpec, validWriteIdList); } @Override public int getNumPartitionsByFilter(String catName, String dbName, String tblName, - String filter) throws MetaException, NoSuchObjectException { - return objectStore.getNumPartitionsByFilter(catName, dbName, tblName, filter); + String filter, String validWriteIdList) throws MetaException, NoSuchObjectException { + return objectStore.getNumPartitionsByFilter(catName, dbName, tblName, filter, validWriteIdList); } @Override public int getNumPartitionsByExpr(String catName, String dbName, String tblName, - byte[] expr) throws MetaException, NoSuchObjectException { - return objectStore.getNumPartitionsByExpr(catName, dbName, tblName, expr); + byte[] expr, String validWriteIdList) throws MetaException, NoSuchObjectException { + return objectStore.getNumPartitionsByExpr(catName, dbName, tblName, expr, validWriteIdList); } @Override public List getPartitionsByNames(String catName, String dbName, String tblName, - List partNames) throws MetaException, NoSuchObjectException { - return objectStore.getPartitionsByNames(catName, dbName, tblName, partNames); + List partNames, String validWriteIdList) throws MetaException, NoSuchObjectException { + return objectStore.getPartitionsByNames(catName, dbName, tblName, partNames, validWriteIdList); } @Override public boolean getPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr, - String defaultPartitionName, short maxParts, List result) throws TException { + String defaultPartitionName, short maxParts, List result, String validWriteIdList) throws TException { return objectStore.getPartitionsByExpr(catName, - dbName, tblName, expr, defaultPartitionName, maxParts, result); + dbName, tblName, expr, defaultPartitionName, maxParts, result, validWriteIdList); } @Override @@ -591,33 +580,33 @@ public Role getRole(String roleName) throws NoSuchObjectException { @Override public Partition getPartitionWithAuth(String catName, String dbName, String tblName, - List partVals, String userName, List groupNames) + List partVals, String userName, List groupNames, String validWriteIdList) throws MetaException, NoSuchObjectException, InvalidObjectException { return objectStore.getPartitionWithAuth(catName, dbName, tblName, partVals, userName, - groupNames); + groupNames, validWriteIdList); } @Override public List getPartitionsWithAuth(String catName, String dbName, String tblName, - short maxParts, String userName, List groupNames) + short maxParts, String userName, List groupNames, String validWriteIdList) throws MetaException, NoSuchObjectException, InvalidObjectException { return objectStore.getPartitionsWithAuth(catName, dbName, tblName, maxParts, userName, - groupNames); + groupNames, validWriteIdList); } @Override public List listPartitionNamesPs(String catName, String dbName, String tblName, - List partVals, short maxParts) + List partVals, short maxParts, String validWriteIdList) throws MetaException, NoSuchObjectException { - return objectStore.listPartitionNamesPs(catName, dbName, tblName, partVals, maxParts); + return objectStore.listPartitionNamesPs(catName, dbName, tblName, partVals, maxParts, validWriteIdList); } @Override public List 
listPartitionsPsWithAuth(String catName, String dbName, String tblName, - List partVals, short maxParts, String userName, List groupNames) + List partVals, short maxParts, String userName, List groupNames, String validWriteIdList) throws MetaException, InvalidObjectException, NoSuchObjectException { return objectStore.listPartitionsPsWithAuth(catName, dbName, tblName, partVals, maxParts, - userName, groupNames); + userName, groupNames, validWriteIdList); } @Override @@ -690,17 +679,8 @@ public long cleanupEvents() { @Override public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tableName, - List colNames) throws MetaException, NoSuchObjectException { - return objectStore.getTableColumnStatistics(catName, dbName, tableName, colNames); - } - - @Override - public ColumnStatistics getTableColumnStatistics(String catName, String dbName, - String tableName, List colNames, - String writeIdList) - throws MetaException, NoSuchObjectException { - return objectStore.getTableColumnStatistics( - catName, dbName, tableName, colNames, writeIdList); + List colNames, String validWriteIdList) throws MetaException, NoSuchObjectException { + return objectStore.getTableColumnStatistics(catName, dbName, tableName, colNames, validWriteIdList); } @Override @@ -788,13 +768,6 @@ public void setMetaStoreSchemaVersion(String schemaVersion, String comment) thro } - @Override - public List getPartitionColumnStatistics(String catName, String dbName, - String tblName, List colNames, List partNames) - throws MetaException, NoSuchObjectException { - return objectStore.getPartitionColumnStatistics(catName, dbName, tblName , colNames, partNames); - } - @Override public List getPartitionColumnStatistics( String catName, String dbName, String tblName, List partNames, @@ -806,9 +779,9 @@ public void setMetaStoreSchemaVersion(String schemaVersion, String comment) thro @Override public boolean doesPartitionExist(String catName, String dbName, String tableName, - List partKeys, List partVals) + List partKeys, List partVals, String validWriteIdList) throws MetaException, NoSuchObjectException { - return objectStore.doesPartitionExist(catName, dbName, tableName, partKeys, partVals); + return objectStore.doesPartitionExist(catName, dbName, tableName, partKeys, partVals, validWriteIdList); } @Override @@ -865,13 +838,6 @@ public Function getFunction(String catName, String dbName, String funcName) return objectStore.getFunctions(catName, dbName, pattern); } - @Override - public AggrStats get_aggr_stats_for(String catName, String dbName, - String tblName, List partNames, List colNames) - throws MetaException { - return null; - } - @Override public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, List partNames, diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java index a018c503d1..5226db3b13 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java @@ -239,12 +239,6 @@ public boolean dropTable(String catName, String dbName, String tableName) throws return false; } - @Override - public Table getTable(String catName, String dbName, String tableName) throws MetaException { - - return 
null; - } - @Override public Table getTable(String catalogName, String dbName, String tableName, String writeIdList) throws MetaException { @@ -257,13 +251,6 @@ public boolean addPartition(Partition part) throws InvalidObjectException, MetaE return false; } - @Override - public Partition getPartition(String catName, String dbName, String tableName, List part_vals) - throws MetaException, NoSuchObjectException { - - return null; - } - @Override public Partition getPartition(String catName, String dbName, String tableName, List part_vals, String writeIdList) @@ -279,7 +266,7 @@ public boolean dropPartition(String catName, String dbName, String tableName, Li } @Override - public List getPartitions(String catName, String dbName, String tableName, int max) + public List getPartitions(String catName, String dbName, String tableName, int max, String validWriteIdList) throws MetaException { return Collections.emptyList(); @@ -287,7 +274,7 @@ public boolean dropPartition(String catName, String dbName, String tableName, Li @Override public Map getPartitionLocations(String catName, String dbName, String tblName, - String baseLocationToNotShow, int max) { + String baseLocationToNotShow, int max, String validWriteIdList) { return Collections.emptyMap(); } @@ -350,7 +337,7 @@ public void updateCreationMetadata(String catName, String dbname, String tablena } @Override - public List listPartitionNames(String catName, String db_name, String tbl_name, short max_parts) + public List listPartitionNames(String catName, String db_name, String tbl_name, short max_parts, String validWriteIdList) throws MetaException { return Collections.emptyList(); @@ -361,7 +348,7 @@ public PartitionValuesResponse listPartitionValues(String catName, String db_nam String tbl_name, List cols, boolean applyDistinct, String filter, boolean ascending, List order, - long maxParts) throws MetaException { + long maxParts, String validWriteIdList) throws MetaException { return null; } @@ -380,7 +367,7 @@ public Partition alterPartition(String catName, String db_name, String tbl_name, @Override public List getPartitionsByFilter(String catName, String dbName, String tblName, - String filter, short maxParts) + String filter, short maxParts, String validWriteIdList) throws MetaException, NoSuchObjectException { return Collections.emptyList(); @@ -388,32 +375,32 @@ public Partition alterPartition(String catName, String db_name, String tbl_name, @Override public List getPartitionSpecsByFilterAndProjection(Table table, - GetPartitionsProjectionSpec projectSpec, GetPartitionsFilterSpec filterSpec) + GetPartitionsProjectionSpec projectSpec, GetPartitionsFilterSpec filterSpec, String validWriteIdList) throws MetaException, NoSuchObjectException { return Collections.emptyList(); } @Override public List getPartitionsByNames(String catName, String dbName, String tblName, - List partNames) throws MetaException, NoSuchObjectException { + List partNames, String validWriteIdList) throws MetaException, NoSuchObjectException { return Collections.emptyList(); } @Override public boolean getPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr, - String defaultPartitionName, short maxParts, List result) throws TException { + String defaultPartitionName, short maxParts, List result, String validWriteIdList) throws TException { return false; } @Override - public int getNumPartitionsByFilter(String catName, String dbName, String tblName, String filter) + public int getNumPartitionsByFilter(String catName, String dbName, String tblName, 
String filter, String validWriteIdList) throws MetaException, NoSuchObjectException { return -1; } @Override - public int getNumPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr) + public int getNumPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr, String validWriteIdList) throws MetaException, NoSuchObjectException { return -1; } @@ -594,7 +581,7 @@ public Role getRole(String roleName) throws NoSuchObjectException { @Override public Partition getPartitionWithAuth(String catName, String dbName, String tblName, List partVals, - String user_name, List group_names) throws MetaException, NoSuchObjectException, + String user_name, List group_names, String validWriteIdList) throws MetaException, NoSuchObjectException, InvalidObjectException { return null; @@ -602,7 +589,7 @@ public Partition getPartitionWithAuth(String catName, String dbName, String tblN @Override public List getPartitionsWithAuth(String catName, String dbName, String tblName, short maxParts, - String userName, List groupNames) throws MetaException, NoSuchObjectException, + String userName, List groupNames, String validWriteIdList) throws MetaException, NoSuchObjectException, InvalidObjectException { return Collections.emptyList(); @@ -610,14 +597,14 @@ public Partition getPartitionWithAuth(String catName, String dbName, String tblN @Override public List listPartitionNamesPs(String catName, String db_name, String tbl_name, List part_vals, - short max_parts) throws MetaException, NoSuchObjectException { + short max_parts, String validWriteIdList) throws MetaException, NoSuchObjectException { return Collections.emptyList(); } @Override public List listPartitionsPsWithAuth(String catName, String db_name, String tbl_name, - List part_vals, short max_parts, String userName, List groupNames) + List part_vals, short max_parts, String userName, List groupNames, String validWriteIdList) throws MetaException, InvalidObjectException, NoSuchObjectException { return Collections.emptyList(); @@ -728,12 +715,6 @@ public boolean removeMasterKey(Integer keySeq) { return Collections.emptyList(); } - @Override - public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tableName, - List colName) throws MetaException, NoSuchObjectException { - return null; - } - @Override public ColumnStatistics getTableColumnStatistics( String catName, String dbName, String tableName, List colName, @@ -786,13 +767,6 @@ public String getMetaStoreSchemaVersion() throws MetaException { public void setMetaStoreSchemaVersion(String version, String comment) throws MetaException { } - @Override - public List getPartitionColumnStatistics(String catName, String dbName, - String tblName, List colNames, List partNames) - throws MetaException, NoSuchObjectException { - return Collections.emptyList(); - } - @Override public List getPartitionColumnStatistics( String catName, String dbName, String tblName, List partNames, @@ -803,7 +777,7 @@ public void setMetaStoreSchemaVersion(String version, String comment) throws Met @Override public boolean doesPartitionExist(String catName, String dbName, String tableName, - List partKeys, List partVals) + List partKeys, List partVals, String validWriteIdList) throws MetaException, NoSuchObjectException { return false; } @@ -857,13 +831,6 @@ public Function getFunction(String catName, String dbName, String funcName) return Collections.emptyList(); } - @Override - public AggrStats get_aggr_stats_for(String catName, String dbName, - String tblName, List 
partNames, List colNames) - throws MetaException { - return null; - } - @Override public AggrStats get_aggr_stats_for( String catName, String dbName, String tblName, List partNames, diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java index ccd7af5545..0313d1143c 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java @@ -57,6 +57,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidTxnWriteIdList; import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.metastore.api.*; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; @@ -1263,21 +1264,21 @@ public boolean dropType(String type) throws NoSuchObjectException, MetaException @Override public List listPartitions(String db_name, String tbl_name, short max_parts) throws NoSuchObjectException, MetaException, TException { - List parts = client.get_partitions(db_name, tbl_name, max_parts); + List parts = client.get_partitions(db_name, tbl_name, max_parts, null); return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts)); } @Override public PartitionSpecProxy listPartitionSpecs(String dbName, String tableName, int maxParts) throws TException { return PartitionSpecProxy.Factory.get(filterHook.filterPartitionSpecs( - client.get_partitions_pspec(dbName, tableName, maxParts))); + client.get_partitions_pspec(dbName, tableName, maxParts, null))); } @Override public List listPartitions(String db_name, String tbl_name, List part_vals, short max_parts) throws NoSuchObjectException, MetaException, TException { - List parts = client.get_partitions_ps(db_name, tbl_name, part_vals, max_parts); + List parts = client.get_partitions_ps(db_name, tbl_name, part_vals, max_parts, null); return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts)); } @@ -1286,7 +1287,7 @@ public PartitionSpecProxy listPartitionSpecs(String dbName, String tableName, in String tbl_name, short max_parts, String user_name, List group_names) throws NoSuchObjectException, MetaException, TException { List parts = client.get_partitions_with_auth(db_name, tbl_name, max_parts, - user_name, group_names); + user_name, group_names, null); return fastpath ? parts :deepCopyPartitions(filterHook.filterPartitions(parts)); } @@ -1296,7 +1297,7 @@ public PartitionSpecProxy listPartitionSpecs(String dbName, String tableName, in String user_name, List group_names) throws NoSuchObjectException, MetaException, TException { List parts = client.get_partitions_ps_with_auth(db_name, - tbl_name, part_vals, max_parts, user_name, group_names); + tbl_name, part_vals, max_parts, user_name, group_names, null); return fastpath ? 
parts : deepCopyPartitions(filterHook.filterPartitions(parts)); } @@ -1318,7 +1319,7 @@ public PartitionSpecProxy listPartitionSpecs(String dbName, String tableName, in public List listPartitionsByFilter(String db_name, String tbl_name, String filter, short max_parts) throws MetaException, NoSuchObjectException, TException { - List parts = client.get_partitions_by_filter(db_name, tbl_name, filter, max_parts); + List parts = client.get_partitions_by_filter(db_name, tbl_name, filter, max_parts, null); return fastpath ? parts :deepCopyPartitions(filterHook.filterPartitions(parts)); } @@ -1327,7 +1328,7 @@ public PartitionSpecProxy listPartitionSpecsByFilter(String db_name, String tbl_ String filter, int max_parts) throws MetaException, NoSuchObjectException, TException { return PartitionSpecProxy.Factory.get(filterHook.filterPartitionSpecs( - client.get_part_specs_by_filter(db_name, tbl_name, filter, max_parts))); + client.get_part_specs_by_filter(db_name, tbl_name, filter, max_parts, null))); } @Override @@ -1393,7 +1394,7 @@ public Database getDatabase(String name) throws NoSuchObjectException, @Override public Partition getPartition(String db_name, String tbl_name, List part_vals) throws NoSuchObjectException, MetaException, TException { - Partition p = client.get_partition(db_name, tbl_name, part_vals); + Partition p = client.get_partition(db_name, tbl_name, part_vals, null); return fastpath ? p : deepCopy(filterHook.filterPartition(p)); } @@ -1426,7 +1427,7 @@ public Partition getPartitionWithAuthInfo(String db_name, String tbl_name, throws MetaException, UnknownTableException, NoSuchObjectException, TException { Partition p = client.get_partition_with_auth(db_name, tbl_name, part_vals, user_name, - group_names); + group_names, null); return fastpath ? p : deepCopy(filterHook.filterPartition(p)); } @@ -1607,7 +1608,7 @@ public boolean tableExists(String databaseName, String tableName) throws MetaExc public List listPartitionNames(String dbName, String tblName, short max) throws NoSuchObjectException, MetaException, TException { return filterHook.filterPartitionNames(null, dbName, tblName, - client.get_partition_names(dbName, tblName, max)); + client.get_partition_names(dbName, tblName, max, null)); } @Override @@ -1615,7 +1616,7 @@ public boolean tableExists(String databaseName, String tableName) throws MetaExc List part_vals, short max_parts) throws MetaException, TException, NoSuchObjectException { return filterHook.filterPartitionNames(null, db_name, tbl_name, - client.get_partition_names_ps(db_name, tbl_name, part_vals, max_parts)); + client.get_partition_names_ps(db_name, tbl_name, part_vals, max_parts, null)); } /** @@ -1634,7 +1635,7 @@ public boolean tableExists(String databaseName, String tableName) throws MetaExc public int getNumPartitionsByFilter(String db_name, String tbl_name, String filter) throws MetaException, NoSuchObjectException, TException { - return client.get_num_partitions_by_filter(db_name, tbl_name, filter); + return client.get_num_partitions_by_filter(db_name, tbl_name, filter, null); } @Override @@ -1699,7 +1700,7 @@ public void alterDatabase(String dbName, Database db) public List getFields(String db, String tableName) throws MetaException, TException, UnknownTableException, UnknownDBException { - List fields = client.get_fields(db, tableName); + List fields = client.get_fields(db, tableName, null); return fastpath ? 
fields : deepCopyFieldSchemas(fields); } @@ -1856,7 +1857,7 @@ public boolean deleteTableColumnStatistics(String dbName, String tableName, Stri envCxt = new EnvironmentContext(props); } - List fields = client.get_schema_with_environment_context(db, tableName, envCxt); + List fields = client.get_schema_with_environment_context(db, tableName, envCxt, null); return fastpath ? fields : deepCopyFieldSchemas(fields); } @@ -1869,7 +1870,7 @@ public String getConfigValue(String name, String defaultValue) @Override public Partition getPartition(String db, String tableName, String partName) throws MetaException, TException, UnknownTableException, NoSuchObjectException { - Partition p = client.get_partition_by_name(db, tableName, partName); + Partition p = client.get_partition_by_name(db, tableName, partName, null); return fastpath ? p : deepCopy(filterHook.filterPartition(p)); } @@ -3141,6 +3142,12 @@ public Table getTable(String catName, String dbName, String tableName) throws Me throw new UnsupportedOperationException(); } + @Override + public Table getTable(String catName, String dbName, String tableName, boolean checkTransactional, boolean getColumnStats) + throws MetaException, TException { + throw new UnsupportedOperationException(); + } + @Override public Table getTable(String catName, String dbName, boolean getColumnStats) throws MetaException, TException { @@ -3679,4 +3686,14 @@ public void setHadoopJobid(String jobId, long cqId) throws MetaException, TExcep public String getServerVersion() throws TException { return client.getVersion(); } + + @Override + public void setValidWriteIdList(String txnWriteIdList) { + throw new UnsupportedOperationException(); + } + + @Override + public void clearValidWriteIdList() { + throw new UnsupportedOperationException(); + } } diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java index 6c7fe116cc..4f314012c0 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java @@ -153,11 +153,6 @@ public static void resetAlterTableModifier() { } // ObjectStore methods to be overridden with injected behavior - @Override - public Table getTable(String catName, String dbName, String tableName) throws MetaException { - return getTableModifier.apply(super.getTable(catName, dbName, tableName)); - } - @Override public Table getTable(String catName, String dbName, String tableName, String writeIdList) throws MetaException { return getTableModifier.apply(super.getTable(catName, dbName, tableName, writeIdList)); @@ -165,14 +160,14 @@ public Table getTable(String catName, String dbName, String tableName, String wr @Override public Partition getPartition(String catName, String dbName, String tableName, - List partVals) throws NoSuchObjectException, MetaException { - return getPartitionModifier.apply(super.getPartition(catName, dbName, tableName, partVals)); + List partVals, String validWriteIdList) throws NoSuchObjectException, MetaException { + return getPartitionModifier.apply(super.getPartition(catName, dbName, tableName, partVals, validWriteIdList)); } @Override - public List listPartitionNames(String catName, String dbName, String tableName, short max) + public List 
listPartitionNames(String catName, String dbName, String tableName, short max, String validWriteIdList) throws MetaException { - return listPartitionNamesModifier.apply(super.listPartitionNames(catName, dbName, tableName, max)); + return listPartitionNamesModifier.apply(super.listPartitionNames(catName, dbName, tableName, max, validWriteIdList)); } @Override diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveAlterHandler.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveAlterHandler.java index 88d5e716e1..9ab8890d4b 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveAlterHandler.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveAlterHandler.java @@ -57,7 +57,7 @@ public void testAlterTableAddColNotUpdateStats() throws MetaException, InvalidOb RawStore msdb = Mockito.mock(RawStore.class); Mockito.doThrow(new RuntimeException("shouldn't be called")).when(msdb).getTableColumnStatistics( - getDefaultCatalog(conf), oldTable.getDbName(), oldTable.getTableName(), Arrays.asList("col1", "col2", "col3")); + getDefaultCatalog(conf), oldTable.getDbName(), oldTable.getTableName(), Arrays.asList("col1", "col2", "col3"), null); HiveAlterHandler handler = new HiveAlterHandler(); handler.setConf(conf); handler.alterTableUpdateTableColumnStats(msdb, oldTable, newTable, null, null, conf, null); @@ -93,7 +93,7 @@ public void testAlterTableDelColUpdateStats() throws Exception { throw t; } Mockito.verify(msdb, Mockito.times(1)).getTableColumnStatistics( - getDefaultCatalog(conf), oldTable.getDbName(), oldTable.getTableName(), Arrays.asList("col1", "col2", "col3", "col4") + getDefaultCatalog(conf), oldTable.getDbName(), oldTable.getTableName(), Arrays.asList("col1", "col2", "col3", "col4"), null ); } @@ -118,7 +118,7 @@ public void testAlterTableChangePosNotUpdateStats() throws MetaException, Invali RawStore msdb = Mockito.mock(RawStore.class); Mockito.doThrow(new RuntimeException("shouldn't be called")).when(msdb).getTableColumnStatistics( - getDefaultCatalog(conf), oldTable.getDbName(), oldTable.getTableName(), Arrays.asList("col1", "col2", "col3", "col4")); + getDefaultCatalog(conf), oldTable.getDbName(), oldTable.getTableName(), Arrays.asList("col1", "col2", "col3", "col4"), null); HiveAlterHandler handler = new HiveAlterHandler(); handler.setConf(conf); handler.alterTableUpdateTableColumnStats(msdb, oldTable, newTable, null, null, conf, null); diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java index 1f7f69a86a..2157310766 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java @@ -282,7 +282,7 @@ public void testTableOps() throws MetaException, InvalidObjectException, NoSuchO Assert.assertEquals("new" + TABLE1, tables.get(0)); // Verify fields were altered during the alterTable operation - Table alteredTable = objectStore.getTable(DEFAULT_CATALOG_NAME, DB1, "new" + TABLE1); + Table alteredTable = objectStore.getTable(DEFAULT_CATALOG_NAME, DB1, "new" + TABLE1, null); Assert.assertEquals("Owner of table was not altered", newTbl1.getOwner(), 
alteredTable.getOwner()); Assert.assertEquals("Owner type of table was not altered", newTbl1.getOwnerType(), alteredTable.getOwnerType()); @@ -370,19 +370,19 @@ public void testPartitionOps() throws MetaException, InvalidObjectException, objectStore.addPartition(part2); Deadline.startTimer("getPartition"); - List partitions = objectStore.getPartitions(DEFAULT_CATALOG_NAME, DB1, TABLE1, 10); + List partitions = objectStore.getPartitions(DEFAULT_CATALOG_NAME, DB1, TABLE1, 10, null); Assert.assertEquals(2, partitions.size()); Assert.assertEquals(111, partitions.get(0).getCreateTime()); Assert.assertEquals(222, partitions.get(1).getCreateTime()); - int numPartitions = objectStore.getNumPartitionsByFilter(DEFAULT_CATALOG_NAME, DB1, TABLE1, ""); + int numPartitions = objectStore.getNumPartitionsByFilter(DEFAULT_CATALOG_NAME, DB1, TABLE1, "", null); Assert.assertEquals(partitions.size(), numPartitions); - numPartitions = objectStore.getNumPartitionsByFilter(DEFAULT_CATALOG_NAME, DB1, TABLE1, "country = \"US\""); + numPartitions = objectStore.getNumPartitionsByFilter(DEFAULT_CATALOG_NAME, DB1, TABLE1, "country = \"US\"", null); Assert.assertEquals(2, numPartitions); objectStore.dropPartition(DEFAULT_CATALOG_NAME, DB1, TABLE1, value1); - partitions = objectStore.getPartitions(DEFAULT_CATALOG_NAME, DB1, TABLE1, 10); + partitions = objectStore.getPartitions(DEFAULT_CATALOG_NAME, DB1, TABLE1, 10, null); Assert.assertEquals(1, partitions.size()); Assert.assertEquals(222, partitions.get(0).getCreateTime()); @@ -788,7 +788,7 @@ private static void dropAllStoreObjects(RawStore store) List tbls = store.getAllTables(DEFAULT_CATALOG_NAME, db); for (String tbl : tbls) { Deadline.startTimer("getPartition"); - List parts = store.getPartitions(DEFAULT_CATALOG_NAME, db, tbl, 100); + List parts = store.getPartitions(DEFAULT_CATALOG_NAME, db, tbl, 100, null); for (Partition part : parts) { store.dropPartition(DEFAULT_CATALOG_NAME, db, tbl, part.getValues()); } diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java index 27c5bba5f7..669213b727 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java @@ -199,7 +199,7 @@ public void checkStats(AggrStats aggrStats) throws Exception { partNames.add("ds=" + i); } AggrStats aggrStats = store.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tableName, partNames, - Arrays.asList("col1")); + Arrays.asList("col1"), null); statChecker.checkStats(aggrStats); } @@ -218,7 +218,7 @@ private static void dropAllStoreObjects(RawStore store) throws MetaException, String db = dbs.get(i); List tbls = store.getAllTables(DEFAULT_CATALOG_NAME, db); for (String tbl : tbls) { - List parts = store.getPartitions(DEFAULT_CATALOG_NAME, db, tbl, 100); + List parts = store.getPartitions(DEFAULT_CATALOG_NAME, db, tbl, 100, null); for (Partition part : parts) { store.dropPartition(DEFAULT_CATALOG_NAME, db, tbl, part.getValues()); } diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/VerifyingObjectStore.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/VerifyingObjectStore.java index c9a6a471cb..9339309553 100644 --- 
a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/VerifyingObjectStore.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/VerifyingObjectStore.java @@ -52,7 +52,7 @@ public VerifyingObjectStore() { @Override public List getPartitionsByFilter(String catName, String dbName, String tblName, - String filter, short maxParts) + String filter, short maxParts, String validWriteIdList) throws MetaException, NoSuchObjectException { List sqlResults = getPartitionsByFilterInternal( catName, dbName, tblName, filter, maxParts, true, false); @@ -64,7 +64,7 @@ public VerifyingObjectStore() { @Override public List getPartitionsByNames(String catName, String dbName, String tblName, - List partNames) throws MetaException, NoSuchObjectException { + List partNames, String validWriteIdList) throws MetaException, NoSuchObjectException { List sqlResults = getPartitionsByNamesInternal( catName, dbName, tblName, partNames, true, false); List ormResults = getPartitionsByNamesInternal( @@ -75,7 +75,7 @@ public VerifyingObjectStore() { @Override public boolean getPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr, - String defaultPartitionName, short maxParts, List result) throws TException { + String defaultPartitionName, short maxParts, List result, String validWriteIdList) throws TException { List ormParts = new LinkedList<>(); boolean sqlResult = getPartitionsByExprInternal( catName, dbName, tblName, expr, defaultPartitionName, maxParts, result, true, false); @@ -92,7 +92,7 @@ public boolean getPartitionsByExpr(String catName, String dbName, String tblName @Override public List getPartitions( - String catName, String dbName, String tableName, int maxParts) throws MetaException, NoSuchObjectException { + String catName, String dbName, String tableName, int maxParts, String validWriteIdList) throws MetaException, NoSuchObjectException { List sqlResults = getPartitionsInternal(catName, dbName, tableName, maxParts, true, false); List ormResults = getPartitionsInternal(catName, dbName, tableName, maxParts, false, true); verifyLists(sqlResults, ormResults, Partition.class); @@ -101,7 +101,7 @@ public boolean getPartitionsByExpr(String catName, String dbName, String tblName @Override public ColumnStatistics getTableColumnStatistics(String catName, String dbName, - String tableName, List colNames) throws MetaException, NoSuchObjectException { + String tableName, List colNames, String validWriteIdList) throws MetaException, NoSuchObjectException { ColumnStatistics sqlResult = getTableColumnStatisticsInternal( catName, dbName, tableName, colNames, true, false); ColumnStatistics jdoResult = getTableColumnStatisticsInternal( @@ -112,7 +112,7 @@ public ColumnStatistics getTableColumnStatistics(String catName, String dbName, @Override public List getPartitionColumnStatistics(String catName, String dbName, - String tableName, List partNames, List colNames) + String tableName, List partNames, List colNames, String validWriteIdList) throws MetaException, NoSuchObjectException { List sqlResult = getPartitionColumnStatisticsInternal( catName, dbName, tableName, partNames, colNames, true, false); diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java index 420369d792..6cf066b017 100644 --- 
a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java @@ -19,6 +19,7 @@ import java.util.ArrayList; import java.util.Arrays; +import java.util.BitSet; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -28,6 +29,9 @@ import java.util.concurrent.ThreadFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.common.TableName; +import org.apache.hadoop.hive.common.ValidReaderWriteIdList; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.common.ndv.hll.HyperLogLog; import org.apache.hadoop.hive.metastore.Deadline; import org.apache.hadoop.hive.metastore.HiveMetaStore; @@ -164,7 +168,7 @@ ObjectStore objectStore = (ObjectStore) cachedStore.getRawStore(); // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); - CachedStore.prewarm(objectStore); + CachedStore.prewarm(objectStore, conf); List allDatabases = cachedStore.getAllDatabases(DEFAULT_CATALOG_NAME); Assert.assertEquals(2, allDatabases.size()); Assert.assertTrue(allDatabases.contains(db1.getName())); @@ -178,20 +182,21 @@ Assert.assertTrue(db2Tables.contains(db2Utbl1.getTableName())); Assert.assertTrue(db2Tables.contains(db2Ptbl1.getTableName())); // cs_db1_ptntbl1 + Deadline.startTimer(""); List db1Ptbl1Partitions = - cachedStore.getPartitions(DEFAULT_CATALOG_NAME, db1.getName(), db1Ptbl1.getTableName(), -1); + cachedStore.getPartitions(DEFAULT_CATALOG_NAME, db1.getName(), db1Ptbl1.getTableName(), -1, null); Assert.assertEquals(25, db1Ptbl1Partitions.size()); Deadline.startTimer(""); List db1Ptbl1PartitionsOS = - objectStore.getPartitions(DEFAULT_CATALOG_NAME, db2.getName(), db1Ptbl1.getTableName(), -1); + objectStore.getPartitions(DEFAULT_CATALOG_NAME, db2.getName(), db1Ptbl1.getTableName(), -1, null); Assert.assertTrue(db1Ptbl1Partitions.containsAll(db1Ptbl1PartitionsOS)); // cs_db2_ptntbl1 List db2Ptbl1Partitions = - cachedStore.getPartitions(DEFAULT_CATALOG_NAME, db2.getName(), db2Ptbl1.getTableName(), -1); + cachedStore.getPartitions(DEFAULT_CATALOG_NAME, db2.getName(), db2Ptbl1.getTableName(), -1, null); Assert.assertEquals(25, db2Ptbl1Partitions.size()); Deadline.startTimer(""); List db2Ptbl1PartitionsOS = - objectStore.getPartitions(DEFAULT_CATALOG_NAME, db2.getName(), db2Ptbl1.getTableName(), -1); + objectStore.getPartitions(DEFAULT_CATALOG_NAME, db2.getName(), db2Ptbl1.getTableName(), -1, null); Assert.assertTrue(db2Ptbl1Partitions.containsAll(db2Ptbl1PartitionsOS)); cachedStore.shutdown(); } @@ -209,7 +214,7 @@ ObjectStore objectStore = (ObjectStore) cachedStore.getRawStore(); // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); - CachedStore.prewarm(objectStore); + CachedStore.prewarm(objectStore, conf); SharedCache sharedCache = CachedStore.getSharedCache(); // cachedStore.getAllTables falls back to objectStore when whitelist/blacklist is set List db1Tables = sharedCache.listCachedTableNames(DEFAULT_CATALOG_NAME, db1.getName()); @@ -232,7 +237,7 @@ ObjectStore objectStore = (ObjectStore) cachedStore.getRawStore(); // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); - CachedStore.prewarm(objectStore); + CachedStore.prewarm(objectStore, conf); SharedCache sharedCache = CachedStore.getSharedCache(); // cachedStore.getAllTables falls back to objectStore when whitelist/blacklist is set List db1Tables = 
sharedCache.listCachedTableNames(DEFAULT_CATALOG_NAME, db1.getName()); @@ -256,7 +261,7 @@ public void testPrewarmMemoryEstimation() throws Exception { ObjectStore objectStore = (ObjectStore) cachedStore.getRawStore(); // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); - CachedStore.prewarm(objectStore); + CachedStore.prewarm(objectStore, conf); SharedCache sharedCache = CachedStore.getSharedCache(); List db1Tables = sharedCache.listCachedTableNames(DEFAULT_CATALOG_NAME, db1.getName()); Assert.assertEquals(2, db1Tables.size()); @@ -265,88 +270,6 @@ public void testPrewarmMemoryEstimation() throws Exception { cachedStore.shutdown(); } - @Test public void testCacheUpdate() throws Exception { - Configuration conf = MetastoreConf.newMetastoreConf(); - MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true); - MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CACHED_RAW_STORE_MAX_CACHE_MEMORY, "-1Kb"); - MetaStoreTestUtils.setConfForStandloneMode(conf); - CachedStore cachedStore = new CachedStore(); - CachedStore.clearSharedCache(); - cachedStore.setConfForTest(conf); - ObjectStore objectStore = (ObjectStore) cachedStore.getRawStore(); - // Prewarm CachedStore - CachedStore.setCachePrewarmedState(false); - CachedStore.prewarm(objectStore); - // Drop basedb1's unpartitioned table - objectStore.dropTable(DEFAULT_CATALOG_NAME, db1Utbl1.getDbName(), db1Utbl1.getTableName()); - Deadline.startTimer(""); - // Drop a partitions of basedb1's partitioned table - objectStore.dropPartitions(DEFAULT_CATALOG_NAME, db1Ptbl1.getDbName(), db1Ptbl1.getTableName(), db1Ptbl1PtnNames); - // Update SharedCache - updateCache(cachedStore); - List allDatabases = cachedStore.getAllDatabases(DEFAULT_CATALOG_NAME); - Assert.assertEquals(2, allDatabases.size()); - Assert.assertTrue(allDatabases.contains(db1.getName())); - Assert.assertTrue(allDatabases.contains(db2.getName())); - // cs_db1_ptntbl1 - List db1Tbls = cachedStore.getAllTables(DEFAULT_CATALOG_NAME, db1.getName()); - Assert.assertEquals(1, db1Tbls.size()); - Assert.assertTrue(db1Tbls.contains(db1Ptbl1.getTableName())); - List db1Ptns = - cachedStore.getPartitions(DEFAULT_CATALOG_NAME, db1.getName(), db1Ptbl1.getTableName(), -1); - Assert.assertEquals(0, db1Ptns.size()); - // cs_db2_ptntbl1 - List db2Tbls = cachedStore.getAllTables(DEFAULT_CATALOG_NAME, db2.getName()); - Assert.assertEquals(2, db2Tbls.size()); - Assert.assertTrue(db2Tbls.contains(db2Utbl1.getTableName())); - Assert.assertTrue(db2Tbls.contains(db2Ptbl1.getTableName())); - List db2Ptns = - cachedStore.getPartitions(DEFAULT_CATALOG_NAME, db2.getName(), db2Ptbl1.getTableName(), -1); - Assert.assertEquals(25, db2Ptns.size()); - Deadline.startTimer(""); - List db2PtnsOS = - objectStore.getPartitions(DEFAULT_CATALOG_NAME, db2.getName(), db2Ptbl1.getTableName(), -1); - Assert.assertTrue(db2Ptns.containsAll(db2PtnsOS)); - // Create a new unpartitioned table under basedb1 - Table db1Utbl2 = createUnpartitionedTableObject(db1); - db1Utbl2.setTableName(db1.getName() + "_unptntbl2"); - objectStore.createTable(db1Utbl2); - // Add a new partition to db1PartitionedTable - // Create partitions for cs_db1's partitioned table - db1Ptbl1Ptns = createPartitionObjects(db1Ptbl1).getPartitions(); - Deadline.startTimer(""); - objectStore.addPartition(db1Ptbl1Ptns.get(0)); - objectStore.addPartition(db1Ptbl1Ptns.get(1)); - objectStore.addPartition(db1Ptbl1Ptns.get(2)); - objectStore.addPartition(db1Ptbl1Ptns.get(3)); - objectStore.addPartition(db1Ptbl1Ptns.get(4)); - 
updateCache(cachedStore); - allDatabases = cachedStore.getAllDatabases(DEFAULT_CATALOG_NAME); - Assert.assertEquals(2, allDatabases.size()); - Assert.assertTrue(allDatabases.contains(db1.getName())); - Assert.assertTrue(allDatabases.contains(db2.getName())); - db1Tbls = cachedStore.getAllTables(DEFAULT_CATALOG_NAME, db1.getName()); - Assert.assertEquals(2, db1Tbls.size()); - Assert.assertTrue(db1Tbls.contains(db1Ptbl1.getTableName())); - Assert.assertTrue(db1Tbls.contains(db1Utbl2.getTableName())); - db2Tbls = cachedStore.getAllTables(DEFAULT_CATALOG_NAME, db2.getName()); - Assert.assertEquals(2, db2Tbls.size()); - Assert.assertTrue(db2Tbls.contains(db2Utbl1.getTableName())); - Assert.assertTrue(db2Tbls.contains(db2Ptbl1.getTableName())); - // cs_db1_ptntbl1 - db1Ptns = cachedStore.getPartitions(DEFAULT_CATALOG_NAME, db1.getName(), db1Ptbl1.getTableName(), -1); - Assert.assertEquals(5, db1Ptns.size()); - // cs_db2_ptntbl1 - db2Ptns = cachedStore.getPartitions(DEFAULT_CATALOG_NAME, db2.getName(), db2Ptbl1.getTableName(), -1); - Assert.assertEquals(25, db2Ptns.size()); - Deadline.startTimer(""); - db2PtnsOS = objectStore.getPartitions(DEFAULT_CATALOG_NAME, db2.getName(), db2Ptbl1.getTableName(), -1); - Assert.assertTrue(db2Ptns.containsAll(db2PtnsOS)); - // Clean up - objectStore.dropTable(DEFAULT_CATALOG_NAME, db1Utbl2.getDbName(), db1Utbl2.getTableName()); - cachedStore.shutdown(); - } - @Test public void testCreateAndGetDatabase() throws Exception { Configuration conf = MetastoreConf.newMetastoreConf(); MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true); @@ -364,7 +287,7 @@ public void testPrewarmMemoryEstimation() throws Exception { db = objectStore.getDatabase(DEFAULT_CATALOG_NAME, dbName); // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); - CachedStore.prewarm(objectStore); + CachedStore.prewarm(objectStore, conf); // Read database via CachedStore Database dbRead = cachedStore.getDatabase(DEFAULT_CATALOG_NAME, dbName); Assert.assertEquals(db, dbRead); @@ -403,7 +326,7 @@ public void testPrewarmMemoryEstimation() throws Exception { db = objectStore.getDatabase(DEFAULT_CATALOG_NAME, dbName); // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); - CachedStore.prewarm(objectStore); + CachedStore.prewarm(objectStore, conf); // Read database via CachedStore Database dbRead = cachedStore.getDatabase(DEFAULT_CATALOG_NAME, dbName); Assert.assertEquals(db, dbRead); @@ -424,11 +347,7 @@ public void testPrewarmMemoryEstimation() throws Exception { Assert.assertEquals(localDb1, dbRead); allDatabases = cachedStore.getAllDatabases(DEFAULT_CATALOG_NAME); Assert.assertEquals(3, allDatabases.size()); - objectStore.dropDatabase(DEFAULT_CATALOG_NAME, dbName1); - updateCache(cachedStore); - updateCache(cachedStore); - allDatabases = cachedStore.getAllDatabases(DEFAULT_CATALOG_NAME); - Assert.assertEquals(2, allDatabases.size()); + cachedStore.dropDatabase(DEFAULT_CATALOG_NAME, dbName1); cachedStore.shutdown(); } @@ -443,7 +362,7 @@ public void testPrewarmMemoryEstimation() throws Exception { ObjectStore objectStore = (ObjectStore) cachedStore.getRawStore(); // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); - CachedStore.prewarm(objectStore); + CachedStore.prewarm(objectStore, conf); // Read database via CachedStore List allDatabases = cachedStore.getAllDatabases(DEFAULT_CATALOG_NAME); Assert.assertEquals(2, allDatabases.size()); @@ -457,17 +376,6 @@ public void testPrewarmMemoryEstimation() throws Exception { // Read db via ObjectStore 
Database dbRead = objectStore.getDatabase(DEFAULT_CATALOG_NAME, dbName); Assert.assertEquals(db, dbRead); - // Alter db via ObjectStore - dbOwner = "user3"; - db = new Database(db1); - db.setOwnerName(dbOwner); - objectStore.alterDatabase(DEFAULT_CATALOG_NAME, dbName, db); - db = objectStore.getDatabase(DEFAULT_CATALOG_NAME, dbName); - updateCache(cachedStore); - updateCache(cachedStore); - // Read db via CachedStore - dbRead = cachedStore.getDatabase(DEFAULT_CATALOG_NAME, dbName); - Assert.assertEquals(db, dbRead); cachedStore.shutdown(); } @@ -482,7 +390,7 @@ public void testPrewarmMemoryEstimation() throws Exception { ObjectStore objectStore = (ObjectStore) cachedStore.getRawStore(); // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); - CachedStore.prewarm(objectStore); + CachedStore.prewarm(objectStore, conf); // Read database via CachedStore List allDatabases = cachedStore.getAllDatabases(DEFAULT_CATALOG_NAME); Assert.assertEquals(2, allDatabases.size()); @@ -497,23 +405,11 @@ public void testPrewarmMemoryEstimation() throws Exception { cachedStore.createTable(db1Utbl2); db1Tables = cachedStore.getAllTables(DEFAULT_CATALOG_NAME, db1.getName()); Assert.assertEquals(3, db1Tables.size()); - db1Utbl2 = cachedStore.getTable(DEFAULT_CATALOG_NAME, db1Utbl2.getDbName(), db1Utbl2.getTableName()); - Table tblRead = objectStore.getTable(DEFAULT_CATALOG_NAME, db1Utbl2.getDbName(), db1Utbl2.getTableName()); + db1Utbl2 = cachedStore.getTable(DEFAULT_CATALOG_NAME, db1Utbl2.getDbName(), db1Utbl2.getTableName(), null); + Table tblRead = objectStore.getTable(DEFAULT_CATALOG_NAME, db1Utbl2.getDbName(), db1Utbl2.getTableName(), null); Assert.assertEquals(db1Utbl2, tblRead); - // Create a new unpartitioned table under basedb2 via ObjectStore - Table db2Utbl2 = createUnpartitionedTableObject(db2); - db2Utbl2.setTableName(db2.getName() + "_unptntbl2"); - objectStore.createTable(db2Utbl2); - db2Utbl2 = objectStore.getTable(DEFAULT_CATALOG_NAME, db2Utbl2.getDbName(), db2Utbl2.getTableName()); - updateCache(cachedStore); - db2Tables = cachedStore.getAllTables(DEFAULT_CATALOG_NAME, db2.getName()); - Assert.assertEquals(3, db2Tables.size()); - tblRead = cachedStore.getTable(DEFAULT_CATALOG_NAME, db2Utbl2.getDbName(), db2Utbl2.getTableName()); - Assert.assertEquals(db2Utbl2, tblRead); - // Clean up objectStore.dropTable(DEFAULT_CATALOG_NAME, db1Utbl2.getDbName(), db1Utbl2.getTableName()); - db1Utbl2 = cachedStore.getTable(DEFAULT_CATALOG_NAME, db1Utbl2.getDbName(), db1Utbl2.getTableName()); - objectStore.dropTable(DEFAULT_CATALOG_NAME, db2Utbl2.getDbName(), db2Utbl2.getTableName()); + db1Utbl2 = cachedStore.getTable(DEFAULT_CATALOG_NAME, db1Utbl2.getDbName(), db1Utbl2.getTableName(), null); cachedStore.shutdown(); } @@ -530,7 +426,7 @@ public void testGetAllTablesPrewarmMemoryLimit() throws Exception { ObjectStore objectStore = (ObjectStore) cachedStore.getRawStore(); // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); - CachedStore.prewarm(objectStore); + CachedStore.prewarm(objectStore, conf); SharedCache sharedCache = CachedStore.getSharedCache(); List db1Tables = sharedCache.listCachedTableNames(DEFAULT_CATALOG_NAME, db1.getName()); Assert.assertEquals(2, db1Tables.size()); @@ -555,7 +451,7 @@ public void testGetAllTablesPrewarmMemoryLimit() throws Exception { ObjectStore objectStore = (ObjectStore) cachedStore.getRawStore(); // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); - CachedStore.prewarm(objectStore); + CachedStore.prewarm(objectStore, conf); SharedCache 
sharedCache = CachedStore.getSharedCache(); // cachedStore.getAllTables falls back to objectStore when whitelist/blacklist is set List db1Tables = sharedCache.listCachedTableNames(DEFAULT_CATALOG_NAME, db1.getName()); @@ -580,7 +476,7 @@ public void testGetAllTablesPrewarmMemoryLimit() throws Exception { ObjectStore objectStore = (ObjectStore) cachedStore.getRawStore(); // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); - CachedStore.prewarm(objectStore); + CachedStore.prewarm(objectStore, conf); SharedCache sharedCache = CachedStore.getSharedCache(); // cachedStore.getAllTables falls back to objectStore when whitelist/blacklist is set List db1Tables = sharedCache.listCachedTableNames(DEFAULT_CATALOG_NAME, db1.getName()); @@ -603,7 +499,7 @@ public void testGetAllTablesPrewarmMemoryLimit() throws Exception { ObjectStore objectStore = (ObjectStore) cachedStore.getRawStore(); // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); - CachedStore.prewarm(objectStore); + CachedStore.prewarm(objectStore, conf); List db1Tables = cachedStore.getTables(DEFAULT_CATALOG_NAME, db1.getName(), "cs_db1.*"); Assert.assertEquals(2, db1Tables.size()); db1Tables = cachedStore.getTables(DEFAULT_CATALOG_NAME, db1.getName(), "cs_db1.un*"); @@ -628,35 +524,23 @@ public void testGetAllTablesPrewarmMemoryLimit() throws Exception { ObjectStore objectStore = (ObjectStore) cachedStore.getRawStore(); // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); - CachedStore.prewarm(objectStore); + CachedStore.prewarm(objectStore, conf); List db1Tables = cachedStore.getAllTables(DEFAULT_CATALOG_NAME, db1.getName()); Assert.assertEquals(2, db1Tables.size()); List db2Tables = cachedStore.getAllTables(DEFAULT_CATALOG_NAME, db2.getName()); Assert.assertEquals(2, db2Tables.size()); // Alter table db1Utbl1 via CachedStore and read via ObjectStore - Table db1Utbl1Read = cachedStore.getTable(DEFAULT_CATALOG_NAME, db1Utbl1.getDbName(), db1Utbl1.getTableName()); + Table db1Utbl1Read = cachedStore.getTable(DEFAULT_CATALOG_NAME, db1Utbl1.getDbName(), db1Utbl1.getTableName(), null); String newOwner = "newOwner"; Table db1Utbl1ReadAlt = new Table(db1Utbl1Read); db1Utbl1ReadAlt.setOwner(newOwner); cachedStore .alterTable(DEFAULT_CATALOG_NAME, db1Utbl1Read.getDbName(), db1Utbl1Read.getTableName(), db1Utbl1ReadAlt, "0"); db1Utbl1Read = - cachedStore.getTable(DEFAULT_CATALOG_NAME, db1Utbl1ReadAlt.getDbName(), db1Utbl1ReadAlt.getTableName()); + cachedStore.getTable(DEFAULT_CATALOG_NAME, db1Utbl1ReadAlt.getDbName(), db1Utbl1ReadAlt.getTableName(), null); Table db1Utbl1ReadOS = - objectStore.getTable(DEFAULT_CATALOG_NAME, db1Utbl1ReadAlt.getDbName(), db1Utbl1ReadAlt.getTableName()); + objectStore.getTable(DEFAULT_CATALOG_NAME, db1Utbl1ReadAlt.getDbName(), db1Utbl1ReadAlt.getTableName(), null); Assert.assertEquals(db1Utbl1Read, db1Utbl1ReadOS); - // Alter table db2Utbl1 via ObjectStore and read via CachedStore - Table db2Utbl1Read = objectStore.getTable(DEFAULT_CATALOG_NAME, db2Utbl1.getDbName(), db2Utbl1.getTableName()); - Table db2Utbl1ReadAlt = new Table(db2Utbl1Read); - db2Utbl1ReadAlt.setOwner(newOwner); - objectStore - .alterTable(DEFAULT_CATALOG_NAME, db2Utbl1Read.getDbName(), db2Utbl1Read.getTableName(), db2Utbl1ReadAlt, "0"); - updateCache(cachedStore); - db2Utbl1Read = - objectStore.getTable(DEFAULT_CATALOG_NAME, db2Utbl1ReadAlt.getDbName(), db2Utbl1ReadAlt.getTableName()); - Table d21Utbl1ReadCS = - cachedStore.getTable(DEFAULT_CATALOG_NAME, db2Utbl1ReadAlt.getDbName(), 
db2Utbl1ReadAlt.getTableName()); - Assert.assertEquals(db2Utbl1Read, d21Utbl1ReadCS); cachedStore.shutdown(); } @@ -671,65 +555,19 @@ public void testGetAllTablesPrewarmMemoryLimit() throws Exception { ObjectStore objectStore = (ObjectStore) cachedStore.getRawStore(); // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); - CachedStore.prewarm(objectStore); + CachedStore.prewarm(objectStore, conf); List db1Tables = cachedStore.getAllTables(DEFAULT_CATALOG_NAME, db1.getName()); Assert.assertEquals(2, db1Tables.size()); List db2Tables = cachedStore.getAllTables(DEFAULT_CATALOG_NAME, db2.getName()); Assert.assertEquals(2, db2Tables.size()); // Drop table db1Utbl1 via CachedStore and read via ObjectStore - Table db1Utbl1Read = cachedStore.getTable(DEFAULT_CATALOG_NAME, db1Utbl1.getDbName(), db1Utbl1.getTableName()); + Table db1Utbl1Read = cachedStore.getTable(DEFAULT_CATALOG_NAME, db1Utbl1.getDbName(), db1Utbl1.getTableName(), null); cachedStore.dropTable(DEFAULT_CATALOG_NAME, db1Utbl1Read.getDbName(), db1Utbl1Read.getTableName()); db1Tables = cachedStore.getAllTables(DEFAULT_CATALOG_NAME, db1.getName()); Assert.assertEquals(1, db1Tables.size()); Table db1Utbl1ReadOS = - objectStore.getTable(DEFAULT_CATALOG_NAME, db1Utbl1Read.getDbName(), db1Utbl1Read.getTableName()); + objectStore.getTable(DEFAULT_CATALOG_NAME, db1Utbl1Read.getDbName(), db1Utbl1Read.getTableName(), null); Assert.assertNull(db1Utbl1ReadOS); - // Drop table db2Utbl1 via ObjectStore and read via CachedStore - Table db2Utbl1Read = objectStore.getTable(DEFAULT_CATALOG_NAME, db2Utbl1.getDbName(), db2Utbl1.getTableName()); - objectStore.dropTable(DEFAULT_CATALOG_NAME, db2Utbl1Read.getDbName(), db2Utbl1Read.getTableName()); - db2Tables = objectStore.getAllTables(DEFAULT_CATALOG_NAME, db2.getName()); - Assert.assertEquals(1, db2Tables.size()); - updateCache(cachedStore); - db2Tables = cachedStore.getAllTables(DEFAULT_CATALOG_NAME, db2.getName()); - Assert.assertEquals(1, db2Tables.size()); - Table db2Utbl1ReadCS = - cachedStore.getTable(DEFAULT_CATALOG_NAME, db2Utbl1Read.getDbName(), db2Utbl1Read.getTableName()); - Assert.assertNull(db2Utbl1ReadCS); - cachedStore.shutdown(); - } - - /********************************************************************************************** - * Methods that test SharedCache - * @throws MetaException - * @throws NoSuchObjectException - *********************************************************************************************/ - - @Test public void testSharedStoreDb() throws NoSuchObjectException, MetaException { - Configuration conf = MetastoreConf.newMetastoreConf(); - MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true); - MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CACHED_RAW_STORE_MAX_CACHE_MEMORY, "-1Kb"); - MetaStoreTestUtils.setConfForStandloneMode(conf); - CachedStore cachedStore = new CachedStore(); - CachedStore.clearSharedCache(); - cachedStore.setConfForTest(conf); - SharedCache sharedCache = CachedStore.getSharedCache(); - - Database localDb1 = createDatabaseObject("db1", "user1"); - Database localDb2 = createDatabaseObject("db2", "user1"); - Database localDb3 = createDatabaseObject("db3", "user1"); - Database newDb1 = createDatabaseObject("newdb1", "user1"); - sharedCache.addDatabaseToCache(localDb1); - sharedCache.addDatabaseToCache(localDb2); - sharedCache.addDatabaseToCache(localDb3); - Assert.assertEquals(sharedCache.getCachedDatabaseCount(), 3); - sharedCache.alterDatabaseInCache(DEFAULT_CATALOG_NAME, "db1", newDb1); - 
Assert.assertEquals(sharedCache.getCachedDatabaseCount(), 3); - sharedCache.removeDatabaseFromCache(DEFAULT_CATALOG_NAME, "db2"); - Assert.assertEquals(sharedCache.getCachedDatabaseCount(), 2); - List dbs = sharedCache.listCachedDatabases(DEFAULT_CATALOG_NAME); - Assert.assertEquals(dbs.size(), 2); - Assert.assertTrue(dbs.contains("newdb1")); - Assert.assertTrue(dbs.contains("db3")); cachedStore.shutdown(); } @@ -793,15 +631,15 @@ public void testGetAllTablesPrewarmMemoryLimit() throws Exception { newTbl1.setSd(newSd1); newTbl1.setPartitionKeys(new ArrayList<>()); - sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db1", "tbl1", tbl1); - sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db1", "tbl2", tbl2); - sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db1", "tbl3", tbl3); - sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db2", "tbl1", tbl1); + sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db1", "tbl1", tbl1, null); + sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db1", "tbl2", tbl2, null); + sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db1", "tbl3", tbl3, null); + sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db2", "tbl1", tbl1, null); Assert.assertEquals(sharedCache.getCachedTableCount(), 4); Assert.assertEquals(sharedCache.getSdCache().size(), 2); - Table t = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, "db1", "tbl1"); + Table t = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, "db1", "tbl1", null); Assert.assertEquals(t.getSd().getLocation(), "loc1"); sharedCache.removeTableFromCache(DEFAULT_CATALOG_NAME, "db1", "tbl1"); @@ -831,8 +669,6 @@ public void testGetAllTablesPrewarmMemoryLimit() throws Exception { String tbl1Name = "tbl1"; String tbl2Name = "tbl2"; String owner = "user1"; - Database db = createDatabaseObject(dbName, owner); - sharedCache.addDatabaseToCache(db); FieldSchema col1 = new FieldSchema("col1", "int", "integer column"); FieldSchema col2 = new FieldSchema("col2", "string", "string column"); List cols = new ArrayList(); @@ -840,9 +676,9 @@ public void testGetAllTablesPrewarmMemoryLimit() throws Exception { cols.add(col2); List ptnCols = new ArrayList(); Table tbl1 = createTestTbl(dbName, tbl1Name, owner, cols, ptnCols); - sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, tbl1); + sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, tbl1, null); Table tbl2 = createTestTbl(dbName, tbl2Name, owner, cols, ptnCols); - sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, dbName, tbl2Name, tbl2); + sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, dbName, tbl2Name, tbl2, null); Partition part1 = new Partition(); StorageDescriptor sd1 = new StorageDescriptor(); @@ -979,9 +815,9 @@ public void testAggrStatsRepeatedRead() throws Exception { List aggrPartVals = new ArrayList<>(); aggrPartVals.add("1"); aggrPartVals.add("2"); - AggrStats aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames); + AggrStats aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames, null); Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100); - aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames); + aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames, null); Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100); 
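All of these test hunks follow one mechanical pattern: metastore read APIs grow a trailing validWriteIdList String, and the pre-existing tests pass null to keep their old, snapshot-unaware behavior. Below is a minimal sketch of the new calling convention, assuming the post-patch RawStore signatures; the helper class, variable names, and the high-watermark value are illustrative, not from the patch.

import java.util.BitSet;
import java.util.List;

import org.apache.hadoop.hive.common.ValidReaderWriteIdList;
import org.apache.hadoop.hive.common.ValidWriteIdList;
import org.apache.hadoop.hive.metastore.RawStore;
import org.apache.hadoop.hive.metastore.api.Partition;

class WriteIdSnapshotSketch {
  static List<Partition> readPartitions(RawStore store, String cat, String db, String tbl)
      throws Exception {
    // Passing null opts out of the write-id consistency check; this is how
    // the updated tests in this patch preserve their previous semantics.
    store.getPartitions(cat, db, tbl, -1, null);

    // A transactional reader would serialize its snapshot instead. With a
    // snapshot the cached metadata cannot satisfy, SharedCache returns null
    // (see the assertNull on getTableFromCache(..., new ValidReaderWriteIdList())
    // in a later hunk) and the caller falls back to the backing store.
    ValidWriteIdList snapshot =
        new ValidReaderWriteIdList(db + "." + tbl, new long[0], new BitSet(), 5L);
    return store.getPartitions(cat, db, tbl, -1, snapshot.writeToString());
  }
}

The parameter is a String rather than a ValidWriteIdList object, presumably because the list must also cross the Thrift boundary in serialized form.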
objectStore.deletePartitionColumnStatistics(DEFAULT_CATALOG_NAME, db.getName(), tbl.getTableName(), @@ -1064,10 +900,10 @@ public void testPartitionAggrStats() throws Exception { List aggrPartVals = new ArrayList<>(); aggrPartVals.add("1"); aggrPartVals.add("2"); - AggrStats aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames); + AggrStats aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames, null); Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100); Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumDVs(), 40); - aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames); + aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames, null); Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100); Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumDVs(), 40); cachedStore.shutdown(); @@ -1157,10 +993,10 @@ public void testPartitionAggrStatsBitVector() throws Exception { List aggrPartVals = new ArrayList<>(); aggrPartVals.add("1"); aggrPartVals.add("2"); - AggrStats aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames); + AggrStats aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames, null); Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100); Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumDVs(), 5); - aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames); + aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames, null); Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100); Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumDVs(), 5); cachedStore.shutdown(); @@ -1186,24 +1022,6 @@ public void testPartitionAggrStatsBitVector() throws Exception { } }); - // Create 5 dbs - for (String dbName : dbNames) { - Callable c = new Callable() { - public Object call() { - Database db = createDatabaseObject(dbName, "user1"); - sharedCache.addDatabaseToCache(db); - return null; - } - }; - tasks.add(c); - } - executor.invokeAll(tasks); - for (String dbName : dbNames) { - Database db = sharedCache.getDatabaseFromCache(DEFAULT_CATALOG_NAME, dbName); - Assert.assertNotNull(db); - Assert.assertEquals(dbName, db.getName()); - } - // Created 5 tables under "db1" List tblNames = new ArrayList(Arrays.asList("tbl1", "tbl2", "tbl3", "tbl4", "tbl5")); tasks.clear(); @@ -1219,7 +1037,7 @@ public Object call() { Callable c = new Callable() { public Object call() { Table tbl = createTestTbl(dbNames.get(0), tblName, "user1", cols, ptnCols); - sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, tbl); + sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, tbl, null); return null; } }; @@ -1227,7 +1045,7 @@ public Object call() { } executor.invokeAll(tasks); for (String tblName : tblNames) { - Table tbl = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName); + Table tbl = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, 
dbNames.get(0), tblName, null); Assert.assertNotNull(tbl); Assert.assertEquals(tblName, tbl.getTableName()); } @@ -1236,7 +1054,7 @@ public Object call() { List ptnVals = new ArrayList(Arrays.asList("aaa", "bbb", "ccc", "ddd", "eee")); tasks.clear(); for (String tblName : tblNames) { - Table tbl = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName); + Table tbl = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, null); for (String ptnVal : ptnVals) { Map partParams = new HashMap(); Callable c = new Callable() { @@ -1279,7 +1097,7 @@ public Object call() { } } for (String tblName : addPtnTblNames) { - Table tbl = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName); + Table tbl = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, null); for (String ptnVal : newPtnVals) { Map partParams = new HashMap(); Callable c = new Callable() { @@ -1396,20 +1214,19 @@ public Object call() { sharedCache.setTableSizeMap(tableSizeMap); sharedCache.initialize(conf); - sharedCache.addDatabaseToCache(db); - sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, tbl1); - sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, dbName, tbl2Name, tbl2); + sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, tbl1, null); + sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, dbName, tbl2Name, tbl2, null); - sharedCache.addPartitionToCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, part1); - sharedCache.addPartitionToCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, part2); - sharedCache.addPartitionToCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, part3); - - Partition p = sharedCache.getPartitionFromCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, Arrays.asList("201701")); - Assert.assertNull(p); + sharedCache.addPartitionToCache(DEFAULT_CATALOG_NAME, dbName, tbl2Name, part1); + sharedCache.addPartitionToCache(DEFAULT_CATALOG_NAME, dbName, tbl2Name, part2); + sharedCache.addPartitionToCache(DEFAULT_CATALOG_NAME, dbName, tbl2Name, part3); - sharedCache.addPartitionToCache(DEFAULT_CATALOG_NAME, dbName, tbl2Name, newPart1); - p = sharedCache.getPartitionFromCache(DEFAULT_CATALOG_NAME, dbName, tbl2Name, Arrays.asList("201701")); + Partition p = sharedCache.getPartitionFromCache(DEFAULT_CATALOG_NAME, dbName, tbl2Name, Arrays.asList("201701")); Assert.assertNotNull(p); + + Table t = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, new ValidReaderWriteIdList()); + Assert.assertNull(t); + cachedStore.shutdown(); } @@ -1445,7 +1262,7 @@ public Object call() { // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); - CachedStore.prewarm(objectStore); + CachedStore.prewarm(objectStore, conf); List db1Tables = cachedStore.getAllTables(DEFAULT_CATALOG_NAME, db1.getName()); Assert.assertEquals(2, db1Tables.size()); @@ -1479,22 +1296,20 @@ public Object call() { tableSizeMap.put(db1Ptbl1TblKey, 4000); tableSizeMap.put(db2Utbl1TblKey, 4000); tableSizeMap.put(db2Ptbl1TblKey, 4000); - Table tblDb1Utbl1 = objectStore.getTable(DEFAULT_CATALOG_NAME, db1Utbl1.getDbName(), db1Utbl1.getTableName()); - Table tblDb1Ptbl1 = objectStore.getTable(DEFAULT_CATALOG_NAME, db1Ptbl1.getDbName(), db1Ptbl1.getTableName()); - Table tblDb2Utbl1 = objectStore.getTable(DEFAULT_CATALOG_NAME, db2Utbl1.getDbName(), db2Utbl1.getTableName()); - Table tblDb2Ptbl1 = objectStore.getTable(DEFAULT_CATALOG_NAME, db2Ptbl1.getDbName(), db2Ptbl1.getTableName()); + Table tblDb1Utbl1 = objectStore.getTable(DEFAULT_CATALOG_NAME, 
db1Utbl1.getDbName(), db1Utbl1.getTableName(), null); + Table tblDb1Ptbl1 = objectStore.getTable(DEFAULT_CATALOG_NAME, db1Ptbl1.getDbName(), db1Ptbl1.getTableName(), null); + Table tblDb2Utbl1 = objectStore.getTable(DEFAULT_CATALOG_NAME, db2Utbl1.getDbName(), db2Utbl1.getTableName(), null); + Table tblDb2Ptbl1 = objectStore.getTable(DEFAULT_CATALOG_NAME, db2Ptbl1.getDbName(), db2Ptbl1.getTableName(), null); SharedCache sc = cachedStore.getSharedCache(); sc.setConcurrencyLevel(1); sc.setTableSizeMap(tableSizeMap); sc.initialize(conf); - sc.addDatabaseToCache(db1); - sc.addDatabaseToCache(db2); - sc.addTableToCache(DEFAULT_CATALOG_NAME, db1Utbl1.getDbName(), db1Utbl1.getTableName(), tblDb1Utbl1); - sc.addTableToCache(DEFAULT_CATALOG_NAME, db1Ptbl1.getDbName(), db1Ptbl1.getTableName(), tblDb1Ptbl1); - sc.addTableToCache(DEFAULT_CATALOG_NAME, db2Utbl1.getDbName(), db2Utbl1.getTableName(), tblDb2Utbl1); - sc.addTableToCache(DEFAULT_CATALOG_NAME, db2Ptbl1.getDbName(), db2Ptbl1.getTableName(), tblDb2Ptbl1); + sc.addTableToCache(DEFAULT_CATALOG_NAME, db1Utbl1.getDbName(), db1Utbl1.getTableName(), tblDb1Utbl1, null); + sc.addTableToCache(DEFAULT_CATALOG_NAME, db1Ptbl1.getDbName(), db1Ptbl1.getTableName(), tblDb1Ptbl1, null); + sc.addTableToCache(DEFAULT_CATALOG_NAME, db2Utbl1.getDbName(), db2Utbl1.getTableName(), tblDb2Utbl1, null); + sc.addTableToCache(DEFAULT_CATALOG_NAME, db2Ptbl1.getDbName(), db2Ptbl1.getTableName(), tblDb2Ptbl1, null); List db1Tables = sc.listCachedTableNames(DEFAULT_CATALOG_NAME, db1.getName()); Assert.assertEquals(0, db1Tables.size()); @@ -1780,19 +1595,4 @@ private PartitionObjectsAndNames createPartitionObjects(Table table) { return ptnNames; } } - - // This method will return only after the cache has updated once - private void updateCache(CachedStore cachedStore) throws Exception { - int maxTries = 100; - long updateCountBefore = cachedStore.getCacheUpdateCount(); - // Start the CachedStore update service - CachedStore.startCacheUpdateService(cachedStore.getConf(), true, false); - while ((cachedStore.getCacheUpdateCount() != (updateCountBefore + 1)) && (maxTries-- > 0)) { - Thread.sleep(1000); - } - if (maxTries <= 0) { - throw new Exception("Unable to update SharedCache in 100 attempts; possibly some bug"); - } - CachedStore.stopCacheUpdateService(100); - } } diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCatalogCaching.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCatalogCaching.java deleted file mode 100644 index 423dce8a68..0000000000 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCatalogCaching.java +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hive.metastore.cache; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hive.metastore.HiveMetaStore; -import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; -import org.apache.hadoop.hive.metastore.ObjectStore; -import org.apache.hadoop.hive.metastore.Warehouse; -import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; -import org.apache.hadoop.hive.metastore.api.Catalog; -import org.apache.hadoop.hive.metastore.api.InvalidOperationException; -import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; -import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; -import org.apache.hadoop.hive.metastore.conf.MetastoreConf; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -import java.util.Comparator; -import java.util.List; - -/** - * Tests that catalogs are properly cached. - */ -@Category(MetastoreCheckinTest.class) -public class TestCatalogCaching { - private static final String CAT1_NAME = "cat1"; - private static final String CAT2_NAME = "cat2"; - - private ObjectStore objectStore; - private Configuration conf; - private CachedStore cachedStore; - - @Before - public void createObjectStore() throws MetaException, InvalidOperationException { - conf = MetastoreConf.newMetastoreConf(); - MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true); - MetaStoreTestUtils.setConfForStandloneMode(conf); - objectStore = new ObjectStore(); - objectStore.setConf(conf); - - // Create three catalogs - HiveMetaStore.HMSHandler.createDefaultCatalog(objectStore, new Warehouse(conf)); - - Catalog cat1 = new CatalogBuilder() - .setName(CAT1_NAME) - .setLocation("/tmp/cat1") - .build(); - objectStore.createCatalog(cat1); - Catalog cat2 = new CatalogBuilder() - .setName(CAT2_NAME) - .setLocation("/tmp/cat2") - .build(); - objectStore.createCatalog(cat2); - } - - @After - public void clearCatalogCache() throws MetaException, NoSuchObjectException { - List catalogs = objectStore.getCatalogs(); - for (String catalog : catalogs) objectStore.dropCatalog(catalog); - } - - @Test - public void defaultHiveOnly() throws Exception { - // By default just the Hive catalog should be cached. - cachedStore = new CachedStore(); - cachedStore.setConf(conf); - CachedStore.stopCacheUpdateService(1); - cachedStore.resetCatalogCache(); - - CachedStore.prewarm(objectStore); - - // Only the hive catalog should be cached - List cachedCatalogs = cachedStore.getCatalogs(); - Assert.assertEquals(1, cachedCatalogs.size()); - Assert.assertEquals(Warehouse.DEFAULT_CATALOG_NAME, cachedCatalogs.get(0)); - } - - @Test - public void cacheAll() throws Exception { - // Set the config value to empty string, which should result in all catalogs being cached. 
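The comment above captures the behavior the removed cacheAll() test verified. For reference, the catalog-caching knob it exercised worked as in the sketch below (illustrative only; whether the rewritten CachedStore still honors CATALOGS_TO_CACHE is not shown in this diff).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;

class CatalogCacheConfSketch {
  static Configuration withAllCatalogsCached(Configuration base) {
    Configuration conf = new Configuration(base);
    // Per the deleted tests: the default caches only the "hive" catalog,
    // an empty value caches every catalog, and a comma-separated list
    // (e.g. "cat1,cat2") caches exactly the named ones.
    MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CATALOGS_TO_CACHE, "");
    return conf;
  }
}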
- Configuration newConf = new Configuration(conf); - MetastoreConf.setVar(newConf, MetastoreConf.ConfVars.CATALOGS_TO_CACHE, ""); - cachedStore = new CachedStore(); - cachedStore.setConf(newConf); - CachedStore.stopCacheUpdateService(1); - objectStore.setConf(newConf); // have to override it with the new conf since this is where - // prewarm gets the conf object - cachedStore.resetCatalogCache(); - - CachedStore.prewarm(objectStore); - - // All the catalogs should be cached - List<String> cachedCatalogs = cachedStore.getCatalogs(); - Assert.assertEquals(3, cachedCatalogs.size()); - cachedCatalogs.sort(Comparator.naturalOrder()); - Assert.assertEquals(CAT1_NAME, cachedCatalogs.get(0)); - Assert.assertEquals(CAT2_NAME, cachedCatalogs.get(1)); - Assert.assertEquals(Warehouse.DEFAULT_CATALOG_NAME, cachedCatalogs.get(2)); - } - - @Test - public void cacheSome() throws Exception { - // Set the config value to 2 catalogs other than hive - Configuration newConf = new Configuration(conf); - MetastoreConf.setVar(newConf, MetastoreConf.ConfVars.CATALOGS_TO_CACHE, CAT1_NAME + "," + CAT2_NAME); - cachedStore = new CachedStore(); - cachedStore.setConf(newConf); - CachedStore.stopCacheUpdateService(1); - objectStore.setConf(newConf); // have to override it with the new conf since this is where - // prewarm gets the conf object - cachedStore.resetCatalogCache(); - - CachedStore.prewarm(objectStore); - - // All the catalogs should be cached - List<String> cachedCatalogs = cachedStore.getCatalogs(); - Assert.assertEquals(2, cachedCatalogs.size()); - cachedCatalogs.sort(Comparator.naturalOrder()); - Assert.assertEquals(CAT1_NAME, cachedCatalogs.get(0)); - Assert.assertEquals(CAT2_NAME, cachedCatalogs.get(1)); - } -} diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java index 08ec6c4d83..5fc3f3d85c 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java @@ -242,6 +242,7 @@ public void testCreateGetDeleteTable() throws Exception { table.setParameters(createdTable.getParameters()); table.setCreationMetadata(createdTable.getCreationMetadata()); table.setWriteId(createdTable.getWriteId()); + table.setTemporary(false); Assert.assertTrue(createdTable.isSetId()); createdTable.unsetId(); @@ -715,6 +716,7 @@ public void testAlterTable() throws Exception { newTable.setCreateTime(alteredTable.getCreateTime()); newTable.setCreationMetadata(alteredTable.getCreationMetadata()); newTable.setWriteId(alteredTable.getWriteId()); + newTable.setTemporary(false); Assert.assertTrue(alteredTable.isSetId()); alteredTable.unsetId(); diff --git a/standalone-metastore/metastore-tools/metastore-benchmarks/src/main/java/org/apache/hadoop/hive/metastore/tools/BenchmarkTool.java b/standalone-metastore/metastore-tools/metastore-benchmarks/src/main/java/org/apache/hadoop/hive/metastore/tools/BenchmarkTool.java index 041cd76234..a89e49c503 100644 --- a/standalone-metastore/metastore-tools/metastore-benchmarks/src/main/java/org/apache/hadoop/hive/metastore/tools/BenchmarkTool.java +++ b/standalone-metastore/metastore-tools/metastore-benchmarks/src/main/java/org/apache/hadoop/hive/metastore/tools/BenchmarkTool.java @@
-214,7 +214,7 @@ public void run() { client.createDatabase(dbName); } - if (client.tableExists(dbName, tableName)) { + if (client.tableExists(dbName, tableName, null)) { client.dropTable(dbName, tableName); } diff --git a/standalone-metastore/metastore-tools/metastore-benchmarks/src/main/java/org/apache/hadoop/hive/metastore/tools/HMSBenchmarks.java b/standalone-metastore/metastore-tools/metastore-benchmarks/src/main/java/org/apache/hadoop/hive/metastore/tools/HMSBenchmarks.java index f53f2ef43b..6c636bb29b 100644 --- a/standalone-metastore/metastore-tools/metastore-benchmarks/src/main/java/org/apache/hadoop/hive/metastore/tools/HMSBenchmarks.java +++ b/standalone-metastore/metastore-tools/metastore-benchmarks/src/main/java/org/apache/hadoop/hive/metastore/tools/HMSBenchmarks.java @@ -69,7 +69,7 @@ static DescriptiveStatistics benchmarkListAllTables(@NotNull MicroBenchmark benc String dbName = data.dbName; return benchmark.measure(() -> - throwingSupplierWrapper(() -> client.getAllTables(dbName, null))); + throwingSupplierWrapper(() -> client.getAllTables(dbName, null, null))); } static DescriptiveStatistics benchmarkTableCreate(@NotNull MicroBenchmark bench, @@ -131,7 +131,7 @@ static DescriptiveStatistics benchmarkGetTable(@NotNull MicroBenchmark bench, createPartitionedTable(client, dbName, tableName); try { return bench.measure(() -> - throwingSupplierWrapper(() -> client.getTable(dbName, tableName))); + throwingSupplierWrapper(() -> client.getTable(dbName, tableName, null))); } finally { throwingSupplierWrapper(() -> client.dropTable(dbName, tableName)); } @@ -148,7 +148,7 @@ static DescriptiveStatistics benchmarkListTables(@NotNull MicroBenchmark bench, try { createManyTables(client, count, dbName, format); return bench.measure(() -> - throwingSupplierWrapper(() -> client.getAllTables(dbName, null))); + throwingSupplierWrapper(() -> client.getAllTables(dbName, null, null))); } finally { dropManyTables(client, count, dbName, format); } @@ -163,7 +163,7 @@ static DescriptiveStatistics benchmarkCreatePartition(@NotNull MicroBenchmark be createPartitionedTable(client, dbName, tableName); final List<String> values = Collections.singletonList("d1"); try { - Table t = client.getTable(dbName, tableName); + Table t = client.getTable(dbName, tableName, null); Partition partition = new Util.PartitionBuilder(t) .withValues(values) .build(); @@ -191,7 +191,7 @@ static DescriptiveStatistics benchmarkListPartition(@NotNull MicroBenchmark benc Collections.singletonList("d"), 1); return bench.measure(() -> - throwingSupplierWrapper(() -> client.listPartitions(dbName, tableName))); + throwingSupplierWrapper(() -> client.listPartitions(dbName, tableName, null))); } catch (TException e) { e.printStackTrace(); return new DescriptiveStatistics(); @@ -213,7 +213,7 @@ static DescriptiveStatistics benchmarkListManyPartitions(@NotNull MicroBenchmark LOG.debug("Created {} partitions", howMany); LOG.debug("started benchmark... "); return bench.measure(() -> - throwingSupplierWrapper(() -> client.listPartitions(dbName, tableName))); + throwingSupplierWrapper(() -> client.listPartitions(dbName, tableName, null))); } catch (TException e) { e.printStackTrace(); return new DescriptiveStatistics(); @@ -235,7 +235,7 @@ static DescriptiveStatistics benchmarkGetPartitions(@NotNull MicroBenchmark benc LOG.debug("Created {} partitions", howMany); LOG.debug("started benchmark... ");
"); return bench.measure(() -> - throwingSupplierWrapper(() -> client.getPartitions(dbName, tableName))); + throwingSupplierWrapper(() -> client.getPartitions(dbName, tableName, null))); } catch (TException e) { e.printStackTrace(); return new DescriptiveStatistics(); @@ -253,7 +253,7 @@ static DescriptiveStatistics benchmarkDropPartition(@NotNull MicroBenchmark benc createPartitionedTable(client, dbName, tableName); final List values = Collections.singletonList("d1"); try { - Table t = client.getTable(dbName, tableName); + Table t = client.getTable(dbName, tableName, null); Partition partition = new Util.PartitionBuilder(t) .withValues(values) .build(); @@ -324,7 +324,7 @@ static DescriptiveStatistics benchmarkGetPartitionNames(@NotNull MicroBenchmark addManyPartitionsNoException(client, dbName, tableName, null, Collections.singletonList("d"), count); return bench.measure( - () -> throwingSupplierWrapper(() -> client.getPartitionNames(dbName, tableName)) + () -> throwingSupplierWrapper(() -> client.getPartitionNames(dbName, tableName, null)) ); } finally { throwingSupplierWrapper(() -> client.dropTable(dbName, tableName)); @@ -343,11 +343,11 @@ static DescriptiveStatistics benchmarkGetPartitionsByName(@NotNull MicroBenchmar addManyPartitionsNoException(client, dbName, tableName, null, Collections.singletonList("d"), count); List partitionNames = throwingSupplierWrapper(() -> - client.getPartitionNames(dbName, tableName)); + client.getPartitionNames(dbName, tableName, null)); return bench.measure( () -> throwingSupplierWrapper(() -> - client.getPartitionsByNames(dbName, tableName, partitionNames)) + client.getPartitionsByNames(dbName, tableName, partitionNames, null)) ); } finally { throwingSupplierWrapper(() -> client.dropTable(dbName, tableName)); @@ -365,7 +365,7 @@ static DescriptiveStatistics benchmarkRenameTable(@NotNull MicroBenchmark bench, try { addManyPartitionsNoException(client, dbName, tableName, null, Collections.singletonList("d"), count); - Table oldTable = client.getTable(dbName, tableName); + Table oldTable = client.getTable(dbName, tableName, null); oldTable.getSd().setLocation(""); Table newTable = oldTable.deepCopy(); newTable.setTableName(tableName + "_renamed"); diff --git a/standalone-metastore/metastore-tools/tools-common/src/main/java/org/apache/hadoop/hive/metastore/tools/HMSClient.java b/standalone-metastore/metastore-tools/tools-common/src/main/java/org/apache/hadoop/hive/metastore/tools/HMSClient.java index 7cc1e42a8b..46b69884e8 100644 --- a/standalone-metastore/metastore-tools/tools-common/src/main/java/org/apache/hadoop/hive/metastore/tools/HMSClient.java +++ b/standalone-metastore/metastore-tools/tools-common/src/main/java/org/apache/hadoop/hive/metastore/tools/HMSClient.java @@ -153,8 +153,8 @@ boolean dbExists(@NotNull String dbName) throws TException { return getAllDatabases(dbName).contains(dbName); } - boolean tableExists(@NotNull String dbName, @NotNull String tableName) throws TException { - return getAllTables(dbName, tableName).contains(tableName); + boolean tableExists(@NotNull String dbName, @NotNull String tableName, @Nullable String validWriteIdList) throws TException { + return getAllTables(dbName, tableName, validWriteIdList).contains(tableName); } Database getDatabase(@NotNull String dbName) throws TException { @@ -178,7 +178,7 @@ Database getDatabase(@NotNull String dbName) throws TException { .collect(Collectors.toSet()); } - Set getAllTables(@NotNull String dbName, @Nullable String filter) throws TException { + Set getAllTables(@NotNull 
String dbName, @Nullable String filter, @Nullable String validWriteIdList) throws TException { if (filter == null || filter.isEmpty()) { return new HashSet<>(client.get_all_tables(dbName)); } @@ -236,8 +236,8 @@ boolean dropTable(@NotNull String dbName, @NotNull String tableName) throws TExc return true; } - Table getTable(@NotNull String dbName, @NotNull String tableName) throws TException { - return client.get_table(dbName, tableName); + Table getTable(@NotNull String dbName, @NotNull String tableName, @Nullable String validWriteIdList) throws TException { + return client.get_table(dbName, tableName, validWriteIdList); } Partition createPartition(@NotNull Table table, @NotNull List<String> values) throws TException { @@ -254,8 +254,8 @@ void addPartitions(List<Partition> partitions) throws TException { List<Partition> listPartitions(@NotNull String dbName, - @NotNull String tableName) throws TException { - return client.get_partitions(dbName, tableName, (short) -1); + @NotNull String tableName, @Nullable String validWriteIdList) throws TException { + return client.get_partitions(dbName, tableName, (short) -1, validWriteIdList); } Long getCurrentNotificationId() throws TException { @@ -263,8 +263,8 @@ Long getCurrentNotificationId() throws TException { } List<String> getPartitionNames(@NotNull String dbName, - @NotNull String tableName) throws TException { - return client.get_partition_names(dbName, tableName, (short) -1); + @NotNull String tableName, @Nullable String validWriteIdList) throws TException { - return client.get_partition_names(dbName, tableName, (short) -1, validWriteIdList); } public boolean dropPartition(@NotNull String dbName, @NotNull String tableName, @@ -273,14 +273,14 @@ public boolean dropPartition(@NotNull String dbName, @NotNull String tableName, return client.drop_partition(dbName, tableName, arguments, true); } - List<Partition> getPartitions(@NotNull String dbName, @NotNull String tableName) throws TException { - return client.get_partitions(dbName, tableName, (short) -1); + List<Partition> getPartitions(@NotNull String dbName, @NotNull String tableName, @Nullable String validWriteIdList) throws TException { + return client.get_partitions(dbName, tableName, (short) -1, validWriteIdList); } DropPartitionsResult dropPartitions(@NotNull String dbName, @NotNull String tableName, @Nullable List<String> partNames) throws TException { if (partNames == null) { - return dropPartitions(dbName, tableName, getPartitionNames(dbName, tableName)); + return dropPartitions(dbName, tableName, getPartitionNames(dbName, tableName, null)); } if (partNames.isEmpty()) { return null; @@ -290,12 +290,12 @@ DropPartitionsResult dropPartitions(@NotNull String dbName, @NotNull String tabl } List<Partition> getPartitionsByNames(@NotNull String dbName, @NotNull String tableName, - @Nullable List<String> names) throws TException { + @Nullable List<String> names, @Nullable String validWriteIdList) throws TException { if (names == null) { return client.get_partitions_by_names(dbName, tableName, - getPartitionNames(dbName, tableName)); + getPartitionNames(dbName, tableName, validWriteIdList), validWriteIdList); } - return client.get_partitions_by_names(dbName, tableName, names); + return client.get_partitions_by_names(dbName, tableName, names, validWriteIdList); } boolean alterTable(@NotNull String dbName, @NotNull String tableName, @NotNull Table newTable) diff --git a/standalone-metastore/metastore-tools/tools-common/src/main/java/org/apache/hadoop/hive/metastore/tools/Util.java
b/standalone-metastore/metastore-tools/tools-common/src/main/java/org/apache/hadoop/hive/metastore/tools/Util.java index 101d6759c5..55ff673253 100644 --- a/standalone-metastore/metastore-tools/tools-common/src/main/java/org/apache/hadoop/hive/metastore/tools/Util.java +++ b/standalone-metastore/metastore-tools/tools-common/src/main/java/org/apache/hadoop/hive/metastore/tools/Util.java @@ -508,7 +508,7 @@ static Object addManyPartitions(@NotNull HMSClient client, @Nullable Map<String, String> parameters, @NotNull List<String> arguments, int npartitions) throws TException { - Table table = client.getTable(dbName, tableName); + Table table = client.getTable(dbName, tableName, null); client.addPartitions(createManyPartitions(table, parameters, arguments, npartitions)); return null; } diff --git a/standalone-metastore/metastore-tools/tools-common/src/test/java/org/apache/hadoop/hive/metastore/tools/HMSClientTest.java b/standalone-metastore/metastore-tools/tools-common/src/test/java/org/apache/hadoop/hive/metastore/tools/HMSClientTest.java index ab4b62543f..7bd8d56846 100644 --- a/standalone-metastore/metastore-tools/tools-common/src/test/java/org/apache/hadoop/hive/metastore/tools/HMSClientTest.java +++ b/standalone-metastore/metastore-tools/tools-common/src/test/java/org/apache/hadoop/hive/metastore/tools/HMSClientTest.java @@ -194,7 +194,7 @@ public void dropNonExistingDb() { public void getAllTables() throws TException { try { client.createTable(TEST_TABLE); - assertThat(client.getAllTables(TEST_DATABASE, null), Matchers.contains(TEST_TABLE_NAME)); + assertThat(client.getAllTables(TEST_DATABASE, null, null), Matchers.contains(TEST_TABLE_NAME)); } catch (Exception e) { System.out.println(e.getMessage()); e.printStackTrace(); diff --git a/storage-api/src/java/org/apache/hadoop/hive/common/ValidReaderWriteIdList.java b/storage-api/src/java/org/apache/hadoop/hive/common/ValidReaderWriteIdList.java index bc8ac0d61b..81a24341cf 100644 --- a/storage-api/src/java/org/apache/hadoop/hive/common/ValidReaderWriteIdList.java +++ b/storage-api/src/java/org/apache/hadoop/hive/common/ValidReaderWriteIdList.java @@ -260,5 +260,27 @@ public RangeResponse isWriteIdRangeAborted(long minWriteId, long maxWriteId) { public ValidReaderWriteIdList updateHighWatermark(long value) { return new ValidReaderWriteIdList(tableName, exceptions, abortedBits, value, minOpenWriteId); } + + public void commitWriteId(long writeId) { + if (writeId > highWatermark) { + // Write ids between the old high watermark and writeId remain open, so record them as exceptions before advancing the watermark. + long[] newExceptions = new long[exceptions.length + (int) (writeId - highWatermark - 1)]; + System.arraycopy(exceptions, 0, newExceptions, 0, exceptions.length); + for (long i = highWatermark + 1; i < writeId; i++) { + newExceptions[exceptions.length + (int) (i - highWatermark - 1)] = i; + } + exceptions = newExceptions; + highWatermark = writeId; + } else { + // writeId is at or below the high watermark; committing it removes it from the exception list. + int pos = Arrays.binarySearch(exceptions, writeId); + if (pos >= 0) { + long[] newExceptions = new long[exceptions.length - 1]; + System.arraycopy(exceptions, 0, newExceptions, 0, pos); + System.arraycopy(exceptions, pos + 1, newExceptions, pos, exceptions.length - pos - 1); + exceptions = newExceptions; + } + } + } } diff --git a/storage-api/src/java/org/apache/hadoop/hive/common/ValidTxnList.java b/storage-api/src/java/org/apache/hadoop/hive/common/ValidTxnList.java index d4c3b09730..c81da2b3d3 100644 --- a/storage-api/src/java/org/apache/hadoop/hive/common/ValidTxnList.java +++ b/storage-api/src/java/org/apache/hadoop/hive/common/ValidTxnList.java @@ -31,6 +31,11 @@ */ public static final String VALID_TXNS_KEY = "hive.txn.valid.txns"; + /** + *
Key used to store the txn id used by the compactor in a + * {@link org.apache.hadoop.conf.Configuration} object. + */ + public static final String COMPACTOR_VALID_TXNS_ID_KEY = "hive.compactor.txn.valid.txns.id"; /** * The response to a range query. NONE means no values in this range match, * SOME mean that some do, and ALL means that every value does. diff --git a/storage-api/src/java/org/apache/hadoop/hive/common/ValidTxnWriteIdList.java b/storage-api/src/java/org/apache/hadoop/hive/common/ValidTxnWriteIdList.java index cfe01feed0..fbb726a853 100644 --- a/storage-api/src/java/org/apache/hadoop/hive/common/ValidTxnWriteIdList.java +++ b/storage-api/src/java/org/apache/hadoop/hive/common/ValidTxnWriteIdList.java @@ -32,6 +32,12 @@ */ public static final String VALID_TABLES_WRITEIDS_KEY = "hive.txn.tables.valid.writeids"; + /** + * Key used to store the valid write id list for the compactor in a + * {@link org.apache.hadoop.conf.Configuration} object. + */ + public static final String COMPACTOR_VALID_TABLES_WRITEIDS_KEY = "hive.compactor.txn.tables.valid.writeids"; + // Transaction for which the list of tables valid write Ids are populated private Long txnId; diff --git a/storage-api/src/java/org/apache/hadoop/hive/common/ValidWriteIdList.java b/storage-api/src/java/org/apache/hadoop/hive/common/ValidWriteIdList.java index b3d64021e6..dcfc0e7595 100644 --- a/storage-api/src/java/org/apache/hadoop/hive/common/ValidWriteIdList.java +++ b/storage-api/src/java/org/apache/hadoop/hive/common/ValidWriteIdList.java @@ -118,4 +118,10 @@ * @return smallest Open write Id in this set, {@code null} if there is none. */ Long getMinOpenWriteId(); + + /** + * Mark the given write id as committed. + * @param writeId the write id to mark as committed + */ + void commitWriteId(long writeId); } diff --git a/streaming/src/test/org/apache/hive/streaming/TestStreaming.java b/streaming/src/test/org/apache/hive/streaming/TestStreaming.java index dbff263aed..18c098be07 100644 --- a/streaming/src/test/org/apache/hive/streaming/TestStreaming.java +++ b/streaming/src/test/org/apache/hive/streaming/TestStreaming.java @@ -391,16 +391,16 @@ public void testNoBuckets() throws Exception { Assert.assertEquals("", 0, BucketCodec.determineVersion(536870912).decodeWriterId(536870912)); rs = queryTable(driver, "select ROW__ID, a, b, INPUT__FILE__NAME from default.streamingnobuckets order by ROW__ID"); - Assert.assertTrue(rs.get(0), rs.get(0).startsWith("{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\tfoo\tbar")); - Assert.assertTrue(rs.get(0), rs.get(0).endsWith("streamingnobuckets/delta_0000001_0000001_0000/bucket_00000")); - Assert.assertTrue(rs.get(1), rs.get(1).startsWith("{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\ta1\tb2")); - Assert.assertTrue(rs.get(1), rs.get(1).endsWith("streamingnobuckets/delta_0000002_0000003/bucket_00000")); - Assert.assertTrue(rs.get(2), rs.get(2).startsWith("{\"writeid\":2,\"bucketid\":536870912,\"rowid\":1}\ta3\tb4")); - Assert.assertTrue(rs.get(2), rs.get(2).endsWith("streamingnobuckets/delta_0000002_0000003/bucket_00000")); - Assert.assertTrue(rs.get(3), rs.get(3).startsWith("{\"writeid\":3,\"bucketid\":536870912,\"rowid\":0}\ta5\tb6")); - Assert.assertTrue(rs.get(3), rs.get(3).endsWith("streamingnobuckets/delta_0000002_0000003/bucket_00000")); - Assert.assertTrue(rs.get(4), rs.get(4).startsWith("{\"writeid\":3,\"bucketid\":536870912,\"rowid\":1}\ta7\tb8")); - Assert.assertTrue(rs.get(4), rs.get(4).endsWith("streamingnobuckets/delta_0000002_0000003/bucket_00000")); + Assert.assertTrue(rs.get(0),
rs.get(0).startsWith("{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\tfoo\tbar")); + Assert.assertTrue(rs.get(0), rs.get(0).endsWith("streamingnobuckets/delta_0000002_0000002_0000/bucket_00000")); + Assert.assertTrue(rs.get(1), rs.get(1).startsWith("{\"writeid\":3,\"bucketid\":536870912,\"rowid\":0}\ta1\tb2")); + Assert.assertTrue(rs.get(1), rs.get(1).endsWith("streamingnobuckets/delta_0000003_0000004/bucket_00000")); + Assert.assertTrue(rs.get(2), rs.get(2).startsWith("{\"writeid\":3,\"bucketid\":536870912,\"rowid\":1}\ta3\tb4")); + Assert.assertTrue(rs.get(2), rs.get(2).endsWith("streamingnobuckets/delta_0000003_0000004/bucket_00000")); + Assert.assertTrue(rs.get(3), rs.get(3).startsWith("{\"writeid\":4,\"bucketid\":536870912,\"rowid\":0}\ta5\tb6")); + Assert.assertTrue(rs.get(3), rs.get(3).endsWith("streamingnobuckets/delta_0000003_0000004/bucket_00000")); + Assert.assertTrue(rs.get(4), rs.get(4).startsWith("{\"writeid\":4,\"bucketid\":536870912,\"rowid\":1}\ta7\tb8")); + Assert.assertTrue(rs.get(4), rs.get(4).endsWith("streamingnobuckets/delta_0000003_0000004/bucket_00000")); queryTable(driver, "update default.streamingnobuckets set a=0, b=0 where a='a7'"); queryTable(driver, "delete from default.streamingnobuckets where a='a1'"); @@ -415,14 +415,14 @@ public void testNoBuckets() throws Exception { runWorker(conf); rs = queryTable(driver, "select ROW__ID, a, b, INPUT__FILE__NAME from default.streamingnobuckets order by ROW__ID"); - Assert.assertTrue(rs.get(0), rs.get(0).startsWith("{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\tfoo\tbar")); - Assert.assertTrue(rs.get(0), rs.get(0).endsWith("streamingnobuckets/base_0000005_v0000025/bucket_00000")); - Assert.assertTrue(rs.get(1), rs.get(1).startsWith("{\"writeid\":2,\"bucketid\":536870912,\"rowid\":1}\ta3\tb4")); - Assert.assertTrue(rs.get(1), rs.get(1).endsWith("streamingnobuckets/base_0000005_v0000025/bucket_00000")); - Assert.assertTrue(rs.get(2), rs.get(2).startsWith("{\"writeid\":3,\"bucketid\":536870912,\"rowid\":0}\ta5\tb6")); - Assert.assertTrue(rs.get(2), rs.get(2).endsWith("streamingnobuckets/base_0000005_v0000025/bucket_00000")); - Assert.assertTrue(rs.get(3), rs.get(3).startsWith("{\"writeid\":4,\"bucketid\":536870912,\"rowid\":0}\t0\t0")); - Assert.assertTrue(rs.get(3), rs.get(3).endsWith("streamingnobuckets/base_0000005_v0000025/bucket_00000")); + Assert.assertTrue(rs.get(0), rs.get(0).startsWith("{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\tfoo\tbar")); + Assert.assertTrue(rs.get(0), rs.get(0).endsWith("streamingnobuckets/base_0000006_v0000025/bucket_00000")); + Assert.assertTrue(rs.get(1), rs.get(1).startsWith("{\"writeid\":3,\"bucketid\":536870912,\"rowid\":1}\ta3\tb4")); + Assert.assertTrue(rs.get(1), rs.get(1).endsWith("streamingnobuckets/base_0000006_v0000025/bucket_00000")); + Assert.assertTrue(rs.get(2), rs.get(2).startsWith("{\"writeid\":4,\"bucketid\":536870912,\"rowid\":0}\ta5\tb6")); + Assert.assertTrue(rs.get(2), rs.get(2).endsWith("streamingnobuckets/base_0000006_v0000025/bucket_00000")); + Assert.assertTrue(rs.get(3), rs.get(3).startsWith("{\"writeid\":5,\"bucketid\":536870912,\"rowid\":0}\t0\t0")); + Assert.assertTrue(rs.get(3), rs.get(3).endsWith("streamingnobuckets/base_0000006_v0000025/bucket_00000")); } @Test @@ -470,10 +470,10 @@ public void testCommitWithKeyValue() throws Exception { connection.close(); rs = queryTable(driver, "select ROW__ID, a, b, INPUT__FILE__NAME from default.keyvalue order by ROW__ID"); - Assert.assertTrue(rs.get(1), 
rs.get(1).startsWith("{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\ta1\tb2")); - Assert.assertTrue(rs.get(1), rs.get(1).endsWith("keyvalue/delta_0000002_0000003/bucket_00000")); - Assert.assertTrue(rs.get(2), rs.get(2).startsWith("{\"writeid\":2,\"bucketid\":536870912,\"rowid\":1}\ta3\tb4")); - Assert.assertTrue(rs.get(2), rs.get(2).endsWith("keyvalue/delta_0000002_0000003/bucket_00000")); + Assert.assertTrue(rs.get(1), rs.get(1).startsWith("{\"writeid\":4,\"bucketid\":536870912,\"rowid\":0}\ta1\tb2")); + Assert.assertTrue(rs.get(1), rs.get(1).endsWith("keyvalue/delta_0000004_0000005/bucket_00000")); + Assert.assertTrue(rs.get(2), rs.get(2).startsWith("{\"writeid\":4,\"bucketid\":536870912,\"rowid\":1}\ta3\tb4")); + Assert.assertTrue(rs.get(2), rs.get(2).endsWith("keyvalue/delta_0000004_0000005/bucket_00000")); rs = queryTable(driver, "SHOW TBLPROPERTIES default.keyvalue('_metamykey')"); Assert.assertEquals(rs.get(0), "_metamykey\tmyvalue", rs.get(0)); @@ -569,7 +569,7 @@ public void testConnectionWithWriteId() throws Exception { rs = queryTable(driver, "select ROW__ID, a, b, " + "INPUT__FILE__NAME from default.writeidconnection order by a"); Assert.assertEquals(4, rs.size()); - Assert.assertTrue(rs.get(0), rs.get(0).startsWith("{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\ta0\tbar")); + Assert.assertTrue(rs.get(0), rs.get(0).startsWith("{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\ta0\tbar")); Assert.assertTrue(rs.get(0), rs.get(0).endsWith("bucket_00000")); Assert.assertTrue(rs.get(1), rs.get(1).contains("\"rowid\":0}\ta1\tb2")); Assert.assertTrue(rs.get(1), rs.get(1).endsWith("bucket_00000")); @@ -615,15 +615,15 @@ public void testAllTypesDelimitedWriter() throws Exception { " INPUT__FILE__NAME from default.alltypes order by ROW__ID"); Assert.assertEquals(2, rs.size()); String gotRow1 = rs.get(0); - String expectedPrefixRow1 = "{\"writeid\":1,\"bucketid\":536870912," + + String expectedPrefixRow1 = "{\"writeid\":2,\"bucketid\":536870912," + "\"rowid\":0}\ttrue\t10\t100\t1000\t10000\t4.0\t20.0\t4.222\t1969-12-31 15:59:58.174\t1970-01-01\tstring" + "\thello\thello\t{\"k1\":\"v1\"}\t[100,200]\t{\"c1\":10,\"c2\":\"foo\"}"; - String expectedSuffixRow1 = "alltypes/delta_0000001_0000002/bucket_00000"; + String expectedSuffixRow1 = "alltypes/delta_0000002_0000003/bucket_00000"; String gotRow2 = rs.get(1); - String expectedPrefixRow2 = "{\"writeid\":1,\"bucketid\":536870912," + + String expectedPrefixRow2 = "{\"writeid\":2,\"bucketid\":536870912," + "\"rowid\":1}\tfalse\t20\t200\t2000\t20000\t8.0\t40.0\t2.222\t1970-12-31 15:59:58.174\t1971-01-01\tabcd" + "\tworld\tworld\t{\"k4\":\"v4\"}\t[200,300]\t{\"c1\":20,\"c2\":\"bar\"}"; - String expectedSuffixRow2 = "alltypes/delta_0000001_0000002/bucket_00000"; + String expectedSuffixRow2 = "alltypes/delta_0000002_0000003/bucket_00000"; Assert.assertTrue(gotRow1, gotRow1.startsWith(expectedPrefixRow1)); Assert.assertTrue(gotRow1, gotRow1.endsWith(expectedSuffixRow1)); Assert.assertTrue(gotRow2, gotRow2.startsWith(expectedPrefixRow2)); @@ -669,15 +669,15 @@ public void testAllTypesDelimitedWriterInputStream() throws Exception { " INPUT__FILE__NAME from default.alltypes order by ROW__ID"); Assert.assertEquals(2, rs.size()); String gotRow1 = rs.get(0); - String expectedPrefixRow1 = "{\"writeid\":1,\"bucketid\":536870912," + + String expectedPrefixRow1 = "{\"writeid\":2,\"bucketid\":536870912," + "\"rowid\":0}\ttrue\t10\t100\t1000\t10000\t4.0\t20.0\t4.222\t1969-12-31 15:59:58.174\t1970-01-01\tstring" + 
"\thello\thello\t{\"k1\":\"v1\"}\t[100,200]\t{\"c1\":10,\"c2\":\"foo\"}"; - String expectedSuffixRow1 = "alltypes/delta_0000001_0000002/bucket_00000"; + String expectedSuffixRow1 = "alltypes/delta_0000002_0000003/bucket_00000"; String gotRow2 = rs.get(1); - String expectedPrefixRow2 = "{\"writeid\":1,\"bucketid\":536870912," + + String expectedPrefixRow2 = "{\"writeid\":2,\"bucketid\":536870912," + "\"rowid\":1}\tfalse\t20\t200\t2000\t20000\t8.0\t40.0\t2.222\t1970-12-31 15:59:58.174\t1971-01-01\tabcd" + "\tworld\tworld\t{\"k4\":\"v4\"}\t[200,300]\t{\"c1\":20,\"c2\":\"bar\"}"; - String expectedSuffixRow2 = "alltypes/delta_0000001_0000002/bucket_00000"; + String expectedSuffixRow2 = "alltypes/delta_0000002_0000003/bucket_00000"; Assert.assertTrue(gotRow1, gotRow1.startsWith(expectedPrefixRow1)); Assert.assertTrue(gotRow1, gotRow1.endsWith(expectedSuffixRow1)); Assert.assertTrue(gotRow2, gotRow2.startsWith(expectedPrefixRow2)); @@ -727,25 +727,25 @@ public void testAutoRollTransactionBatch() throws Exception { Assert.assertEquals("", 0, BucketCodec.determineVersion(536870912).decodeWriterId(536870912)); rs = queryTable(driver, "select ROW__ID, a, b, INPUT__FILE__NAME from default.streamingnobuckets order by ROW__ID"); - Assert.assertTrue(rs.get(0), rs.get(0).startsWith("{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\tfoo\tbar")); - Assert.assertTrue(rs.get(0), rs.get(0).endsWith("streamingnobuckets/delta_0000001_0000001_0000/bucket_00000")); - Assert.assertTrue(rs.get(1), rs.get(1).startsWith("{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\ta1\tb2")); - Assert.assertTrue(rs.get(1), rs.get(1).endsWith("streamingnobuckets/delta_0000002_0000003/bucket_00000")); - Assert.assertTrue(rs.get(2), rs.get(2).startsWith("{\"writeid\":2,\"bucketid\":536870912,\"rowid\":1}\ta3\tb4")); - Assert.assertTrue(rs.get(2), rs.get(2).endsWith("streamingnobuckets/delta_0000002_0000003/bucket_00000")); - Assert.assertTrue(rs.get(3), rs.get(3).startsWith("{\"writeid\":3,\"bucketid\":536870912,\"rowid\":0}\ta5\tb6")); - Assert.assertTrue(rs.get(3), rs.get(3).endsWith("streamingnobuckets/delta_0000002_0000003/bucket_00000")); - Assert.assertTrue(rs.get(4), rs.get(4).startsWith("{\"writeid\":3,\"bucketid\":536870912,\"rowid\":1}\ta7\tb8")); - Assert.assertTrue(rs.get(4), rs.get(4).endsWith("streamingnobuckets/delta_0000002_0000003/bucket_00000")); - - Assert.assertTrue(rs.get(5), rs.get(5).startsWith("{\"writeid\":4,\"bucketid\":536870912,\"rowid\":0}\ta9\tb10")); - Assert.assertTrue(rs.get(5), rs.get(5).endsWith("streamingnobuckets/delta_0000004_0000005/bucket_00000")); - Assert.assertTrue(rs.get(6), rs.get(6).startsWith("{\"writeid\":4,\"bucketid\":536870912,\"rowid\":1}\ta11\tb12")); - Assert.assertTrue(rs.get(6), rs.get(6).endsWith("streamingnobuckets/delta_0000004_0000005/bucket_00000")); - Assert.assertTrue(rs.get(7), rs.get(7).startsWith("{\"writeid\":5,\"bucketid\":536870912,\"rowid\":0}\ta13\tb14")); - Assert.assertTrue(rs.get(7), rs.get(7).endsWith("streamingnobuckets/delta_0000004_0000005/bucket_00000")); - Assert.assertTrue(rs.get(8), rs.get(8).startsWith("{\"writeid\":5,\"bucketid\":536870912,\"rowid\":1}\ta15\tb16")); - Assert.assertTrue(rs.get(8), rs.get(8).endsWith("streamingnobuckets/delta_0000004_0000005/bucket_00000")); + Assert.assertTrue(rs.get(0), rs.get(0).startsWith("{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\tfoo\tbar")); + Assert.assertTrue(rs.get(0), rs.get(0).endsWith("streamingnobuckets/delta_0000002_0000002_0000/bucket_00000")); + Assert.assertTrue(rs.get(1), 
rs.get(1).startsWith("{\"writeid\":3,\"bucketid\":536870912,\"rowid\":0}\ta1\tb2")); + Assert.assertTrue(rs.get(1), rs.get(1).endsWith("streamingnobuckets/delta_0000003_0000004/bucket_00000")); + Assert.assertTrue(rs.get(2), rs.get(2).startsWith("{\"writeid\":3,\"bucketid\":536870912,\"rowid\":1}\ta3\tb4")); + Assert.assertTrue(rs.get(2), rs.get(2).endsWith("streamingnobuckets/delta_0000003_0000004/bucket_00000")); + Assert.assertTrue(rs.get(3), rs.get(3).startsWith("{\"writeid\":4,\"bucketid\":536870912,\"rowid\":0}\ta5\tb6")); + Assert.assertTrue(rs.get(3), rs.get(3).endsWith("streamingnobuckets/delta_0000003_0000004/bucket_00000")); + Assert.assertTrue(rs.get(4), rs.get(4).startsWith("{\"writeid\":4,\"bucketid\":536870912,\"rowid\":1}\ta7\tb8")); + Assert.assertTrue(rs.get(4), rs.get(4).endsWith("streamingnobuckets/delta_0000003_0000004/bucket_00000")); + + Assert.assertTrue(rs.get(5), rs.get(5).startsWith("{\"writeid\":5,\"bucketid\":536870912,\"rowid\":0}\ta9\tb10")); + Assert.assertTrue(rs.get(5), rs.get(5).endsWith("streamingnobuckets/delta_0000005_0000006/bucket_00000")); + Assert.assertTrue(rs.get(6), rs.get(6).startsWith("{\"writeid\":5,\"bucketid\":536870912,\"rowid\":1}\ta11\tb12")); + Assert.assertTrue(rs.get(6), rs.get(6).endsWith("streamingnobuckets/delta_0000005_0000006/bucket_00000")); + Assert.assertTrue(rs.get(7), rs.get(7).startsWith("{\"writeid\":6,\"bucketid\":536870912,\"rowid\":0}\ta13\tb14")); + Assert.assertTrue(rs.get(7), rs.get(7).endsWith("streamingnobuckets/delta_0000005_0000006/bucket_00000")); + Assert.assertTrue(rs.get(8), rs.get(8).startsWith("{\"writeid\":6,\"bucketid\":536870912,\"rowid\":1}\ta15\tb16")); + Assert.assertTrue(rs.get(8), rs.get(8).endsWith("streamingnobuckets/delta_0000005_0000006/bucket_00000")); queryTable(driver, "update default.streamingnobuckets set a=0, b=0 where a='a7'"); queryTable(driver, "delete from default.streamingnobuckets where a='a1'"); @@ -765,18 +765,18 @@ public void testAutoRollTransactionBatch() throws Exception { runWorker(conf); rs = queryTable(driver, "select ROW__ID, a, b, INPUT__FILE__NAME from default.streamingnobuckets order by ROW__ID"); - Assert.assertTrue(rs.get(0), rs.get(0).startsWith("{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\tfoo\tbar")); - Assert.assertTrue(rs.get(0), rs.get(0).endsWith("streamingnobuckets/base_0000009_v0000029/bucket_00000")); - Assert.assertTrue(rs.get(1), rs.get(1).startsWith("{\"writeid\":2,\"bucketid\":536870912,\"rowid\":1}\ta3\tb4")); - Assert.assertTrue(rs.get(1), rs.get(1).endsWith("streamingnobuckets/base_0000009_v0000029/bucket_00000")); - Assert.assertTrue(rs.get(2), rs.get(2).startsWith("{\"writeid\":3,\"bucketid\":536870912,\"rowid\":0}\ta5\tb6")); - Assert.assertTrue(rs.get(2), rs.get(2).endsWith("streamingnobuckets/base_0000009_v0000029/bucket_00000")); - Assert.assertTrue(rs.get(3), rs.get(3).startsWith("{\"writeid\":4,\"bucketid\":536870912,\"rowid\":1}\ta11\tb12")); - Assert.assertTrue(rs.get(3), rs.get(3).endsWith("streamingnobuckets/base_0000009_v0000029/bucket_00000")); - Assert.assertTrue(rs.get(4), rs.get(4).startsWith("{\"writeid\":5,\"bucketid\":536870912,\"rowid\":0}\ta13\tb14")); - Assert.assertTrue(rs.get(4), rs.get(4).endsWith("streamingnobuckets/base_0000009_v0000029/bucket_00000")); - Assert.assertTrue(rs.get(5), rs.get(5).startsWith("{\"writeid\":6,\"bucketid\":536870912,\"rowid\":0}\t0\t0")); - Assert.assertTrue(rs.get(5), rs.get(5).endsWith("streamingnobuckets/base_0000009_v0000029/bucket_00000")); + Assert.assertTrue(rs.get(0), 
rs.get(0).startsWith("{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\tfoo\tbar")); + Assert.assertTrue(rs.get(0), rs.get(0).endsWith("streamingnobuckets/base_0000010_v0000029/bucket_00000")); + Assert.assertTrue(rs.get(1), rs.get(1).startsWith("{\"writeid\":3,\"bucketid\":536870912,\"rowid\":1}\ta3\tb4")); + Assert.assertTrue(rs.get(1), rs.get(1).endsWith("streamingnobuckets/base_0000010_v0000029/bucket_00000")); + Assert.assertTrue(rs.get(2), rs.get(2).startsWith("{\"writeid\":4,\"bucketid\":536870912,\"rowid\":0}\ta5\tb6")); + Assert.assertTrue(rs.get(2), rs.get(2).endsWith("streamingnobuckets/base_0000010_v0000029/bucket_00000")); + Assert.assertTrue(rs.get(3), rs.get(3).startsWith("{\"writeid\":5,\"bucketid\":536870912,\"rowid\":1}\ta11\tb12")); + Assert.assertTrue(rs.get(3), rs.get(3).endsWith("streamingnobuckets/base_0000010_v0000029/bucket_00000")); + Assert.assertTrue(rs.get(4), rs.get(4).startsWith("{\"writeid\":6,\"bucketid\":536870912,\"rowid\":0}\ta13\tb14")); + Assert.assertTrue(rs.get(4), rs.get(4).endsWith("streamingnobuckets/base_0000010_v0000029/bucket_00000")); + Assert.assertTrue(rs.get(5), rs.get(5).startsWith("{\"writeid\":7,\"bucketid\":536870912,\"rowid\":0}\t0\t0")); + Assert.assertTrue(rs.get(5), rs.get(5).endsWith("streamingnobuckets/base_0000010_v0000029/bucket_00000")); } /** @@ -1475,7 +1475,7 @@ public void testTransactionBatchCommitDelimited() throws Exception { connection.write("1,Hello streaming".getBytes()); connection.commitTransaction(); - checkDataWritten(partLoc, 1, 10, 1, 1, "{1, Hello streaming}"); + checkDataWritten(partLoc, 3, 12, 1, 1, "{1, Hello streaming}"); Assert.assertEquals(HiveStreamingConnection.TxnState.COMMITTED, connection.getCurrentTransactionState()); @@ -1487,11 +1487,11 @@ public void testTransactionBatchCommitDelimited() throws Exception { connection.write("2,Welcome to streaming".getBytes()); // data should not be visible - checkDataWritten(partLoc, 1, 10, 1, 1, "{1, Hello streaming}"); + checkDataWritten(partLoc, 3, 12, 1, 1, "{1, Hello streaming}"); connection.commitTransaction(); - checkDataWritten(partLoc, 1, 10, 1, 1, "{1, Hello streaming}", + checkDataWritten(partLoc, 3, 12, 1, 1, "{1, Hello streaming}", "{2, Welcome to streaming}"); connection.close(); @@ -1546,7 +1546,7 @@ public void testTransactionBatchCommitRegex() throws Exception { connection.write("1,Hello streaming".getBytes()); connection.commitTransaction(); - checkDataWritten(partLoc, 1, 10, 1, 1, "{1, Hello streaming}"); + checkDataWritten(partLoc, 3, 12, 1, 1, "{1, Hello streaming}"); Assert.assertEquals(HiveStreamingConnection.TxnState.COMMITTED, connection.getCurrentTransactionState()); @@ -1558,11 +1558,11 @@ public void testTransactionBatchCommitRegex() throws Exception { connection.write("2,Welcome to streaming".getBytes()); // data should not be visible - checkDataWritten(partLoc, 1, 10, 1, 1, "{1, Hello streaming}"); + checkDataWritten(partLoc, 3, 12, 1, 1, "{1, Hello streaming}"); connection.commitTransaction(); - checkDataWritten(partLoc, 1, 10, 1, 1, "{1, Hello streaming}", + checkDataWritten(partLoc, 3, 12, 1, 1, "{1, Hello streaming}", "{2, Welcome to streaming}"); connection.close(); @@ -1648,7 +1648,7 @@ public void testTransactionBatchCommitJson() throws Exception { connection.write(rec1.getBytes()); connection.commitTransaction(); - checkDataWritten(partLoc, 1, 10, 1, 1, "{1, Hello streaming}"); + checkDataWritten(partLoc, 3, 12, 1, 1, "{1, Hello streaming}"); Assert.assertEquals(HiveStreamingConnection.TxnState.COMMITTED, 
connection.getCurrentTransactionState()); @@ -1829,7 +1829,7 @@ public void testTransactionBatchAbortAndCommit() throws Exception { connection.write("2,Welcome to streaming".getBytes()); connection.commitTransaction(); - checkDataWritten(partLoc, 1, 10, 1, 1, "{1, Hello streaming}", + checkDataWritten(partLoc, 3, 12, 1, 1, "{1, Hello streaming}", "{2, Welcome to streaming}"); connection.close(); @@ -1855,13 +1855,13 @@ public void testMultipleTransactionBatchCommits() throws Exception { connection.write("1,Hello streaming".getBytes()); connection.commitTransaction(); String validationQuery = "select id, msg from " + dbName + "." + tblName + " order by id, msg"; - checkDataWritten2(partLoc, 1, 10, 1, validationQuery, false, "1\tHello streaming"); + checkDataWritten2(partLoc, 3, 12, 1, validationQuery, false, "1\tHello streaming"); connection.beginTransaction(); connection.write("2,Welcome to streaming".getBytes()); connection.commitTransaction(); - checkDataWritten2(partLoc, 1, 10, 1, validationQuery, true, "1\tHello streaming", + checkDataWritten2(partLoc, 3, 12, 1, validationQuery, true, "1\tHello streaming", "2\tWelcome to streaming"); connection.close(); @@ -1880,14 +1880,14 @@ public void testMultipleTransactionBatchCommits() throws Exception { connection.write("3,Hello streaming - once again".getBytes()); connection.commitTransaction(); - checkDataWritten2(partLoc, 1, 20, 2, validationQuery, false, "1\tHello streaming", + checkDataWritten2(partLoc, 3, 22, 2, validationQuery, false, "1\tHello streaming", "2\tWelcome to streaming", "3\tHello streaming - once again"); connection.beginTransaction(); connection.write("4,Welcome to streaming - once again".getBytes()); connection.commitTransaction(); - checkDataWritten2(partLoc, 1, 20, 2, validationQuery, true, "1\tHello streaming", + checkDataWritten2(partLoc, 3, 22, 2, validationQuery, true, "1\tHello streaming", "2\tWelcome to streaming", "3\tHello streaming - once again", "4\tWelcome to streaming - once again"); @@ -1940,7 +1940,7 @@ public void testInterleavedTransactionBatchCommits() throws Exception { connection2.commitTransaction(); String validationQuery = "select id, msg from " + dbName + "." + tblName + " order by id, msg"; - checkDataWritten2(partLoc, 11, 20, 1, + checkDataWritten2(partLoc, 13, 22, 1, validationQuery, true, "3\tHello streaming - once again"); connection.commitTransaction(); @@ -1960,7 +1960,7 @@ public void testInterleavedTransactionBatchCommits() throws Exception { Assert.assertTrue("", logicalLength == actualLength); } } - checkDataWritten2(partLoc, 1, 20, 2, + checkDataWritten2(partLoc, 3, 22, 2, validationQuery, false, "1\tHello streaming", "3\tHello streaming - once again"); connection.beginTransaction(); @@ -1985,19 +1985,19 @@ public void testInterleavedTransactionBatchCommits() throws Exception { Assert.assertTrue("", logicalLength <= actualLength); } } - checkDataWritten2(partLoc, 1, 20, 2, + checkDataWritten2(partLoc, 3, 22, 2, validationQuery, true, "1\tHello streaming", "3\tHello streaming - once again"); connection.commitTransaction(); - checkDataWritten2(partLoc, 1, 20, 2, + checkDataWritten2(partLoc, 3, 22, 2, validationQuery, false, "1\tHello streaming", "2\tWelcome to streaming", "3\tHello streaming - once again"); connection2.commitTransaction(); - checkDataWritten2(partLoc, 1, 20, 2, + checkDataWritten2(partLoc, 3, 22, 2, validationQuery, true, "1\tHello streaming", "2\tWelcome to streaming", "3\tHello streaming - once again",