diff --git a/beeline/pom.xml b/beeline/pom.xml
index 19ec53eba6..0bf065d802 100644
--- a/beeline/pom.xml
+++ b/beeline/pom.xml
@@ -105,6 +105,12 @@
       <classifier>tests</classifier>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hive.hcatalog</groupId>
+      <artifactId>hive-hcatalog-server-extensions</artifactId>
+      <version>${project.version}</version>
+      <scope>test</scope>
+    </dependency>
     <dependency>
       <groupId>org.apache.hive</groupId>
       <artifactId>hive-service</artifactId>
diff --git a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
index 5f9d809ab2..6959febf42 100644
--- a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
+++ b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
@@ -333,7 +333,7 @@ public PartitionFiles next() {
       Partition p = partitionIter.next();
       Iterator<String> fileIterator;
       //For transactional tables, the actual file copy will be done by acid write event during replay of commit txn.
-      if (!TxnUtils.isTransactionalTable(t)) {
+      if (!TxnUtils.isTransactionalTable(t) && p.getSd() != null) {
         List<String> files = Lists.newArrayList(new FileIterator(p.getSd().getLocation()));
         fileIterator = files.iterator();
       } else {
@@ -760,7 +760,8 @@ public void onUpdateTableColumnStat(UpdateTableColumnStatEvent updateTableColumn
         .buildUpdateTableColumnStatMessage(updateTableColumnStatEvent.getColStats(),
             updateTableColumnStatEvent.getTableObj(),
             updateTableColumnStatEvent.getTableParameters(),
-            updateTableColumnStatEvent.getWriteId());
+            updateTableColumnStatEvent.getWriteId(),
+            updateTableColumnStatEvent.getWriteIds());
     NotificationEvent event = new NotificationEvent(0, now(), EventType.UPDATE_TABLE_COLUMN_STAT.toString(),
         msgEncoder.getSerializer().serialize(msg));
     ColumnStatisticsDesc statDesc = updateTableColumnStatEvent.getColStats().getStatsDesc();
@@ -790,7 +791,8 @@ public void onUpdatePartitionColumnStat(UpdatePartitionColumnStatEvent updatePar
             updatePartColStatEvent.getPartVals(),
             updatePartColStatEvent.getPartParameters(),
             updatePartColStatEvent.getTableObj(),
-            updatePartColStatEvent.getWriteId());
+            updatePartColStatEvent.getWriteId(),
+            updatePartColStatEvent.getWriteIds());
     NotificationEvent event = new NotificationEvent(0, now(), EventType.UPDATE_PARTITION_COLUMN_STAT.toString(),
         msgEncoder.getSerializer().serialize(msg));
     ColumnStatisticsDesc statDesc = updatePartColStatEvent.getPartColStats().getStatsDesc();
diff --git a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java
index efafe0c641..afa17613fa 100644
--- a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java
+++ b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java
@@ -208,7 +208,7 @@ public void onCreateTable(CreateTableEvent tableEvent) throws MetaException {
       Configuration conf = handler.getConf();
       Table newTbl;
       try {
-        newTbl = handler.get_table_core(tbl.getCatName(), tbl.getDbName(), tbl.getTableName())
+        newTbl = handler.get_table_core(tbl.getCatName(), tbl.getDbName(), tbl.getTableName(), null)
            .deepCopy();
         newTbl.getParameters().put(
             HCatConstants.HCAT_MSGBUS_TOPIC_NAME,
diff --git a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
index 4dc04f46fd..6f12eb84c4 100644
--- a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
+++ b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
@@ -391,16 +391,16 @@ public void testNoBuckets() throws Exception {
     Assert.assertEquals("", 0, BucketCodec.determineVersion(536870912).decodeWriterId(536870912));
     rs = queryTable(driver,"select ROW__ID, a, b, INPUT__FILE__NAME from default.streamingnobuckets order by ROW__ID");
-    Assert.assertTrue(rs.get(0), rs.get(0).startsWith("{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\tfoo\tbar"));
-    Assert.assertTrue(rs.get(0), rs.get(0).endsWith("streamingnobuckets/delta_0000001_0000001_0000/bucket_00000"));
-    Assert.assertTrue(rs.get(1), rs.get(1).startsWith("{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\ta1\tb2"));
-    Assert.assertTrue(rs.get(1), rs.get(1).endsWith("streamingnobuckets/delta_0000002_0000003/bucket_00000"));
-    Assert.assertTrue(rs.get(2), rs.get(2).startsWith("{\"writeid\":2,\"bucketid\":536870912,\"rowid\":1}\ta3\tb4"));
-    Assert.assertTrue(rs.get(2), rs.get(2).endsWith("streamingnobuckets/delta_0000002_0000003/bucket_00000"));
-    Assert.assertTrue(rs.get(3), rs.get(3).startsWith("{\"writeid\":3,\"bucketid\":536870912,\"rowid\":0}\ta5\tb6"));
-    Assert.assertTrue(rs.get(3), rs.get(3).endsWith("streamingnobuckets/delta_0000002_0000003/bucket_00000"));
-    Assert.assertTrue(rs.get(4), rs.get(4).startsWith("{\"writeid\":3,\"bucketid\":536870912,\"rowid\":1}\ta7\tb8"));
-    Assert.assertTrue(rs.get(4), rs.get(4).endsWith("streamingnobuckets/delta_0000002_0000003/bucket_00000"));
+    Assert.assertTrue(rs.get(0), rs.get(0).startsWith("{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\tfoo\tbar"));
+    Assert.assertTrue(rs.get(0), rs.get(0).endsWith("streamingnobuckets/delta_0000002_0000002_0000/bucket_00000"));
+    Assert.assertTrue(rs.get(1), rs.get(1).startsWith("{\"writeid\":3,\"bucketid\":536870912,\"rowid\":0}\ta1\tb2"));
+    Assert.assertTrue(rs.get(1), rs.get(1).endsWith("streamingnobuckets/delta_0000003_0000004/bucket_00000"));
+    Assert.assertTrue(rs.get(2), rs.get(2).startsWith("{\"writeid\":3,\"bucketid\":536870912,\"rowid\":1}\ta3\tb4"));
+    Assert.assertTrue(rs.get(2), rs.get(2).endsWith("streamingnobuckets/delta_0000003_0000004/bucket_00000"));
+    Assert.assertTrue(rs.get(3), rs.get(3).startsWith("{\"writeid\":4,\"bucketid\":536870912,\"rowid\":0}\ta5\tb6"));
+    Assert.assertTrue(rs.get(3), rs.get(3).endsWith("streamingnobuckets/delta_0000003_0000004/bucket_00000"));
+    Assert.assertTrue(rs.get(4), rs.get(4).startsWith("{\"writeid\":4,\"bucketid\":536870912,\"rowid\":1}\ta7\tb8"));
+    Assert.assertTrue(rs.get(4), rs.get(4).endsWith("streamingnobuckets/delta_0000003_0000004/bucket_00000"));

     queryTable(driver, "update default.streamingnobuckets set a=0, b=0 where a='a7'");
     queryTable(driver, "delete from default.streamingnobuckets where a='a1'");
@@ -415,14 +415,14 @@ public void testNoBuckets() throws Exception {
     runWorker(conf);
     rs = queryTable(driver,"select ROW__ID, a, b, INPUT__FILE__NAME from default.streamingnobuckets order by ROW__ID");
-    Assert.assertTrue(rs.get(0), rs.get(0).startsWith("{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\tfoo\tbar"));
-    Assert.assertTrue(rs.get(0), rs.get(0).endsWith("streamingnobuckets/base_0000005_v0000025/bucket_00000"));
-    Assert.assertTrue(rs.get(1), rs.get(1).startsWith("{\"writeid\":2,\"bucketid\":536870912,\"rowid\":1}\ta3\tb4"));
-    Assert.assertTrue(rs.get(1), rs.get(1).endsWith("streamingnobuckets/base_0000005_v0000025/bucket_00000"));
-    Assert.assertTrue(rs.get(2), rs.get(2).startsWith("{\"writeid\":3,\"bucketid\":536870912,\"rowid\":0}\ta5\tb6"));
-    Assert.assertTrue(rs.get(2), rs.get(2).endsWith("streamingnobuckets/base_0000005_v0000025/bucket_00000"));
-    Assert.assertTrue(rs.get(3), rs.get(3).startsWith("{\"writeid\":4,\"bucketid\":536870912,\"rowid\":0}\t0\t0"));
-    Assert.assertTrue(rs.get(3), rs.get(3).endsWith("streamingnobuckets/base_0000005_v0000025/bucket_00000"));
+    Assert.assertTrue(rs.get(0), rs.get(0).startsWith("{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\tfoo\tbar"));
+    Assert.assertTrue(rs.get(0), rs.get(0).endsWith("streamingnobuckets/base_0000006_v0000025/bucket_00000"));
+    Assert.assertTrue(rs.get(1), rs.get(1).startsWith("{\"writeid\":3,\"bucketid\":536870912,\"rowid\":1}\ta3\tb4"));
+    Assert.assertTrue(rs.get(1), rs.get(1).endsWith("streamingnobuckets/base_0000006_v0000025/bucket_00000"));
+    Assert.assertTrue(rs.get(2), rs.get(2).startsWith("{\"writeid\":4,\"bucketid\":536870912,\"rowid\":0}\ta5\tb6"));
+    Assert.assertTrue(rs.get(2), rs.get(2).endsWith("streamingnobuckets/base_0000006_v0000025/bucket_00000"));
+    Assert.assertTrue(rs.get(3), rs.get(3).startsWith("{\"writeid\":5,\"bucketid\":536870912,\"rowid\":0}\t0\t0"));
+    Assert.assertTrue(rs.get(3), rs.get(3).endsWith("streamingnobuckets/base_0000006_v0000025/bucket_00000"));
   }

   /**
@@ -906,7 +906,7 @@ private void testTransactionBatchCommit_Delimited(UserGroupInformation ugi) thro
     txnBatch.write("1,Hello streaming".getBytes());
     txnBatch.commit();

-    checkDataWritten(partLoc, 1, 10, 1, 1, "{1, Hello streaming}");
+    checkDataWritten(partLoc, 3, 12, 1, 1, "{1, Hello streaming}");

     Assert.assertEquals(TransactionBatch.TxnState.COMMITTED
         , txnBatch.getCurrentTransactionState());
@@ -918,11 +918,11 @@ private void testTransactionBatchCommit_Delimited(UserGroupInformation ugi) thro
     txnBatch.write("2,Welcome to streaming".getBytes());

     // data should not be visible
-    checkDataWritten(partLoc, 1, 10, 1, 1, "{1, Hello streaming}");
+    checkDataWritten(partLoc, 3, 12, 1, 1, "{1, Hello streaming}");

     txnBatch.commit();

-    checkDataWritten(partLoc, 1, 10, 1, 1, "{1, Hello streaming}",
+    checkDataWritten(partLoc, 3, 12, 1, 1, "{1, Hello streaming}",
         "{2, Welcome to streaming}");

     txnBatch.close();
@@ -974,7 +974,7 @@ private void testTransactionBatchCommit_Regex(UserGroupInformation ugi) throws E
     txnBatch.write("1,Hello streaming".getBytes());
     txnBatch.commit();

-    checkDataWritten(partLoc, 1, 10, 1, 1, "{1, Hello streaming}");
+    checkDataWritten(partLoc, 3, 12, 1, 1, "{1, Hello streaming}");

     Assert.assertEquals(TransactionBatch.TxnState.COMMITTED
         , txnBatch.getCurrentTransactionState());
@@ -986,11 +986,11 @@ private void testTransactionBatchCommit_Regex(UserGroupInformation ugi) throws E
     txnBatch.write("2,Welcome to streaming".getBytes());

     // data should not be visible
-    checkDataWritten(partLoc, 1, 10, 1, 1, "{1, Hello streaming}");
+    checkDataWritten(partLoc, 3, 12, 1, 1, "{1, Hello streaming}");

     txnBatch.commit();

-    checkDataWritten(partLoc, 1, 10, 1, 1, "{1, Hello streaming}",
+    checkDataWritten(partLoc, 3, 12, 1, 1, "{1, Hello streaming}",
         "{2, Welcome to streaming}");

     txnBatch.close();
@@ -1036,7 +1036,7 @@ public void testTransactionBatchCommit_Json() throws Exception {
     txnBatch.write(rec1.getBytes());
     txnBatch.commit();

-    checkDataWritten(partLoc, 1, 10, 1, 1, "{1, Hello streaming}");
+    checkDataWritten(partLoc, 3, 12, 1, 1, "{1, Hello streaming}");

     Assert.assertEquals(TransactionBatch.TxnState.COMMITTED
         , txnBatch.getCurrentTransactionState());
@@ -1163,7 +1163,7 @@ public void testTransactionBatchAbortAndCommit() throws Exception {
     txnBatch.write("2,Welcome to streaming".getBytes());
     txnBatch.commit();

-    checkDataWritten(partLoc, 1, 10, 1, 1, "{1, Hello streaming}",
+    checkDataWritten(partLoc, 3, 12, 1, 1, "{1, Hello streaming}",
         "{2, Welcome to streaming}");

     txnBatch.close();
@@ -1182,13 +1182,13 @@ public void testMultipleTransactionBatchCommits() throws Exception {
     txnBatch.write("1,Hello streaming".getBytes());
     txnBatch.commit();
     String validationQuery = "select id, msg from " + dbName + "." + tblName + " order by id, msg";
-    checkDataWritten2(partLoc, 1, 10, 1, validationQuery, false, "1\tHello streaming");
+    checkDataWritten2(partLoc, 3, 12, 1, validationQuery, false, "1\tHello streaming");

     txnBatch.beginNextTransaction();
     txnBatch.write("2,Welcome to streaming".getBytes());
     txnBatch.commit();

-    checkDataWritten2(partLoc, 1, 10, 1, validationQuery, true, "1\tHello streaming",
+    checkDataWritten2(partLoc, 3, 12, 1, validationQuery, true, "1\tHello streaming",
         "2\tWelcome to streaming");

     txnBatch.close();
@@ -1199,14 +1199,14 @@ public void testMultipleTransactionBatchCommits() throws Exception {
     txnBatch.write("3,Hello streaming - once again".getBytes());
     txnBatch.commit();

-    checkDataWritten2(partLoc, 1, 20, 2, validationQuery, false, "1\tHello streaming",
+    checkDataWritten2(partLoc, 3, 22, 2, validationQuery, false, "1\tHello streaming",
         "2\tWelcome to streaming", "3\tHello streaming - once again");

     txnBatch.beginNextTransaction();
     txnBatch.write("4,Welcome to streaming - once again".getBytes());
     txnBatch.commit();

-    checkDataWritten2(partLoc, 1, 20, 2, validationQuery, true, "1\tHello streaming",
+    checkDataWritten2(partLoc, 3, 22, 2, validationQuery, true, "1\tHello streaming",
         "2\tWelcome to streaming", "3\tHello streaming - once again",
         "4\tWelcome to streaming - once again");
@@ -1243,7 +1243,7 @@ public void testInterleavedTransactionBatchCommits() throws Exception {
     txnBatch2.commit();

     String validationQuery = "select id, msg from " + dbName + "." + tblName + " order by id, msg";
-    checkDataWritten2(partLoc, 11, 20, 1,
+    checkDataWritten2(partLoc, 13, 22, 1,
         validationQuery, true, "3\tHello streaming - once again");

     txnBatch1.commit();
@@ -1263,7 +1263,7 @@ public void testInterleavedTransactionBatchCommits() throws Exception {
         Assert.assertTrue("", logicalLength == actualLength);
       }
     }
-    checkDataWritten2(partLoc, 1, 20, 2,
+    checkDataWritten2(partLoc, 3, 22, 2,
         validationQuery, false,"1\tHello streaming",
         "3\tHello streaming - once again");

     txnBatch1.beginNextTransaction();
@@ -1288,19 +1288,19 @@ public void testInterleavedTransactionBatchCommits() throws Exception {
         Assert.assertTrue("", logicalLength <= actualLength);
       }
     }
-    checkDataWritten2(partLoc, 1, 20, 2,
+    checkDataWritten2(partLoc, 3, 22, 2,
         validationQuery, true,"1\tHello streaming",
         "3\tHello streaming - once again");

     txnBatch1.commit();

-    checkDataWritten2(partLoc, 1, 20, 2,
+    checkDataWritten2(partLoc, 3, 22, 2,
         validationQuery, false, "1\tHello streaming",
         "2\tWelcome to streaming",
         "3\tHello streaming - once again");

     txnBatch2.commit();

-    checkDataWritten2(partLoc, 1, 20, 2,
+    checkDataWritten2(partLoc, 3, 22, 2,
         validationQuery, true, "1\tHello streaming",
         "2\tWelcome to streaming",
         "3\tHello streaming - once again",
diff --git a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
index 0212e076cd..0e1df69656 100644
--- a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
+++ b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
@@ -263,11 +263,6 @@ public boolean dropTable(String catName, String dbName, String tableName)
     }
   }

-  @Override
-  public Table getTable(String catName, String dbName, String tableName) throws MetaException {
-    return objectStore.getTable(catName, dbName, tableName);
-  }
-
   @Override
   public Table getTable(String catName, String dbName, String tableName, String writeIdList)
       throws MetaException {
@@ -280,12 +275,6 @@ public boolean addPartition(Partition part)
     return objectStore.addPartition(part);
   }

-  @Override
-  public Partition getPartition(String catName, String dbName, String tableName, List<String> partVals)
-      throws MetaException, NoSuchObjectException {
-    return objectStore.getPartition(catName, dbName, tableName, partVals);
-  }
-
   @Override
   public Partition getPartition(String catName, String dbName, String tableName,
                                 List<String> partVals, String writeIdList)
@@ -305,15 +294,15 @@ public boolean dropPartition(String catName, String dbName, String tableName, Li
   }

   @Override
-  public List<Partition> getPartitions(String catName, String dbName, String tableName, int max)
+  public List<Partition> getPartitions(String catName, String dbName, String tableName, int max, String writeIdList)
       throws MetaException, NoSuchObjectException {
-    return objectStore.getPartitions(catName, dbName, tableName, max);
+    return objectStore.getPartitions(catName, dbName, tableName, max, writeIdList);
   }

   @Override
   public Map<String, String> getPartitionLocations(String catName, String dbName, String tblName,
-      String baseLocationToNotShow, int max) {
-    return objectStore.getPartitionLocations(catName, dbName, tblName, baseLocationToNotShow, max);
+      String baseLocationToNotShow, int max, String writeIdList) {
+    return objectStore.getPartitionLocations(catName, dbName, tblName, baseLocationToNotShow, max, writeIdList);
   }

   @Override
@@ -378,9 +367,9 @@ public Table alterTable(String catName, String dbName, String name, Table newTab
   }

   @Override
-  public List<String> listPartitionNames(String catName, String dbName, String tblName, short maxParts)
+  public List<String> listPartitionNames(String catName, String dbName, String tblName, short maxParts, String writeIdList)
       throws MetaException {
-    return objectStore.listPartitionNames(catName, dbName, tblName, maxParts);
+    return objectStore.listPartitionNames(catName, dbName, tblName, maxParts, writeIdList);
   }

   @Override
@@ -388,7 +377,7 @@ public PartitionValuesResponse listPartitionValues(String catName, String db_nam
                                                      String tbl_name, List<FieldSchema> cols,
                                                      boolean applyDistinct, String filter,
                                                      boolean ascending, List<FieldSchema> order,
-                                                     long maxParts) throws MetaException {
+                                                     long maxParts, String writeIdList) throws MetaException {
     return null;
   }
@@ -416,42 +405,43 @@ public Partition alterPartition(String catName, String dbName, String tblName, L
   @Override
   public List<Partition> getPartitionsByFilter(String catName, String dbName, String tblName,
-      String filter, short maxParts) throws MetaException, NoSuchObjectException {
-    return objectStore.getPartitionsByFilter(catName, dbName, tblName, filter, maxParts);
+      String filter, short maxParts, String writeIdList) throws MetaException, NoSuchObjectException {
+    return objectStore.getPartitionsByFilter(catName, dbName, tblName, filter, maxParts, writeIdList);
   }

   @Override
   public List<Partition> getPartitionSpecsByFilterAndProjection(Table table,
-      GetPartitionsProjectionSpec projectionSpec, GetPartitionsFilterSpec filterSpec)
+      GetPartitionsProjectionSpec projectionSpec, GetPartitionsFilterSpec filterSpec, String writeIdList)
       throws MetaException, NoSuchObjectException {
-    return objectStore.getPartitionSpecsByFilterAndProjection(table, projectionSpec, filterSpec);
+    return objectStore.getPartitionSpecsByFilterAndProjection(table, projectionSpec, filterSpec, writeIdList);
   }

   @Override
   public int getNumPartitionsByFilter(String catName, String dbName, String tblName,
-      String filter) throws MetaException, NoSuchObjectException {
-    return objectStore.getNumPartitionsByFilter(catName, dbName, tblName, filter);
+      String filter, String writeIdList) throws MetaException, NoSuchObjectException {
+    return objectStore.getNumPartitionsByFilter(catName, dbName, tblName, filter, writeIdList);
   }

   @Override
   public int getNumPartitionsByExpr(String catName, String dbName, String tblName,
-      byte[] expr) throws MetaException, NoSuchObjectException {
-    return objectStore.getNumPartitionsByExpr(catName, dbName, tblName, expr);
+      byte[] expr, String writeIdList) throws MetaException, NoSuchObjectException {
+    return objectStore.getNumPartitionsByExpr(catName, dbName, tblName, expr, writeIdList);
   }

   @Override
   public List<Partition> getPartitionsByNames(String catName, String dbName, String tblName,
-      List<String> partNames)
+      List<String> partNames, String writeIdList)
       throws MetaException, NoSuchObjectException {
     return objectStore.getPartitionsByNames(
-        catName, dbName, tblName, partNames);
+        catName, dbName, tblName, partNames, writeIdList);
   }

   @Override
   public boolean getPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr,
-      String defaultPartitionName, short maxParts, List<Partition> result) throws TException {
+      String defaultPartitionName, short maxParts, List<Partition> result,
+      String writeIdList) throws TException {
     return objectStore.getPartitionsByExpr(catName,
-        dbName, tblName, expr, defaultPartitionName, maxParts, result);
+        dbName, tblName, expr, defaultPartitionName, maxParts, result, writeIdList);
   }

   @Override
@@ -622,34 +612,36 @@ public Role getRole(String roleName) throws NoSuchObjectException {

   @Override
   public Partition getPartitionWithAuth(String catName, String dbName, String tblName,
-      List<String> partVals, String userName, List<String> groupNames)
+      List<String> partVals, String userName,
+      List<String> groupNames, String writeIdList)
       throws MetaException, NoSuchObjectException, InvalidObjectException {
     return objectStore.getPartitionWithAuth(catName, dbName, tblName, partVals, userName,
-        groupNames);
+        groupNames, writeIdList);
   }

   @Override
   public List<Partition> getPartitionsWithAuth(String catName, String dbName, String tblName,
-      short maxParts, String userName, List<String> groupNames)
+      short maxParts, String userName,
+      List<String> groupNames, String writeIdList)
       throws MetaException, NoSuchObjectException, InvalidObjectException {
     return objectStore.getPartitionsWithAuth(catName, dbName, tblName, maxParts, userName,
-        groupNames);
+        groupNames, writeIdList);
   }

   @Override
   public List<String> listPartitionNamesPs(String catName, String dbName, String tblName,
-      List<String> partVals, short maxParts)
+      List<String> partVals, short maxParts, String writeIdList)
       throws MetaException, NoSuchObjectException {
-    return objectStore.listPartitionNamesPs(catName, dbName, tblName, partVals, maxParts);
+    return objectStore.listPartitionNamesPs(catName, dbName, tblName, partVals, maxParts, writeIdList);
   }

   @Override
   public List<Partition> listPartitionsPsWithAuth(String catName, String dbName, String tblName,
                                                   List<String> partVals, short maxParts, String userName,
-                                                  List<String> groupNames)
+                                                  List<String> groupNames, String writeIdList)
       throws MetaException, InvalidObjectException, NoSuchObjectException {
     return objectStore.listPartitionsPsWithAuth(catName, dbName, tblName, partVals, maxParts,
-        userName, groupNames);
+        userName, groupNames, writeIdList);
   }

   @Override
@@ -720,12 +712,6 @@ public long cleanupEvents() {
     return objectStore.listTableColumnGrantsAll(catName, dbName, tableName, columnName);
   }

-  @Override
-  public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tableName,
-      List<String> colNames) throws MetaException, NoSuchObjectException {
-    return objectStore.getTableColumnStatistics(catName, dbName, tableName, colNames);
-  }
-
   @Override
   public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tableName,
                                                    List<String> colNames,
@@ -817,14 +803,6 @@ public void setMetaStoreSchemaVersion(String schemaVersion, String comment) thro
   }

-  @Override
-  public List<ColumnStatistics> getPartitionColumnStatistics(String catName, String dbName,
-      String tblName, List<String> colNames,
-      List<String> partNames)
-      throws MetaException, NoSuchObjectException {
-    return objectStore.getPartitionColumnStatistics(catName, dbName, tblName , colNames, partNames);
-  }
-
   @Override
   public List<ColumnStatistics> getPartitionColumnStatistics(String catName, String dbName,
                                                              String tblName, List<String> colNames,
@@ -837,9 +815,9 @@ public void setMetaStoreSchemaVersion(String schemaVersion, String comment) thro

   @Override
   public boolean doesPartitionExist(String catName, String dbName, String tableName,
-      List<FieldSchema> partKeys, List<String> partVals)
+      List<FieldSchema> partKeys, List<String> partVals, String writeIdList)
       throws MetaException, NoSuchObjectException {
-    return objectStore.doesPartitionExist(catName, dbName, tableName, partKeys, partVals);
+    return objectStore.doesPartitionExist(catName, dbName, tableName, partKeys, partVals, writeIdList);
  }

   @Override
@@ -905,13 +883,6 @@ public Function getFunction(String catName, String dbName, String funcName)
     return objectStore.getFunctions(catName, dbName, pattern);
   }

-  @Override
-  public AggrStats get_aggr_stats_for(String catName, String dbName,
-      String tblName, List<String> partNames, List<String> colNames)
-      throws MetaException {
-    return null;
-  }
-
   @Override
   public AggrStats get_aggr_stats_for(String catName, String dbName,
                                       String tblName, List<String> partNames, List<String> colNames,
@@ -1324,5 +1295,4 @@ public int deleteRuntimeStats(int maxRetainSecs) throws MetaException {
       NoSuchObjectException {
     return null;
   }
-
 }
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStoreUpdateUsingEvents.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStoreUpdateUsingEvents.java
index 285f30b008..74fc40232d 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStoreUpdateUsingEvents.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStoreUpdateUsingEvents.java
@@ -10,6 +10,7 @@
 import org.apache.hadoop.hive.metastore.*;
 import org.apache.hadoop.hive.metastore.MetaStoreTestUtils;
 import org.apache.hadoop.hive.metastore.api.*;
+import org.apache.hadoop.hive.metastore.cache.CachedStore.MergedColumnStatsForPartitions;
 import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
 import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
@@ -44,7 +45,6 @@ public void setUp() throws Exception {
     MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CACHED_RAW_STORE_MAX_CACHE_MEMORY, "-1Kb");
     MetastoreConf.setVar(conf, ConfVars.TRANSACTIONAL_EVENT_LISTENERS, DbNotificationListener.class.getName());
     MetastoreConf.setVar(conf, ConfVars.RAW_STORE_IMPL, "org.apache.hadoop.hive.metastore.cache.CachedStore");
-    MetastoreConf.setBoolVar(conf, ConfVars.METASTORE_CACHE_CAN_USE_EVENT, true);
     MetastoreConf.setBoolVar(conf, ConfVars.HIVE_TXN_STATS_ENABLED, true);
     MetastoreConf.setBoolVar(conf, ConfVars.AGGREGATE_STATS_CACHE_ENABLED, false);
     MetaStoreTestUtils.setConfForStandloneMode(conf);
@@ -120,84 +120,6 @@ private void comparePartitions(Partition part1, Partition part2) {
     Assert.assertEquals(part1.getLastAccessTime(), part2.getLastAccessTime());
   }

-  @Test
-  public void testDatabaseOpsForUpdateUsingEvents() throws Exception {
-    RawStore rawStore = hmsHandler.getMS();
-
-    // Prewarm CachedStore
-    CachedStore.setCachePrewarmedState(false);
-    CachedStore.prewarm(rawStore);
-
-    // Add a db via rawStore
-    String dbName = "testDatabaseOps";
-    String dbOwner = "user1";
-    Database db = createTestDb(dbName, dbOwner);
-
-    hmsHandler.create_database(db);
-    db = rawStore.getDatabase(DEFAULT_CATALOG_NAME, dbName);
-
-    // Read database via CachedStore
-    Database dbRead = sharedCache.getDatabaseFromCache(DEFAULT_CATALOG_NAME, dbName);
-    Assert.assertEquals(db, dbRead);
-
-    // Add another db via rawStore
-    final String dbName1 = "testDatabaseOps1";
-    Database db1 = createTestDb(dbName1, dbOwner);
-    hmsHandler.create_database(db1);
-    db1 = rawStore.getDatabase(DEFAULT_CATALOG_NAME, dbName1);
-
-    // Read database via CachedStore
-    dbRead = sharedCache.getDatabaseFromCache(DEFAULT_CATALOG_NAME, dbName1);
-    Assert.assertEquals(db1, dbRead);
-
-    // Alter the db via rawStore (can only alter owner or parameters)
-    dbOwner = "user2";
-    Database newdb = new Database(db);
-    newdb.setOwnerName(dbOwner);
-    hmsHandler.alter_database(dbName, newdb);
-    newdb = rawStore.getDatabase(DEFAULT_CATALOG_NAME, dbName);
-
-    // Read db via cachedStore
-    dbRead = sharedCache.getDatabaseFromCache(DEFAULT_CATALOG_NAME, dbName);
-    Assert.assertEquals(newdb, dbRead);
-
-    // Add another db via rawStore
-    final String dbName2 = "testDatabaseOps2";
-    Database db2 = createTestDb(dbName2, dbOwner);
-    hmsHandler.create_database(db2);
-    db2 = rawStore.getDatabase(DEFAULT_CATALOG_NAME, dbName2);
-
-    // Alter db "testDatabaseOps" via rawStore
-    dbOwner = "user1";
-    newdb = new Database(db);
-    newdb.setOwnerName(dbOwner);
-    hmsHandler.alter_database(dbName, newdb);
-    newdb = rawStore.getDatabase(DEFAULT_CATALOG_NAME, dbName);
-
-    // Drop db "testDatabaseOps1" via rawStore
-    Database dropDb = rawStore.getDatabase(DEFAULT_CATALOG_NAME, dbName1);
-    hmsHandler.drop_database(dbName1, true, true);
-
-    // Read the newly added db via CachedStore
-    dbRead = sharedCache.getDatabaseFromCache(DEFAULT_CATALOG_NAME, dbName2);
-    Assert.assertEquals(db2, dbRead);
-
-    // Read the altered db via CachedStore (altered user from "user2" to "user1")
-    dbRead = sharedCache.getDatabaseFromCache(DEFAULT_CATALOG_NAME, dbName);
-    Assert.assertEquals(newdb, dbRead);
-
-    // Try to read the dropped db after cache update
-    dbRead = sharedCache.getDatabaseFromCache(DEFAULT_CATALOG_NAME, dbName1);
-    Assert.assertEquals(null, dbRead);
-
-    // Clean up
-    hmsHandler.drop_database(dbName, true, true);
-    hmsHandler.drop_database(dbName2, true, true);
-    sharedCache.getDatabaseCache().clear();
-    sharedCache.clearTableCache();
-    sharedCache.getSdCache().clear();
-  }
-
   @Test
   public void testTableOpsForUpdateUsingEvents() throws Exception {
     long lastEventId = -1;
@@ -205,7 +127,7 @@ public void testTableOpsForUpdateUsingEvents() throws Exception {

     // Prewarm CachedStore
     CachedStore.setCachePrewarmedState(false);
-    CachedStore.prewarm(rawStore);
+    CachedStore.prewarm(rawStore, conf);

     // Add a db via rawStore
     String dbName = "test_table_ops";
@@ -225,19 +147,17 @@ public void testTableOpsForUpdateUsingEvents() throws Exception {
     List<FieldSchema> ptnCols = new ArrayList<FieldSchema>();
     Table tbl = createTestTbl(dbName, tblName, tblOwner, cols, ptnCols);
     hmsHandler.create_table(tbl);
-    tbl = rawStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName);
+    tbl = rawStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName, null);

-    // Read database, table via CachedStore
-    Database dbRead= sharedCache.getDatabaseFromCache(DEFAULT_CATALOG_NAME, dbName);
-    Assert.assertEquals(db, dbRead);
-    Table tblRead = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbName, tblName);
+    // Read table via CachedStore
+    Table tblRead = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbName, tblName, null);
     compareTables(tblRead, tbl);

     // Add a new table via rawStore
     String tblName2 = "tbl2";
     Table tbl2 = createTestTbl(dbName, tblName2, tblOwner, cols, ptnCols);
     hmsHandler.create_table(tbl2);
-    tbl2 = rawStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName2);
+    tbl2 = rawStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName2, null);

     // Alter table "tbl" via rawStore
     tblOwner = "role1";
@@ -245,7 +165,7 @@ public void testTableOpsForUpdateUsingEvents() throws Exception {
     newTable.setOwner(tblOwner);
     newTable.setOwnerType(PrincipalType.ROLE);
     hmsHandler.alter_table(dbName, tblName, newTable);
-    newTable = rawStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName);
+    newTable = rawStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName, null);

     Assert.assertEquals("Owner of the table did not change.", tblOwner, newTable.getOwner());
     Assert.assertEquals("Owner type of the table did not change", PrincipalType.ROLE, newTable.getOwnerType());
@@ -254,23 +174,22 @@ public void testTableOpsForUpdateUsingEvents() throws Exception {
     hmsHandler.drop_table(dbName, tblName2, true);

     // Read the altered "tbl" via CachedStore
-    tblRead = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbName, tblName);
+    tblRead = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbName, tblName, null);
     compareTables(tblRead, newTable);

     // Try to read the dropped "tbl2" via CachedStore (should throw exception)
-    tblRead = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbName, tblName2);
+    tblRead = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbName, tblName2, null);
     Assert.assertNull(tblRead);

     // Clean up
     hmsHandler.drop_database(dbName, true, true);
-    tblRead = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbName, tblName2);
+    tblRead = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbName, tblName2, null);
     Assert.assertNull(tblRead);

-    tblRead = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbName, tblName);
+    tblRead = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbName, tblName, null);
     Assert.assertNull(tblRead);

-    sharedCache.getDatabaseCache().clear();
     sharedCache.clearTableCache();
     sharedCache.getSdCache().clear();
   }
@@ -282,7 +201,7 @@ public void testPartitionOpsForUpdateUsingEvents() throws Exception {

     // Prewarm CachedStore
     CachedStore.setCachePrewarmedState(false);
-    CachedStore.prewarm(rawStore);
+    CachedStore.prewarm(rawStore, conf);

     // Add a db via rawStore
     String dbName = "test_partition_ops";
@@ -304,7 +223,7 @@ public void testPartitionOpsForUpdateUsingEvents() throws Exception {
     ptnCols.add(ptnCol1);
     Table tbl = createTestTbl(dbName, tblName, tblOwner, cols, ptnCols);
     hmsHandler.create_table(tbl);
-    tbl = rawStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName);
+    tbl = rawStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName, null);

     final String ptnColVal1 = "aaa";
     Map<String, String> partParams = new HashMap<String, String>();
@@ -313,7 +232,7 @@ public void testPartitionOpsForUpdateUsingEvents() throws Exception {
             0, tbl.getSd(), partParams);
     ptn1.setCatName(DEFAULT_CATALOG_NAME);
     hmsHandler.add_partition(ptn1);
-    ptn1 = rawStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1));
+    ptn1 = rawStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1), null);

     final String ptnColVal2 = "bbb";
     Partition ptn2 =
@@ -321,13 +240,10 @@ public void testPartitionOpsForUpdateUsingEvents() throws Exception {
             0, tbl.getSd(), partParams);
     ptn2.setCatName(DEFAULT_CATALOG_NAME);
     hmsHandler.add_partition(ptn2);
-    ptn2 = rawStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal2));
+    ptn2 = rawStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal2), null);

-    // Read database, table, partition via CachedStore
-    Database dbRead = sharedCache.getDatabaseFromCache(DEFAULT_CATALOG_NAME.toLowerCase(), dbName.toLowerCase());
-    Assert.assertEquals(db, dbRead);
     Table tblRead = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME.toLowerCase(),
-        dbName.toLowerCase(), tblName.toLowerCase());
+        dbName.toLowerCase(), tblName.toLowerCase(), null);
     compareTables(tbl, tblRead);
     Partition ptn1Read = sharedCache.getPartitionFromCache(DEFAULT_CATALOG_NAME.toLowerCase(),
         dbName.toLowerCase(), tblName.toLowerCase(), Arrays.asList(ptnColVal1));
@@ -343,20 +259,20 @@ public void testPartitionOpsForUpdateUsingEvents() throws Exception {
             0, tbl.getSd(), partParams);
     ptn3.setCatName(DEFAULT_CATALOG_NAME);
     hmsHandler.add_partition(ptn3);
-    ptn3 = rawStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal3));
+    ptn3 = rawStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal3), null);
     // Alter an existing partition ("aaa") via rawStore
-    ptn1 = rawStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1));
+    ptn1 = rawStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1), null);
     final String ptnColVal1Alt = "aaa";
     Partition ptn1Atl = new Partition(Arrays.asList(ptnColVal1Alt), dbName, tblName, 0,
         0, tbl.getSd(), partParams);
     ptn1Atl.setCatName(DEFAULT_CATALOG_NAME);
     hmsHandler.alter_partitions(dbName, tblName, Arrays.asList(ptn1Atl));
-    ptn1Atl = rawStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1Alt));
+    ptn1Atl = rawStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1Alt), null);

     // Drop an existing partition ("bbb") via rawStore
-    Partition ptnDrop = rawStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal2));
+    Partition ptnDrop = rawStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal2), null);
     hmsHandler.drop_partition(dbName, tblName, Arrays.asList(ptnColVal2), false);

     // Read the newly added partition via CachedStore
@@ -382,13 +298,12 @@ public void testPartitionOpsForUpdateUsingEvents() throws Exception {

     // Clean up
     rawStore.dropDatabase(DEFAULT_CATALOG_NAME, dbName);
-    sharedCache.getDatabaseCache().clear();
     sharedCache.clearTableCache();
     sharedCache.getSdCache().clear();
   }

-  private void updateTableColStats(String dbName, String tblName, String[] colName,
-      double highValue, double avgColLen, boolean isTxnTable) throws Throwable {
+  private long updateTableColStats(String dbName, String tblName, String[] colName,
+      double highValue, double avgColLen, boolean isTxnTable, long lastEventId) throws Throwable {
     long writeId = -1;
     String validWriteIds = null;
     if (isTxnTable) {
@@ -412,6 +327,7 @@ private void updateTableColStats(String dbName, String tblName, String[] colName

     // write stats objs persistently
     hmsHandler.update_table_column_statistics_req(setTblColStat);
+    lastEventId = CachedStore.updateUsingNotificationEvents(rawStore, lastEventId, null);

     validateTablePara(dbName, tblName);

     ColumnStatistics colStatsCache = sharedCache.getTableColStatsFromCache(DEFAULT_CATALOG_NAME,
@@ -423,10 +339,11 @@ private void updateTableColStats(String dbName, String tblName, String[] colName
         dbName, tblName, Lists.newArrayList(colName[1]), validWriteIds, true);
     Assert.assertEquals(colStatsCache.getStatsObj().get(0).getColName(), colName[1]);
     verifyStatString(colStatsCache.getStatsObj().get(0), colName[1], avgColLen);
+    return lastEventId;
   }

-  private void updatePartColStats(String dbName, String tblName, boolean isTxnTable, String[] colName,
-      String partName, double highValue, double avgColLen) throws Throwable {
+  private long updatePartColStats(String dbName, String tblName, boolean isTxnTable, String[] colName,
+      String partName, double highValue, double avgColLen, long lastEventId) throws Throwable {
     long writeId = -1;
     String validWriteIds = null;
     List<Long> txnIds = null;
@@ -471,7 +388,7 @@ private void updatePartColStats(String dbName, String tblName, boolean isTxnTabl
     } else {
       Assert.assertEquals(statRowStore.get(0).isIsStatsCompliant(), false);
     }
-
+    lastEventId = CachedStore.updateUsingNotificationEvents(rawStore, lastEventId, conf);
     List<ColumnStatistics> statSharedCache = sharedCache.getPartitionColStatsListFromCache(DEFAULT_CATALOG_NAME,
         dbName, tblName, Collections.singletonList(partName), Collections.singletonList(colName[1]),
         validWriteIds, true);
@@ -489,6 +406,8 @@ private void updatePartColStats(String dbName, String tblName, boolean isTxnTabl
     statPartCache = sharedCache.getPartitionColStatsFromCache(DEFAULT_CATALOG_NAME, dbName, tblName,
         CachedStore.partNameToVals(partName), colName[1], validWriteIds);
     verifyStatString(statPartCache.getColumnStatisticsObj(), colName[1], avgColLen);
+
+    return lastEventId;
   }

   private List<ColumnStatisticsObj> getStatsObjects(String dbName, String tblName, String[] colName,
@@ -572,7 +491,7 @@ private void setUpBeforeTest(String dbName, String tblName, String[] colName, bo

     // Prewarm CachedStore
     CachedStore.setCachePrewarmedState(false);
-    CachedStore.prewarm(rawStore);
+    CachedStore.prewarm(rawStore, conf);

     // Add a db via rawStore
     Database db = createTestDb(dbName, dbOwner);
@@ -670,8 +589,8 @@ private String getValidWriteIds(String dbName, String tblName) throws Throwable
   }

   private void validateTablePara(String dbName, String tblName) throws Throwable {
-    Table tblRead = rawStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName);
-    Table tblRead1 = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbName, tblName);
+    Table tblRead = rawStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName, null);
+    Table tblRead1 = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbName, tblName, null);
     Assert.assertEquals(tblRead.getParameters(), tblRead1.getParameters());
   }
@@ -706,18 +625,19 @@ private void testTableColStatInternal(String dbName, String tblName, boolean isT
     String[] colName = new String[]{"income", "name"};
     double highValue = 1200000.4525;
     double avgColLen = 50.30;
+    long lastEventId = 0;

     setUpBeforeTest(dbName, tblName, colName, isTxnTable);
-    updateTableColStats(dbName, tblName, colName, highValue, avgColLen, isTxnTable);
+    lastEventId = updateTableColStats(dbName, tblName, colName, highValue, avgColLen, isTxnTable, lastEventId);
     if (!isTxnTable) {
       deleteColStats(dbName, tblName, colName);
     }

     tblName = "tbl_part";
     createTableWithPart(dbName, tblName, colName, isTxnTable);
-    List<String> partitions = hmsHandler.get_partition_names(dbName, tblName, (short)-1);
+    List<String> partitions = hmsHandler.get_partition_names(dbName, tblName, (short)-1, null);
     String partName = partitions.get(0);
-    updatePartColStats(dbName, tblName, isTxnTable, colName, partName, highValue, avgColLen);
+    lastEventId = updatePartColStats(dbName, tblName, isTxnTable, colName, partName, highValue, avgColLen, lastEventId);
     if (!isTxnTable) {
       deletePartColStats(dbName, tblName, colName, partName);
     }
@@ -747,11 +667,12 @@ public void testTableColumnStatisticsTxnTableMulti() throws Throwable {

     setUpBeforeTest(dbName, null, colName, true);
     createTableWithPart(dbName, tblName, colName, true);
-    List<String> partitions = hmsHandler.get_partition_names(dbName, tblName, (short)-1);
+    List<String> partitions = hmsHandler.get_partition_names(dbName, tblName, (short)-1, null);
     String partName = partitions.get(0);
-    updatePartColStats(dbName, tblName, true, colName, partName, highValue, avgColLen);
-    updatePartColStats(dbName, tblName, true, colName, partName, 1200000.4521, avgColLen);
-    updatePartColStats(dbName, tblName, true, colName, partName, highValue, 34.78);
+    long lastEventId = 0;
+    lastEventId = updatePartColStats(dbName, tblName, true, colName, partName, highValue, avgColLen, lastEventId);
+    lastEventId = updatePartColStats(dbName, tblName, true, colName, partName, 1200000.4521, avgColLen, lastEventId);
+    lastEventId = updatePartColStats(dbName, tblName, true, colName, partName, highValue, 34.78, lastEventId);
   }

   @Test
@@ -761,10 +682,11 @@ public void testTableColumnStatisticsTxnTableMultiAbort() throws Throwable {
     String[] colName = new String[]{"income", "name"};
     double highValue = 1200000.4525;
     double avgColLen = 50.30;
+    long lastEventId = 0;

     setUpBeforeTest(dbName, null, colName, true);
     createTableWithPart(dbName, tblName, colName, true);
-    List<String> partitions = hmsHandler.get_partition_names(dbName, tblName, (short)-1);
+    List<String> partitions = hmsHandler.get_partition_names(dbName, tblName, (short)-1, null);
     String partName = partitions.get(0);

     List<Long> txnIds = allocateTxns(1);
@@ -804,6 +726,7 @@ public void testTableColumnStatisticsTxnTableMultiAbort() throws Throwable {
     verifyStat(statRawStore.get(0).getStatsObj(), colName, highValue, avgColLen);
     Assert.assertEquals(statRawStore.get(0).isIsStatsCompliant(), false);

+    lastEventId = CachedStore.updateUsingNotificationEvents(rawStore, lastEventId, conf);
     List<ColumnStatistics> statsListFromCache = sharedCache.getPartitionColStatsListFromCache(DEFAULT_CATALOG_NAME,
         dbName, tblName, Collections.singletonList(partName), Collections.singletonList(colName[1]),
         validWriteIds, true);
@@ -824,14 +747,15 @@ public void testTableColumnStatisticsTxnTableOpenTxn() throws Throwable {
     String[] colName = new String[]{"income", "name"};
     double highValue = 1200000.4121;
     double avgColLen = 23.30;
+    long lastEventId = 0;

     setUpBeforeTest(dbName, null, colName, true);
     createTableWithPart(dbName, tblName, colName, true);
-    List<String> partitions = hmsHandler.get_partition_names(dbName, tblName, (short)-1);
+    List<String> partitions = hmsHandler.get_partition_names(dbName, tblName, (short)-1, null);
     String partName = partitions.get(0);

     // update part col stats successfully.
-    updatePartColStats(dbName, tblName, true, colName, partName, 1.2, 12.2);
+    lastEventId = updatePartColStats(dbName, tblName, true, colName, partName, 1.2, 12.2, lastEventId);

     List<Long> txnIds = allocateTxns(1);
     long writeId = allocateWriteIds(txnIds, dbName, tblName).get(0).getWriteId();
@@ -854,6 +778,7 @@ public void testTableColumnStatisticsTxnTableOpenTxn() throws Throwable {

     // write stats objs persistently
     hmsHandler.update_partition_column_statistics_req(setTblColStat);
+    lastEventId = CachedStore.updateUsingNotificationEvents(rawStore, lastEventId, conf);

     // keep the txn open and verify that the stats got is not compliant.

@@ -904,9 +829,9 @@ private void verifyAggrStat(String dbName, String tblName, String[] colName, Lis
     Assert.assertEquals(aggrStatsCached, aggrStats);
     //Assert.assertEquals(aggrStatsCached.isIsStatsCompliant(), true);

-    List<ColumnStatisticsObj> stats = sharedCache.getAggrStatsFromCache(DEFAULT_CATALOG_NAME, dbName, tblName,
-        Collections.singletonList(colName[0]), SharedCache.StatsType.ALL);
-    Assert.assertEquals(stats.get(0).getStatsData().getDoubleStats().getHighValue(), highValue, 0.01);
+    MergedColumnStatsForPartitions stats = CachedStore.mergeColStatsForPartitions(DEFAULT_CATALOG_NAME, dbName, tblName, Lists.newArrayList("income=1", "income=2"),
+        Collections.singletonList(colName[0]), sharedCache, SharedCache.StatsType.ALL, validWriteIds, false, 0.0);
+    Assert.assertEquals(stats.colStats.get(0).getStatsData().getDoubleStats().getHighValue(), highValue, 0.01);
   }

   @Test
@@ -917,15 +842,17 @@ public void testAggrStat() throws Throwable {

     setUpBeforeTest(dbName, null, colName, false);
     createTableWithPart(dbName, tblName, colName, false);
-    List<String> partitions = hmsHandler.get_partition_names(dbName, tblName, (short) -1);
+    List<String> partitions = hmsHandler.get_partition_names(dbName, tblName, (short) -1, null);
     String partName = partitions.get(0);

     // update part col stats successfully.
- updatePartColStats(dbName, tblName, false, colName, partitions.get(0), 2, 12); - updatePartColStats(dbName, tblName, false, colName, partitions.get(1), 4, 10); + long lastEventId = 0; + lastEventId = updatePartColStats(dbName, tblName, false, colName, partitions.get(0), 2, 12, lastEventId); + lastEventId = updatePartColStats(dbName, tblName, false, colName, partitions.get(1), 4, 10, lastEventId); + lastEventId = CachedStore.updateUsingNotificationEvents(rawStore, lastEventId, conf); verifyAggrStat(dbName, tblName, colName, partitions, false, 4); - updatePartColStats(dbName, tblName, false, colName, partitions.get(1), 3, 10); + lastEventId = updatePartColStats(dbName, tblName, false, colName, partitions.get(1), 3, 10, lastEventId); verifyAggrStat(dbName, tblName, colName, partitions, false, 3); } @@ -934,18 +861,19 @@ public void testAggrStatTxnTable() throws Throwable { String dbName = "aggr_stats_test_db_txn"; String tblName = "tbl_part"; String[] colName = new String[]{"income", "name"}; + long lastEventId = 0; setUpBeforeTest(dbName, null, colName, true); createTableWithPart(dbName, tblName, colName, true); - List partitions = hmsHandler.get_partition_names(dbName, tblName, (short)-1); + List partitions = hmsHandler.get_partition_names(dbName, tblName, (short)-1, null); String partName = partitions.get(0); // update part col stats successfully. - updatePartColStats(dbName, tblName, true, colName, partitions.get(0), 2, 12); - updatePartColStats(dbName, tblName, true, colName, partitions.get(1), 4, 10); + lastEventId = updatePartColStats(dbName, tblName, true, colName, partitions.get(0), 2, 12, lastEventId); + lastEventId = updatePartColStats(dbName, tblName, true, colName, partitions.get(1), 4, 10, lastEventId); verifyAggrStat(dbName, tblName, colName, partitions, true, 4); - updatePartColStats(dbName, tblName, true, colName, partitions.get(1), 3, 10); + lastEventId = updatePartColStats(dbName, tblName, true, colName, partitions.get(1), 3, 10, lastEventId); verifyAggrStat(dbName, tblName, colName, partitions, true, 3); List txnIds = allocateTxns(1); @@ -988,15 +916,16 @@ public void testAggrStatAbortTxn() throws Throwable { String dbName = "aggr_stats_test_db_txn_abort"; String tblName = "tbl_part"; String[] colName = new String[]{"income", "name"}; + long lastEventId = 0; setUpBeforeTest(dbName, null, colName, true); createTableWithPart(dbName, tblName, colName, true); - List partitions = hmsHandler.get_partition_names(dbName, tblName, (short)-1); + List partitions = hmsHandler.get_partition_names(dbName, tblName, (short)-1, null); String partName = partitions.get(0); // update part col stats successfully. 
- updatePartColStats(dbName, tblName, true, colName, partitions.get(0), 2, 12); - updatePartColStats(dbName, tblName, true, colName, partitions.get(1), 4, 10); + lastEventId = updatePartColStats(dbName, tblName, true, colName, partitions.get(0), 2, 12, lastEventId); + lastEventId = updatePartColStats(dbName, tblName, true, colName, partitions.get(1), 4, 10, lastEventId); verifyAggrStat(dbName, tblName, colName, partitions, true, 4); List txnIds = allocateTxns(4); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java index 61be5a3a5b..6aeb9f3f7c 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java @@ -643,18 +643,18 @@ public void minorCompactWhileStreaming() throws Exception { Path resultFile = null; for (int i = 0; i < names.length; i++) { names[i] = stat[i].getPath().getName(); - if (names[i].equals("delta_0000001_0000004_v0000009")) { + if (names[i].equals("delta_0000002_0000005_v0000009")) { resultFile = stat[i].getPath(); } } Arrays.sort(names); - String[] expected = new String[]{"delta_0000001_0000002", - "delta_0000001_0000004_v0000009", "delta_0000003_0000004", "delta_0000005_0000006"}; + String[] expected = new String[]{"delta_0000002_0000003", + "delta_0000002_0000005_v0000009", "delta_0000004_0000005", "delta_0000006_0000007"}; if (!Arrays.deepEquals(expected, names)) { Assert.fail("Expected: " + Arrays.toString(expected) + ", found: " + Arrays.toString(names) + ",stat=" + toString(stat)); } checkExpectedTxnsPresent(null, new Path[]{resultFile}, columnNamesProperty, columnTypesProperty, - 0, 1L, 4L, 1); + 0, 2L, 5L, 1); } finally { if (connection != null) { @@ -697,8 +697,8 @@ public void majorCompactWhileStreaming() throws Exception { Assert.fail("Expecting 1 file \"base_0000004\" and found " + stat.length + " files " + Arrays.toString(stat)); } String name = stat[0].getPath().getName(); - Assert.assertEquals("base_0000004_v0000009", name); - checkExpectedTxnsPresent(stat[0].getPath(), null, columnNamesProperty, columnTypesProperty, 0, 1L, 4L, 1); + Assert.assertEquals("base_0000005_v0000009", name); + checkExpectedTxnsPresent(stat[0].getPath(), null, columnNamesProperty, columnTypesProperty, 0, 2L, 5L, 1); } finally { if (connection != null) { connection.close(); @@ -740,17 +740,17 @@ private void minorCompactAfterAbort(boolean newStreamingAPI) throws Exception { Path resultDelta = null; for (int i = 0; i < names.length; i++) { names[i] = stat[i].getPath().getName(); - if (names[i].equals("delta_0000001_0000004_v0000009")) { + if (names[i].equals("delta_0000002_0000005_v0000009")) { resultDelta = stat[i].getPath(); } } Arrays.sort(names); - String[] expected = new String[]{"delta_0000001_0000002", - "delta_0000001_0000004_v0000009", "delta_0000003_0000004"}; + String[] expected = new String[]{"delta_0000002_0000003", + "delta_0000002_0000005_v0000009", "delta_0000004_0000005"}; if (!Arrays.deepEquals(expected, names)) { Assert.fail("Expected: " + Arrays.toString(expected) + ", found: " + Arrays.toString(names)); } - checkExpectedTxnsPresent(null, new Path[]{resultDelta}, columnNamesProperty, columnTypesProperty, 0, 1L, 4L, 1); + checkExpectedTxnsPresent(null, new Path[]{resultDelta}, columnNamesProperty, columnTypesProperty, 0, 2L, 5L, 1); } @Test @@ -787,10 +787,10 @@ private void 
majorCompactAfterAbort(boolean newStreamingAPI) throws Exception { Assert.fail("Expecting 1 file \"base_0000004\" and found " + stat.length + " files " + Arrays.toString(stat)); } String name = stat[0].getPath().getName(); - if (!name.equals("base_0000004_v0000009")) { + if (!name.equals("base_0000005_v0000009")) { Assert.fail("majorCompactAfterAbort name " + name + " not equals to base_0000004"); } - checkExpectedTxnsPresent(stat[0].getPath(), null, columnNamesProperty, columnTypesProperty, 0, 1L, 4L, 1); + checkExpectedTxnsPresent(stat[0].getPath(), null, columnNamesProperty, columnTypesProperty, 0, 2L, 5L, 1); } @@ -817,12 +817,12 @@ public void mmTable() throws Exception { runMajorCompaction(dbName, tblName); verifyFooBarResult(tblName, 1); - verifyHasBase(table.getSd(), fs, "base_0000002_v0000006"); + verifyHasBase(table.getSd(), fs, "base_0000003_v0000006"); // Make sure we don't compact if we don't need to compact. runMajorCompaction(dbName, tblName); verifyFooBarResult(tblName, 1); - verifyHasBase(table.getSd(), fs, "base_0000002_v0000006"); + verifyHasBase(table.getSd(), fs, "base_0000003_v0000006"); } @Test @@ -938,7 +938,7 @@ public void mmTableBucketed() throws Exception { runMajorCompaction(dbName, tblName); verifyFooBarResult(tblName, 1); - String baseDir = "base_0000002_v0000006"; + String baseDir = "base_0000003_v0000006"; verifyHasBase(table.getSd(), fs, baseDir); FileStatus[] files = fs.listStatus(new Path(table.getSd().getLocation(), baseDir), @@ -965,7 +965,7 @@ public void mmTableOpenWriteId() throws Exception { long openTxnId = msClient.openTxn("test"); long openWriteId = msClient.allocateTableWriteId(openTxnId, dbName, tblName); - Assert.assertEquals(3, openWriteId); // Just check to make sure base_5 below is not new. + Assert.assertEquals(4, openWriteId); // Just check to make sure base_5 below is not new. executeStatementOnDriver("INSERT INTO " + tblName +"(a,b) VALUES(1, 'foo')", driver); executeStatementOnDriver("INSERT INTO " + tblName +"(a,b) VALUES(2, 'bar')", driver); @@ -974,19 +974,19 @@ public void mmTableOpenWriteId() throws Exception { runMajorCompaction(dbName, tblName); // Don't compact 4 and 5; 3 is opened. FileSystem fs = FileSystem.get(conf); - verifyHasBase(table.getSd(), fs, "base_0000002_v0000010"); + verifyHasBase(table.getSd(), fs, "base_0000003_v0000010"); verifyDirCount(table.getSd(), fs, 1, AcidUtils.baseFileFilter); verifyFooBarResult(tblName, 2); runCleaner(conf); - verifyHasDir(table.getSd(), fs, "delta_0000004_0000004_0000", AcidUtils.deltaFileFilter); verifyHasDir(table.getSd(), fs, "delta_0000005_0000005_0000", AcidUtils.deltaFileFilter); + verifyHasDir(table.getSd(), fs, "delta_0000006_0000006_0000", AcidUtils.deltaFileFilter); verifyFooBarResult(tblName, 2); msClient.abortTxns(Lists.newArrayList(openTxnId)); // Now abort 3. runMajorCompaction(dbName, tblName); // Compact 4 and 5. 
verifyFooBarResult(tblName, 2); - verifyHasBase(table.getSd(), fs, "base_0000005_v0000016"); + verifyHasBase(table.getSd(), fs, "base_0000006_v0000016"); runCleaner(conf); verifyDeltaCount(table.getSd(), fs, 0); } @@ -1050,8 +1050,8 @@ public void mmTablePartitioned() throws Exception { verifyFooBarResult(tblName, 3); verifyDeltaCount(p3.getSd(), fs, 1); - verifyHasBase(p1.getSd(), fs, "base_0000006_v0000010"); - verifyHasBase(p2.getSd(), fs, "base_0000006_v0000014"); + verifyHasBase(p1.getSd(), fs, "base_0000007_v0000010"); + verifyHasBase(p2.getSd(), fs, "base_0000007_v0000014"); executeStatementOnDriver("INSERT INTO " + tblName + " partition (ds) VALUES(1, 'foo', 2)", driver); executeStatementOnDriver("INSERT INTO " + tblName + " partition (ds) VALUES(2, 'bar', 2)", driver); @@ -1061,8 +1061,8 @@ public void mmTablePartitioned() throws Exception { // Make sure we don't compact if we don't need to compact; but do if we do. verifyFooBarResult(tblName, 4); verifyDeltaCount(p3.getSd(), fs, 1); - verifyHasBase(p1.getSd(), fs, "base_0000006_v0000010"); - verifyHasBase(p2.getSd(), fs, "base_0000008_v0000023"); + verifyHasBase(p1.getSd(), fs, "base_0000007_v0000010"); + verifyHasBase(p2.getSd(), fs, "base_0000009_v0000023"); } @@ -1159,8 +1159,8 @@ private void majorCompactWhileStreamingForSplitUpdate(boolean newStreamingAPI) t Assert.fail("Expecting 1 file \"base_0000004\" and found " + stat.length + " files " + Arrays.toString(stat)); } String name = stat[0].getPath().getName(); - Assert.assertEquals("base_0000004_v0000009", name); - checkExpectedTxnsPresent(stat[0].getPath(), null, columnNamesProperty, columnTypesProperty, 1, 1L, 4L, 2); + Assert.assertEquals("base_0000005_v0000009", name); + checkExpectedTxnsPresent(stat[0].getPath(), null, columnNamesProperty, columnTypesProperty, 1, 2L, 5L, 2); if (connection1 != null) { connection1.close(); } @@ -1209,18 +1209,18 @@ public void testMinorCompactionForSplitUpdateWithInsertsAndDeletes() throws Exce Path minorCompactedDelta = null; for (int i = 0; i < deltas.length; i++) { deltas[i] = stat[i].getPath().getName(); - if (deltas[i].equals("delta_0000001_0000003_v0000006")) { + if (deltas[i].equals("delta_0000002_0000004_v0000006")) { minorCompactedDelta = stat[i].getPath(); } } Arrays.sort(deltas); - String[] expectedDeltas = new String[]{"delta_0000001_0000001_0000", "delta_0000001_0000003_v0000006", - "delta_0000002_0000002_0000"}; + String[] expectedDeltas = new String[]{"delta_0000002_0000002_0000", "delta_0000002_0000004_v0000006", + "delta_0000003_0000003_0000"}; if (!Arrays.deepEquals(expectedDeltas, deltas)) { Assert.fail("Expected: " + Arrays.toString(expectedDeltas) + ", found: " + Arrays.toString(deltas)); } checkExpectedTxnsPresent(null, new Path[]{minorCompactedDelta}, columnNamesProperty, columnTypesProperty, - 0, 1L, 2L, 1); + 0, 2L, 3L, 1); // Verify that we have got correct set of delete_deltas. 
FileStatus[] deleteDeltaStat = @@ -1229,17 +1229,17 @@ public void testMinorCompactionForSplitUpdateWithInsertsAndDeletes() throws Exce Path minorCompactedDeleteDelta = null; for (int i = 0; i < deleteDeltas.length; i++) { deleteDeltas[i] = deleteDeltaStat[i].getPath().getName(); - if (deleteDeltas[i].equals("delete_delta_0000001_0000003_v0000006")) { + if (deleteDeltas[i].equals("delete_delta_0000002_0000004_v0000006")) { minorCompactedDeleteDelta = deleteDeltaStat[i].getPath(); } } Arrays.sort(deleteDeltas); - String[] expectedDeleteDeltas = new String[]{"delete_delta_0000001_0000003_v0000006", "delete_delta_0000003_0000003_0000"}; + String[] expectedDeleteDeltas = new String[]{"delete_delta_0000002_0000004_v0000006", "delete_delta_0000004_0000004_0000"}; if (!Arrays.deepEquals(expectedDeleteDeltas, deleteDeltas)) { Assert.fail("Expected: " + Arrays.toString(expectedDeleteDeltas) + ", found: " + Arrays.toString(deleteDeltas)); } checkExpectedTxnsPresent(null, new Path[]{minorCompactedDeleteDelta}, columnNamesProperty, columnTypesProperty, - 0, 2L, 2L, 1); + 0, 3L, 3L, 1); } @Test @@ -1281,18 +1281,18 @@ public void testMinorCompactionForSplitUpdateWithOnlyInserts() throws Exception Path minorCompactedDelta = null; for (int i = 0; i < deltas.length; i++) { deltas[i] = stat[i].getPath().getName(); - if (deltas[i].equals("delta_0000001_0000002_v0000005")) { + if (deltas[i].equals("delta_0000002_0000003_v0000005")) { minorCompactedDelta = stat[i].getPath(); } } Arrays.sort(deltas); - String[] expectedDeltas = new String[]{"delta_0000001_0000001_0000", "delta_0000001_0000002_v0000005", - "delta_0000002_0000002_0000"}; + String[] expectedDeltas = new String[]{"delta_0000002_0000002_0000", "delta_0000002_0000003_v0000005", + "delta_0000003_0000003_0000"}; if (!Arrays.deepEquals(expectedDeltas, deltas)) { Assert.fail("Expected: " + Arrays.toString(expectedDeltas) + ", found: " + Arrays.toString(deltas)); } checkExpectedTxnsPresent(null, new Path[]{minorCompactedDelta}, columnNamesProperty, columnTypesProperty, - 0, 1L, 2L, 1); + 0, 2L, 3L, 1); //Assert that we have no delete deltas if there are no input delete events. FileStatus[] deleteDeltaStat = @@ -1358,18 +1358,18 @@ private void minorCompactWhileStreamingWithSplitUpdate(boolean newStreamingAPI) Path resultFile = null; for (int i = 0; i < names.length; i++) { names[i] = stat[i].getPath().getName(); - if (names[i].equals("delta_0000001_0000004_v0000009")) { + if (names[i].equals("delta_0000002_0000005_v0000009")) { resultFile = stat[i].getPath(); } } Arrays.sort(names); - String[] expected = new String[]{"delta_0000001_0000002", - "delta_0000001_0000004_v0000009", "delta_0000003_0000004", "delta_0000005_0000006"}; + String[] expected = new String[]{"delta_0000002_0000003", + "delta_0000002_0000005_v0000009", "delta_0000004_0000005", "delta_0000006_0000007"}; if (!Arrays.deepEquals(expected, names)) { Assert.fail("Expected: " + Arrays.toString(expected) + ", found: " + Arrays.toString(names)); } checkExpectedTxnsPresent(null, new Path[]{resultFile}, columnNamesProperty, columnTypesProperty, - 0, 1L, 4L, 1); + 0, 2L, 5L, 1); //Assert that we have no delete deltas if there are no input delete events. 
FileStatus[] deleteDeltaStat = @@ -1699,6 +1699,9 @@ public boolean isWriteIdAborted(long writeid) { public RangeResponse isWriteIdRangeAborted(long minWriteId, long maxWriteId) { return RangeResponse.ALL; } + + @Override + public void commitWriteId(long writeId) {} }; OrcInputFormat aif = new OrcInputFormat(); diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/MetaStoreDumpUtility.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/MetaStoreDumpUtility.java index 2389c3bc68..25c1d5fe66 100644 --- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/MetaStoreDumpUtility.java +++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/MetaStoreDumpUtility.java @@ -42,6 +42,7 @@ import org.apache.commons.compress.compressors.bzip2.BZip2CompressorInputStream; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.Warehouse; +import org.apache.hadoop.hive.metastore.cache.CachedStore; import org.apache.hive.testutils.HiveTestEnvSetup; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -223,6 +224,8 @@ public int compare(String str1, String str2) { conn.close(); + CachedStore.clearSharedCache(); + } catch (Exception e) { throw new RuntimeException("error while loading tpcds metastore dump", e); } diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java index d2c2ccd5ea..9ec59a08c6 100644 --- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java +++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java @@ -156,10 +156,13 @@ public void initConf() throws Exception { conf.setBoolVar(ConfVars.HIVE_VECTORIZATION_ENABLED, true); } - // Plug verifying metastore in for testing DirectSQL. - conf.setVar(ConfVars.METASTORE_RAW_STORE_IMPL, "org.apache.hadoop.hive.metastore.VerifyingObjectStore"); - miniClusters.initConf(conf); + + // Plug the CachedStore in as the metastore RawStore and enable ACID: the DbNotificationListener keeps the cache coherent, and the DbTxnManager provides real transactions. + MetastoreConf.setVar(conf, MetastoreConf.ConfVars.RAW_STORE_IMPL, "org.apache.hadoop.hive.metastore.cache.CachedStore"); + MetastoreConf.setVar(conf, MetastoreConf.ConfVars.TRANSACTIONAL_EVENT_LISTENERS, "org.apache.hive.hcatalog.listener.DbNotificationListener"); + HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, true); + HiveConf.setVar(conf, HiveConf.ConfVars.HIVE_TXN_MANAGER, "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager"); } public QTestUtil(QTestArguments testArgs) throws Exception { @@ -289,6 +292,7 @@ public void clearTablesCreatedDuringTests() throws Exception { conf.set("hive.metastore.filter.hook", "org.apache.hadoop.hive.metastore.DefaultMetaStoreFilterHookImpl"); db = Hive.get(conf); + SessionState.get().initTxnMgr(conf); // First delete any MVs to avoid race conditions for (String dbName : db.getAllDatabases()) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java index 8c764e2be5..2df146d410 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java @@ -1747,35 +1747,40 @@ public void releaseLocksAndCommitOrRollback(boolean commit, HiveTxnManager txnMa } // If we've opened a transaction we need to commit or rollback rather than explicitly // releasing the locks.
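Condensed, the QTestUtil.initConf() changes above amount to the following sketch; the wrapper class is illustrative, but the setter calls are taken verbatim from the hunk:

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;

public class AcidTestConf {
  public static HiveConf build() {
    HiveConf conf = new HiveConf();
    // Serve metastore reads from the shared in-process cache
    // (cleared between dumps via CachedStore.clearSharedCache()).
    MetastoreConf.setVar(conf, MetastoreConf.ConfVars.RAW_STORE_IMPL,
        "org.apache.hadoop.hive.metastore.cache.CachedStore");
    // Publish DDL/DML notification events so the cache can be kept coherent.
    MetastoreConf.setVar(conf, MetastoreConf.ConfVars.TRANSACTIONAL_EVENT_LISTENERS,
        "org.apache.hive.hcatalog.listener.DbNotificationListener");
    // Enable ACID: concurrency support plus the DB-backed transaction manager.
    HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, true);
    HiveConf.setVar(conf, HiveConf.ConfVars.HIVE_TXN_MANAGER,
        "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager");
    return conf;
  }
}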
- conf.unset(ValidTxnList.VALID_TXNS_KEY); - conf.unset(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY); if(!checkConcurrency()) { return; } - if (txnMgr.isTxnOpen()) { - if (commit) { - if(conf.getBoolVar(ConfVars.HIVE_IN_TEST) && conf.getBoolVar(ConfVars.HIVETESTMODEROLLBACKTXN)) { + try { + if (txnMgr.isTxnOpen()) { + if (commit) { + if(conf.getBoolVar(ConfVars.HIVE_IN_TEST) && conf.getBoolVar(ConfVars.HIVETESTMODEROLLBACKTXN)) { + txnMgr.rollbackTxn(); + } + else { + txnMgr.commitTxn();//both commit & rollback clear ALL locks for this tx + } + } else { txnMgr.rollbackTxn(); } - else { - txnMgr.commitTxn();//both commit & rollback clear ALL locks for this tx - } } else { - txnMgr.rollbackTxn(); + //since there is no tx, we only have locks for current query (if any) + if (ctx != null && ctx.getHiveLocks() != null) { + hiveLocks.addAll(ctx.getHiveLocks()); + } + txnMgr.releaseLocks(hiveLocks); } - } else { - //since there is no tx, we only have locks for current query (if any) - if (ctx != null && ctx.getHiveLocks() != null) { - hiveLocks.addAll(ctx.getHiveLocks()); + } finally { + hiveLocks.clear(); + if (ctx != null) { + ctx.setHiveLocks(null); } - txnMgr.releaseLocks(hiveLocks); - } - hiveLocks.clear(); - if (ctx != null) { - ctx.setHiveLocks(null); - } - perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.RELEASE_LOCKS); + conf.unset(ValidTxnList.VALID_TXNS_KEY); + conf.unset(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY); + SessionState.get().getConf().unset(ValidTxnList.VALID_TXNS_KEY); + SessionState.get().getConf().unset(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY); + perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.RELEASE_LOCKS); + } } /** diff --git a/ql/src/java/org/apache/hadoop/hive/ql/QueryState.java b/ql/src/java/org/apache/hadoop/hive/ql/QueryState.java index 267f7d041f..1077421ac4 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/QueryState.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/QueryState.java @@ -21,6 +21,8 @@ import java.util.Map; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager; +import org.apache.hadoop.hive.ql.metadata.Hive; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.plan.HiveOperation; import org.apache.hadoop.hive.ql.session.LineageState; import org.apache.hadoop.hive.ql.session.SessionState; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java index 295fe7cbd0..be4d4dc334 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java @@ -1969,6 +1969,42 @@ public static TableSnapshot getTableSnapshot(Configuration conf, validWriteIdList != null ? validWriteIdList.toString() : null); } + /** + * Called by Hive.java for all write (DDL) operations. Advances the write id + * for the table via the transaction manager and stores it in the config. The + * write id is marked as committed in the config right away: all DDL is + * auto-committed, so there is no chance to roll back.
+ */ + public static ValidWriteIdList advanceWriteId(HiveConf conf, Table tbl) throws LockException { + if (!isTransactionalTable(tbl)) { + return null; + } + HiveTxnManager txnMgr = SessionState.get().getTxnMgr(); + long writeId = txnMgr.getTableWriteId(tbl.getDbName(), tbl.getTableName()); + List<String> txnTables = new ArrayList<>(); + String fullTableName = getFullTableName(tbl.getDbName(), tbl.getTableName()); + txnTables.add(fullTableName); + ValidTxnWriteIdList txnWriteIds; + if (conf.get(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY) != null) { + txnWriteIds = new ValidTxnWriteIdList(conf.get(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY)); + } else { + String txnString; + if (conf.get(ValidTxnList.VALID_TXNS_KEY) != null) { + txnString = conf.get(ValidTxnList.VALID_TXNS_KEY); + } else { + ValidTxnList txnIds = txnMgr.getValidTxns(); + txnString = txnIds.toString(); + } + txnWriteIds = txnMgr.getValidWriteIds(txnTables, txnString); + } + ValidWriteIdList writeIds = txnWriteIds.getTableValidWriteIdList(fullTableName); + if (writeIds != null) { + writeIds.commitWriteId(writeId); + conf.set(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY, txnWriteIds.toString()); + } + return writeIds; + } + /** * Returns ValidWriteIdList for the table with the given "dbName" and "tableName". * This is called when HiveConf has no list for the table. diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java index d412dd72d1..82fb21f2ce 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java @@ -680,7 +680,6 @@ private void stopHeartbeat() throws LockException { @Override public ValidTxnList getValidTxns() throws LockException { - assert isTxnOpen(); init(); try { return getMS().getValidTxns(txnId); @@ -692,7 +691,6 @@ public ValidTxnList getValidTxns() throws LockException { @Override public ValidTxnWriteIdList getValidWriteIds(List<String> tableList, String validTxnList) throws LockException { - assert isTxnOpen(); assert validTxnList != null && !validTxnList.isEmpty(); try { return TxnCommonUtils.createValidTxnWriteIdList( diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java index 17a2d20a00..77efa676e2 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java @@ -53,10 +53,13 @@ private HiveLockManagerCtx lockManagerCtx; + private long txnId = 0; + private int numTxn = 0; + @Override public long openTxn(Context ctx, String user) throws LockException { - // No-op - return 0L; + numTxn++; + return ++txnId; } @Override public List<Long> replOpenTxn(String replPolicy, List<Long> srcTxnIds, String user) throws LockException { @@ -65,11 +68,11 @@ public long openTxn(Context ctx, String user) throws LockException { @Override public boolean isTxnOpen() { - return false; + return numTxn != 0; } @Override public long getCurrentTxnId() { - return 0L; + return txnId; } @Override public int getStmtIdAndIncrement() { @@ -228,7 +231,7 @@ public void releaseLocks(List<HiveLock> hiveLocks) throws LockException { @Override public void commitTxn() throws LockException { - // No-op + numTxn--; } @Override @@ -238,7 +241,7 @@ public void replCommitTxn(CommitTxnRequest rqst) throws LockException { @Override public void rollbackTxn() throws LockException { - // No-op + numTxn--; }
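The DummyTxnManager change is easiest to sanity-check in isolation: isTxnOpen() must become true on openTxn() and false again once the matching commitTxn()/rollbackTxn() runs, which is why openTxn() has to increment the counter before returning. A stand-alone model of that bookkeeping (no Hive types):

final class TxnCounterModel {
  private long txnId = 0;
  private int numTxn = 0;

  long openTxn()         { numTxn++; return ++txnId; }
  boolean isTxnOpen()    { return numTxn != 0; }
  long getCurrentTxnId() { return txnId; }
  void commitTxn()       { numTxn--; }
  void rollbackTxn()     { numTxn--; }

  public static void main(String[] args) {
    TxnCounterModel m = new TxnCounterModel();
    long id = m.openTxn();
    System.out.println(m.isTxnOpen() && id == m.getCurrentTxnId()); // true
    m.commitTxn();
    System.out.println(m.isTxnOpen()); // false
  }
}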
@Override diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index 691f3ee2e9..cd1498df23 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -111,6 +111,7 @@ import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils; import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; +import org.apache.hadoop.hive.ql.Context; import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.ddl.table.partition.AlterTableAddPartitionDesc; import org.apache.hadoop.hive.ql.ddl.table.partition.AlterTableDropPartitionDesc; @@ -326,7 +327,13 @@ private static Hive getInternal(HiveConf c, boolean needsRefresh, boolean isFast } db = create(c, doRegisterAllFns); } - if (c != null) { + if (c != null && db.conf != c) { + if (db.conf.get(ValidTxnList.VALID_TXNS_KEY) != null) { + c.set(ValidTxnList.VALID_TXNS_KEY, db.conf.get(ValidTxnList.VALID_TXNS_KEY)); + } + if (db.conf.get(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY) != null) { + c.set(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY, db.conf.get(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY)); + } db.conf = c; } return db; @@ -690,6 +697,7 @@ public void alterTable(String catName, String dbName, String tblName, Table newT EnvironmentContext environmentContext, boolean transactional, long replWriteId) throws HiveException { + boolean txnOpened = false; if (catName == null) { catName = getDefaultCatalog(conf); } @@ -720,6 +728,11 @@ public void alterTable(String catName, String dbName, String tblName, Table newT replWriteId); tableSnapshot = new TableSnapshot(replWriteId, writeIds.writeToString()); } else { + if (AcidUtils.isTransactionalTable(newTbl)) { + txnOpened = openTxnIfNeeded(); + // Advance writeId for ddl on transactional table + AcidUtils.advanceWriteId(conf, newTbl); + } // Make sure we pass in the names, so we can get the correct snapshot for rename table. tableSnapshot = AcidUtils.getTableSnapshot(conf, newTbl, dbName, tblName, true); } @@ -738,6 +751,12 @@ public void alterTable(String catName, String dbName, String tblName, Table newT throw new HiveException("Unable to alter table. " + e.getMessage(), e); } catch (TException e) { throw new HiveException("Unable to alter table. 
" + e.getMessage(), e); + } finally { + if (txnOpened) { + if (SessionState.get().getTxnMgr().isTxnOpen()) { + SessionState.get().getTxnMgr().commitTxn(); + } + } } } @@ -789,6 +808,7 @@ public void alterPartition(String tblName, Partition newPart, public void alterPartition(String catName, String dbName, String tblName, Partition newPart, EnvironmentContext environmentContext, boolean transactional) throws InvalidOperationException, HiveException { + boolean txnOpened = false; try { if (catName == null) { catName = getDefaultCatalog(conf); @@ -802,6 +822,13 @@ public void alterPartition(String catName, String dbName, String tblName, Partit if (environmentContext == null) { environmentContext = new EnvironmentContext(); } + + if (AcidUtils.isTransactionalTable(newPart.getTable())) { + txnOpened = openTxnIfNeeded(); + // Advance writeId for ddl on transactional table + AcidUtils.advanceWriteId(conf, newPart.getTable()); + } + AcidUtils.TableSnapshot tableSnapshot = null; if (transactional) { tableSnapshot = AcidUtils.getTableSnapshot(conf, newPart.getTable(), true); @@ -819,6 +846,12 @@ public void alterPartition(String catName, String dbName, String tblName, Partit throw new HiveException("Unable to alter partition. " + e.getMessage(), e); } catch (TException e) { throw new HiveException("Unable to alter partition. " + e.getMessage(), e); + } finally { + if (txnOpened) { + if (SessionState.get().getTxnMgr().isTxnOpen()) { + SessionState.get().getTxnMgr().commitTxn(); + } + } } } @@ -846,10 +879,16 @@ private void validatePartition(Partition newPart) throws HiveException { public void alterPartitions(String tblName, List newParts, EnvironmentContext environmentContext, boolean transactional) throws InvalidOperationException, HiveException { + boolean txnOpened = false; String[] names = Utilities.getDbTableName(tblName); List newTParts = new ArrayList(); try { + if (AcidUtils.isTransactionalTable(newParts.get(0).getTable())) { + // Advance writeId for ddl on transactional table + txnOpened = openTxnIfNeeded(); + AcidUtils.advanceWriteId(conf, newParts.get(0).getTable()); + } AcidUtils.TableSnapshot tableSnapshot = null; if (transactional) { tableSnapshot = AcidUtils.getTableSnapshot(conf, newParts.get(0).getTable(), true); @@ -873,6 +912,12 @@ public void alterPartitions(String tblName, List newParts, throw new HiveException("Unable to alter partition. " + e.getMessage(), e); } catch (TException e) { throw new HiveException("Unable to alter partition. " + e.getMessage(), e); + } finally { + if (txnOpened) { + if (SessionState.get().getTxnMgr().isTxnOpen()) { + SessionState.get().getTxnMgr().commitTxn(); + } + } } } /** @@ -889,6 +934,7 @@ public void alterPartitions(String tblName, List newParts, public void renamePartition(Table tbl, Map oldPartSpec, Partition newPart, long replWriteId) throws HiveException { + boolean txnOpened = false; try { Map newPartSpec = newPart.getSpec(); if (oldPartSpec.keySet().size() != tbl.getPartCols().size() @@ -922,6 +968,10 @@ public void renamePartition(Table tbl, Map oldPartSpec, Partitio tbl.getTableName()), new long[0], new BitSet(), replWriteId); tableSnapshot = new TableSnapshot(replWriteId, writeIds.writeToString()); } else { + if (AcidUtils.isTransactionalTable(tbl)) { + // Advance writeId for ddl on transactional table + AcidUtils.advanceWriteId(conf, tbl); + } // Set table snapshot to api.Table to make it persistent. 
tableSnapshot = AcidUtils.getTableSnapshot(conf, tbl, true); } @@ -941,6 +991,12 @@ public void renamePartition(Table tbl, Map oldPartSpec, Partitio throw new HiveException("Unable to rename partition. " + e.getMessage(), e); } catch (TException e) { throw new HiveException("Unable to rename partition. " + e.getMessage(), e); + } finally { + if (txnOpened) { + if (SessionState.get().getTxnMgr().isTxnOpen()) { + SessionState.get().getTxnMgr().commitTxn(); + } + } } } @@ -1000,6 +1056,7 @@ public void createTable(Table tbl, boolean ifNotExists, List defaultConstraints, List checkConstraints) throws HiveException { + boolean txnOpened = false; try { if (tbl.getDbName() == null || "".equals(tbl.getDbName().trim())) { tbl.setDbName(SessionState.get().getCurrentDatabase()); @@ -1024,6 +1081,11 @@ public void createTable(Table tbl, boolean ifNotExists, tTbl.setPrivileges(principalPrivs); } } + if (AcidUtils.isTransactionalTable(tbl)) { + txnOpened = openTxnIfNeeded(); + // Advance writeId for ddl on transactional table + AcidUtils.advanceWriteId(conf, tbl); + } // Set table snapshot to api.Table to make it persistent. A transactional table being // replicated may have a valid write Id copied from the source. Use that instead of // crafting one on the replica. @@ -1049,6 +1111,12 @@ public void createTable(Table tbl, boolean ifNotExists, } } catch (Exception e) { throw new HiveException(e); + } finally { + if (txnOpened) { + if (SessionState.get().getTxnMgr().isTxnOpen()) { + SessionState.get().getTxnMgr().commitTxn(); + } + } } } @@ -1143,7 +1211,18 @@ public void dropTable(String dbName, String tableName, boolean deleteData, */ public void dropTable(String dbName, String tableName, boolean deleteData, boolean ignoreUnknownTab, boolean ifPurge) throws HiveException { + boolean txnOpened = false; try { + Table tbl = null; + try { + tbl = getTable(dbName, tableName); + } catch (InvalidTableException e) { // table may not exist; the dropTable call below handles that + } + if (tbl != null && AcidUtils.isTransactionalTable(tbl)) { + txnOpened = openTxnIfNeeded(); + // Advance writeId for ddl on transactional table + AcidUtils.advanceWriteId(conf, tbl); + } getMSC().dropTable(dbName, tableName, deleteData, ignoreUnknownTab, ifPurge); } catch (NoSuchObjectException e) { if (!ignoreUnknownTab) { @@ -1158,6 +1237,12 @@ public void dropTable(String dbName, String tableName, boolean deleteData, throw new HiveException(e); } catch (Exception e) { throw new HiveException(e); + } finally { + if (txnOpened) { + if (SessionState.get().getTxnMgr().isTxnOpen()) { + SessionState.get().getTxnMgr().commitTxn(); + } + } } } @@ -1171,13 +1256,19 @@ public void dropTable(String dbName, String tableName, boolean deleteData, * @throws HiveException */ public void truncateTable(String dbDotTableName, Map partSpec, Long writeId) throws HiveException { + boolean txnOpened = false; try { Table table = getTable(dbDotTableName, true); + AcidUtils.TableSnapshot snapshot = null; if (AcidUtils.isTransactionalTable(table)) { if (writeId <= 0) { snapshot = AcidUtils.getTableSnapshot(conf, table, true); } else { + txnOpened = openTxnIfNeeded(); + // Advance writeId for ddl on transactional table + AcidUtils.advanceWriteId(conf, table); + String fullTableName = getFullTableName(table.getDbName(), table.getTableName()); ValidWriteIdList writeIdList = getMSC().getValidWriteIds(fullTableName, writeId); snapshot = new TableSnapshot(writeId, writeIdList.writeToString()); @@ -1195,6 +1286,12 @@ public void truncateTable(String dbDotTableName, Map partSpec, L } } catch (Exception e) { throw new
HiveException(e); + } finally { + if (txnOpened) { + if (SessionState.get().getTxnMgr().isTxnOpen()) { + SessionState.get().getTxnMgr().commitTxn(); + } + } } } @@ -1261,7 +1358,7 @@ public Table getTable(final String dbName, final String tableName) throws HiveEx */ public Table getTable(final String dbName, final String tableName, boolean throwException) throws HiveException { - return this.getTable(dbName, tableName, throwException, false); + return this.getTable(dbName, tableName, throwException, true); } /** @@ -1984,48 +2081,61 @@ public Partition loadPartition(Path loadPath, Table tbl, Map par boolean isSrcLocal, boolean isAcidIUDoperation, boolean resetStatistics, Long writeId, int stmtId, boolean isInsertOverwrite) throws HiveException { + boolean txnOpened = false; + try { + PerfLogger perfLogger = SessionState.getPerfLogger(); + perfLogger.PerfLogBegin("MoveTask", PerfLogger.LOAD_PARTITION); - PerfLogger perfLogger = SessionState.getPerfLogger(); - perfLogger.PerfLogBegin("MoveTask", PerfLogger.LOAD_PARTITION); - - // Get the partition object if it already exists - Partition oldPart = getPartition(tbl, partSpec, false); - boolean isTxnTable = AcidUtils.isTransactionalTable(tbl); - - // If config is set, table is not temporary and partition being inserted exists, capture - // the list of files added. For not yet existing partitions (insert overwrite to new partition - // or dynamic partition inserts), the add partition event will capture the list of files added. - List newFiles = Collections.synchronizedList(new ArrayList<>()); + // Get the partition object if it already exists + Partition oldPart = getPartition(tbl, partSpec, false); + boolean isTxnTable = AcidUtils.isTransactionalTable(tbl); - Partition newTPart = loadPartitionInternal(loadPath, tbl, partSpec, oldPart, - loadFileType, inheritTableSpecs, - inheritLocation, isSkewedStoreAsSubdir, isSrcLocal, isAcidIUDoperation, - resetStatistics, writeId, stmtId, isInsertOverwrite, isTxnTable, newFiles); + // If config is set, table is not temporary and partition being inserted exists, capture + // the list of files added. For not yet existing partitions (insert overwrite to new partition + // or dynamic partition inserts), the add partition event will capture the list of files added. + List newFiles = Collections.synchronizedList(new ArrayList<>()); - AcidUtils.TableSnapshot tableSnapshot = isTxnTable ? getTableSnapshot(tbl, writeId) : null; - if (tableSnapshot != null) { - newTPart.getTPartition().setWriteId(tableSnapshot.getWriteId()); - } + Partition newTPart = loadPartitionInternal(loadPath, tbl, partSpec, oldPart, + loadFileType, inheritTableSpecs, + inheritLocation, isSkewedStoreAsSubdir, isSrcLocal, isAcidIUDoperation, + resetStatistics, writeId, stmtId, isInsertOverwrite, isTxnTable, newFiles); - if (oldPart == null) { - addPartitionToMetastore(newTPart, resetStatistics, tbl, tableSnapshot); - // For acid table, add the acid_write event with file list at the time of load itself. But - // it should be done after partition is created. 
- if (isTxnTable && (null != newFiles)) { - addWriteNotificationLog(tbl, partSpec, newFiles, writeId); + if (AcidUtils.isTransactionalTable(tbl)) { + txnOpened = openTxnIfNeeded(); + // Advance writeId for ddl on transactional table + AcidUtils.advanceWriteId(conf, tbl); } - } else { - try { - setStatsPropAndAlterPartition(resetStatistics, tbl, newTPart, tableSnapshot); - } catch (TException e) { - LOG.error(StringUtils.stringifyException(e)); - throw new HiveException(e); + AcidUtils.TableSnapshot tableSnapshot = isTxnTable ? getTableSnapshot(tbl, writeId) : null; + if (tableSnapshot != null) { + newTPart.getTPartition().setWriteId(tableSnapshot.getWriteId()); + } + + if (oldPart == null) { + addPartitionToMetastore(newTPart, resetStatistics, tbl, tableSnapshot); + // For acid table, add the acid_write event with file list at the time of load itself. But + // it should be done after partition is created. + if (isTxnTable && (null != newFiles)) { + addWriteNotificationLog(tbl, partSpec, newFiles, writeId); + } + } else { + try { + setStatsPropAndAlterPartition(resetStatistics, tbl, newTPart, tableSnapshot); + } catch (TException e) { + LOG.error(StringUtils.stringifyException(e)); + throw new HiveException(e); + } } - } - perfLogger.PerfLogEnd("MoveTask", PerfLogger.LOAD_PARTITION); + perfLogger.PerfLogEnd("MoveTask", PerfLogger.LOAD_PARTITION); - return newTPart; + return newTPart; + } finally { + if (txnOpened) { + if (SessionState.get().getTxnMgr().isTxnOpen()) { + SessionState.get().getTxnMgr().commitTxn(); + } + } + } } /** @@ -2633,206 +2743,219 @@ private void constructOneLBLocationMap(FileStatus fSta, final int numDP, final int numLB, final boolean isAcid, final long writeId, final int stmtId, final boolean resetStatistics, final AcidUtils.Operation operation, boolean isInsertOverwrite) throws HiveException { + boolean txnOpened = false; + try { + PerfLogger perfLogger = SessionState.getPerfLogger(); + perfLogger.PerfLogBegin("MoveTask", PerfLogger.LOAD_DYNAMIC_PARTITIONS); - PerfLogger perfLogger = SessionState.getPerfLogger(); - perfLogger.PerfLogBegin("MoveTask", PerfLogger.LOAD_DYNAMIC_PARTITIONS); - - // Get all valid partition paths and existing partitions for them (if any) - final Table tbl = getTable(tableName); - final Set validPartitions = getValidPartitionsInPath(numDP, numLB, loadPath, writeId, stmtId, - AcidUtils.isInsertOnlyTable(tbl.getParameters()), isInsertOverwrite); - - final int partsToLoad = validPartitions.size(); - final AtomicInteger partitionsLoaded = new AtomicInteger(0); - final boolean inPlaceEligible = conf.getLong("fs.trash.interval", 0) <= 0 - && InPlaceUpdate.canRenderInPlace(conf) && !SessionState.getConsole().getIsSilent(); - final PrintStream ps = (inPlaceEligible) ? 
SessionState.getConsole().getInfoStream() : null; - - final SessionState parentSession = SessionState.get(); - List> tasks = Lists.newLinkedList(); + // Get all valid partition paths and existing partitions for them (if any) + final Table tbl = getTable(tableName); + final Set validPartitions = getValidPartitionsInPath(numDP, numLB, loadPath, writeId, stmtId, + AcidUtils.isInsertOnlyTable(tbl.getParameters()), isInsertOverwrite); - final class PartitionDetails { - Map fullSpec; - Partition partition; - List newFiles; - boolean hasOldPartition = false; - AcidUtils.TableSnapshot tableSnapshot; - } + final int partsToLoad = validPartitions.size(); + final AtomicInteger partitionsLoaded = new AtomicInteger(0); + final boolean inPlaceEligible = conf.getLong("fs.trash.interval", 0) <= 0 + && InPlaceUpdate.canRenderInPlace(conf) && !SessionState.getConsole().getIsSilent(); + final PrintStream ps = (inPlaceEligible) ? SessionState.getConsole().getInfoStream() : null; - Map partitionDetailsMap = - Collections.synchronizedMap(new LinkedHashMap<>()); + final SessionState parentSession = SessionState.get(); + List> tasks = Lists.newLinkedList(); - // calculate full path spec for each valid partition path - validPartitions.forEach(partPath -> { - Map fullPartSpec = Maps.newLinkedHashMap(partSpec); - if (!Warehouse.makeSpecFromName(fullPartSpec, partPath, new HashSet<>(partSpec.keySet()))) { - Utilities.FILE_OP_LOGGER.warn("Ignoring invalid DP directory " + partPath); - } else { - PartitionDetails details = new PartitionDetails(); - details.fullSpec = fullPartSpec; - partitionDetailsMap.put(partPath, details); + final class PartitionDetails { + Map fullSpec; + Partition partition; + List newFiles; + boolean hasOldPartition = false; + AcidUtils.TableSnapshot tableSnapshot; } - }); - // fetch all the partitions matching the part spec using the partition iterable - // this way the maximum batch size configuration parameter is considered - PartitionIterable partitionIterable = new PartitionIterable(Hive.get(), tbl, partSpec, - conf.getInt(MetastoreConf.ConfVars.BATCH_RETRIEVE_MAX.getVarname(), 300)); - Iterator iterator = partitionIterable.iterator(); + Map partitionDetailsMap = + Collections.synchronizedMap(new LinkedHashMap<>()); - // Match valid partition path to partitions - while (iterator.hasNext()) { - Partition partition = iterator.next(); - partitionDetailsMap.entrySet().stream() - .filter(entry -> entry.getValue().fullSpec.equals(partition.getSpec())) - .findAny().ifPresent(entry -> { - entry.getValue().partition = partition; - entry.getValue().hasOldPartition = true; - }); - } + // calculate full path spec for each valid partition path + validPartitions.forEach(partPath -> { + Map fullPartSpec = Maps.newLinkedHashMap(partSpec); + if (!Warehouse.makeSpecFromName(fullPartSpec, partPath, new HashSet<>(partSpec.keySet()))) { + Utilities.FILE_OP_LOGGER.warn("Ignoring invalid DP directory " + partPath); + } else { + PartitionDetails details = new PartitionDetails(); + details.fullSpec = fullPartSpec; + partitionDetailsMap.put(partPath, details); + } + }); - boolean isTxnTable = AcidUtils.isTransactionalTable(tbl); - AcidUtils.TableSnapshot tableSnapshot = isTxnTable ? 
getTableSnapshot(tbl, writeId) : null; + // fetch all the partitions matching the part spec using the partition iterable + // this way the maximum batch size configuration parameter is considered + PartitionIterable partitionIterable = new PartitionIterable(Hive.get(), tbl, partSpec, + conf.getInt(MetastoreConf.ConfVars.BATCH_RETRIEVE_MAX.getVarname(), 300)); + Iterator iterator = partitionIterable.iterator(); - for (Entry entry : partitionDetailsMap.entrySet()) { - tasks.add(() -> { - PartitionDetails partitionDetails = entry.getValue(); - Map fullPartSpec = partitionDetails.fullSpec; - try { + // Match valid partition path to partitions + while (iterator.hasNext()) { + Partition partition = iterator.next(); + partitionDetailsMap.entrySet().stream() + .filter(entry -> entry.getValue().fullSpec.equals(partition.getSpec())) + .findAny().ifPresent(entry -> { + entry.getValue().partition = partition; + entry.getValue().hasOldPartition = true; + }); + } - SessionState.setCurrentSessionState(parentSession); - LOG.info("New loading path = " + entry.getKey() + " withPartSpec " + fullPartSpec); + boolean isTxnTable = AcidUtils.isTransactionalTable(tbl); + if (isTxnTable) { + txnOpened = openTxnIfNeeded(); + // Advance writeId for ddl on transactional table + AcidUtils.advanceWriteId(conf, tbl); + } + AcidUtils.TableSnapshot tableSnapshot = isTxnTable ? getTableSnapshot(tbl, writeId) : null; - List newFiles = Lists.newArrayList(); - Partition oldPartition = partitionDetails.partition; - // load the partition - Partition partition = loadPartitionInternal(entry.getKey(), tbl, - fullPartSpec, oldPartition, loadFileType, true, false, numLB > 0, false, isAcid, - resetStatistics, writeId, stmtId, isInsertOverwrite, isTxnTable, newFiles); - // if the partition already existed before the loading, no need to add it again to the - // metastore + for (Entry entry : partitionDetailsMap.entrySet()) { + tasks.add(() -> { + PartitionDetails partitionDetails = entry.getValue(); + Map fullPartSpec = partitionDetails.fullSpec; + try { - if (tableSnapshot != null) { - partition.getTPartition().setWriteId(tableSnapshot.getWriteId()); - } - partitionDetails.tableSnapshot = tableSnapshot; - if (oldPartition == null) { - partitionDetails.newFiles = newFiles; - partitionDetails.partition = partition; - } + SessionState.setCurrentSessionState(parentSession); + LOG.info("New loading path = " + entry.getKey() + " withPartSpec " + fullPartSpec); + + List newFiles = Lists.newArrayList(); + Partition oldPartition = partitionDetails.partition; + // load the partition + Partition partition = loadPartitionInternal(entry.getKey(), tbl, + fullPartSpec, oldPartition, loadFileType, true, false, numLB > 0, false, isAcid, + resetStatistics, writeId, stmtId, isInsertOverwrite, isTxnTable, newFiles); + // if the partition already existed before the loading, no need to add it again to the + // metastore + + if (tableSnapshot != null) { + partition.getTPartition().setWriteId(tableSnapshot.getWriteId()); + } + partitionDetails.tableSnapshot = tableSnapshot; + if (oldPartition == null) { + partitionDetails.newFiles = newFiles; + partitionDetails.partition = partition; + } - if (inPlaceEligible) { - synchronized (ps) { - InPlaceUpdate.rePositionCursor(ps); - partitionsLoaded.incrementAndGet(); - InPlaceUpdate.reprintLine(ps, "Loaded : " + partitionsLoaded.get() + "/" - + partsToLoad + " partitions."); + if (inPlaceEligible) { + synchronized (ps) { + InPlaceUpdate.rePositionCursor(ps); + partitionsLoaded.incrementAndGet(); + 
InPlaceUpdate.reprintLine(ps, "Loaded : " + partitionsLoaded.get() + "/" + + partsToLoad + " partitions."); + } } + + return partition; + } catch (Exception e) { + LOG.error("Exception when loading partition with parameters " + + " partPath=" + entry.getKey() + ", " + + " table=" + tbl.getTableName() + ", " + + " partSpec=" + fullPartSpec + ", " + + " loadFileType=" + loadFileType.toString() + ", " + + " listBucketingLevel=" + numLB + ", " + + " isAcid=" + isAcid + ", " + + " resetStatistics=" + resetStatistics, e); + throw e; } + }); + } - return partition; - } catch (Exception e) { - LOG.error("Exception when loading partition with parameters " - + " partPath=" + entry.getKey() + ", " - + " table=" + tbl.getTableName() + ", " - + " partSpec=" + fullPartSpec + ", " - + " loadFileType=" + loadFileType.toString() + ", " - + " listBucketingLevel=" + numLB + ", " - + " isAcid=" + isAcid + ", " - + " resetStatistics=" + resetStatistics, e); - throw e; + int poolSize = conf.getInt(ConfVars.HIVE_LOAD_DYNAMIC_PARTITIONS_THREAD_COUNT.varname, 1); + ExecutorService executor = Executors.newFixedThreadPool(poolSize, + new ThreadFactoryBuilder().setDaemon(true).setNameFormat("load-dynamic-partitionsToAdd-%d").build()); + + List> futures = Lists.newLinkedList(); + Map, Partition> result = Maps.newLinkedHashMap(); + try { + futures = executor.invokeAll(tasks); + LOG.debug("Number of partitionsToAdd to be added is " + futures.size()); + for (Future future : futures) { + Partition partition = future.get(); + result.put(partition.getSpec(), partition); + } + // add new partitions in batch + + addPartitionsToMetastore( + partitionDetailsMap.entrySet() + .stream() + .filter(entry -> !entry.getValue().hasOldPartition) + .map(entry -> entry.getValue().partition) + .collect(Collectors.toList()), + resetStatistics, + tbl, + partitionDetailsMap.entrySet() + .stream() + .filter(entry -> !entry.getValue().hasOldPartition) + .map(entry -> entry.getValue().tableSnapshot) + .collect(Collectors.toList())); + // For acid table, add the acid_write event with file list at the time of load itself. But + // it should be done after partition is created. 
+ + for (Entry entry : partitionDetailsMap.entrySet()) { + PartitionDetails partitionDetails = entry.getValue(); + if (isTxnTable && partitionDetails.newFiles != null) { + addWriteNotificationLog(tbl, partitionDetails.fullSpec, partitionDetails.newFiles, writeId); + } } - }); - } - int poolSize = conf.getInt(ConfVars.HIVE_LOAD_DYNAMIC_PARTITIONS_THREAD_COUNT.varname, 1); - ExecutorService executor = Executors.newFixedThreadPool(poolSize, - new ThreadFactoryBuilder().setDaemon(true).setNameFormat("load-dynamic-partitionsToAdd-%d").build()); - - List> futures = Lists.newLinkedList(); - Map, Partition> result = Maps.newLinkedHashMap(); - try { - futures = executor.invokeAll(tasks); - LOG.debug("Number of partitionsToAdd to be added is " + futures.size()); - for (Future future : futures) { - Partition partition = future.get(); - result.put(partition.getSpec(), partition); - } - // add new partitions in batch - - addPartitionsToMetastore( - partitionDetailsMap.entrySet() - .stream() - .filter(entry -> !entry.getValue().hasOldPartition) - .map(entry -> entry.getValue().partition) - .collect(Collectors.toList()), - resetStatistics, - tbl, - partitionDetailsMap.entrySet() - .stream() - .filter(entry -> !entry.getValue().hasOldPartition) - .map(entry -> entry.getValue().tableSnapshot) - .collect(Collectors.toList())); - // For acid table, add the acid_write event with file list at the time of load itself. But - // it should be done after partition is created. + setStatsPropAndAlterPartitions(resetStatistics, tbl, + partitionDetailsMap.entrySet().stream() + .filter(entry -> entry.getValue().hasOldPartition) + .map(entry -> entry.getValue().partition) + .collect(Collectors.toList()), tableSnapshot); - for (Entry entry : partitionDetailsMap.entrySet()) { - PartitionDetails partitionDetails = entry.getValue(); - if (isTxnTable && partitionDetails.newFiles != null) { - addWriteNotificationLog(tbl, partitionDetails.fullSpec, partitionDetails.newFiles, writeId); - } + } catch (InterruptedException | ExecutionException e) { + throw new HiveException("Exception when loading " + validPartitions.size() + + " in table " + tbl.getTableName() + + " with loadPath=" + loadPath); + } catch (TException e) { + LOG.error(StringUtils.stringifyException(e)); + throw new HiveException(e); + } catch (Exception e) { + + StringBuffer logMsg = new StringBuffer(); + logMsg.append("Exception when loading partitionsToAdd with parameters "); + logMsg.append("partPaths="); + validPartitions.forEach(path -> logMsg.append(path + ", ")); + logMsg.append("table=" + tbl.getTableName() + ", "). + append("partSpec=" + partSpec + ", "). + append("loadFileType=" + loadFileType.toString() + ", "). + append("listBucketingLevel=" + numLB + ", "). + append("isAcid=" + isAcid + ", "). 
+ append("resetStatistics=" + resetStatistics); + + LOG.error(logMsg.toString(), e); + throw e; + } finally { + LOG.debug("Cancelling " + futures.size() + " dynamic loading tasks"); + executor.shutdownNow(); } - setStatsPropAndAlterPartitions(resetStatistics, tbl, - partitionDetailsMap.entrySet().stream() - .filter(entry -> entry.getValue().hasOldPartition) - .map(entry -> entry.getValue().partition) - .collect(Collectors.toList()), tableSnapshot); + try { + if (isAcid) { + List partNames = + result.values().stream().map(Partition::getName).collect(Collectors.toList()); + getMSC().addDynamicPartitions(parentSession.getTxnMgr().getCurrentTxnId(), writeId, + tbl.getDbName(), tbl.getTableName(), partNames, + AcidUtils.toDataOperationType(operation)); + } + LOG.info("Loaded " + result.size() + "partitionsToAdd"); - } catch (InterruptedException | ExecutionException e) { - throw new HiveException("Exception when loading " + validPartitions.size() - + " in table " + tbl.getTableName() - + " with loadPath=" + loadPath); - } catch (TException e) { - LOG.error(StringUtils.stringifyException(e)); - throw new HiveException(e); - } catch (Exception e) { + perfLogger.PerfLogEnd("MoveTask", PerfLogger.LOAD_DYNAMIC_PARTITIONS); - StringBuffer logMsg = new StringBuffer(); - logMsg.append("Exception when loading partitionsToAdd with parameters "); - logMsg.append("partPaths="); - validPartitions.forEach(path -> logMsg.append(path + ", ")); - logMsg.append("table=" + tbl.getTableName() + ", "). - append("partSpec=" + partSpec + ", "). - append("loadFileType=" + loadFileType.toString() + ", "). - append("listBucketingLevel=" + numLB + ", "). - append("isAcid=" + isAcid + ", "). - append("resetStatistics=" + resetStatistics); - - LOG.error(logMsg.toString(), e); - throw e; + return result; + } catch (TException te) { + LOG.error(StringUtils.stringifyException(te)); + throw new HiveException("Exception updating metastore for acid table " + + tableName + " with partitions " + result.values(), te); + } } finally { - LOG.debug("Cancelling " + futures.size() + " dynamic loading tasks"); - executor.shutdownNow(); - } - - try { - if (isAcid) { - List partNames = - result.values().stream().map(Partition::getName).collect(Collectors.toList()); - getMSC().addDynamicPartitions(parentSession.getTxnMgr().getCurrentTxnId(), writeId, - tbl.getDbName(), tbl.getTableName(), partNames, - AcidUtils.toDataOperationType(operation)); + if (txnOpened) { + if (SessionState.get().getTxnMgr().isTxnOpen()) { + SessionState.get().getTxnMgr().commitTxn(); + } } - LOG.info("Loaded " + result.size() + "partitionsToAdd"); - - perfLogger.PerfLogEnd("MoveTask", PerfLogger.LOAD_DYNAMIC_PARTITIONS); - - return result; - } catch (TException te) { - LOG.error(StringUtils.stringifyException(te)); - throw new HiveException("Exception updating metastore for acid table " - + tableName + " with partitions " + result.values(), te); } } @@ -2987,109 +3110,135 @@ public void loadTable(Path loadPath, String tableName, LoadFileType loadFileType */ @VisibleForTesting public Partition createPartition(Table tbl, Map partSpec) throws HiveException { + boolean txnOpened = false; try { org.apache.hadoop.hive.metastore.api.Partition part = Partition.createMetaPartitionObject(tbl, partSpec, null); + if (AcidUtils.isTransactionalTable(tbl)) { + txnOpened = openTxnIfNeeded(); + // Advance writeId for ddl on transactional table + AcidUtils.advanceWriteId(conf, tbl); + } AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, tbl); 
part.setWriteId(tableSnapshot != null ? tableSnapshot.getWriteId() : 0); return new Partition(tbl, getMSC().add_partition(part)); } catch (Exception e) { LOG.error(StringUtils.stringifyException(e)); throw new HiveException(e); + } finally { + if (txnOpened) { + if (SessionState.get().getTxnMgr().isTxnOpen()) { + SessionState.get().getTxnMgr().commitTxn(); + } + } } } public List createPartitions(AlterTableAddPartitionDesc addPartitionDesc) throws HiveException { - // TODO: catalog name everywhere in this method - Table tbl = getTable(addPartitionDesc.getDbName(), addPartitionDesc.getTableName()); - int size = addPartitionDesc.getPartitionCount(); - List in = - new ArrayList(size); - long writeId; - String validWriteIdList; - - // In case of replication, get the writeId from the source and use valid write Id list - // for replication. - if (addPartitionDesc.getReplicationSpec().isInReplicationScope() && - addPartitionDesc.getPartition(0).getWriteId() > 0) { - writeId = addPartitionDesc.getPartition(0).getWriteId(); - // We need a valid writeId list for a transactional change. During replication we do not - // have a valid writeId list which was used for this on the source. But we know for sure - // that the writeId associated with it was valid then (otherwise the change would have - // failed on the source). So use a valid transaction list with only that writeId. - validWriteIdList = new ValidReaderWriteIdList(TableName.getDbTable(tbl.getDbName(), - tbl.getTableName()), - new long[0], new BitSet(), writeId).writeToString(); - } else { - AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, tbl, true); - if (tableSnapshot != null && tableSnapshot.getWriteId() > 0) { - writeId = tableSnapshot.getWriteId(); - validWriteIdList = tableSnapshot.getValidWriteIdList(); + boolean txnOpened = false; + try { + // TODO: catalog name everywhere in this method + Table tbl = getTable(addPartitionDesc.getDbName(), addPartitionDesc.getTableName()); + int size = addPartitionDesc.getPartitionCount(); + List in = + new ArrayList(size); + long writeId; + String validWriteIdList; + + // In case of replication, get the writeId from the source and use valid write Id list + // for replication. + if (addPartitionDesc.getReplicationSpec().isInReplicationScope() && + addPartitionDesc.getPartition(0).getWriteId() > 0) { + writeId = addPartitionDesc.getPartition(0).getWriteId(); + // We need a valid writeId list for a transactional change. During replication we do not + // have a valid writeId list which was used for this on the source. But we know for sure + // that the writeId associated with it was valid then (otherwise the change would have + // failed on the source). So use a valid transaction list with only that writeId. 
+ validWriteIdList = new ValidReaderWriteIdList(TableName.getDbTable(tbl.getDbName(), + tbl.getTableName()), + new long[0], new BitSet(), writeId).writeToString(); } else { - writeId = -1; - validWriteIdList = null; - } - } - for (int i = 0; i < size; ++i) { - org.apache.hadoop.hive.metastore.api.Partition tmpPart = - convertAddSpecToMetaPartition(tbl, addPartitionDesc.getPartition(i), conf); - if (tmpPart != null && writeId > 0) { - tmpPart.setWriteId(writeId); + if (AcidUtils.isTransactionalTable(tbl)) { + txnOpened = openTxnIfNeeded(); + // Advance writeId for ddl on transactional table + AcidUtils.advanceWriteId(conf, tbl); + } + AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, tbl, true); + if (tableSnapshot != null && tableSnapshot.getWriteId() > 0) { + writeId = tableSnapshot.getWriteId(); + validWriteIdList = tableSnapshot.getValidWriteIdList(); + } else { + writeId = -1; + validWriteIdList = null; + } } - in.add(tmpPart); - } - List out = new ArrayList(); - try { - if (!addPartitionDesc.getReplicationSpec().isInReplicationScope()){ - // TODO: normally, the result is not necessary; might make sense to pass false - for (org.apache.hadoop.hive.metastore.api.Partition outPart - : getMSC().add_partitions(in, addPartitionDesc.isIfNotExists(), true)) { - out.add(new Partition(tbl, outPart)); + for (int i = 0; i < size; ++i) { + org.apache.hadoop.hive.metastore.api.Partition tmpPart = + convertAddSpecToMetaPartition(tbl, addPartitionDesc.getPartition(i), conf); + if (tmpPart != null && writeId > 0) { + tmpPart.setWriteId(writeId); } - } else { - - // For replication add-ptns, we need to follow a insert-if-not-exist, alter-if-exists scenario. - // TODO : ideally, we should push this mechanism to the metastore, because, otherwise, we have - // no choice but to iterate over the partitions here. + in.add(tmpPart); + } + List out = new ArrayList(); + try { + if (!addPartitionDesc.getReplicationSpec().isInReplicationScope()){ + // TODO: normally, the result is not necessary; might make sense to pass false + for (org.apache.hadoop.hive.metastore.api.Partition outPart + : getMSC().add_partitions(in, addPartitionDesc.isIfNotExists(), true)) { + out.add(new Partition(tbl, outPart)); + } + } else { - List partsToAdd = new ArrayList<>(); - List partsToAlter = new ArrayList<>(); - List part_names = new ArrayList<>(); - for (org.apache.hadoop.hive.metastore.api.Partition p: in){ - part_names.add(Warehouse.makePartName(tbl.getPartitionKeys(), p.getValues())); - try { - org.apache.hadoop.hive.metastore.api.Partition ptn = - getMSC().getPartition(addPartitionDesc.getDbName(), addPartitionDesc.getTableName(), p.getValues()); - if (addPartitionDesc.getReplicationSpec().allowReplacementInto(ptn.getParameters())){ - ReplicationSpec.copyLastReplId(ptn.getParameters(), p.getParameters()); - partsToAlter.add(p); - } // else ptn already exists, but we do nothing with it. - } catch (NoSuchObjectException nsoe){ - // if the object does not exist, we want to add it. - partsToAdd.add(p); + // For replication add-ptns, we need to follow a insert-if-not-exist, alter-if-exists scenario. + // TODO : ideally, we should push this mechanism to the metastore, because, otherwise, we have + // no choice but to iterate over the partitions here. 
+ + List partsToAdd = new ArrayList<>(); + List partsToAlter = new ArrayList<>(); + List part_names = new ArrayList<>(); + for (org.apache.hadoop.hive.metastore.api.Partition p: in){ + part_names.add(Warehouse.makePartName(tbl.getPartitionKeys(), p.getValues())); + try { + org.apache.hadoop.hive.metastore.api.Partition ptn = + getMSC().getPartition(addPartitionDesc.getDbName(), addPartitionDesc.getTableName(), p.getValues()); + if (addPartitionDesc.getReplicationSpec().allowReplacementInto(ptn.getParameters())){ + ReplicationSpec.copyLastReplId(ptn.getParameters(), p.getParameters()); + partsToAlter.add(p); + } // else ptn already exists, but we do nothing with it. + } catch (NoSuchObjectException nsoe){ + // if the object does not exist, we want to add it. + partsToAdd.add(p); + } + } + for (org.apache.hadoop.hive.metastore.api.Partition outPart + : getMSC().add_partitions(partsToAdd, addPartitionDesc.isIfNotExists(), true)) { + out.add(new Partition(tbl, outPart)); + } + EnvironmentContext ec = new EnvironmentContext(); + // In case of replication, statistics is obtained from the source, so do not update those + // on replica. + ec.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE); + getMSC().alter_partitions(addPartitionDesc.getDbName(), addPartitionDesc.getTableName(), + partsToAlter, ec, validWriteIdList, writeId); + + for ( org.apache.hadoop.hive.metastore.api.Partition outPart : + getMSC().getPartitionsByNames(addPartitionDesc.getDbName(), addPartitionDesc.getTableName(),part_names)){ + out.add(new Partition(tbl,outPart)); } } - for (org.apache.hadoop.hive.metastore.api.Partition outPart - : getMSC().add_partitions(partsToAdd, addPartitionDesc.isIfNotExists(), true)) { - out.add(new Partition(tbl, outPart)); - } - EnvironmentContext ec = new EnvironmentContext(); - // In case of replication, statistics is obtained from the source, so do not update those - // on replica. 
- ec.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE); - getMSC().alter_partitions(addPartitionDesc.getDbName(), addPartitionDesc.getTableName(), - partsToAlter, ec, validWriteIdList, writeId); - - for ( org.apache.hadoop.hive.metastore.api.Partition outPart : - getMSC().getPartitionsByNames(addPartitionDesc.getDbName(), addPartitionDesc.getTableName(),part_names)){ - out.add(new Partition(tbl,outPart)); + } catch (Exception e) { + LOG.error(StringUtils.stringifyException(e)); + throw new HiveException(e); + } + return out; + } finally { + if (txnOpened) { + if (SessionState.get().getTxnMgr().isTxnOpen()) { + SessionState.get().getTxnMgr().commitTxn(); } } - } catch (Exception e) { - LOG.error(StringUtils.stringifyException(e)); - throw new HiveException(e); } - return out; } public static org.apache.hadoop.hive.metastore.api.Partition convertAddSpecToMetaPartition( @@ -3428,12 +3577,25 @@ public boolean dropPartition(String db_name, String tbl_name, public boolean dropPartition(String dbName, String tableName, List partVals, PartitionDropOptions options) throws HiveException { + boolean txnOpened = false; try { + Table tbl = getTable(dbName, tableName); + if (AcidUtils.isTransactionalTable(tbl)) { + // Advance writeId for ddl on transactional table + txnOpened = openTxnIfNeeded(); + AcidUtils.advanceWriteId(conf, tbl); + } return getMSC().dropPartition(dbName, tableName, partVals, options); } catch (NoSuchObjectException e) { throw new HiveException("Partition or table doesn't exist.", e); } catch (Exception e) { throw new HiveException(e.getMessage(), e); + } finally { + if (txnOpened) { + if (SessionState.get().getTxnMgr().isTxnOpen()) { + SessionState.get().getTxnMgr().commitTxn(); + } + } } } @@ -3536,8 +3698,14 @@ public boolean dropPartition(String dbName, String tableName, List partV public List dropPartitions(String dbName, String tblName, List partSpecs, PartitionDropOptions dropOptions) throws HiveException { + boolean txnOpened = false; try { Table tbl = getTable(dbName, tblName); + if (AcidUtils.isTransactionalTable(tbl)) { + txnOpened = openTxnIfNeeded(); + // Advance writeId for ddl on transactional table + AcidUtils.advanceWriteId(conf, tbl); + } List> partExprs = new ArrayList<>(partSpecs.size()); for (AlterTableDropPartitionDesc.PartitionDesc partSpec : partSpecs) { @@ -3551,6 +3719,12 @@ public boolean dropPartition(String dbName, String tableName, List partV throw new HiveException("Partition or table doesn't exist.", e); } catch (Exception e) { throw new HiveException(e.getMessage(), e); + } finally { + if (txnOpened) { + if (SessionState.get().getTxnMgr().isTxnOpen()) { + SessionState.get().getTxnMgr().commitTxn(); + } + } } } @@ -4981,7 +5155,19 @@ public static boolean isHadoop1() { public List exchangeTablePartitions(Map partitionSpecs, String sourceDb, String sourceTable, String destDb, String destinationTableName) throws HiveException { + boolean txnOpened = false; try { + Table srcTbl = getTable(sourceDb, sourceTable); + if (AcidUtils.isTransactionalTable(srcTbl)) { + txnOpened = openTxnIfNeeded(); + // Advance writeId for ddl on transactional table + AcidUtils.advanceWriteId(conf, srcTbl); + } + Table descTbl = getTable(destDb, destinationTableName); + if (AcidUtils.isTransactionalTable(descTbl)) { + // Advance writeId for ddl on transactional table + AcidUtils.advanceWriteId(conf, descTbl); + } List partitions = getMSC().exchange_partitions(partitionSpecs, sourceDb, sourceTable, destDb, destinationTableName); @@ -4990,6 +5176,12 @@ 
public static boolean isHadoop1() { } catch (Exception ex) { LOG.error(StringUtils.stringifyException(ex)); throw new HiveException(ex); + } finally { + if (txnOpened) { + if (SessionState.get().getTxnMgr().isTxnOpen()) { + SessionState.get().getTxnMgr().rollbackTxn(); + } + } } } @@ -5216,21 +5408,47 @@ public AggrStats getAggrColStatsFor(String dbName, String tblName, public boolean deleteTableColumnStatistics(String dbName, String tableName, String colName) throws HiveException { + boolean txnOpened = false; try { + Table tbl = getTable(dbName, tableName); + if (AcidUtils.isTransactionalTable(tbl)) { + txnOpened = openTxnIfNeeded(); + // Advance writeId for ddl on transactional table + AcidUtils.advanceWriteId(conf, tbl); + } return getMSC().deleteTableColumnStatistics(dbName, tableName, colName); } catch(Exception e) { LOG.debug(StringUtils.stringifyException(e)); throw new HiveException(e); + } finally { + if (txnOpened) { + if (SessionState.get().getTxnMgr().isTxnOpen()) { + SessionState.get().getTxnMgr().rollbackTxn(); + } + } } } public boolean deletePartitionColumnStatistics(String dbName, String tableName, String partName, String colName) throws HiveException { + boolean txnOpened = false; try { + Table tbl = getTable(dbName, tableName); + if (AcidUtils.isTransactionalTable(tbl)) { + txnOpened = openTxnIfNeeded(); + // Advance writeId for ddl on transactional table + AcidUtils.advanceWriteId(conf, tbl); + } return getMSC().deletePartitionColumnStatistics(dbName, tableName, partName, colName); } catch(Exception e) { LOG.debug(StringUtils.stringifyException(e)); throw new HiveException(e); + } finally { + if (txnOpened) { + if (SessionState.get().getTxnMgr().isTxnOpen()) { + SessionState.get().getTxnMgr().rollbackTxn(); + } + } } } @@ -5475,12 +5693,25 @@ public void cacheFileMetadata( public void dropConstraint(String dbName, String tableName, String constraintName) throws HiveException, NoSuchObjectException { + boolean txnOpened = false; try { + Table tbl = getTable(dbName, tableName); + if (AcidUtils.isTransactionalTable(tbl)) { + txnOpened = openTxnIfNeeded(); + // Advance writeId for ddl on transactional table + AcidUtils.advanceWriteId(conf, tbl); + } getMSC().dropConstraint(dbName, tableName, constraintName); } catch (NoSuchObjectException e) { throw e; } catch (Exception e) { throw new HiveException(e); + } finally { + if (txnOpened) { + if (SessionState.get().getTxnMgr().isTxnOpen()) { + SessionState.get().getTxnMgr().rollbackTxn(); + } + } } } @@ -5806,55 +6037,133 @@ public CheckConstraint getCheckConstraints(String dbName, String tblName) public void addPrimaryKey(List primaryKeyCols) throws HiveException, NoSuchObjectException { + boolean txnOpened = false; try { + Table tbl = getTable(primaryKeyCols.get(0).getTable_db(), primaryKeyCols.get(0).getTable_name()); + if (AcidUtils.isTransactionalTable(tbl)) { + txnOpened = openTxnIfNeeded(); + // Advance writeId for ddl on transactional table + AcidUtils.advanceWriteId(conf, tbl); + } getMSC().addPrimaryKey(primaryKeyCols); } catch (Exception e) { throw new HiveException(e); + } finally { + if (txnOpened) { + if (SessionState.get().getTxnMgr().isTxnOpen()) { + SessionState.get().getTxnMgr().rollbackTxn(); + } + } } } public void addForeignKey(List foreignKeyCols) throws HiveException, NoSuchObjectException { + boolean txnOpened = false; try { + Table tbl = getTable(foreignKeyCols.get(0).getFktable_db(), foreignKeyCols.get(0).getFktable_name()); + if (AcidUtils.isTransactionalTable(tbl)) { + txnOpened = openTxnIfNeeded(); 
+ // Advance writeId for ddl on transactional table + AcidUtils.advanceWriteId(conf, tbl); + } getMSC().addForeignKey(foreignKeyCols); } catch (Exception e) { throw new HiveException(e); + } finally { + if (txnOpened) { + if (SessionState.get().getTxnMgr().isTxnOpen()) { + SessionState.get().getTxnMgr().rollbackTxn(); + } + } } } public void addUniqueConstraint(List uniqueConstraintCols) throws HiveException, NoSuchObjectException { + boolean txnOpened = false; try { + Table tbl = getTable(uniqueConstraintCols.get(0).getTable_db(), uniqueConstraintCols.get(0).getTable_name()); + if (AcidUtils.isTransactionalTable(tbl)) { + txnOpened = openTxnIfNeeded(); + // Advance writeId for ddl on transactional table + AcidUtils.advanceWriteId(conf, tbl); + } getMSC().addUniqueConstraint(uniqueConstraintCols); } catch (Exception e) { throw new HiveException(e); + } finally { + if (txnOpened) { + if (SessionState.get().getTxnMgr().isTxnOpen()) { + SessionState.get().getTxnMgr().rollbackTxn(); + } + } } } public void addNotNullConstraint(List notNullConstraintCols) throws HiveException, NoSuchObjectException { + boolean txnOpened = false; try { + Table tbl = getTable(notNullConstraintCols.get(0).getTable_db(), notNullConstraintCols.get(0).getTable_name()); + if (AcidUtils.isTransactionalTable(tbl)) { + txnOpened = openTxnIfNeeded(); + // Advance writeId for ddl on transactional table + AcidUtils.advanceWriteId(conf, tbl); + } getMSC().addNotNullConstraint(notNullConstraintCols); } catch (Exception e) { throw new HiveException(e); + } finally { + if (txnOpened) { + if (SessionState.get().getTxnMgr().isTxnOpen()) { + SessionState.get().getTxnMgr().rollbackTxn(); + } + } } } public void addDefaultConstraint(List defaultConstraints) throws HiveException, NoSuchObjectException { + boolean txnOpened = false; try { + Table tbl = getTable(defaultConstraints.get(0).getTable_db(), defaultConstraints.get(0).getTable_name()); + if (AcidUtils.isTransactionalTable(tbl)) { + txnOpened = openTxnIfNeeded(); + // Advance writeId for ddl on transactional table + AcidUtils.advanceWriteId(conf, tbl); + } getMSC().addDefaultConstraint(defaultConstraints); } catch (Exception e) { throw new HiveException(e); + } finally { + if (txnOpened) { + if (SessionState.get().getTxnMgr().isTxnOpen()) { + SessionState.get().getTxnMgr().rollbackTxn(); + } + } } } public void addCheckConstraint(List checkConstraints) throws HiveException, NoSuchObjectException { + boolean txnOpened = false; try { + Table tbl = getTable(checkConstraints.get(0).getTable_db(), checkConstraints.get(0).getTable_name()); + if (AcidUtils.isTransactionalTable(tbl)) { + txnOpened = openTxnIfNeeded(); + // Advance writeId for ddl on transactional table + AcidUtils.advanceWriteId(conf, tbl); + } getMSC().addCheckConstraint(checkConstraints); } catch (Exception e) { throw new HiveException(e); + } finally { + if (txnOpened) { + if (SessionState.get().getTxnMgr().isTxnOpen()) { + SessionState.get().getTxnMgr().rollbackTxn(); + } + } } } @@ -6066,5 +6375,22 @@ public StorageHandlerInfo getStorageHandlerInfo(Table table) throw new HiveException(e); } } + + private boolean openTxnIfNeeded() throws HiveException { + try { + if (SessionState.get().getTxnMgr() == null) { + SessionState.get().initTxnMgr(conf); + } + HiveTxnManager txnMgr = SessionState.get().getTxnMgr(); + if (!txnMgr.isTxnOpen()) { + Context ctx = new Context(conf); + txnMgr.openTxn(ctx, SessionState.getUserFromAuthenticator()); + return true; + } + return false; + } catch (Exception e) { + throw new 
HiveException(e); + } + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java index d39a0b487f..0d944993b4 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java @@ -178,43 +178,20 @@ public void truncateTable(String dbName, String tableName, super.truncateTable(dbName, tableName, partNames, validWriteIds, writeId); } - @Override - public org.apache.hadoop.hive.metastore.api.Table getTable(String dbname, String name) throws MetaException, - TException, NoSuchObjectException { - return getTable(dbname, name, false); - } - - @Override - public org.apache.hadoop.hive.metastore.api.Table getTable(String dbname, String name, - boolean getColStats) throws MetaException, - TException, NoSuchObjectException { - // First check temp tables - org.apache.hadoop.hive.metastore.api.Table table = getTempTable(dbname, name); - if (table != null) { - return deepCopy(table); // Original method used deepCopy(), do the same here. - } - // Try underlying client - return super.getTable(MetaStoreUtils.getDefaultCatalog(conf), dbname, name, getColStats); - } - // Need to override this one too or dropTable breaks because it doesn't find the table when checks // before the drop. @Override public org.apache.hadoop.hive.metastore.api.Table getTable(String catName, String dbName, - String tableName) throws TException { - return getTable(catName, dbName, tableName, false); - } - - // Need to override this one too or dropTable breaks because it doesn't find the table when checks - // before the drop. - @Override - public org.apache.hadoop.hive.metastore.api.Table getTable(String catName, String dbName, - String tableName, boolean getColStats) + String tableName, String validWriteIdList, boolean getColStats) throws TException { if (!DEFAULT_CATALOG_NAME.equals(catName)) { - return super.getTable(catName, dbName, tableName, getColStats); + return super.getTable(catName, dbName, tableName, validWriteIdList, getColStats); } else { - return getTable(dbName, tableName, getColStats); + org.apache.hadoop.hive.metastore.api.Table table = getTempTable(dbName, tableName); + if (table != null) { + return deepCopy(table); // Original method used deepCopy(), do the same here. 
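
Every DDL-path method in the Hive.java hunks above repeats the same bracketing: open a transaction if none is active, advance the table's write id so the change is visible as a new snapshot, then roll back in finally any transaction the method itself opened. The txnOpened flag is the important detail: a transaction opened by an enclosing statement must not be rolled back here. A minimal sketch of how that boilerplate could be factored out, assuming the same SessionState, HiveTxnManager, and AcidUtils APIs the hunks already use (the withDdlWriteId helper is hypothetical, not part of this patch):

```java
// Hypothetical consolidation of the pattern repeated above (not part of the
// patch). Assumes it lives in Hive.java next to openTxnIfNeeded(), so `conf`
// and the existing imports are in scope; also needs java.util.concurrent.Callable.
private <T> T withDdlWriteId(Table tbl, Callable<T> ddl) throws HiveException {
  boolean txnOpened = false;
  try {
    if (AcidUtils.isTransactionalTable(tbl)) {
      txnOpened = openTxnIfNeeded();
      // Advance the writeId for DDL on a transactional table, as each hunk does.
      AcidUtils.advanceWriteId(conf, tbl);
    }
    return ddl.call();
  } catch (Exception e) {
    throw new HiveException(e);
  } finally {
    // Roll back only a transaction this call itself opened.
    if (txnOpened && SessionState.get().getTxnMgr().isTxnOpen()) {
      SessionState.get().getTxnMgr().rollbackTxn();
    }
  }
}

// Usage, mirroring deleteTableColumnStatistics above:
// return withDdlWriteId(tbl,
//     () -> getMSC().deleteTableColumnStatistics(dbName, tableName, colName));
```
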
+ } + return super.getTable(catName, dbName, tableName, validWriteIdList, getColStats); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java index 6d7af382bf..3a3af367e8 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java @@ -141,8 +141,8 @@ public void analyzeInternal(ASTNode ast) throws SemanticException { String query = ctx.getTokenRewriteStream().toString(input.getTokenStartIndex(), input.getTokenStopIndex()); LOG.info("Explain analyze (running phase) for query " + query); - conf.unset(ValidTxnList.VALID_TXNS_KEY); - conf.unset(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY); +// conf.unset(ValidTxnList.VALID_TXNS_KEY); +// conf.unset(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY); Context runCtx = null; try { runCtx = new Context(conf); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index 7c58072413..3c1b21a944 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -67,10 +67,12 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.hive.common.FileUtils; +import org.apache.hadoop.hive.common.JavaUtils; import org.apache.hadoop.hive.common.ObjectPair; import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.common.StatsSetupConst.StatDB; import org.apache.hadoop.hive.common.StringInternUtils; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.common.ValidTxnList; import org.apache.hadoop.hive.common.ValidTxnWriteIdList; import org.apache.hadoop.hive.common.metrics.common.MetricsConstant; @@ -12267,7 +12269,30 @@ else if(ast.getChild(0).getType() == HiveParser.TOK_FALSE) { } LOG.info("Completed phase 1 of Semantic Analysis"); - // 5. Resolve Parse Tree + // 5. Get write ids for tables + if (getTxnMgr().supportsAcid()) { + List<String> tabNames = new ArrayList<>(); + for (String alias : qb.getTabAliases()) { + String tabName = qb.getTabNameForAlias(alias); + tabName = TableName.fromString(tabName, SessionState.get().getCurrentCatalog(), SessionState.get().getCurrentDatabase()).getDbTable(); + tabNames.add(tabName); + } + + String txnString = conf.get(ValidTxnList.VALID_TXNS_KEY); + if ((txnString == null) || (txnString.isEmpty())) { + throw new IllegalStateException("calling recordValidWriteIds() without initializing ValidTxnList " + + JavaUtils.txnIdToString(getTxnMgr().getCurrentTxnId())); + } + try { + ValidTxnWriteIdList txnWriteIds = getTxnMgr().getValidWriteIds(tabNames, txnString); + db.getMSC().setValidWriteIdList(txnWriteIds.toString()); + Hive.get().getMSC().setValidWriteIdList(txnWriteIds.toString()); + } catch (HiveException|MetaException e) { + throw new SemanticException("Failed to fetch write IDs", e); + } + } + + // 6.
Resolve Parse Tree // Materialization is allowed if it is not a view definition getMetaData(qb, createVwDesc == null); LOG.info("Completed getting MetaData in Semantic Analysis"); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationPreEventListener.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationPreEventListener.java index 2cc057ee6e..b4da7d4354 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationPreEventListener.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationPreEventListener.java @@ -467,7 +467,7 @@ public PartitionWrapper(org.apache.hadoop.hive.metastore.api.Partition mapiPart, String catName = mapiPart.isSetCatName() ? mapiPart.getCatName() : MetaStoreUtils.getDefaultCatalog(context.getHandler().getConf()); org.apache.hadoop.hive.metastore.api.Table t = context.getHandler().get_table_core( - catName, mapiPart.getDbName(), mapiPart.getTableName()); + catName, mapiPart.getDbName(), mapiPart.getTableName(), null); if (wrapperApiPart.getSd() == null){ // In the cases of create partition, by the time this event fires, the partition // object has not yet come into existence, and thus will not yet have a diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUpdaterThread.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUpdaterThread.java index 8acb1c54db..fbabf15fd6 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUpdaterThread.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUpdaterThread.java @@ -213,7 +213,7 @@ private void stopWorkers() { throws MetaException, NoSuchTxnException, NoSuchObjectException { if (isAnalyzeTableInProgress(fullTableName)) return null; String cat = fullTableName.getCat(), db = fullTableName.getDb(), tbl = fullTableName.getTable(); - Table table = rs.getTable(cat, db, tbl); + Table table = rs.getTable(cat, db, tbl, null); LOG.debug("Processing table {}", table); // Check if the table should be skipped. @@ -297,7 +297,7 @@ private void stopWorkers() { try { colsPerPartition = rs.getPartitionColsWithStats(cat, db, tbl); partNames = Lists.newArrayList(colsPerPartition.keySet()); - int partitionCount = rs.getNumPartitionsByFilter(cat, db, tbl, ""); + int partitionCount = rs.getNumPartitionsByFilter(cat, db, tbl, "", null); isAllParts = partitionCount == partNames.size(); isOk = true; } finally { @@ -308,10 +308,10 @@ private void stopWorkers() { } } } else { - partNames = rs.listPartitionNames(cat, db, tbl, (short) -1); + partNames = rs.listPartitionNames(cat, db, tbl, (short) -1, null); isAllParts = true; } - Table t = rs.getTable(cat, db, tbl); + Table t = rs.getTable(cat, db, tbl, null); List currentBatch = null; int nextBatchStart = 0, nextIxInBatch = -1, currentBatchStart = 0; List colsToUpdateForAll = null; @@ -325,7 +325,7 @@ private void stopWorkers() { currentBatchStart = nextBatchStart; nextBatchStart = nextBatchEnd; try { - currentBatch = rs.getPartitionsByNames(cat, db, tbl, currentNames); + currentBatch = rs.getPartitionsByNames(cat, db, tbl, currentNames, null); } catch (NoSuchObjectException e) { LOG.error("Failed to get partitions for " + fullTableName + ", skipping some partitions", e); currentBatch = null; @@ -444,7 +444,7 @@ private String buildPartColStr(Table table) { try { // Note: this should NOT do txn verification - we want to get outdated stats, to // see if we need to update anything. 
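
The RawStore and handler read paths above all gain a trailing validWriteIdList argument. Background threads such as the stats updater and compactor deliberately pass null because they want the latest committed metadata rather than a transactional snapshot, exactly as the "should NOT do txn verification" comment says. A sketch of the two call shapes, with variable names assumed for illustration:

```java
// Sketch of the widened RawStore read call above (names assumed). Passing null
// for the trailing validWriteIdList opts out of write-id snapshot checking; the
// stats updater and compactor intentionally read possibly-outdated metadata.
Table latest = rs.getTable(catName, dbName, tableName, null);

// A transactional caller would instead pass the snapshot it recorded, e.g. the
// string SemanticAnalyzer now pushes via setValidWriteIdList(txnWriteIds.toString()).
Table snapshot = rs.getTable(catName, dbName, tableName, validWriteIdList);
```
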
- existingStats = rs.getTableColumnStatistics(cat, db, tbl, allCols); + existingStats = rs.getTableColumnStatistics(cat, db, tbl, allCols, null); } catch (NoSuchObjectException e) { LOG.error("Cannot retrieve existing stats, skipping " + fullTableName, e); return null; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/MetaStoreCompactorThread.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/MetaStoreCompactorThread.java index a6dd4fa003..582c4bfe48 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/MetaStoreCompactorThread.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/MetaStoreCompactorThread.java @@ -67,7 +67,7 @@ public void init(AtomicBoolean stop, AtomicBoolean looped) throws Exception { @Override Table resolveTable(CompactionInfo ci) throws MetaException { try { - return rs.getTable(getDefaultCatalog(conf), ci.dbname, ci.tableName); + return rs.getTable(getDefaultCatalog(conf), ci.dbname, ci.tableName, null); } catch (MetaException e) { LOG.error("Unable to find table " + ci.getFullTableName() + ", " + e.getMessage()); throw e; @@ -88,7 +88,7 @@ public void init(AtomicBoolean stop, AtomicBoolean looped) throws Exception { @Override List getPartitionsByNames(CompactionInfo ci) throws MetaException { try { return rs.getPartitionsByNames(getDefaultCatalog(conf), ci.dbname, ci.tableName, - Collections.singletonList(ci.partName)); + Collections.singletonList(ci.partName), null); } catch (MetaException e) { LOG.error("Unable to get partitions by name for CompactionInfo=" + ci); throw e; diff --git a/ql/src/test/queries/clientpositive/compute_stats_date.q b/ql/src/test/queries/clientpositive/compute_stats_date.q index bf478526ba..2faabdc33b 100644 --- a/ql/src/test/queries/clientpositive/compute_stats_date.q +++ b/ql/src/test/queries/clientpositive/compute_stats_date.q @@ -23,6 +23,6 @@ analyze table tab_date compute statistics for columns fl_date; describe formatted tab_date fl_date; -- Update stats manually. Try both yyyy-mm-dd and integer value for high/low value -alter table tab_date update statistics for column fl_date set ('numDVs'='19', 'highValue'='2015-01-01', 'lowValue'='0'); +alter table tab_date update statistics for column fl_date set ('numDVs'='19', 'highValue'='2015-01-01', 'lowValue'='0', 'numNulls'='0'); describe formatted tab_date fl_date; diff --git a/ql/src/test/queries/clientpositive/lock4.q b/ql/src/test/queries/clientpositive/lock4.q index 256ca9deb4..1c40c2a798 100644 --- a/ql/src/test/queries/clientpositive/lock4.q +++ b/ql/src/test/queries/clientpositive/lock4.q @@ -1,5 +1,4 @@ --! 
qt:dataset:srcpart -set hive.lock.mapred.only.operation=true; drop table tstsrcpart_n3; create table tstsrcpart_n3 like srcpart; diff --git a/ql/src/test/queries/clientpositive/perf/query1.q b/ql/src/test/queries/clientpositive/perf/query1.q index a8d70727f1..5f69772b7a 100644 --- a/ql/src/test/queries/clientpositive/perf/query1.q +++ b/ql/src/test/queries/clientpositive/perf/query1.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query1.tpl and seed 2031708268 explain with customer_total_return as diff --git a/ql/src/test/queries/clientpositive/perf/query10.q b/ql/src/test/queries/clientpositive/perf/query10.q index d3b1be7a75..72f7cc35ac 100644 --- a/ql/src/test/queries/clientpositive/perf/query10.q +++ b/ql/src/test/queries/clientpositive/perf/query10.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query10.tpl and seed 797269820 explain select diff --git a/ql/src/test/queries/clientpositive/perf/query11.q b/ql/src/test/queries/clientpositive/perf/query11.q index 6017c89790..0062dcf873 100644 --- a/ql/src/test/queries/clientpositive/perf/query11.q +++ b/ql/src/test/queries/clientpositive/perf/query11.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query11.tpl and seed 1819994127 explain with year_total as ( diff --git a/ql/src/test/queries/clientpositive/perf/query12.q b/ql/src/test/queries/clientpositive/perf/query12.q index 59b50acb46..14225be2fa 100644 --- a/ql/src/test/queries/clientpositive/perf/query12.q +++ b/ql/src/test/queries/clientpositive/perf/query12.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query12.tpl and seed 345591136 explain select i_item_desc diff --git a/ql/src/test/queries/clientpositive/perf/query13.q b/ql/src/test/queries/clientpositive/perf/query13.q index dca19b0161..0b49fb4673 100644 --- a/ql/src/test/queries/clientpositive/perf/query13.q +++ b/ql/src/test/queries/clientpositive/perf/query13.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query13.tpl and seed 622697896 explain select avg(ss_quantity) diff --git a/ql/src/test/queries/clientpositive/perf/query14.q b/ql/src/test/queries/clientpositive/perf/query14.q index c12ecb56c4..9ba2a77d40 100644 --- a/ql/src/test/queries/clientpositive/perf/query14.q +++ b/ql/src/test/queries/clientpositive/perf/query14.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query14.tpl and seed 1819994127 explain with cross_items as diff --git a/ql/src/test/queries/clientpositive/perf/query15.q b/ql/src/test/queries/clientpositive/perf/query15.q index 9e1711a1d2..385c91c8fc 100644 --- a/ql/src/test/queries/clientpositive/perf/query15.q +++ b/ql/src/test/queries/clientpositive/perf/query15.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query15.tpl and seed 1819994127 explain select ca_zip diff --git a/ql/src/test/queries/clientpositive/perf/query16.q b/ql/src/test/queries/clientpositive/perf/query16.q index 05625f71aa..cae35456bb 100644 --- a/ql/src/test/queries/clientpositive/perf/query16.q +++ 
b/ql/src/test/queries/clientpositive/perf/query16.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query16.tpl and seed 171719422 explain select diff --git a/ql/src/test/queries/clientpositive/perf/query17.q b/ql/src/test/queries/clientpositive/perf/query17.q index 0cd4201f51..ed1c37c331 100644 --- a/ql/src/test/queries/clientpositive/perf/query17.q +++ b/ql/src/test/queries/clientpositive/perf/query17.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query17.tpl and seed 1819994127 explain select i_item_id diff --git a/ql/src/test/queries/clientpositive/perf/query18.q b/ql/src/test/queries/clientpositive/perf/query18.q index bf1ff5983b..c8960dc7dc 100644 --- a/ql/src/test/queries/clientpositive/perf/query18.q +++ b/ql/src/test/queries/clientpositive/perf/query18.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query18.tpl and seed 1978355063 explain select i_item_id, diff --git a/ql/src/test/queries/clientpositive/perf/query19.q b/ql/src/test/queries/clientpositive/perf/query19.q index 5768e4b04e..351e60a18b 100644 --- a/ql/src/test/queries/clientpositive/perf/query19.q +++ b/ql/src/test/queries/clientpositive/perf/query19.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query19.tpl and seed 1930872976 explain select i_brand_id brand_id, i_brand brand, i_manufact_id, i_manufact, diff --git a/ql/src/test/queries/clientpositive/perf/query2.q b/ql/src/test/queries/clientpositive/perf/query2.q index 26a52ef264..c64ce1e1af 100644 --- a/ql/src/test/queries/clientpositive/perf/query2.q +++ b/ql/src/test/queries/clientpositive/perf/query2.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query2.tpl and seed 1819994127 explain with wscs as diff --git a/ql/src/test/queries/clientpositive/perf/query20.q b/ql/src/test/queries/clientpositive/perf/query20.q index c5f8848fb3..6d116a6859 100644 --- a/ql/src/test/queries/clientpositive/perf/query20.q +++ b/ql/src/test/queries/clientpositive/perf/query20.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query20.tpl and seed 345591136 explain select i_item_desc diff --git a/ql/src/test/queries/clientpositive/perf/query21.q b/ql/src/test/queries/clientpositive/perf/query21.q index 34b458b709..e328ee90e9 100644 --- a/ql/src/test/queries/clientpositive/perf/query21.q +++ b/ql/src/test/queries/clientpositive/perf/query21.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query21.tpl and seed 1819994127 explain select * diff --git a/ql/src/test/queries/clientpositive/perf/query22.q b/ql/src/test/queries/clientpositive/perf/query22.q index 70491731f8..29faf7df77 100644 --- a/ql/src/test/queries/clientpositive/perf/query22.q +++ b/ql/src/test/queries/clientpositive/perf/query22.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query22.tpl and seed 1819994127 explain select i_product_name diff --git a/ql/src/test/queries/clientpositive/perf/query23.q 
b/ql/src/test/queries/clientpositive/perf/query23.q index 1e02655927..ae3e5dfbc3 100644 --- a/ql/src/test/queries/clientpositive/perf/query23.q +++ b/ql/src/test/queries/clientpositive/perf/query23.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query23.tpl and seed 2031708268 explain with frequent_ss_items as diff --git a/ql/src/test/queries/clientpositive/perf/query24.q b/ql/src/test/queries/clientpositive/perf/query24.q index b3cdaef4a5..e9d00828cd 100644 --- a/ql/src/test/queries/clientpositive/perf/query24.q +++ b/ql/src/test/queries/clientpositive/perf/query24.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query24.tpl and seed 1220860970 explain with ssales as diff --git a/ql/src/test/queries/clientpositive/perf/query25.q b/ql/src/test/queries/clientpositive/perf/query25.q index 358cdc58b4..9d1369f617 100644 --- a/ql/src/test/queries/clientpositive/perf/query25.q +++ b/ql/src/test/queries/clientpositive/perf/query25.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query25.tpl and seed 1819994127 explain select diff --git a/ql/src/test/queries/clientpositive/perf/query26.q b/ql/src/test/queries/clientpositive/perf/query26.q index b35d98ccbc..e56416d196 100644 --- a/ql/src/test/queries/clientpositive/perf/query26.q +++ b/ql/src/test/queries/clientpositive/perf/query26.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query26.tpl and seed 1930872976 explain select i_item_id, diff --git a/ql/src/test/queries/clientpositive/perf/query27.q b/ql/src/test/queries/clientpositive/perf/query27.q index ec09e1d3af..b74cc8d092 100644 --- a/ql/src/test/queries/clientpositive/perf/query27.q +++ b/ql/src/test/queries/clientpositive/perf/query27.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query27.tpl and seed 2017787633 explain select i_item_id, diff --git a/ql/src/test/queries/clientpositive/perf/query28.q b/ql/src/test/queries/clientpositive/perf/query28.q index fc3c1b2d40..83caa9037a 100644 --- a/ql/src/test/queries/clientpositive/perf/query28.q +++ b/ql/src/test/queries/clientpositive/perf/query28.q @@ -1,5 +1,6 @@ set hive.mapred.mode=nonstrict; set hive.optimize.metadataonly=true; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query28.tpl and seed 444293455 explain diff --git a/ql/src/test/queries/clientpositive/perf/query29.q b/ql/src/test/queries/clientpositive/perf/query29.q index 8bf4d512ed..4d193d0759 100644 --- a/ql/src/test/queries/clientpositive/perf/query29.q +++ b/ql/src/test/queries/clientpositive/perf/query29.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query29.tpl and seed 2031708268 explain select diff --git a/ql/src/test/queries/clientpositive/perf/query3.q b/ql/src/test/queries/clientpositive/perf/query3.q index a70a62fd88..156f0bf1c5 100644 --- a/ql/src/test/queries/clientpositive/perf/query3.q +++ b/ql/src/test/queries/clientpositive/perf/query3.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query3.tpl and seed 
2031708268 explain select dt.d_year diff --git a/ql/src/test/queries/clientpositive/perf/query30.q b/ql/src/test/queries/clientpositive/perf/query30.q index 47f0d935ea..af0a1c9b5a 100644 --- a/ql/src/test/queries/clientpositive/perf/query30.q +++ b/ql/src/test/queries/clientpositive/perf/query30.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query30.tpl and seed 1819994127 explain with customer_total_return as diff --git a/ql/src/test/queries/clientpositive/perf/query31.q b/ql/src/test/queries/clientpositive/perf/query31.q index 42c3ca6e27..a62b6c42c9 100644 --- a/ql/src/test/queries/clientpositive/perf/query31.q +++ b/ql/src/test/queries/clientpositive/perf/query31.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query31.tpl and seed 1819994127 explain with ss as diff --git a/ql/src/test/queries/clientpositive/perf/query32.q b/ql/src/test/queries/clientpositive/perf/query32.q index ed43b4d628..e675c859ab 100644 --- a/ql/src/test/queries/clientpositive/perf/query32.q +++ b/ql/src/test/queries/clientpositive/perf/query32.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query32.tpl and seed 2031708268 explain select sum(cs_ext_discount_amt) as `excess discount amount` diff --git a/ql/src/test/queries/clientpositive/perf/query33.q b/ql/src/test/queries/clientpositive/perf/query33.q index 1dfa9bee8f..11d68987d8 100644 --- a/ql/src/test/queries/clientpositive/perf/query33.q +++ b/ql/src/test/queries/clientpositive/perf/query33.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query33.tpl and seed 1930872976 explain with ss as ( diff --git a/ql/src/test/queries/clientpositive/perf/query34.q b/ql/src/test/queries/clientpositive/perf/query34.q index 427eed6e4d..473eddbb92 100644 --- a/ql/src/test/queries/clientpositive/perf/query34.q +++ b/ql/src/test/queries/clientpositive/perf/query34.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query34.tpl and seed 1971067816 explain select c_last_name diff --git a/ql/src/test/queries/clientpositive/perf/query35.q b/ql/src/test/queries/clientpositive/perf/query35.q index 19951ac9c1..7d5e2efe2b 100644 --- a/ql/src/test/queries/clientpositive/perf/query35.q +++ b/ql/src/test/queries/clientpositive/perf/query35.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query35.tpl and seed 1930872976 explain select diff --git a/ql/src/test/queries/clientpositive/perf/query36.q b/ql/src/test/queries/clientpositive/perf/query36.q index 789f9324f6..3f1161a4da 100644 --- a/ql/src/test/queries/clientpositive/perf/query36.q +++ b/ql/src/test/queries/clientpositive/perf/query36.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query36.tpl and seed 1544728811 explain select diff --git a/ql/src/test/queries/clientpositive/perf/query37.q b/ql/src/test/queries/clientpositive/perf/query37.q index 811eab0489..db39f7dfc1 100644 --- a/ql/src/test/queries/clientpositive/perf/query37.q +++ b/ql/src/test/queries/clientpositive/perf/query37.q @@ -1,4 +1,5 @@ set 
hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query37.tpl and seed 301843662 explain select i_item_id diff --git a/ql/src/test/queries/clientpositive/perf/query38.q b/ql/src/test/queries/clientpositive/perf/query38.q index 8eade8a363..e6250e9571 100644 --- a/ql/src/test/queries/clientpositive/perf/query38.q +++ b/ql/src/test/queries/clientpositive/perf/query38.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query38.tpl and seed 1819994127 explain select count(*) from ( diff --git a/ql/src/test/queries/clientpositive/perf/query39.q b/ql/src/test/queries/clientpositive/perf/query39.q index d3c806d2d3..e64f693921 100644 --- a/ql/src/test/queries/clientpositive/perf/query39.q +++ b/ql/src/test/queries/clientpositive/perf/query39.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query39.tpl and seed 1327317894 explain with inv as diff --git a/ql/src/test/queries/clientpositive/perf/query4.q b/ql/src/test/queries/clientpositive/perf/query4.q index 631a464028..87845b42c1 100644 --- a/ql/src/test/queries/clientpositive/perf/query4.q +++ b/ql/src/test/queries/clientpositive/perf/query4.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query4.tpl and seed 1819994127 explain with year_total as ( diff --git a/ql/src/test/queries/clientpositive/perf/query40.q b/ql/src/test/queries/clientpositive/perf/query40.q index 61f5ad3c91..da46f4b380 100644 --- a/ql/src/test/queries/clientpositive/perf/query40.q +++ b/ql/src/test/queries/clientpositive/perf/query40.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query40.tpl and seed 1819994127 explain select diff --git a/ql/src/test/queries/clientpositive/perf/query42.q b/ql/src/test/queries/clientpositive/perf/query42.q index 6b8abe090e..4e075f47d3 100644 --- a/ql/src/test/queries/clientpositive/perf/query42.q +++ b/ql/src/test/queries/clientpositive/perf/query42.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query42.tpl and seed 1819994127 explain select dt.d_year diff --git a/ql/src/test/queries/clientpositive/perf/query43.q b/ql/src/test/queries/clientpositive/perf/query43.q index ebdc69d933..9f6cd270f5 100644 --- a/ql/src/test/queries/clientpositive/perf/query43.q +++ b/ql/src/test/queries/clientpositive/perf/query43.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query43.tpl and seed 1819994127 explain select s_store_name, s_store_id, diff --git a/ql/src/test/queries/clientpositive/perf/query44.q b/ql/src/test/queries/clientpositive/perf/query44.q index 712bbfb32d..cd074dadcd 100644 --- a/ql/src/test/queries/clientpositive/perf/query44.q +++ b/ql/src/test/queries/clientpositive/perf/query44.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query44.tpl and seed 1819994127 explain select asceding.rnk, i1.i_product_name best_performing, i2.i_product_name worst_performing diff --git a/ql/src/test/queries/clientpositive/perf/query45.q 
b/ql/src/test/queries/clientpositive/perf/query45.q index 4db3fb2248..c4f8bf4d9b 100644 --- a/ql/src/test/queries/clientpositive/perf/query45.q +++ b/ql/src/test/queries/clientpositive/perf/query45.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query45.tpl and seed 2031708268 explain select ca_zip, ca_county, sum(ws_sales_price) diff --git a/ql/src/test/queries/clientpositive/perf/query46.q b/ql/src/test/queries/clientpositive/perf/query46.q index 46f8be34bd..62adbd03de 100644 --- a/ql/src/test/queries/clientpositive/perf/query46.q +++ b/ql/src/test/queries/clientpositive/perf/query46.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query46.tpl and seed 803547492 explain select c_last_name diff --git a/ql/src/test/queries/clientpositive/perf/query47.q b/ql/src/test/queries/clientpositive/perf/query47.q index 5c26ba5a22..c0b999f601 100644 --- a/ql/src/test/queries/clientpositive/perf/query47.q +++ b/ql/src/test/queries/clientpositive/perf/query47.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query47.tpl and seed 2031708268 explain with v1 as( diff --git a/ql/src/test/queries/clientpositive/perf/query48.q b/ql/src/test/queries/clientpositive/perf/query48.q index cfff1d7857..bab2935ca6 100644 --- a/ql/src/test/queries/clientpositive/perf/query48.q +++ b/ql/src/test/queries/clientpositive/perf/query48.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query48.tpl and seed 622697896 explain select sum (ss_quantity) diff --git a/ql/src/test/queries/clientpositive/perf/query49.q b/ql/src/test/queries/clientpositive/perf/query49.q index 6c62e1f13c..e53e3de712 100644 --- a/ql/src/test/queries/clientpositive/perf/query49.q +++ b/ql/src/test/queries/clientpositive/perf/query49.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query49.tpl and seed 1819994127 explain select diff --git a/ql/src/test/queries/clientpositive/perf/query5.q b/ql/src/test/queries/clientpositive/perf/query5.q index bf61fb2ed4..216210a545 100644 --- a/ql/src/test/queries/clientpositive/perf/query5.q +++ b/ql/src/test/queries/clientpositive/perf/query5.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query5.tpl and seed 1819994127 explain with ssr as diff --git a/ql/src/test/queries/clientpositive/perf/query50.q b/ql/src/test/queries/clientpositive/perf/query50.q index 0e2caf6b86..644aa66c56 100644 --- a/ql/src/test/queries/clientpositive/perf/query50.q +++ b/ql/src/test/queries/clientpositive/perf/query50.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query50.tpl and seed 1819994127 explain select diff --git a/ql/src/test/queries/clientpositive/perf/query51.q b/ql/src/test/queries/clientpositive/perf/query51.q index 9f90525ef7..52a8ae97a5 100644 --- a/ql/src/test/queries/clientpositive/perf/query51.q +++ b/ql/src/test/queries/clientpositive/perf/query51.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query51.tpl and seed 
1819994127 explain WITH web_v1 as ( diff --git a/ql/src/test/queries/clientpositive/perf/query52.q b/ql/src/test/queries/clientpositive/perf/query52.q index 1fee84674a..9b7f9dacaa 100644 --- a/ql/src/test/queries/clientpositive/perf/query52.q +++ b/ql/src/test/queries/clientpositive/perf/query52.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query52.tpl and seed 1819994127 explain select dt.d_year diff --git a/ql/src/test/queries/clientpositive/perf/query53.q b/ql/src/test/queries/clientpositive/perf/query53.q index 0b81574c13..1491d5bfdb 100644 --- a/ql/src/test/queries/clientpositive/perf/query53.q +++ b/ql/src/test/queries/clientpositive/perf/query53.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query53.tpl and seed 1819994127 explain select * from diff --git a/ql/src/test/queries/clientpositive/perf/query54.q b/ql/src/test/queries/clientpositive/perf/query54.q index 424f3855d2..54db5c64c9 100644 --- a/ql/src/test/queries/clientpositive/perf/query54.q +++ b/ql/src/test/queries/clientpositive/perf/query54.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query54.tpl and seed 1930872976 explain with my_customers as ( diff --git a/ql/src/test/queries/clientpositive/perf/query55.q b/ql/src/test/queries/clientpositive/perf/query55.q index f953f117af..0f0325d39b 100644 --- a/ql/src/test/queries/clientpositive/perf/query55.q +++ b/ql/src/test/queries/clientpositive/perf/query55.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query55.tpl and seed 2031708268 explain select i_brand_id brand_id, i_brand brand, diff --git a/ql/src/test/queries/clientpositive/perf/query56.q b/ql/src/test/queries/clientpositive/perf/query56.q index f3c83236ac..0027d1d12c 100644 --- a/ql/src/test/queries/clientpositive/perf/query56.q +++ b/ql/src/test/queries/clientpositive/perf/query56.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query56.tpl and seed 1951559352 explain with ss as ( diff --git a/ql/src/test/queries/clientpositive/perf/query57.q b/ql/src/test/queries/clientpositive/perf/query57.q index 4dc6e63257..e352648371 100644 --- a/ql/src/test/queries/clientpositive/perf/query57.q +++ b/ql/src/test/queries/clientpositive/perf/query57.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query57.tpl and seed 2031708268 explain with v1 as( diff --git a/ql/src/test/queries/clientpositive/perf/query58.q b/ql/src/test/queries/clientpositive/perf/query58.q index 8d918ef4cb..a5ca9a0738 100644 --- a/ql/src/test/queries/clientpositive/perf/query58.q +++ b/ql/src/test/queries/clientpositive/perf/query58.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query58.tpl and seed 1819994127 explain with ss_items as diff --git a/ql/src/test/queries/clientpositive/perf/query59.q b/ql/src/test/queries/clientpositive/perf/query59.q index 099965306b..4a75731c04 100644 --- a/ql/src/test/queries/clientpositive/perf/query59.q +++ b/ql/src/test/queries/clientpositive/perf/query59.q @@ -1,4 +1,5 @@ set 
hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query59.tpl and seed 1819994127 explain with wss as diff --git a/ql/src/test/queries/clientpositive/perf/query6.q b/ql/src/test/queries/clientpositive/perf/query6.q index aabce5202e..10e8d8f852 100644 --- a/ql/src/test/queries/clientpositive/perf/query6.q +++ b/ql/src/test/queries/clientpositive/perf/query6.q @@ -1,6 +1,7 @@ set hive.auto.convert.join=true; set hive.tez.cartesian-product.enabled=true; set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query6.tpl and seed 1819994127 explain select a.ca_state state, count(*) cnt diff --git a/ql/src/test/queries/clientpositive/perf/query60.q b/ql/src/test/queries/clientpositive/perf/query60.q index a5ab248cd1..2e3faf6dad 100644 --- a/ql/src/test/queries/clientpositive/perf/query60.q +++ b/ql/src/test/queries/clientpositive/perf/query60.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query60.tpl and seed 1930872976 explain with ss as ( diff --git a/ql/src/test/queries/clientpositive/perf/query61.q b/ql/src/test/queries/clientpositive/perf/query61.q index edaf6f6e8e..01c4218568 100644 --- a/ql/src/test/queries/clientpositive/perf/query61.q +++ b/ql/src/test/queries/clientpositive/perf/query61.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query61.tpl and seed 1930872976 explain select promotions,total,cast(promotions as decimal(15,4))/cast(total as decimal(15,4))*100 diff --git a/ql/src/test/queries/clientpositive/perf/query63.q b/ql/src/test/queries/clientpositive/perf/query63.q index 49e513c786..3d5a735dfb 100644 --- a/ql/src/test/queries/clientpositive/perf/query63.q +++ b/ql/src/test/queries/clientpositive/perf/query63.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query63.tpl and seed 1819994127 explain select * diff --git a/ql/src/test/queries/clientpositive/perf/query64.q b/ql/src/test/queries/clientpositive/perf/query64.q index b069c2ace9..45e167b717 100644 --- a/ql/src/test/queries/clientpositive/perf/query64.q +++ b/ql/src/test/queries/clientpositive/perf/query64.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query64.tpl and seed 1220860970 explain with cs_ui as diff --git a/ql/src/test/queries/clientpositive/perf/query65.q b/ql/src/test/queries/clientpositive/perf/query65.q index d5b53a25c5..4612c10205 100644 --- a/ql/src/test/queries/clientpositive/perf/query65.q +++ b/ql/src/test/queries/clientpositive/perf/query65.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query65.tpl and seed 1819994127 explain select diff --git a/ql/src/test/queries/clientpositive/perf/query66.q b/ql/src/test/queries/clientpositive/perf/query66.q index 280bac8df3..753e973678 100644 --- a/ql/src/test/queries/clientpositive/perf/query66.q +++ b/ql/src/test/queries/clientpositive/perf/query66.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query66.tpl and seed 2042478054 explain select diff --git 
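
Each TPC-DS perf file in this run receives the same one-line prologue, `set hive.materializedview.rewriting=false;`, presumably so the golden EXPLAIN plans stay stable once write-id snapshots are recorded at analysis time (the patch does not state the motivation, so treat that as an inference). For reference, a minimal programmatic equivalent; HiveConf extends Hadoop's Configuration, so setBoolean applies directly, and the property key is taken verbatim from the diff:

```java
import org.apache.hadoop.hive.conf.HiveConf;

// Equivalent of the `set hive.materializedview.rewriting=false;` line that each
// perf .q file now starts with, done in code instead of the CLI.
HiveConf conf = new HiveConf();
conf.setBoolean("hive.materializedview.rewriting", false);
assert !conf.getBoolean("hive.materializedview.rewriting", true);
```
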
a/ql/src/test/queries/clientpositive/perf/query67.q b/ql/src/test/queries/clientpositive/perf/query67.q index c3ecf2a177..ce3fee04a6 100644 --- a/ql/src/test/queries/clientpositive/perf/query67.q +++ b/ql/src/test/queries/clientpositive/perf/query67.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query67.tpl and seed 1819994127 explain select * diff --git a/ql/src/test/queries/clientpositive/perf/query68.q b/ql/src/test/queries/clientpositive/perf/query68.q index 964dc8a0a7..298f3e7821 100644 --- a/ql/src/test/queries/clientpositive/perf/query68.q +++ b/ql/src/test/queries/clientpositive/perf/query68.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query68.tpl and seed 803547492 explain select c_last_name diff --git a/ql/src/test/queries/clientpositive/perf/query69.q b/ql/src/test/queries/clientpositive/perf/query69.q index ce2d19cc5d..4d1dbceaf2 100644 --- a/ql/src/test/queries/clientpositive/perf/query69.q +++ b/ql/src/test/queries/clientpositive/perf/query69.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query69.tpl and seed 797269820 explain select diff --git a/ql/src/test/queries/clientpositive/perf/query7.q b/ql/src/test/queries/clientpositive/perf/query7.q index 7bc1a00a3f..8efcd65427 100644 --- a/ql/src/test/queries/clientpositive/perf/query7.q +++ b/ql/src/test/queries/clientpositive/perf/query7.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query7.tpl and seed 1930872976 explain select i_item_id, diff --git a/ql/src/test/queries/clientpositive/perf/query70.q b/ql/src/test/queries/clientpositive/perf/query70.q index 7974976c34..9c8df7144e 100644 --- a/ql/src/test/queries/clientpositive/perf/query70.q +++ b/ql/src/test/queries/clientpositive/perf/query70.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query70.tpl and seed 1819994127 explain select diff --git a/ql/src/test/queries/clientpositive/perf/query71.q b/ql/src/test/queries/clientpositive/perf/query71.q index ea6548ec4d..cc1e3de3e0 100644 --- a/ql/src/test/queries/clientpositive/perf/query71.q +++ b/ql/src/test/queries/clientpositive/perf/query71.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query71.tpl and seed 2031708268 explain select i_brand_id brand_id, i_brand brand,t_hour,t_minute, diff --git a/ql/src/test/queries/clientpositive/perf/query72.q b/ql/src/test/queries/clientpositive/perf/query72.q index 20fbcb1242..f111bbacb0 100644 --- a/ql/src/test/queries/clientpositive/perf/query72.q +++ b/ql/src/test/queries/clientpositive/perf/query72.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query72.tpl and seed 2031708268 explain select i_item_desc diff --git a/ql/src/test/queries/clientpositive/perf/query73.q b/ql/src/test/queries/clientpositive/perf/query73.q index 42ccaa19be..cf8ceb4fff 100644 --- a/ql/src/test/queries/clientpositive/perf/query73.q +++ b/ql/src/test/queries/clientpositive/perf/query73.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- 
start query 1 in stream 0 using template query73.tpl and seed 1971067816 explain select c_last_name diff --git a/ql/src/test/queries/clientpositive/perf/query74.q b/ql/src/test/queries/clientpositive/perf/query74.q index b25db9c0e0..11d1712e47 100644 --- a/ql/src/test/queries/clientpositive/perf/query74.q +++ b/ql/src/test/queries/clientpositive/perf/query74.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query74.tpl and seed 1556717815 explain with year_total as ( diff --git a/ql/src/test/queries/clientpositive/perf/query75.q b/ql/src/test/queries/clientpositive/perf/query75.q index ac1fc381c4..cb8d5163f3 100644 --- a/ql/src/test/queries/clientpositive/perf/query75.q +++ b/ql/src/test/queries/clientpositive/perf/query75.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query75.tpl and seed 1819994127 explain WITH all_sales AS ( diff --git a/ql/src/test/queries/clientpositive/perf/query76.q b/ql/src/test/queries/clientpositive/perf/query76.q index ca943ce967..a342013a70 100644 --- a/ql/src/test/queries/clientpositive/perf/query76.q +++ b/ql/src/test/queries/clientpositive/perf/query76.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query76.tpl and seed 2031708268 explain select channel, col_name, d_year, d_qoy, i_category, COUNT(*) sales_cnt, SUM(ext_sales_price) sales_amt FROM ( diff --git a/ql/src/test/queries/clientpositive/perf/query77.q b/ql/src/test/queries/clientpositive/perf/query77.q index 28578133fe..ecc31fc419 100644 --- a/ql/src/test/queries/clientpositive/perf/query77.q +++ b/ql/src/test/queries/clientpositive/perf/query77.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query77.tpl and seed 1819994127 explain with ss as diff --git a/ql/src/test/queries/clientpositive/perf/query78.q b/ql/src/test/queries/clientpositive/perf/query78.q index ca9e6d6cb1..ae50db5698 100644 --- a/ql/src/test/queries/clientpositive/perf/query78.q +++ b/ql/src/test/queries/clientpositive/perf/query78.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query78.tpl and seed 1819994127 explain with ws as diff --git a/ql/src/test/queries/clientpositive/perf/query79.q b/ql/src/test/queries/clientpositive/perf/query79.q index dfa7017c13..350d7c6664 100644 --- a/ql/src/test/queries/clientpositive/perf/query79.q +++ b/ql/src/test/queries/clientpositive/perf/query79.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query79.tpl and seed 2031708268 explain select diff --git a/ql/src/test/queries/clientpositive/perf/query8.q b/ql/src/test/queries/clientpositive/perf/query8.q index cfce36618b..35287a2cfe 100644 --- a/ql/src/test/queries/clientpositive/perf/query8.q +++ b/ql/src/test/queries/clientpositive/perf/query8.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query8.tpl and seed 1766988859 explain select s_store_name diff --git a/ql/src/test/queries/clientpositive/perf/query80.q b/ql/src/test/queries/clientpositive/perf/query80.q index 651c5d7ff5..c5c37d468b 100644 --- 
a/ql/src/test/queries/clientpositive/perf/query80.q +++ b/ql/src/test/queries/clientpositive/perf/query80.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query80.tpl and seed 1819994127 explain with ssr as diff --git a/ql/src/test/queries/clientpositive/perf/query81.q b/ql/src/test/queries/clientpositive/perf/query81.q index fd072c398d..f3b4d949ea 100644 --- a/ql/src/test/queries/clientpositive/perf/query81.q +++ b/ql/src/test/queries/clientpositive/perf/query81.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query81.tpl and seed 1819994127 explain with customer_total_return as diff --git a/ql/src/test/queries/clientpositive/perf/query82.q b/ql/src/test/queries/clientpositive/perf/query82.q index 9aec0cbd68..83598288af 100644 --- a/ql/src/test/queries/clientpositive/perf/query82.q +++ b/ql/src/test/queries/clientpositive/perf/query82.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query82.tpl and seed 55585014 explain select i_item_id diff --git a/ql/src/test/queries/clientpositive/perf/query83.q b/ql/src/test/queries/clientpositive/perf/query83.q index fd9184ccb9..f9eef5b2aa 100644 --- a/ql/src/test/queries/clientpositive/perf/query83.q +++ b/ql/src/test/queries/clientpositive/perf/query83.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query83.tpl and seed 1930872976 explain with sr_items as diff --git a/ql/src/test/queries/clientpositive/perf/query84.q b/ql/src/test/queries/clientpositive/perf/query84.q index 4ab59457d2..44574df8d0 100644 --- a/ql/src/test/queries/clientpositive/perf/query84.q +++ b/ql/src/test/queries/clientpositive/perf/query84.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query84.tpl and seed 1819994127 explain select c_customer_id as customer_id diff --git a/ql/src/test/queries/clientpositive/perf/query85.q b/ql/src/test/queries/clientpositive/perf/query85.q index 2e67e728bf..cccbbf2944 100644 --- a/ql/src/test/queries/clientpositive/perf/query85.q +++ b/ql/src/test/queries/clientpositive/perf/query85.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query85.tpl and seed 622697896 explain select substr(r_reason_desc,1,20) diff --git a/ql/src/test/queries/clientpositive/perf/query86.q b/ql/src/test/queries/clientpositive/perf/query86.q index 6670868962..ca8a8cc927 100644 --- a/ql/src/test/queries/clientpositive/perf/query86.q +++ b/ql/src/test/queries/clientpositive/perf/query86.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query86.tpl and seed 1819994127 explain select diff --git a/ql/src/test/queries/clientpositive/perf/query87.q b/ql/src/test/queries/clientpositive/perf/query87.q index e4562c23fe..6a514a6ad3 100644 --- a/ql/src/test/queries/clientpositive/perf/query87.q +++ b/ql/src/test/queries/clientpositive/perf/query87.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query87.tpl and seed 1819994127 explain select count(*) diff --git 
a/ql/src/test/queries/clientpositive/perf/query88.q b/ql/src/test/queries/clientpositive/perf/query88.q index 265cc7c4f2..1c78501ad9 100644 --- a/ql/src/test/queries/clientpositive/perf/query88.q +++ b/ql/src/test/queries/clientpositive/perf/query88.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query88.tpl and seed 318176889 explain select * diff --git a/ql/src/test/queries/clientpositive/perf/query89.q b/ql/src/test/queries/clientpositive/perf/query89.q index 31592295c0..9ddc125ecb 100644 --- a/ql/src/test/queries/clientpositive/perf/query89.q +++ b/ql/src/test/queries/clientpositive/perf/query89.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query89.tpl and seed 1719819282 explain select * diff --git a/ql/src/test/queries/clientpositive/perf/query9.q b/ql/src/test/queries/clientpositive/perf/query9.q index 421f5e1f43..190b12f5f2 100644 --- a/ql/src/test/queries/clientpositive/perf/query9.q +++ b/ql/src/test/queries/clientpositive/perf/query9.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query9.tpl and seed 1490436826 explain select case when (select count(*) diff --git a/ql/src/test/queries/clientpositive/perf/query90.q b/ql/src/test/queries/clientpositive/perf/query90.q index d17cbc4c21..8fa8c3f652 100644 --- a/ql/src/test/queries/clientpositive/perf/query90.q +++ b/ql/src/test/queries/clientpositive/perf/query90.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query90.tpl and seed 2031708268 explain select cast(amc as decimal(15,4))/cast(pmc as decimal(15,4)) am_pm_ratio diff --git a/ql/src/test/queries/clientpositive/perf/query91.q b/ql/src/test/queries/clientpositive/perf/query91.q index 79ca713dd0..bd54aeee83 100644 --- a/ql/src/test/queries/clientpositive/perf/query91.q +++ b/ql/src/test/queries/clientpositive/perf/query91.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query91.tpl and seed 1930872976 explain select diff --git a/ql/src/test/queries/clientpositive/perf/query92.q b/ql/src/test/queries/clientpositive/perf/query92.q index f26fa5e46f..4fc25e80b8 100644 --- a/ql/src/test/queries/clientpositive/perf/query92.q +++ b/ql/src/test/queries/clientpositive/perf/query92.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query92.tpl and seed 2031708268 explain select diff --git a/ql/src/test/queries/clientpositive/perf/query93.q b/ql/src/test/queries/clientpositive/perf/query93.q index 7f4a093df7..d5e9168426 100644 --- a/ql/src/test/queries/clientpositive/perf/query93.q +++ b/ql/src/test/queries/clientpositive/perf/query93.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query93.tpl and seed 1200409435 explain select ss_customer_sk diff --git a/ql/src/test/queries/clientpositive/perf/query94.q b/ql/src/test/queries/clientpositive/perf/query94.q index 18253fa7d6..0557982e29 100644 --- a/ql/src/test/queries/clientpositive/perf/query94.q +++ b/ql/src/test/queries/clientpositive/perf/query94.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set 
hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query94.tpl and seed 2031708268 explain select diff --git a/ql/src/test/queries/clientpositive/perf/query95.q b/ql/src/test/queries/clientpositive/perf/query95.q index e9024a8c0b..9c79975e52 100644 --- a/ql/src/test/queries/clientpositive/perf/query95.q +++ b/ql/src/test/queries/clientpositive/perf/query95.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query95.tpl and seed 2031708268 explain with ws_wh as diff --git a/ql/src/test/queries/clientpositive/perf/query96.q b/ql/src/test/queries/clientpositive/perf/query96.q index a306d6cdfd..3dd14b77cf 100644 --- a/ql/src/test/queries/clientpositive/perf/query96.q +++ b/ql/src/test/queries/clientpositive/perf/query96.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query96.tpl and seed 1819994127 explain select count(*) diff --git a/ql/src/test/queries/clientpositive/perf/query97.q b/ql/src/test/queries/clientpositive/perf/query97.q index 7203e5243c..d782ebaf53 100644 --- a/ql/src/test/queries/clientpositive/perf/query97.q +++ b/ql/src/test/queries/clientpositive/perf/query97.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query97.tpl and seed 1819994127 explain with ssci as ( diff --git a/ql/src/test/queries/clientpositive/perf/query98.q b/ql/src/test/queries/clientpositive/perf/query98.q index 6168f2af86..dea8478c93 100644 --- a/ql/src/test/queries/clientpositive/perf/query98.q +++ b/ql/src/test/queries/clientpositive/perf/query98.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query98.tpl and seed 345591136 explain select i_item_desc diff --git a/ql/src/test/queries/clientpositive/perf/query99.q b/ql/src/test/queries/clientpositive/perf/query99.q index 83be1d0e71..191967b399 100644 --- a/ql/src/test/queries/clientpositive/perf/query99.q +++ b/ql/src/test/queries/clientpositive/perf/query99.q @@ -1,4 +1,5 @@ set hive.mapred.mode=nonstrict; +set hive.materializedview.rewriting=false; -- start query 1 in stream 0 using template query99.tpl and seed 1819994127 explain select diff --git a/ql/src/test/queries/clientpositive/results_cache_invalidation2.q b/ql/src/test/queries/clientpositive/results_cache_invalidation2.q index b360c85eb1..9a4d576749 100644 --- a/ql/src/test/queries/clientpositive/results_cache_invalidation2.q +++ b/ql/src/test/queries/clientpositive/results_cache_invalidation2.q @@ -1,6 +1,4 @@ --! 
qt:dataset:src -set hive.metastore.event.listeners=org.apache.hive.hcatalog.listener.DbNotificationListener; - set hive.query.results.cache.enabled=true; set hive.query.results.cache.nontransactional.tables.enabled=true; diff --git a/ql/src/test/results/clientnegative/allow_change_col_type_par_neg.q.out b/ql/src/test/results/clientnegative/allow_change_col_type_par_neg.q.out index 98d99a9087..7ec28d0f5a 100644 --- a/ql/src/test/results/clientnegative/allow_change_col_type_par_neg.q.out +++ b/ql/src/test/results/clientnegative/allow_change_col_type_par_neg.q.out @@ -8,7 +8,7 @@ POSTHOOK: Output: database:default POSTHOOK: Output: default@t1 hive.metastore.disallow.incompatible.col.type.changes=true metaconf:hive.metastore.disallow.incompatible.col.type.changes=true -hive.metastore.disallow.incompatible.col.type.changes=false +hive.metastore.disallow.incompatible.col.type.changes=true metaconf:hive.metastore.disallow.incompatible.col.type.changes=true PREHOOK: query: alter table t1 change column c1 c1 smallint PREHOOK: type: ALTERTABLE_RENAMECOL diff --git a/ql/src/test/results/clientpositive/compute_stats_date.q.out b/ql/src/test/results/clientpositive/compute_stats_date.q.out index d5eaf2099a..e638e5cdfe 100644 --- a/ql/src/test/results/clientpositive/compute_stats_date.q.out +++ b/ql/src/test/results/clientpositive/compute_stats_date.q.out @@ -132,11 +132,11 @@ num_falses bitVector HL comment from deserializer COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"fl_date\":\"true\"}} -PREHOOK: query: alter table tab_date update statistics for column fl_date set ('numDVs'='19', 'highValue'='2015-01-01', 'lowValue'='0') +PREHOOK: query: alter table tab_date update statistics for column fl_date set ('numDVs'='19', 'highValue'='2015-01-01', 'lowValue'='0', 'numNulls'='0') PREHOOK: type: ALTERTABLE_UPDATETABLESTATS PREHOOK: Input: default@tab_date PREHOOK: Output: default@tab_date -POSTHOOK: query: alter table tab_date update statistics for column fl_date set ('numDVs'='19', 'highValue'='2015-01-01', 'lowValue'='0') +POSTHOOK: query: alter table tab_date update statistics for column fl_date set ('numDVs'='19', 'highValue'='2015-01-01', 'lowValue'='0', 'numNulls'='0') POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS POSTHOOK: Input: default@tab_date POSTHOOK: Output: default@tab_date diff --git a/ql/src/test/results/clientpositive/lock4.q.out b/ql/src/test/results/clientpositive/lock4.q.out index 7deb6ee0da..3c3811faf1 100644 --- a/ql/src/test/results/clientpositive/lock4.q.out +++ b/ql/src/test/results/clientpositive/lock4.q.out @@ -68,10 +68,12 @@ PREHOOK: query: SHOW LOCKS PREHOOK: type: SHOWLOCKS POSTHOOK: query: SHOW LOCKS POSTHOOK: type: SHOWLOCKS +Lock ID Database Table Partition State Blocked By Type Transaction ID Last Heartbeat Acquired At User Hostname Agent Info PREHOOK: query: SHOW LOCKS tstsrcpart_n3 PREHOOK: type: SHOWLOCKS POSTHOOK: query: SHOW LOCKS tstsrcpart_n3 POSTHOOK: type: SHOWLOCKS +Lock ID Database Table Partition State Blocked By Type Transaction ID Last Heartbeat Acquired At User Hostname Agent Info PREHOOK: query: drop table tstsrcpart_n3 PREHOOK: type: DROPTABLE PREHOOK: Input: default@tstsrcpart_n3 diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java index b26586ab62..1219777ceb 100644 --- 
a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java @@ -816,13 +816,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AddDynamicPartition case 5: // PARTITIONNAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list756 = iprot.readListBegin(); - struct.partitionnames = new ArrayList(_list756.size); - String _elem757; - for (int _i758 = 0; _i758 < _list756.size; ++_i758) + org.apache.thrift.protocol.TList _list764 = iprot.readListBegin(); + struct.partitionnames = new ArrayList(_list764.size); + String _elem765; + for (int _i766 = 0; _i766 < _list764.size; ++_i766) { - _elem757 = iprot.readString(); - struct.partitionnames.add(_elem757); + _elem765 = iprot.readString(); + struct.partitionnames.add(_elem765); } iprot.readListEnd(); } @@ -872,9 +872,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AddDynamicPartitio oprot.writeFieldBegin(PARTITIONNAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partitionnames.size())); - for (String _iter759 : struct.partitionnames) + for (String _iter767 : struct.partitionnames) { - oprot.writeString(_iter759); + oprot.writeString(_iter767); } oprot.writeListEnd(); } @@ -910,9 +910,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddDynamicPartition oprot.writeString(struct.tablename); { oprot.writeI32(struct.partitionnames.size()); - for (String _iter760 : struct.partitionnames) + for (String _iter768 : struct.partitionnames) { - oprot.writeString(_iter760); + oprot.writeString(_iter768); } } BitSet optionals = new BitSet(); @@ -937,13 +937,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AddDynamicPartitions struct.tablename = iprot.readString(); struct.setTablenameIsSet(true); { - org.apache.thrift.protocol.TList _list761 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionnames = new ArrayList(_list761.size); - String _elem762; - for (int _i763 = 0; _i763 < _list761.size; ++_i763) + org.apache.thrift.protocol.TList _list769 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionnames = new ArrayList(_list769.size); + String _elem770; + for (int _i771 = 0; _i771 < _list769.size; ++_i771) { - _elem762 = iprot.readString(); - struct.partitionnames.add(_elem762); + _elem770 = iprot.readString(); + struct.partitionnames.add(_elem770); } } struct.setPartitionnamesIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsRequest.java index 361332b600..cf8bbd13ec 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsRequest.java @@ -716,13 +716,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AllocateTableWriteI case 3: // TXN_IDS if (schemeField.type == 
org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list682 = iprot.readListBegin(); - struct.txnIds = new ArrayList(_list682.size); - long _elem683; - for (int _i684 = 0; _i684 < _list682.size; ++_i684) + org.apache.thrift.protocol.TList _list690 = iprot.readListBegin(); + struct.txnIds = new ArrayList(_list690.size); + long _elem691; + for (int _i692 = 0; _i692 < _list690.size; ++_i692) { - _elem683 = iprot.readI64(); - struct.txnIds.add(_elem683); + _elem691 = iprot.readI64(); + struct.txnIds.add(_elem691); } iprot.readListEnd(); } @@ -742,14 +742,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AllocateTableWriteI case 5: // SRC_TXN_TO_WRITE_ID_LIST if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list685 = iprot.readListBegin(); - struct.srcTxnToWriteIdList = new ArrayList(_list685.size); - TxnToWriteId _elem686; - for (int _i687 = 0; _i687 < _list685.size; ++_i687) + org.apache.thrift.protocol.TList _list693 = iprot.readListBegin(); + struct.srcTxnToWriteIdList = new ArrayList(_list693.size); + TxnToWriteId _elem694; + for (int _i695 = 0; _i695 < _list693.size; ++_i695) { - _elem686 = new TxnToWriteId(); - _elem686.read(iprot); - struct.srcTxnToWriteIdList.add(_elem686); + _elem694 = new TxnToWriteId(); + _elem694.read(iprot); + struct.srcTxnToWriteIdList.add(_elem694); } iprot.readListEnd(); } @@ -786,9 +786,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AllocateTableWrite oprot.writeFieldBegin(TXN_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.txnIds.size())); - for (long _iter688 : struct.txnIds) + for (long _iter696 : struct.txnIds) { - oprot.writeI64(_iter688); + oprot.writeI64(_iter696); } oprot.writeListEnd(); } @@ -807,9 +807,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AllocateTableWrite oprot.writeFieldBegin(SRC_TXN_TO_WRITE_ID_LIST_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.srcTxnToWriteIdList.size())); - for (TxnToWriteId _iter689 : struct.srcTxnToWriteIdList) + for (TxnToWriteId _iter697 : struct.srcTxnToWriteIdList) { - _iter689.write(oprot); + _iter697.write(oprot); } oprot.writeListEnd(); } @@ -849,9 +849,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteI if (struct.isSetTxnIds()) { { oprot.writeI32(struct.txnIds.size()); - for (long _iter690 : struct.txnIds) + for (long _iter698 : struct.txnIds) { - oprot.writeI64(_iter690); + oprot.writeI64(_iter698); } } } @@ -861,9 +861,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteI if (struct.isSetSrcTxnToWriteIdList()) { { oprot.writeI32(struct.srcTxnToWriteIdList.size()); - for (TxnToWriteId _iter691 : struct.srcTxnToWriteIdList) + for (TxnToWriteId _iter699 : struct.srcTxnToWriteIdList) { - _iter691.write(oprot); + _iter699.write(oprot); } } } @@ -879,13 +879,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteId BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list692 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.txnIds = new ArrayList(_list692.size); - long _elem693; - for (int _i694 = 0; _i694 < _list692.size; ++_i694) + org.apache.thrift.protocol.TList _list700 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.txnIds = new ArrayList(_list700.size); + long _elem701; + for (int _i702 = 0; _i702 < _list700.size; ++_i702) { - _elem693 = iprot.readI64(); - struct.txnIds.add(_elem693); + _elem701 = iprot.readI64(); + struct.txnIds.add(_elem701); } } struct.setTxnIdsIsSet(true); @@ -896,14 +896,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteId } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list695 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.srcTxnToWriteIdList = new ArrayList(_list695.size); - TxnToWriteId _elem696; - for (int _i697 = 0; _i697 < _list695.size; ++_i697) + org.apache.thrift.protocol.TList _list703 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.srcTxnToWriteIdList = new ArrayList(_list703.size); + TxnToWriteId _elem704; + for (int _i705 = 0; _i705 < _list703.size; ++_i705) { - _elem696 = new TxnToWriteId(); - _elem696.read(iprot); - struct.srcTxnToWriteIdList.add(_elem696); + _elem704 = new TxnToWriteId(); + _elem704.read(iprot); + struct.srcTxnToWriteIdList.add(_elem704); } } struct.setSrcTxnToWriteIdListIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsResponse.java index aaf187b4bd..f71f286638 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsResponse.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsResponse.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AllocateTableWriteI case 1: // TXN_TO_WRITE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list698 = iprot.readListBegin(); - struct.txnToWriteIds = new ArrayList(_list698.size); - TxnToWriteId _elem699; - for (int _i700 = 0; _i700 < _list698.size; ++_i700) + org.apache.thrift.protocol.TList _list706 = iprot.readListBegin(); + struct.txnToWriteIds = new ArrayList(_list706.size); + TxnToWriteId _elem707; + for (int _i708 = 0; _i708 < _list706.size; ++_i708) { - _elem699 = new TxnToWriteId(); - _elem699.read(iprot); - struct.txnToWriteIds.add(_elem699); + _elem707 = new TxnToWriteId(); + _elem707.read(iprot); + struct.txnToWriteIds.add(_elem707); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AllocateTableWrite oprot.writeFieldBegin(TXN_TO_WRITE_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.txnToWriteIds.size())); - for (TxnToWriteId _iter701 : struct.txnToWriteIds) + for (TxnToWriteId _iter709 : struct.txnToWriteIds) { - _iter701.write(oprot); + _iter709.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteI TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.txnToWriteIds.size()); - for (TxnToWriteId _iter702 : struct.txnToWriteIds) + for (TxnToWriteId _iter710 : struct.txnToWriteIds) { - _iter702.write(oprot); + 
_iter710.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteI public void read(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteIdsResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list703 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.txnToWriteIds = new ArrayList(_list703.size); - TxnToWriteId _elem704; - for (int _i705 = 0; _i705 < _list703.size; ++_i705) + org.apache.thrift.protocol.TList _list711 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.txnToWriteIds = new ArrayList(_list711.size); + TxnToWriteId _elem712; + for (int _i713 = 0; _i713 < _list711.size; ++_i713) { - _elem704 = new TxnToWriteId(); - _elem704.read(iprot); - struct.txnToWriteIds.add(_elem704); + _elem712 = new TxnToWriteId(); + _elem712.read(iprot); + struct.txnToWriteIds.add(_elem712); } } struct.setTxnToWriteIdsIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java index eeeae54dd2..30d130df10 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java @@ -877,14 +877,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AlterPartitionsRequ case 4: // PARTITIONS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1024 = iprot.readListBegin(); - struct.partitions = new ArrayList(_list1024.size); - Partition _elem1025; - for (int _i1026 = 0; _i1026 < _list1024.size; ++_i1026) + org.apache.thrift.protocol.TList _list1032 = iprot.readListBegin(); + struct.partitions = new ArrayList(_list1032.size); + Partition _elem1033; + for (int _i1034 = 0; _i1034 < _list1032.size; ++_i1034) { - _elem1025 = new Partition(); - _elem1025.read(iprot); - struct.partitions.add(_elem1025); + _elem1033 = new Partition(); + _elem1033.read(iprot); + struct.partitions.add(_elem1033); } iprot.readListEnd(); } @@ -952,9 +952,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AlterPartitionsReq oprot.writeFieldBegin(PARTITIONS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitions.size())); - for (Partition _iter1027 : struct.partitions) + for (Partition _iter1035 : struct.partitions) { - _iter1027.write(oprot); + _iter1035.write(oprot); } oprot.writeListEnd(); } @@ -1000,9 +1000,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AlterPartitionsRequ oprot.writeString(struct.tableName); { oprot.writeI32(struct.partitions.size()); - for (Partition _iter1028 : struct.partitions) + for (Partition _iter1036 : struct.partitions) { - _iter1028.write(oprot); + _iter1036.write(oprot); } } BitSet optionals = new BitSet(); @@ -1041,14 +1041,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AlterPartitionsReque struct.tableName = iprot.readString(); struct.setTableNameIsSet(true); { - org.apache.thrift.protocol.TList _list1029 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.partitions = new ArrayList(_list1029.size); - Partition _elem1030; - for (int _i1031 = 0; _i1031 < _list1029.size; ++_i1031) + org.apache.thrift.protocol.TList _list1037 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.partitions = new ArrayList(_list1037.size); + Partition _elem1038; + for (int _i1039 = 0; _i1039 < _list1037.size; ++_i1039) { - _elem1030 = new Partition(); - _elem1030.read(iprot); - struct.partitions.add(_elem1030); + _elem1038 = new Partition(); + _elem1038.read(iprot); + struct.partitions.add(_elem1038); } } struct.setPartitionsIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java index ca7628866a..31eccab231 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java @@ -351,13 +351,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ClearFileMetadataRe case 1: // FILE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list872 = iprot.readListBegin(); - struct.fileIds = new ArrayList(_list872.size); - long _elem873; - for (int _i874 = 0; _i874 < _list872.size; ++_i874) + org.apache.thrift.protocol.TList _list880 = iprot.readListBegin(); + struct.fileIds = new ArrayList(_list880.size); + long _elem881; + for (int _i882 = 0; _i882 < _list880.size; ++_i882) { - _elem873 = iprot.readI64(); - struct.fileIds.add(_elem873); + _elem881 = iprot.readI64(); + struct.fileIds.add(_elem881); } iprot.readListEnd(); } @@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ClearFileMetadataR oprot.writeFieldBegin(FILE_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.fileIds.size())); - for (long _iter875 : struct.fileIds) + for (long _iter883 : struct.fileIds) { - oprot.writeI64(_iter875); + oprot.writeI64(_iter883); } oprot.writeListEnd(); } @@ -410,9 +410,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ClearFileMetadataRe TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.fileIds.size()); - for (long _iter876 : struct.fileIds) + for (long _iter884 : struct.fileIds) { - oprot.writeI64(_iter876); + oprot.writeI64(_iter884); } } } @@ -421,13 +421,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ClearFileMetadataRe public void read(org.apache.thrift.protocol.TProtocol prot, ClearFileMetadataRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list877 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.fileIds = new ArrayList(_list877.size); - long _elem878; - for (int _i879 = 0; _i879 < _list877.size; ++_i879) + org.apache.thrift.protocol.TList _list885 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.fileIds = new ArrayList(_list885.size); + long _elem886; + for 
(int _i887 = 0; _i887 < _list885.size; ++_i887) { - _elem878 = iprot.readI64(); - struct.fileIds.add(_elem878); + _elem886 = iprot.readI64(); + struct.fileIds.add(_elem886); } } struct.setFileIdsIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapabilities.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapabilities.java index c3cb11e14c..cce7cc08ba 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapabilities.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapabilities.java @@ -354,13 +354,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ClientCapabilities case 1: // VALUES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list888 = iprot.readListBegin(); - struct.values = new ArrayList(_list888.size); - ClientCapability _elem889; - for (int _i890 = 0; _i890 < _list888.size; ++_i890) + org.apache.thrift.protocol.TList _list896 = iprot.readListBegin(); + struct.values = new ArrayList(_list896.size); + ClientCapability _elem897; + for (int _i898 = 0; _i898 < _list896.size; ++_i898) { - _elem889 = org.apache.hadoop.hive.metastore.api.ClientCapability.findByValue(iprot.readI32()); - struct.values.add(_elem889); + _elem897 = org.apache.hadoop.hive.metastore.api.ClientCapability.findByValue(iprot.readI32()); + struct.values.add(_elem897); } iprot.readListEnd(); } @@ -386,9 +386,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ClientCapabilities oprot.writeFieldBegin(VALUES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, struct.values.size())); - for (ClientCapability _iter891 : struct.values) + for (ClientCapability _iter899 : struct.values) { - oprot.writeI32(_iter891.getValue()); + oprot.writeI32(_iter899.getValue()); } oprot.writeListEnd(); } @@ -413,9 +413,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ClientCapabilities TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.values.size()); - for (ClientCapability _iter892 : struct.values) + for (ClientCapability _iter900 : struct.values) { - oprot.writeI32(_iter892.getValue()); + oprot.writeI32(_iter900.getValue()); } } } @@ -424,13 +424,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ClientCapabilities public void read(org.apache.thrift.protocol.TProtocol prot, ClientCapabilities struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list893 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, iprot.readI32()); - struct.values = new ArrayList(_list893.size); - ClientCapability _elem894; - for (int _i895 = 0; _i895 < _list893.size; ++_i895) + org.apache.thrift.protocol.TList _list901 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, iprot.readI32()); + struct.values = new ArrayList(_list901.size); + ClientCapability _elem902; + for (int _i903 = 0; _i903 < _list901.size; ++_i903) { - _elem894 = org.apache.hadoop.hive.metastore.api.ClientCapability.findByValue(iprot.readI32()); - struct.values.add(_elem894); + _elem902 = org.apache.hadoop.hive.metastore.api.ClientCapability.findByValue(iprot.readI32()); + 
struct.values.add(_elem902); } } struct.setValuesIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java index 5acd896fd3..147c91fb3f 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java @@ -814,15 +814,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, CompactionRequest s case 6: // PROPERTIES if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map738 = iprot.readMapBegin(); - struct.properties = new HashMap(2*_map738.size); - String _key739; - String _val740; - for (int _i741 = 0; _i741 < _map738.size; ++_i741) + org.apache.thrift.protocol.TMap _map746 = iprot.readMapBegin(); + struct.properties = new HashMap(2*_map746.size); + String _key747; + String _val748; + for (int _i749 = 0; _i749 < _map746.size; ++_i749) { - _key739 = iprot.readString(); - _val740 = iprot.readString(); - struct.properties.put(_key739, _val740); + _key747 = iprot.readString(); + _val748 = iprot.readString(); + struct.properties.put(_key747, _val748); } iprot.readMapEnd(); } @@ -878,10 +878,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, CompactionRequest oprot.writeFieldBegin(PROPERTIES_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.properties.size())); - for (Map.Entry _iter742 : struct.properties.entrySet()) + for (Map.Entry _iter750 : struct.properties.entrySet()) { - oprot.writeString(_iter742.getKey()); - oprot.writeString(_iter742.getValue()); + oprot.writeString(_iter750.getKey()); + oprot.writeString(_iter750.getValue()); } oprot.writeMapEnd(); } @@ -928,10 +928,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, CompactionRequest s if (struct.isSetProperties()) { { oprot.writeI32(struct.properties.size()); - for (Map.Entry _iter743 : struct.properties.entrySet()) + for (Map.Entry _iter751 : struct.properties.entrySet()) { - oprot.writeString(_iter743.getKey()); - oprot.writeString(_iter743.getValue()); + oprot.writeString(_iter751.getKey()); + oprot.writeString(_iter751.getValue()); } } } @@ -957,15 +957,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, CompactionRequest st } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map744 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.properties = new HashMap(2*_map744.size); - String _key745; - String _val746; - for (int _i747 = 0; _i747 < _map744.size; ++_i747) + org.apache.thrift.protocol.TMap _map752 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.properties = new HashMap(2*_map752.size); + String _key753; + String _val754; + for (int _i755 = 0; _i755 < _map752.size; ++_i755) { - _key745 = iprot.readString(); - _val746 = iprot.readString(); - struct.properties.put(_key745, _val746); + _key753 = iprot.readString(); + _val754 = iprot.readString(); + struct.properties.put(_key753, _val754); } } 
struct.setPropertiesIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CreateTableRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CreateTableRequest.java new file mode 100644 index 0000000000..c2325c1c43 --- /dev/null +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CreateTableRequest.java @@ -0,0 +1,1730 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class CreateTableRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CreateTableRequest"); + + private static final org.apache.thrift.protocol.TField TABLE_FIELD_DESC = new org.apache.thrift.protocol.TField("table", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField ENV_CONTEXT_FIELD_DESC = new org.apache.thrift.protocol.TField("envContext", org.apache.thrift.protocol.TType.STRUCT, (short)2); + private static final org.apache.thrift.protocol.TField PRIMARY_KEYS_FIELD_DESC = new org.apache.thrift.protocol.TField("primaryKeys", org.apache.thrift.protocol.TType.LIST, (short)3); + private static final org.apache.thrift.protocol.TField FOREIGN_KEYS_FIELD_DESC = new org.apache.thrift.protocol.TField("foreignKeys", org.apache.thrift.protocol.TType.LIST, (short)4); + private static final org.apache.thrift.protocol.TField UNIQUE_CONSTRAINTS_FIELD_DESC = new org.apache.thrift.protocol.TField("uniqueConstraints", org.apache.thrift.protocol.TType.LIST, (short)5); + private static final org.apache.thrift.protocol.TField NOT_NULL_CONSTRAINTS_FIELD_DESC = new org.apache.thrift.protocol.TField("notNullConstraints", org.apache.thrift.protocol.TType.LIST, (short)6); + private static final org.apache.thrift.protocol.TField DEFAULT_CONSTRAINTS_FIELD_DESC = new org.apache.thrift.protocol.TField("defaultConstraints", org.apache.thrift.protocol.TType.LIST, (short)7); + private static final org.apache.thrift.protocol.TField CHECK_CONSTRAINTS_FIELD_DESC = new 
org.apache.thrift.protocol.TField("checkConstraints", org.apache.thrift.protocol.TType.LIST, (short)8); + private static final org.apache.thrift.protocol.TField PROCESSOR_CAPABILITIES_FIELD_DESC = new org.apache.thrift.protocol.TField("processorCapabilities", org.apache.thrift.protocol.TType.LIST, (short)9); + private static final org.apache.thrift.protocol.TField PROCESSOR_IDENTIFIER_FIELD_DESC = new org.apache.thrift.protocol.TField("processorIdentifier", org.apache.thrift.protocol.TType.STRING, (short)10); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new CreateTableRequestStandardSchemeFactory()); + schemes.put(TupleScheme.class, new CreateTableRequestTupleSchemeFactory()); + } + + private Table table; // required + private EnvironmentContext envContext; // optional + private List primaryKeys; // optional + private List foreignKeys; // optional + private List uniqueConstraints; // optional + private List notNullConstraints; // optional + private List defaultConstraints; // optional + private List checkConstraints; // optional + private List processorCapabilities; // optional + private String processorIdentifier; // optional + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + TABLE((short)1, "table"), + ENV_CONTEXT((short)2, "envContext"), + PRIMARY_KEYS((short)3, "primaryKeys"), + FOREIGN_KEYS((short)4, "foreignKeys"), + UNIQUE_CONSTRAINTS((short)5, "uniqueConstraints"), + NOT_NULL_CONSTRAINTS((short)6, "notNullConstraints"), + DEFAULT_CONSTRAINTS((short)7, "defaultConstraints"), + CHECK_CONSTRAINTS((short)8, "checkConstraints"), + PROCESSOR_CAPABILITIES((short)9, "processorCapabilities"), + PROCESSOR_IDENTIFIER((short)10, "processorIdentifier"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // TABLE + return TABLE; + case 2: // ENV_CONTEXT + return ENV_CONTEXT; + case 3: // PRIMARY_KEYS + return PRIMARY_KEYS; + case 4: // FOREIGN_KEYS + return FOREIGN_KEYS; + case 5: // UNIQUE_CONSTRAINTS + return UNIQUE_CONSTRAINTS; + case 6: // NOT_NULL_CONSTRAINTS + return NOT_NULL_CONSTRAINTS; + case 7: // DEFAULT_CONSTRAINTS + return DEFAULT_CONSTRAINTS; + case 8: // CHECK_CONSTRAINTS + return CHECK_CONSTRAINTS; + case 9: // PROCESSOR_CAPABILITIES + return PROCESSOR_CAPABILITIES; + case 10: // PROCESSOR_IDENTIFIER + return PROCESSOR_IDENTIFIER; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final _Fields optionals[] = {_Fields.ENV_CONTEXT,_Fields.PRIMARY_KEYS,_Fields.FOREIGN_KEYS,_Fields.UNIQUE_CONSTRAINTS,_Fields.NOT_NULL_CONSTRAINTS,_Fields.DEFAULT_CONSTRAINTS,_Fields.CHECK_CONSTRAINTS,_Fields.PROCESSOR_CAPABILITIES,_Fields.PROCESSOR_IDENTIFIER}; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.TABLE, new org.apache.thrift.meta_data.FieldMetaData("table", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Table.class))); + tmpMap.put(_Fields.ENV_CONTEXT, new org.apache.thrift.meta_data.FieldMetaData("envContext", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, EnvironmentContext.class))); + tmpMap.put(_Fields.PRIMARY_KEYS, new org.apache.thrift.meta_data.FieldMetaData("primaryKeys", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SQLPrimaryKey.class)))); + tmpMap.put(_Fields.FOREIGN_KEYS, new org.apache.thrift.meta_data.FieldMetaData("foreignKeys", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SQLForeignKey.class)))); + tmpMap.put(_Fields.UNIQUE_CONSTRAINTS, new org.apache.thrift.meta_data.FieldMetaData("uniqueConstraints", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SQLUniqueConstraint.class)))); + tmpMap.put(_Fields.NOT_NULL_CONSTRAINTS, new org.apache.thrift.meta_data.FieldMetaData("notNullConstraints", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SQLNotNullConstraint.class)))); + tmpMap.put(_Fields.DEFAULT_CONSTRAINTS, new org.apache.thrift.meta_data.FieldMetaData("defaultConstraints", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SQLDefaultConstraint.class)))); + tmpMap.put(_Fields.CHECK_CONSTRAINTS, new org.apache.thrift.meta_data.FieldMetaData("checkConstraints", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new 
org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SQLCheckConstraint.class)))); + tmpMap.put(_Fields.PROCESSOR_CAPABILITIES, new org.apache.thrift.meta_data.FieldMetaData("processorCapabilities", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); + tmpMap.put(_Fields.PROCESSOR_IDENTIFIER, new org.apache.thrift.meta_data.FieldMetaData("processorIdentifier", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CreateTableRequest.class, metaDataMap); + } + + public CreateTableRequest() { + } + + public CreateTableRequest( + Table table) + { + this(); + this.table = table; + } + + /** + * Performs a deep copy on other. + */ + public CreateTableRequest(CreateTableRequest other) { + if (other.isSetTable()) { + this.table = new Table(other.table); + } + if (other.isSetEnvContext()) { + this.envContext = new EnvironmentContext(other.envContext); + } + if (other.isSetPrimaryKeys()) { + List __this__primaryKeys = new ArrayList(other.primaryKeys.size()); + for (SQLPrimaryKey other_element : other.primaryKeys) { + __this__primaryKeys.add(new SQLPrimaryKey(other_element)); + } + this.primaryKeys = __this__primaryKeys; + } + if (other.isSetForeignKeys()) { + List __this__foreignKeys = new ArrayList(other.foreignKeys.size()); + for (SQLForeignKey other_element : other.foreignKeys) { + __this__foreignKeys.add(new SQLForeignKey(other_element)); + } + this.foreignKeys = __this__foreignKeys; + } + if (other.isSetUniqueConstraints()) { + List __this__uniqueConstraints = new ArrayList(other.uniqueConstraints.size()); + for (SQLUniqueConstraint other_element : other.uniqueConstraints) { + __this__uniqueConstraints.add(new SQLUniqueConstraint(other_element)); + } + this.uniqueConstraints = __this__uniqueConstraints; + } + if (other.isSetNotNullConstraints()) { + List __this__notNullConstraints = new ArrayList(other.notNullConstraints.size()); + for (SQLNotNullConstraint other_element : other.notNullConstraints) { + __this__notNullConstraints.add(new SQLNotNullConstraint(other_element)); + } + this.notNullConstraints = __this__notNullConstraints; + } + if (other.isSetDefaultConstraints()) { + List __this__defaultConstraints = new ArrayList(other.defaultConstraints.size()); + for (SQLDefaultConstraint other_element : other.defaultConstraints) { + __this__defaultConstraints.add(new SQLDefaultConstraint(other_element)); + } + this.defaultConstraints = __this__defaultConstraints; + } + if (other.isSetCheckConstraints()) { + List __this__checkConstraints = new ArrayList(other.checkConstraints.size()); + for (SQLCheckConstraint other_element : other.checkConstraints) { + __this__checkConstraints.add(new SQLCheckConstraint(other_element)); + } + this.checkConstraints = __this__checkConstraints; + } + if (other.isSetProcessorCapabilities()) { + List __this__processorCapabilities = new ArrayList(other.processorCapabilities); + this.processorCapabilities = __this__processorCapabilities; + } + if (other.isSetProcessorIdentifier()) { + this.processorIdentifier = other.processorIdentifier; + } + } + + public CreateTableRequest deepCopy() { + return new CreateTableRequest(this); + } + + @Override + public 
void clear() { + this.table = null; + this.envContext = null; + this.primaryKeys = null; + this.foreignKeys = null; + this.uniqueConstraints = null; + this.notNullConstraints = null; + this.defaultConstraints = null; + this.checkConstraints = null; + this.processorCapabilities = null; + this.processorIdentifier = null; + } + + public Table getTable() { + return this.table; + } + + public void setTable(Table table) { + this.table = table; + } + + public void unsetTable() { + this.table = null; + } + + /** Returns true if field table is set (has been assigned a value) and false otherwise */ + public boolean isSetTable() { + return this.table != null; + } + + public void setTableIsSet(boolean value) { + if (!value) { + this.table = null; + } + } + + public EnvironmentContext getEnvContext() { + return this.envContext; + } + + public void setEnvContext(EnvironmentContext envContext) { + this.envContext = envContext; + } + + public void unsetEnvContext() { + this.envContext = null; + } + + /** Returns true if field envContext is set (has been assigned a value) and false otherwise */ + public boolean isSetEnvContext() { + return this.envContext != null; + } + + public void setEnvContextIsSet(boolean value) { + if (!value) { + this.envContext = null; + } + } + + public int getPrimaryKeysSize() { + return (this.primaryKeys == null) ? 0 : this.primaryKeys.size(); + } + + public java.util.Iterator getPrimaryKeysIterator() { + return (this.primaryKeys == null) ? null : this.primaryKeys.iterator(); + } + + public void addToPrimaryKeys(SQLPrimaryKey elem) { + if (this.primaryKeys == null) { + this.primaryKeys = new ArrayList(); + } + this.primaryKeys.add(elem); + } + + public List getPrimaryKeys() { + return this.primaryKeys; + } + + public void setPrimaryKeys(List primaryKeys) { + this.primaryKeys = primaryKeys; + } + + public void unsetPrimaryKeys() { + this.primaryKeys = null; + } + + /** Returns true if field primaryKeys is set (has been assigned a value) and false otherwise */ + public boolean isSetPrimaryKeys() { + return this.primaryKeys != null; + } + + public void setPrimaryKeysIsSet(boolean value) { + if (!value) { + this.primaryKeys = null; + } + } + + public int getForeignKeysSize() { + return (this.foreignKeys == null) ? 0 : this.foreignKeys.size(); + } + + public java.util.Iterator getForeignKeysIterator() { + return (this.foreignKeys == null) ? null : this.foreignKeys.iterator(); + } + + public void addToForeignKeys(SQLForeignKey elem) { + if (this.foreignKeys == null) { + this.foreignKeys = new ArrayList(); + } + this.foreignKeys.add(elem); + } + + public List getForeignKeys() { + return this.foreignKeys; + } + + public void setForeignKeys(List foreignKeys) { + this.foreignKeys = foreignKeys; + } + + public void unsetForeignKeys() { + this.foreignKeys = null; + } + + /** Returns true if field foreignKeys is set (has been assigned a value) and false otherwise */ + public boolean isSetForeignKeys() { + return this.foreignKeys != null; + } + + public void setForeignKeysIsSet(boolean value) { + if (!value) { + this.foreignKeys = null; + } + } + + public int getUniqueConstraintsSize() { + return (this.uniqueConstraints == null) ? 0 : this.uniqueConstraints.size(); + } + + public java.util.Iterator getUniqueConstraintsIterator() { + return (this.uniqueConstraints == null) ? 
null : this.uniqueConstraints.iterator(); + } + + public void addToUniqueConstraints(SQLUniqueConstraint elem) { + if (this.uniqueConstraints == null) { + this.uniqueConstraints = new ArrayList(); + } + this.uniqueConstraints.add(elem); + } + + public List getUniqueConstraints() { + return this.uniqueConstraints; + } + + public void setUniqueConstraints(List uniqueConstraints) { + this.uniqueConstraints = uniqueConstraints; + } + + public void unsetUniqueConstraints() { + this.uniqueConstraints = null; + } + + /** Returns true if field uniqueConstraints is set (has been assigned a value) and false otherwise */ + public boolean isSetUniqueConstraints() { + return this.uniqueConstraints != null; + } + + public void setUniqueConstraintsIsSet(boolean value) { + if (!value) { + this.uniqueConstraints = null; + } + } + + public int getNotNullConstraintsSize() { + return (this.notNullConstraints == null) ? 0 : this.notNullConstraints.size(); + } + + public java.util.Iterator getNotNullConstraintsIterator() { + return (this.notNullConstraints == null) ? null : this.notNullConstraints.iterator(); + } + + public void addToNotNullConstraints(SQLNotNullConstraint elem) { + if (this.notNullConstraints == null) { + this.notNullConstraints = new ArrayList(); + } + this.notNullConstraints.add(elem); + } + + public List getNotNullConstraints() { + return this.notNullConstraints; + } + + public void setNotNullConstraints(List notNullConstraints) { + this.notNullConstraints = notNullConstraints; + } + + public void unsetNotNullConstraints() { + this.notNullConstraints = null; + } + + /** Returns true if field notNullConstraints is set (has been assigned a value) and false otherwise */ + public boolean isSetNotNullConstraints() { + return this.notNullConstraints != null; + } + + public void setNotNullConstraintsIsSet(boolean value) { + if (!value) { + this.notNullConstraints = null; + } + } + + public int getDefaultConstraintsSize() { + return (this.defaultConstraints == null) ? 0 : this.defaultConstraints.size(); + } + + public java.util.Iterator getDefaultConstraintsIterator() { + return (this.defaultConstraints == null) ? null : this.defaultConstraints.iterator(); + } + + public void addToDefaultConstraints(SQLDefaultConstraint elem) { + if (this.defaultConstraints == null) { + this.defaultConstraints = new ArrayList(); + } + this.defaultConstraints.add(elem); + } + + public List getDefaultConstraints() { + return this.defaultConstraints; + } + + public void setDefaultConstraints(List defaultConstraints) { + this.defaultConstraints = defaultConstraints; + } + + public void unsetDefaultConstraints() { + this.defaultConstraints = null; + } + + /** Returns true if field defaultConstraints is set (has been assigned a value) and false otherwise */ + public boolean isSetDefaultConstraints() { + return this.defaultConstraints != null; + } + + public void setDefaultConstraintsIsSet(boolean value) { + if (!value) { + this.defaultConstraints = null; + } + } + + public int getCheckConstraintsSize() { + return (this.checkConstraints == null) ? 0 : this.checkConstraints.size(); + } + + public java.util.Iterator getCheckConstraintsIterator() { + return (this.checkConstraints == null) ? 
null : this.checkConstraints.iterator(); + } + + public void addToCheckConstraints(SQLCheckConstraint elem) { + if (this.checkConstraints == null) { + this.checkConstraints = new ArrayList(); + } + this.checkConstraints.add(elem); + } + + public List getCheckConstraints() { + return this.checkConstraints; + } + + public void setCheckConstraints(List checkConstraints) { + this.checkConstraints = checkConstraints; + } + + public void unsetCheckConstraints() { + this.checkConstraints = null; + } + + /** Returns true if field checkConstraints is set (has been assigned a value) and false otherwise */ + public boolean isSetCheckConstraints() { + return this.checkConstraints != null; + } + + public void setCheckConstraintsIsSet(boolean value) { + if (!value) { + this.checkConstraints = null; + } + } + + public int getProcessorCapabilitiesSize() { + return (this.processorCapabilities == null) ? 0 : this.processorCapabilities.size(); + } + + public java.util.Iterator getProcessorCapabilitiesIterator() { + return (this.processorCapabilities == null) ? null : this.processorCapabilities.iterator(); + } + + public void addToProcessorCapabilities(String elem) { + if (this.processorCapabilities == null) { + this.processorCapabilities = new ArrayList(); + } + this.processorCapabilities.add(elem); + } + + public List getProcessorCapabilities() { + return this.processorCapabilities; + } + + public void setProcessorCapabilities(List processorCapabilities) { + this.processorCapabilities = processorCapabilities; + } + + public void unsetProcessorCapabilities() { + this.processorCapabilities = null; + } + + /** Returns true if field processorCapabilities is set (has been assigned a value) and false otherwise */ + public boolean isSetProcessorCapabilities() { + return this.processorCapabilities != null; + } + + public void setProcessorCapabilitiesIsSet(boolean value) { + if (!value) { + this.processorCapabilities = null; + } + } + + public String getProcessorIdentifier() { + return this.processorIdentifier; + } + + public void setProcessorIdentifier(String processorIdentifier) { + this.processorIdentifier = processorIdentifier; + } + + public void unsetProcessorIdentifier() { + this.processorIdentifier = null; + } + + /** Returns true if field processorIdentifier is set (has been assigned a value) and false otherwise */ + public boolean isSetProcessorIdentifier() { + return this.processorIdentifier != null; + } + + public void setProcessorIdentifierIsSet(boolean value) { + if (!value) { + this.processorIdentifier = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case TABLE: + if (value == null) { + unsetTable(); + } else { + setTable((Table)value); + } + break; + + case ENV_CONTEXT: + if (value == null) { + unsetEnvContext(); + } else { + setEnvContext((EnvironmentContext)value); + } + break; + + case PRIMARY_KEYS: + if (value == null) { + unsetPrimaryKeys(); + } else { + setPrimaryKeys((List)value); + } + break; + + case FOREIGN_KEYS: + if (value == null) { + unsetForeignKeys(); + } else { + setForeignKeys((List)value); + } + break; + + case UNIQUE_CONSTRAINTS: + if (value == null) { + unsetUniqueConstraints(); + } else { + setUniqueConstraints((List)value); + } + break; + + case NOT_NULL_CONSTRAINTS: + if (value == null) { + unsetNotNullConstraints(); + } else { + setNotNullConstraints((List)value); + } + break; + + case DEFAULT_CONSTRAINTS: + if (value == null) { + unsetDefaultConstraints(); + } else { + setDefaultConstraints((List)value); + } + break; + + case 
CHECK_CONSTRAINTS: + if (value == null) { + unsetCheckConstraints(); + } else { + setCheckConstraints((List)value); + } + break; + + case PROCESSOR_CAPABILITIES: + if (value == null) { + unsetProcessorCapabilities(); + } else { + setProcessorCapabilities((List)value); + } + break; + + case PROCESSOR_IDENTIFIER: + if (value == null) { + unsetProcessorIdentifier(); + } else { + setProcessorIdentifier((String)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case TABLE: + return getTable(); + + case ENV_CONTEXT: + return getEnvContext(); + + case PRIMARY_KEYS: + return getPrimaryKeys(); + + case FOREIGN_KEYS: + return getForeignKeys(); + + case UNIQUE_CONSTRAINTS: + return getUniqueConstraints(); + + case NOT_NULL_CONSTRAINTS: + return getNotNullConstraints(); + + case DEFAULT_CONSTRAINTS: + return getDefaultConstraints(); + + case CHECK_CONSTRAINTS: + return getCheckConstraints(); + + case PROCESSOR_CAPABILITIES: + return getProcessorCapabilities(); + + case PROCESSOR_IDENTIFIER: + return getProcessorIdentifier(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case TABLE: + return isSetTable(); + case ENV_CONTEXT: + return isSetEnvContext(); + case PRIMARY_KEYS: + return isSetPrimaryKeys(); + case FOREIGN_KEYS: + return isSetForeignKeys(); + case UNIQUE_CONSTRAINTS: + return isSetUniqueConstraints(); + case NOT_NULL_CONSTRAINTS: + return isSetNotNullConstraints(); + case DEFAULT_CONSTRAINTS: + return isSetDefaultConstraints(); + case CHECK_CONSTRAINTS: + return isSetCheckConstraints(); + case PROCESSOR_CAPABILITIES: + return isSetProcessorCapabilities(); + case PROCESSOR_IDENTIFIER: + return isSetProcessorIdentifier(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof CreateTableRequest) + return this.equals((CreateTableRequest)that); + return false; + } + + public boolean equals(CreateTableRequest that) { + if (that == null) + return false; + + boolean this_present_table = true && this.isSetTable(); + boolean that_present_table = true && that.isSetTable(); + if (this_present_table || that_present_table) { + if (!(this_present_table && that_present_table)) + return false; + if (!this.table.equals(that.table)) + return false; + } + + boolean this_present_envContext = true && this.isSetEnvContext(); + boolean that_present_envContext = true && that.isSetEnvContext(); + if (this_present_envContext || that_present_envContext) { + if (!(this_present_envContext && that_present_envContext)) + return false; + if (!this.envContext.equals(that.envContext)) + return false; + } + + boolean this_present_primaryKeys = true && this.isSetPrimaryKeys(); + boolean that_present_primaryKeys = true && that.isSetPrimaryKeys(); + if (this_present_primaryKeys || that_present_primaryKeys) { + if (!(this_present_primaryKeys && that_present_primaryKeys)) + return false; + if (!this.primaryKeys.equals(that.primaryKeys)) + return false; + } + + boolean this_present_foreignKeys = true && this.isSetForeignKeys(); + boolean that_present_foreignKeys = true && that.isSetForeignKeys(); + if (this_present_foreignKeys || that_present_foreignKeys) { + if (!(this_present_foreignKeys && that_present_foreignKeys)) + return false; + if 
(!this.foreignKeys.equals(that.foreignKeys)) + return false; + } + + boolean this_present_uniqueConstraints = true && this.isSetUniqueConstraints(); + boolean that_present_uniqueConstraints = true && that.isSetUniqueConstraints(); + if (this_present_uniqueConstraints || that_present_uniqueConstraints) { + if (!(this_present_uniqueConstraints && that_present_uniqueConstraints)) + return false; + if (!this.uniqueConstraints.equals(that.uniqueConstraints)) + return false; + } + + boolean this_present_notNullConstraints = true && this.isSetNotNullConstraints(); + boolean that_present_notNullConstraints = true && that.isSetNotNullConstraints(); + if (this_present_notNullConstraints || that_present_notNullConstraints) { + if (!(this_present_notNullConstraints && that_present_notNullConstraints)) + return false; + if (!this.notNullConstraints.equals(that.notNullConstraints)) + return false; + } + + boolean this_present_defaultConstraints = true && this.isSetDefaultConstraints(); + boolean that_present_defaultConstraints = true && that.isSetDefaultConstraints(); + if (this_present_defaultConstraints || that_present_defaultConstraints) { + if (!(this_present_defaultConstraints && that_present_defaultConstraints)) + return false; + if (!this.defaultConstraints.equals(that.defaultConstraints)) + return false; + } + + boolean this_present_checkConstraints = true && this.isSetCheckConstraints(); + boolean that_present_checkConstraints = true && that.isSetCheckConstraints(); + if (this_present_checkConstraints || that_present_checkConstraints) { + if (!(this_present_checkConstraints && that_present_checkConstraints)) + return false; + if (!this.checkConstraints.equals(that.checkConstraints)) + return false; + } + + boolean this_present_processorCapabilities = true && this.isSetProcessorCapabilities(); + boolean that_present_processorCapabilities = true && that.isSetProcessorCapabilities(); + if (this_present_processorCapabilities || that_present_processorCapabilities) { + if (!(this_present_processorCapabilities && that_present_processorCapabilities)) + return false; + if (!this.processorCapabilities.equals(that.processorCapabilities)) + return false; + } + + boolean this_present_processorIdentifier = true && this.isSetProcessorIdentifier(); + boolean that_present_processorIdentifier = true && that.isSetProcessorIdentifier(); + if (this_present_processorIdentifier || that_present_processorIdentifier) { + if (!(this_present_processorIdentifier && that_present_processorIdentifier)) + return false; + if (!this.processorIdentifier.equals(that.processorIdentifier)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_table = true && (isSetTable()); + list.add(present_table); + if (present_table) + list.add(table); + + boolean present_envContext = true && (isSetEnvContext()); + list.add(present_envContext); + if (present_envContext) + list.add(envContext); + + boolean present_primaryKeys = true && (isSetPrimaryKeys()); + list.add(present_primaryKeys); + if (present_primaryKeys) + list.add(primaryKeys); + + boolean present_foreignKeys = true && (isSetForeignKeys()); + list.add(present_foreignKeys); + if (present_foreignKeys) + list.add(foreignKeys); + + boolean present_uniqueConstraints = true && (isSetUniqueConstraints()); + list.add(present_uniqueConstraints); + if (present_uniqueConstraints) + list.add(uniqueConstraints); + + boolean present_notNullConstraints = true && (isSetNotNullConstraints()); + 
list.add(present_notNullConstraints); + if (present_notNullConstraints) + list.add(notNullConstraints); + + boolean present_defaultConstraints = true && (isSetDefaultConstraints()); + list.add(present_defaultConstraints); + if (present_defaultConstraints) + list.add(defaultConstraints); + + boolean present_checkConstraints = true && (isSetCheckConstraints()); + list.add(present_checkConstraints); + if (present_checkConstraints) + list.add(checkConstraints); + + boolean present_processorCapabilities = true && (isSetProcessorCapabilities()); + list.add(present_processorCapabilities); + if (present_processorCapabilities) + list.add(processorCapabilities); + + boolean present_processorIdentifier = true && (isSetProcessorIdentifier()); + list.add(present_processorIdentifier); + if (present_processorIdentifier) + list.add(processorIdentifier); + + return list.hashCode(); + } + + @Override + public int compareTo(CreateTableRequest other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetTable()).compareTo(other.isSetTable()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTable()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.table, other.table); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetEnvContext()).compareTo(other.isSetEnvContext()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetEnvContext()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.envContext, other.envContext); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetPrimaryKeys()).compareTo(other.isSetPrimaryKeys()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetPrimaryKeys()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.primaryKeys, other.primaryKeys); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetForeignKeys()).compareTo(other.isSetForeignKeys()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetForeignKeys()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.foreignKeys, other.foreignKeys); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetUniqueConstraints()).compareTo(other.isSetUniqueConstraints()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetUniqueConstraints()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.uniqueConstraints, other.uniqueConstraints); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetNotNullConstraints()).compareTo(other.isSetNotNullConstraints()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetNotNullConstraints()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.notNullConstraints, other.notNullConstraints); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetDefaultConstraints()).compareTo(other.isSetDefaultConstraints()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetDefaultConstraints()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.defaultConstraints, other.defaultConstraints); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = 
Boolean.valueOf(isSetCheckConstraints()).compareTo(other.isSetCheckConstraints()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCheckConstraints()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.checkConstraints, other.checkConstraints); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetProcessorCapabilities()).compareTo(other.isSetProcessorCapabilities()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetProcessorCapabilities()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.processorCapabilities, other.processorCapabilities); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetProcessorIdentifier()).compareTo(other.isSetProcessorIdentifier()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetProcessorIdentifier()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.processorIdentifier, other.processorIdentifier); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("CreateTableRequest("); + boolean first = true; + + sb.append("table:"); + if (this.table == null) { + sb.append("null"); + } else { + sb.append(this.table); + } + first = false; + if (isSetEnvContext()) { + if (!first) sb.append(", "); + sb.append("envContext:"); + if (this.envContext == null) { + sb.append("null"); + } else { + sb.append(this.envContext); + } + first = false; + } + if (isSetPrimaryKeys()) { + if (!first) sb.append(", "); + sb.append("primaryKeys:"); + if (this.primaryKeys == null) { + sb.append("null"); + } else { + sb.append(this.primaryKeys); + } + first = false; + } + if (isSetForeignKeys()) { + if (!first) sb.append(", "); + sb.append("foreignKeys:"); + if (this.foreignKeys == null) { + sb.append("null"); + } else { + sb.append(this.foreignKeys); + } + first = false; + } + if (isSetUniqueConstraints()) { + if (!first) sb.append(", "); + sb.append("uniqueConstraints:"); + if (this.uniqueConstraints == null) { + sb.append("null"); + } else { + sb.append(this.uniqueConstraints); + } + first = false; + } + if (isSetNotNullConstraints()) { + if (!first) sb.append(", "); + sb.append("notNullConstraints:"); + if (this.notNullConstraints == null) { + sb.append("null"); + } else { + sb.append(this.notNullConstraints); + } + first = false; + } + if (isSetDefaultConstraints()) { + if (!first) sb.append(", "); + sb.append("defaultConstraints:"); + if (this.defaultConstraints == null) { + sb.append("null"); + } else { + sb.append(this.defaultConstraints); + } + first = false; + } + if (isSetCheckConstraints()) { + if (!first) sb.append(", "); + sb.append("checkConstraints:"); + if (this.checkConstraints == null) { + sb.append("null"); + } else { + sb.append(this.checkConstraints); + } + first = false; + } + if (isSetProcessorCapabilities()) { + if (!first) sb.append(", "); + sb.append("processorCapabilities:"); + if (this.processorCapabilities == null) { + 
sb.append("null"); + } else { + sb.append(this.processorCapabilities); + } + first = false; + } + if (isSetProcessorIdentifier()) { + if (!first) sb.append(", "); + sb.append("processorIdentifier:"); + if (this.processorIdentifier == null) { + sb.append("null"); + } else { + sb.append(this.processorIdentifier); + } + first = false; + } + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetTable()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'table' is unset! Struct:" + toString()); + } + + // check for sub-struct validity + if (table != null) { + table.validate(); + } + if (envContext != null) { + envContext.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class CreateTableRequestStandardSchemeFactory implements SchemeFactory { + public CreateTableRequestStandardScheme getScheme() { + return new CreateTableRequestStandardScheme(); + } + } + + private static class CreateTableRequestStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, CreateTableRequest struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // TABLE + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.table = new Table(); + struct.table.read(iprot); + struct.setTableIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // ENV_CONTEXT + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.envContext = new EnvironmentContext(); + struct.envContext.read(iprot); + struct.setEnvContextIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // PRIMARY_KEYS + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list1040 = iprot.readListBegin(); + struct.primaryKeys = new ArrayList(_list1040.size); + SQLPrimaryKey _elem1041; + for (int _i1042 = 0; _i1042 < _list1040.size; ++_i1042) + { + _elem1041 = new SQLPrimaryKey(); + _elem1041.read(iprot); + struct.primaryKeys.add(_elem1041); + } + iprot.readListEnd(); + } + struct.setPrimaryKeysIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 4: // FOREIGN_KEYS + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list1043 = iprot.readListBegin(); + struct.foreignKeys = new ArrayList(_list1043.size); + SQLForeignKey _elem1044; + for (int _i1045 = 0; _i1045 < _list1043.size; ++_i1045) + { + _elem1044 = new SQLForeignKey(); 
+ _elem1044.read(iprot); + struct.foreignKeys.add(_elem1044); + } + iprot.readListEnd(); + } + struct.setForeignKeysIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 5: // UNIQUE_CONSTRAINTS + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list1046 = iprot.readListBegin(); + struct.uniqueConstraints = new ArrayList(_list1046.size); + SQLUniqueConstraint _elem1047; + for (int _i1048 = 0; _i1048 < _list1046.size; ++_i1048) + { + _elem1047 = new SQLUniqueConstraint(); + _elem1047.read(iprot); + struct.uniqueConstraints.add(_elem1047); + } + iprot.readListEnd(); + } + struct.setUniqueConstraintsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 6: // NOT_NULL_CONSTRAINTS + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list1049 = iprot.readListBegin(); + struct.notNullConstraints = new ArrayList(_list1049.size); + SQLNotNullConstraint _elem1050; + for (int _i1051 = 0; _i1051 < _list1049.size; ++_i1051) + { + _elem1050 = new SQLNotNullConstraint(); + _elem1050.read(iprot); + struct.notNullConstraints.add(_elem1050); + } + iprot.readListEnd(); + } + struct.setNotNullConstraintsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 7: // DEFAULT_CONSTRAINTS + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list1052 = iprot.readListBegin(); + struct.defaultConstraints = new ArrayList(_list1052.size); + SQLDefaultConstraint _elem1053; + for (int _i1054 = 0; _i1054 < _list1052.size; ++_i1054) + { + _elem1053 = new SQLDefaultConstraint(); + _elem1053.read(iprot); + struct.defaultConstraints.add(_elem1053); + } + iprot.readListEnd(); + } + struct.setDefaultConstraintsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 8: // CHECK_CONSTRAINTS + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list1055 = iprot.readListBegin(); + struct.checkConstraints = new ArrayList(_list1055.size); + SQLCheckConstraint _elem1056; + for (int _i1057 = 0; _i1057 < _list1055.size; ++_i1057) + { + _elem1056 = new SQLCheckConstraint(); + _elem1056.read(iprot); + struct.checkConstraints.add(_elem1056); + } + iprot.readListEnd(); + } + struct.setCheckConstraintsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 9: // PROCESSOR_CAPABILITIES + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list1058 = iprot.readListBegin(); + struct.processorCapabilities = new ArrayList(_list1058.size); + String _elem1059; + for (int _i1060 = 0; _i1060 < _list1058.size; ++_i1060) + { + _elem1059 = iprot.readString(); + struct.processorCapabilities.add(_elem1059); + } + iprot.readListEnd(); + } + struct.setProcessorCapabilitiesIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 10: // PROCESSOR_IDENTIFIER + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.processorIdentifier = iprot.readString(); + struct.setProcessorIdentifierIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + 
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, CreateTableRequest struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.table != null) { + oprot.writeFieldBegin(TABLE_FIELD_DESC); + struct.table.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.envContext != null) { + if (struct.isSetEnvContext()) { + oprot.writeFieldBegin(ENV_CONTEXT_FIELD_DESC); + struct.envContext.write(oprot); + oprot.writeFieldEnd(); + } + } + if (struct.primaryKeys != null) { + if (struct.isSetPrimaryKeys()) { + oprot.writeFieldBegin(PRIMARY_KEYS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.primaryKeys.size())); + for (SQLPrimaryKey _iter1061 : struct.primaryKeys) + { + _iter1061.write(oprot); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + } + if (struct.foreignKeys != null) { + if (struct.isSetForeignKeys()) { + oprot.writeFieldBegin(FOREIGN_KEYS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.foreignKeys.size())); + for (SQLForeignKey _iter1062 : struct.foreignKeys) + { + _iter1062.write(oprot); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + } + if (struct.uniqueConstraints != null) { + if (struct.isSetUniqueConstraints()) { + oprot.writeFieldBegin(UNIQUE_CONSTRAINTS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.uniqueConstraints.size())); + for (SQLUniqueConstraint _iter1063 : struct.uniqueConstraints) + { + _iter1063.write(oprot); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + } + if (struct.notNullConstraints != null) { + if (struct.isSetNotNullConstraints()) { + oprot.writeFieldBegin(NOT_NULL_CONSTRAINTS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.notNullConstraints.size())); + for (SQLNotNullConstraint _iter1064 : struct.notNullConstraints) + { + _iter1064.write(oprot); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + } + if (struct.defaultConstraints != null) { + if (struct.isSetDefaultConstraints()) { + oprot.writeFieldBegin(DEFAULT_CONSTRAINTS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.defaultConstraints.size())); + for (SQLDefaultConstraint _iter1065 : struct.defaultConstraints) + { + _iter1065.write(oprot); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + } + if (struct.checkConstraints != null) { + if (struct.isSetCheckConstraints()) { + oprot.writeFieldBegin(CHECK_CONSTRAINTS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.checkConstraints.size())); + for (SQLCheckConstraint _iter1066 : struct.checkConstraints) + { + _iter1066.write(oprot); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + } + if (struct.processorCapabilities != null) { + if (struct.isSetProcessorCapabilities()) { + oprot.writeFieldBegin(PROCESSOR_CAPABILITIES_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.processorCapabilities.size())); + for (String _iter1067 : 
struct.processorCapabilities) + { + oprot.writeString(_iter1067); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + } + if (struct.processorIdentifier != null) { + if (struct.isSetProcessorIdentifier()) { + oprot.writeFieldBegin(PROCESSOR_IDENTIFIER_FIELD_DESC); + oprot.writeString(struct.processorIdentifier); + oprot.writeFieldEnd(); + } + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class CreateTableRequestTupleSchemeFactory implements SchemeFactory { + public CreateTableRequestTupleScheme getScheme() { + return new CreateTableRequestTupleScheme(); + } + } + + private static class CreateTableRequestTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, CreateTableRequest struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + struct.table.write(oprot); + BitSet optionals = new BitSet(); + if (struct.isSetEnvContext()) { + optionals.set(0); + } + if (struct.isSetPrimaryKeys()) { + optionals.set(1); + } + if (struct.isSetForeignKeys()) { + optionals.set(2); + } + if (struct.isSetUniqueConstraints()) { + optionals.set(3); + } + if (struct.isSetNotNullConstraints()) { + optionals.set(4); + } + if (struct.isSetDefaultConstraints()) { + optionals.set(5); + } + if (struct.isSetCheckConstraints()) { + optionals.set(6); + } + if (struct.isSetProcessorCapabilities()) { + optionals.set(7); + } + if (struct.isSetProcessorIdentifier()) { + optionals.set(8); + } + oprot.writeBitSet(optionals, 9); + if (struct.isSetEnvContext()) { + struct.envContext.write(oprot); + } + if (struct.isSetPrimaryKeys()) { + { + oprot.writeI32(struct.primaryKeys.size()); + for (SQLPrimaryKey _iter1068 : struct.primaryKeys) + { + _iter1068.write(oprot); + } + } + } + if (struct.isSetForeignKeys()) { + { + oprot.writeI32(struct.foreignKeys.size()); + for (SQLForeignKey _iter1069 : struct.foreignKeys) + { + _iter1069.write(oprot); + } + } + } + if (struct.isSetUniqueConstraints()) { + { + oprot.writeI32(struct.uniqueConstraints.size()); + for (SQLUniqueConstraint _iter1070 : struct.uniqueConstraints) + { + _iter1070.write(oprot); + } + } + } + if (struct.isSetNotNullConstraints()) { + { + oprot.writeI32(struct.notNullConstraints.size()); + for (SQLNotNullConstraint _iter1071 : struct.notNullConstraints) + { + _iter1071.write(oprot); + } + } + } + if (struct.isSetDefaultConstraints()) { + { + oprot.writeI32(struct.defaultConstraints.size()); + for (SQLDefaultConstraint _iter1072 : struct.defaultConstraints) + { + _iter1072.write(oprot); + } + } + } + if (struct.isSetCheckConstraints()) { + { + oprot.writeI32(struct.checkConstraints.size()); + for (SQLCheckConstraint _iter1073 : struct.checkConstraints) + { + _iter1073.write(oprot); + } + } + } + if (struct.isSetProcessorCapabilities()) { + { + oprot.writeI32(struct.processorCapabilities.size()); + for (String _iter1074 : struct.processorCapabilities) + { + oprot.writeString(_iter1074); + } + } + } + if (struct.isSetProcessorIdentifier()) { + oprot.writeString(struct.processorIdentifier); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, CreateTableRequest struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + struct.table = new Table(); + struct.table.read(iprot); + struct.setTableIsSet(true); + BitSet incoming = iprot.readBitSet(9); + if (incoming.get(0)) { + struct.envContext = new EnvironmentContext(); + struct.envContext.read(iprot); + 
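The tuple-scheme read continuing just below mirrors the write path above it: writeBitSet(optionals, 9) on the writer side and readBitSet(9) on the reader side must agree on bit positions and width, which is why adding an optional field always touches both methods in lockstep. A hedged sketch of that contract, with illustrative names and plain java.util.BitSet standing in for the protocol calls:

import java.util.BitSet;

class TupleBitSetSketch {
  // Writer side: record which optionals are present, then emit only those payloads.
  static BitSet encode(boolean hasEnvContext, boolean hasPrimaryKeys) {
    BitSet optionals = new BitSet();
    if (hasEnvContext) optionals.set(0);
    if (hasPrimaryKeys) optionals.set(1);
    return optionals;                        // serialized ahead of the optional fields
  }

  // Reader side: consume the same bits, in the same order, with the same width.
  static void decode(BitSet incoming) {
    if (incoming.get(0)) { /* read envContext here */ }
    if (incoming.get(1)) { /* read the primaryKeys list here */ }
  }
}

The same lockstep change appears later in this diff where GetPartitionsByNamesRequest grows its bit set from 4 to 5 for the new validWriteIdList field.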
struct.setEnvContextIsSet(true); + } + if (incoming.get(1)) { + { + org.apache.thrift.protocol.TList _list1075 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.primaryKeys = new ArrayList(_list1075.size); + SQLPrimaryKey _elem1076; + for (int _i1077 = 0; _i1077 < _list1075.size; ++_i1077) + { + _elem1076 = new SQLPrimaryKey(); + _elem1076.read(iprot); + struct.primaryKeys.add(_elem1076); + } + } + struct.setPrimaryKeysIsSet(true); + } + if (incoming.get(2)) { + { + org.apache.thrift.protocol.TList _list1078 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.foreignKeys = new ArrayList(_list1078.size); + SQLForeignKey _elem1079; + for (int _i1080 = 0; _i1080 < _list1078.size; ++_i1080) + { + _elem1079 = new SQLForeignKey(); + _elem1079.read(iprot); + struct.foreignKeys.add(_elem1079); + } + } + struct.setForeignKeysIsSet(true); + } + if (incoming.get(3)) { + { + org.apache.thrift.protocol.TList _list1081 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.uniqueConstraints = new ArrayList(_list1081.size); + SQLUniqueConstraint _elem1082; + for (int _i1083 = 0; _i1083 < _list1081.size; ++_i1083) + { + _elem1082 = new SQLUniqueConstraint(); + _elem1082.read(iprot); + struct.uniqueConstraints.add(_elem1082); + } + } + struct.setUniqueConstraintsIsSet(true); + } + if (incoming.get(4)) { + { + org.apache.thrift.protocol.TList _list1084 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.notNullConstraints = new ArrayList(_list1084.size); + SQLNotNullConstraint _elem1085; + for (int _i1086 = 0; _i1086 < _list1084.size; ++_i1086) + { + _elem1085 = new SQLNotNullConstraint(); + _elem1085.read(iprot); + struct.notNullConstraints.add(_elem1085); + } + } + struct.setNotNullConstraintsIsSet(true); + } + if (incoming.get(5)) { + { + org.apache.thrift.protocol.TList _list1087 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.defaultConstraints = new ArrayList(_list1087.size); + SQLDefaultConstraint _elem1088; + for (int _i1089 = 0; _i1089 < _list1087.size; ++_i1089) + { + _elem1088 = new SQLDefaultConstraint(); + _elem1088.read(iprot); + struct.defaultConstraints.add(_elem1088); + } + } + struct.setDefaultConstraintsIsSet(true); + } + if (incoming.get(6)) { + { + org.apache.thrift.protocol.TList _list1090 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.checkConstraints = new ArrayList(_list1090.size); + SQLCheckConstraint _elem1091; + for (int _i1092 = 0; _i1092 < _list1090.size; ++_i1092) + { + _elem1091 = new SQLCheckConstraint(); + _elem1091.read(iprot); + struct.checkConstraints.add(_elem1091); + } + } + struct.setCheckConstraintsIsSet(true); + } + if (incoming.get(7)) { + { + org.apache.thrift.protocol.TList _list1093 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.processorCapabilities = new ArrayList(_list1093.size); + String _elem1094; + for (int _i1095 = 0; _i1095 < _list1093.size; ++_i1095) + { + _elem1094 = iprot.readString(); + struct.processorCapabilities.add(_elem1094); + } + } + struct.setProcessorCapabilitiesIsSet(true); + } + if (incoming.get(8)) { + struct.processorIdentifier = iprot.readString(); + struct.setProcessorIdentifierIsSet(true); + } + } + } + +} + diff --git 
a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ExtendedTableInfo.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ExtendedTableInfo.java index f75a180800..b7e46e8f1d 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ExtendedTableInfo.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ExtendedTableInfo.java @@ -529,13 +529,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ExtendedTableInfo s case 3: // PROCESSOR_CAPABILITIES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list936 = iprot.readListBegin(); - struct.processorCapabilities = new ArrayList(_list936.size); - String _elem937; - for (int _i938 = 0; _i938 < _list936.size; ++_i938) + org.apache.thrift.protocol.TList _list944 = iprot.readListBegin(); + struct.processorCapabilities = new ArrayList(_list944.size); + String _elem945; + for (int _i946 = 0; _i946 < _list944.size; ++_i946) { - _elem937 = iprot.readString(); - struct.processorCapabilities.add(_elem937); + _elem945 = iprot.readString(); + struct.processorCapabilities.add(_elem945); } iprot.readListEnd(); } @@ -572,9 +572,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ExtendedTableInfo oprot.writeFieldBegin(PROCESSOR_CAPABILITIES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.processorCapabilities.size())); - for (String _iter939 : struct.processorCapabilities) + for (String _iter947 : struct.processorCapabilities) { - oprot.writeString(_iter939); + oprot.writeString(_iter947); } oprot.writeListEnd(); } @@ -613,9 +613,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ExtendedTableInfo s if (struct.isSetProcessorCapabilities()) { { oprot.writeI32(struct.processorCapabilities.size()); - for (String _iter940 : struct.processorCapabilities) + for (String _iter948 : struct.processorCapabilities) { - oprot.writeString(_iter940); + oprot.writeString(_iter948); } } } @@ -633,13 +633,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, ExtendedTableInfo st } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list941 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.processorCapabilities = new ArrayList(_list941.size); - String _elem942; - for (int _i943 = 0; _i943 < _list941.size; ++_i943) + org.apache.thrift.protocol.TList _list949 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.processorCapabilities = new ArrayList(_list949.size); + String _elem950; + for (int _i951 = 0; _i951 < _list949.size; ++_i951) { - _elem942 = iprot.readString(); - struct.processorCapabilities.add(_elem942); + _elem950 = iprot.readString(); + struct.processorCapabilities.add(_elem950); } } struct.setProcessorCapabilitiesIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FindSchemasByColsResp.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FindSchemasByColsResp.java index 407398663b..be30dbce92 100644 --- 
a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FindSchemasByColsResp.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FindSchemasByColsResp.java @@ -350,14 +350,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, FindSchemasByColsRe case 1: // SCHEMA_VERSIONS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1016 = iprot.readListBegin(); - struct.schemaVersions = new ArrayList(_list1016.size); - SchemaVersionDescriptor _elem1017; - for (int _i1018 = 0; _i1018 < _list1016.size; ++_i1018) + org.apache.thrift.protocol.TList _list1024 = iprot.readListBegin(); + struct.schemaVersions = new ArrayList(_list1024.size); + SchemaVersionDescriptor _elem1025; + for (int _i1026 = 0; _i1026 < _list1024.size; ++_i1026) { - _elem1017 = new SchemaVersionDescriptor(); - _elem1017.read(iprot); - struct.schemaVersions.add(_elem1017); + _elem1025 = new SchemaVersionDescriptor(); + _elem1025.read(iprot); + struct.schemaVersions.add(_elem1025); } iprot.readListEnd(); } @@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, FindSchemasByColsR oprot.writeFieldBegin(SCHEMA_VERSIONS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.schemaVersions.size())); - for (SchemaVersionDescriptor _iter1019 : struct.schemaVersions) + for (SchemaVersionDescriptor _iter1027 : struct.schemaVersions) { - _iter1019.write(oprot); + _iter1027.write(oprot); } oprot.writeListEnd(); } @@ -416,9 +416,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, FindSchemasByColsRe if (struct.isSetSchemaVersions()) { { oprot.writeI32(struct.schemaVersions.size()); - for (SchemaVersionDescriptor _iter1020 : struct.schemaVersions) + for (SchemaVersionDescriptor _iter1028 : struct.schemaVersions) { - _iter1020.write(oprot); + _iter1028.write(oprot); } } } @@ -430,14 +430,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, FindSchemasByColsRes BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1021 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.schemaVersions = new ArrayList(_list1021.size); - SchemaVersionDescriptor _elem1022; - for (int _i1023 = 0; _i1023 < _list1021.size; ++_i1023) + org.apache.thrift.protocol.TList _list1029 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.schemaVersions = new ArrayList(_list1029.size); + SchemaVersionDescriptor _elem1030; + for (int _i1031 = 0; _i1031 < _list1029.size; ++_i1031) { - _elem1022 = new SchemaVersionDescriptor(); - _elem1022.read(iprot); - struct.schemaVersions.add(_elem1022); + _elem1030 = new SchemaVersionDescriptor(); + _elem1030.read(iprot); + struct.schemaVersions.add(_elem1030); } } struct.setSchemaVersionsIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java index dd3097d35d..93c8bd9dc9 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java +++ 
b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java @@ -794,13 +794,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, FireEventRequest st case 5: // PARTITION_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list804 = iprot.readListBegin(); - struct.partitionVals = new ArrayList(_list804.size); - String _elem805; - for (int _i806 = 0; _i806 < _list804.size; ++_i806) + org.apache.thrift.protocol.TList _list812 = iprot.readListBegin(); + struct.partitionVals = new ArrayList(_list812.size); + String _elem813; + for (int _i814 = 0; _i814 < _list812.size; ++_i814) { - _elem805 = iprot.readString(); - struct.partitionVals.add(_elem805); + _elem813 = iprot.readString(); + struct.partitionVals.add(_elem813); } iprot.readListEnd(); } @@ -857,9 +857,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, FireEventRequest s oprot.writeFieldBegin(PARTITION_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partitionVals.size())); - for (String _iter807 : struct.partitionVals) + for (String _iter815 : struct.partitionVals) { - oprot.writeString(_iter807); + oprot.writeString(_iter815); } oprot.writeListEnd(); } @@ -915,9 +915,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, FireEventRequest st if (struct.isSetPartitionVals()) { { oprot.writeI32(struct.partitionVals.size()); - for (String _iter808 : struct.partitionVals) + for (String _iter816 : struct.partitionVals) { - oprot.writeString(_iter808); + oprot.writeString(_iter816); } } } @@ -945,13 +945,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, FireEventRequest str } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list809 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionVals = new ArrayList(_list809.size); - String _elem810; - for (int _i811 = 0; _i811 < _list809.size; ++_i811) + org.apache.thrift.protocol.TList _list817 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionVals = new ArrayList(_list817.size); + String _elem818; + for (int _i819 = 0; _i819 < _list817.size; ++_i819) { - _elem810 = iprot.readString(); - struct.partitionVals.add(_elem810); + _elem818 = iprot.readString(); + struct.partitionVals.add(_elem818); } } struct.setPartitionValsIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java index d35f5f2c20..94661fd2f0 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java @@ -346,14 +346,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetAllFunctionsResp case 1: // FUNCTIONS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list880 = iprot.readListBegin(); - struct.functions = new ArrayList(_list880.size); - Function _elem881; - for (int _i882 = 0; _i882 < _list880.size; ++_i882) + 
org.apache.thrift.protocol.TList _list888 = iprot.readListBegin(); + struct.functions = new ArrayList(_list888.size); + Function _elem889; + for (int _i890 = 0; _i890 < _list888.size; ++_i890) { - _elem881 = new Function(); - _elem881.read(iprot); - struct.functions.add(_elem881); + _elem889 = new Function(); + _elem889.read(iprot); + struct.functions.add(_elem889); } iprot.readListEnd(); } @@ -380,9 +380,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetAllFunctionsRes oprot.writeFieldBegin(FUNCTIONS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.functions.size())); - for (Function _iter883 : struct.functions) + for (Function _iter891 : struct.functions) { - _iter883.write(oprot); + _iter891.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetAllFunctionsResp if (struct.isSetFunctions()) { { oprot.writeI32(struct.functions.size()); - for (Function _iter884 : struct.functions) + for (Function _iter892 : struct.functions) { - _iter884.write(oprot); + _iter892.write(oprot); } } } @@ -428,14 +428,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetAllFunctionsRespo BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list885 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.functions = new ArrayList(_list885.size); - Function _elem886; - for (int _i887 = 0; _i887 < _list885.size; ++_i887) + org.apache.thrift.protocol.TList _list893 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.functions = new ArrayList(_list893.size); + Function _elem894; + for (int _i895 = 0; _i895 < _list893.size; ++_i895) { - _elem886 = new Function(); - _elem886.read(iprot); - struct.functions.add(_elem886); + _elem894 = new Function(); + _elem894.read(iprot); + struct.functions.add(_elem894); } } struct.setFunctionsIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprRequest.java index 1fa7a22371..5c504fcf24 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprRequest.java @@ -619,13 +619,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetFileMetadataByEx case 1: // FILE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list830 = iprot.readListBegin(); - struct.fileIds = new ArrayList(_list830.size); - long _elem831; - for (int _i832 = 0; _i832 < _list830.size; ++_i832) + org.apache.thrift.protocol.TList _list838 = iprot.readListBegin(); + struct.fileIds = new ArrayList(_list838.size); + long _elem839; + for (int _i840 = 0; _i840 < _list838.size; ++_i840) { - _elem831 = iprot.readI64(); - struct.fileIds.add(_elem831); + _elem839 = iprot.readI64(); + struct.fileIds.add(_elem839); } iprot.readListEnd(); } @@ -675,9 +675,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetFileMetadataByE oprot.writeFieldBegin(FILE_IDS_FIELD_DESC); { 
oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.fileIds.size())); - for (long _iter833 : struct.fileIds) + for (long _iter841 : struct.fileIds) { - oprot.writeI64(_iter833); + oprot.writeI64(_iter841); } oprot.writeListEnd(); } @@ -719,9 +719,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByEx TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.fileIds.size()); - for (long _iter834 : struct.fileIds) + for (long _iter842 : struct.fileIds) { - oprot.writeI64(_iter834); + oprot.writeI64(_iter842); } } oprot.writeBinary(struct.expr); @@ -745,13 +745,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByEx public void read(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByExprRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list835 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.fileIds = new ArrayList(_list835.size); - long _elem836; - for (int _i837 = 0; _i837 < _list835.size; ++_i837) + org.apache.thrift.protocol.TList _list843 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.fileIds = new ArrayList(_list843.size); + long _elem844; + for (int _i845 = 0; _i845 < _list843.size; ++_i845) { - _elem836 = iprot.readI64(); - struct.fileIds.add(_elem836); + _elem844 = iprot.readI64(); + struct.fileIds.add(_elem844); } } struct.setFileIdsIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprResult.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprResult.java index a316b47d6f..70ae61e5b0 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprResult.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprResult.java @@ -444,16 +444,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetFileMetadataByEx case 1: // METADATA if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map820 = iprot.readMapBegin(); - struct.metadata = new HashMap(2*_map820.size); - long _key821; - MetadataPpdResult _val822; - for (int _i823 = 0; _i823 < _map820.size; ++_i823) + org.apache.thrift.protocol.TMap _map828 = iprot.readMapBegin(); + struct.metadata = new HashMap(2*_map828.size); + long _key829; + MetadataPpdResult _val830; + for (int _i831 = 0; _i831 < _map828.size; ++_i831) { - _key821 = iprot.readI64(); - _val822 = new MetadataPpdResult(); - _val822.read(iprot); - struct.metadata.put(_key821, _val822); + _key829 = iprot.readI64(); + _val830 = new MetadataPpdResult(); + _val830.read(iprot); + struct.metadata.put(_key829, _val830); } iprot.readMapEnd(); } @@ -487,10 +487,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetFileMetadataByE oprot.writeFieldBegin(METADATA_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRUCT, struct.metadata.size())); - for (Map.Entry _iter824 : struct.metadata.entrySet()) + for (Map.Entry _iter832 : struct.metadata.entrySet()) { - 
oprot.writeI64(_iter824.getKey()); - _iter824.getValue().write(oprot); + oprot.writeI64(_iter832.getKey()); + _iter832.getValue().write(oprot); } oprot.writeMapEnd(); } @@ -518,10 +518,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByEx TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.metadata.size()); - for (Map.Entry _iter825 : struct.metadata.entrySet()) + for (Map.Entry _iter833 : struct.metadata.entrySet()) { - oprot.writeI64(_iter825.getKey()); - _iter825.getValue().write(oprot); + oprot.writeI64(_iter833.getKey()); + _iter833.getValue().write(oprot); } } oprot.writeBool(struct.isSupported); @@ -531,16 +531,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByEx public void read(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByExprResult struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TMap _map826 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.metadata = new HashMap(2*_map826.size); - long _key827; - MetadataPpdResult _val828; - for (int _i829 = 0; _i829 < _map826.size; ++_i829) + org.apache.thrift.protocol.TMap _map834 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.metadata = new HashMap(2*_map834.size); + long _key835; + MetadataPpdResult _val836; + for (int _i837 = 0; _i837 < _map834.size; ++_i837) { - _key827 = iprot.readI64(); - _val828 = new MetadataPpdResult(); - _val828.read(iprot); - struct.metadata.put(_key827, _val828); + _key835 = iprot.readI64(); + _val836 = new MetadataPpdResult(); + _val836.read(iprot); + struct.metadata.put(_key835, _val836); } } struct.setMetadataIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataRequest.java index 571727807d..5c55f38d1f 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataRequest.java @@ -351,13 +351,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetFileMetadataRequ case 1: // FILE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list848 = iprot.readListBegin(); - struct.fileIds = new ArrayList(_list848.size); - long _elem849; - for (int _i850 = 0; _i850 < _list848.size; ++_i850) + org.apache.thrift.protocol.TList _list856 = iprot.readListBegin(); + struct.fileIds = new ArrayList(_list856.size); + long _elem857; + for (int _i858 = 0; _i858 < _list856.size; ++_i858) { - _elem849 = iprot.readI64(); - struct.fileIds.add(_elem849); + _elem857 = iprot.readI64(); + struct.fileIds.add(_elem857); } iprot.readListEnd(); } @@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetFileMetadataReq oprot.writeFieldBegin(FILE_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.fileIds.size())); - for (long _iter851 : struct.fileIds) + for (long _iter859 : struct.fileIds) { - 
oprot.writeI64(_iter851); + oprot.writeI64(_iter859); } oprot.writeListEnd(); } @@ -410,9 +410,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataRequ TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.fileIds.size()); - for (long _iter852 : struct.fileIds) + for (long _iter860 : struct.fileIds) { - oprot.writeI64(_iter852); + oprot.writeI64(_iter860); } } } @@ -421,13 +421,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataRequ public void read(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list853 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.fileIds = new ArrayList(_list853.size); - long _elem854; - for (int _i855 = 0; _i855 < _list853.size; ++_i855) + org.apache.thrift.protocol.TList _list861 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.fileIds = new ArrayList(_list861.size); + long _elem862; + for (int _i863 = 0; _i863 < _list861.size; ++_i863) { - _elem854 = iprot.readI64(); - struct.fileIds.add(_elem854); + _elem862 = iprot.readI64(); + struct.fileIds.add(_elem862); } } struct.setFileIdsIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataResult.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataResult.java index 0820509d1b..7889436f31 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataResult.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataResult.java @@ -433,15 +433,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetFileMetadataResu case 1: // METADATA if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map838 = iprot.readMapBegin(); - struct.metadata = new HashMap(2*_map838.size); - long _key839; - ByteBuffer _val840; - for (int _i841 = 0; _i841 < _map838.size; ++_i841) + org.apache.thrift.protocol.TMap _map846 = iprot.readMapBegin(); + struct.metadata = new HashMap(2*_map846.size); + long _key847; + ByteBuffer _val848; + for (int _i849 = 0; _i849 < _map846.size; ++_i849) { - _key839 = iprot.readI64(); - _val840 = iprot.readBinary(); - struct.metadata.put(_key839, _val840); + _key847 = iprot.readI64(); + _val848 = iprot.readBinary(); + struct.metadata.put(_key847, _val848); } iprot.readMapEnd(); } @@ -475,10 +475,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetFileMetadataRes oprot.writeFieldBegin(METADATA_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRING, struct.metadata.size())); - for (Map.Entry _iter842 : struct.metadata.entrySet()) + for (Map.Entry _iter850 : struct.metadata.entrySet()) { - oprot.writeI64(_iter842.getKey()); - oprot.writeBinary(_iter842.getValue()); + oprot.writeI64(_iter850.getKey()); + oprot.writeBinary(_iter850.getValue()); } oprot.writeMapEnd(); } @@ -506,10 +506,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataResu TTupleProtocol oprot = (TTupleProtocol) prot; { 
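The GetFileMetadataResult tuple write continuing just below serializes the metadata map as an entry count followed by bare key/value pairs, with no per-entry framing, so the reader must already know both element types. A rough plain-Java analogy of that layout, using DataOutputStream rather than Thrift's TProtocol (class and method names are illustrative only):

import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Map;

class MapLayoutSketch {
  static void write(DataOutputStream out, Map<Long, ByteBuffer> metadata) throws IOException {
    out.writeInt(metadata.size());           // entry count first
    for (Map.Entry<Long, ByteBuffer> e : metadata.entrySet()) {
      out.writeLong(e.getKey());             // i64 key
      byte[] v = new byte[e.getValue().remaining()];
      e.getValue().duplicate().get(v);       // copy without disturbing the buffer
      out.writeInt(v.length);                // binary value: length, then bytes
      out.write(v);
    }
  }
}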
oprot.writeI32(struct.metadata.size()); - for (Map.Entry _iter843 : struct.metadata.entrySet()) + for (Map.Entry _iter851 : struct.metadata.entrySet()) { - oprot.writeI64(_iter843.getKey()); - oprot.writeBinary(_iter843.getValue()); + oprot.writeI64(_iter851.getKey()); + oprot.writeBinary(_iter851.getValue()); } } oprot.writeBool(struct.isSupported); @@ -519,15 +519,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataResu public void read(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataResult struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TMap _map844 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.metadata = new HashMap(2*_map844.size); - long _key845; - ByteBuffer _val846; - for (int _i847 = 0; _i847 < _map844.size; ++_i847) + org.apache.thrift.protocol.TMap _map852 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.metadata = new HashMap(2*_map852.size); + long _key853; + ByteBuffer _val854; + for (int _i855 = 0; _i855 < _map852.size; ++_i855) { - _key845 = iprot.readI64(); - _val846 = iprot.readBinary(); - struct.metadata.put(_key845, _val846); + _key853 = iprot.readI64(); + _val854 = iprot.readBinary(); + struct.metadata.put(_key853, _val854); } } struct.setMetadataIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsByNamesRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsByNamesRequest.java index 9924f20fd4..2fa06250b1 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsByNamesRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsByNamesRequest.java @@ -44,6 +44,7 @@ private static final org.apache.thrift.protocol.TField GET_COL_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("get_col_stats", org.apache.thrift.protocol.TType.BOOL, (short)4); private static final org.apache.thrift.protocol.TField PROCESSOR_CAPABILITIES_FIELD_DESC = new org.apache.thrift.protocol.TField("processorCapabilities", org.apache.thrift.protocol.TType.LIST, (short)5); private static final org.apache.thrift.protocol.TField PROCESSOR_IDENTIFIER_FIELD_DESC = new org.apache.thrift.protocol.TField("processorIdentifier", org.apache.thrift.protocol.TType.STRING, (short)6); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)7); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -57,6 +58,7 @@ private boolean get_col_stats; // optional private List processorCapabilities; // optional private String processorIdentifier; // optional + private String validWriteIdList; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -65,7 +67,8 @@ NAMES((short)3, "names"), GET_COL_STATS((short)4, "get_col_stats"), PROCESSOR_CAPABILITIES((short)5, "processorCapabilities"), - PROCESSOR_IDENTIFIER((short)6, "processorIdentifier"); + PROCESSOR_IDENTIFIER((short)6, "processorIdentifier"), + VALID_WRITE_ID_LIST((short)7, "validWriteIdList"); private static final Map byName = new HashMap(); @@ -92,6 +95,8 @@ public static _Fields findByThriftId(int fieldId) { return PROCESSOR_CAPABILITIES; case 6: // PROCESSOR_IDENTIFIER return PROCESSOR_IDENTIFIER; + case 7: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; default: return null; } @@ -134,7 +139,7 @@ public String getFieldName() { // isset id assignments private static final int __GET_COL_STATS_ISSET_ID = 0; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.NAMES,_Fields.GET_COL_STATS,_Fields.PROCESSOR_CAPABILITIES,_Fields.PROCESSOR_IDENTIFIER}; + private static final _Fields optionals[] = {_Fields.NAMES,_Fields.GET_COL_STATS,_Fields.PROCESSOR_CAPABILITIES,_Fields.PROCESSOR_IDENTIFIER,_Fields.VALID_WRITE_ID_LIST}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -152,6 +157,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); tmpMap.put(_Fields.PROCESSOR_IDENTIFIER, new org.apache.thrift.meta_data.FieldMetaData("processorIdentifier", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetPartitionsByNamesRequest.class, metaDataMap); } @@ -191,6 +198,9 @@ public GetPartitionsByNamesRequest(GetPartitionsByNamesRequest other) { if (other.isSetProcessorIdentifier()) { this.processorIdentifier = other.processorIdentifier; } + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } } public GetPartitionsByNamesRequest deepCopy() { @@ -206,6 +216,7 @@ public void clear() { this.get_col_stats = false; this.processorCapabilities = null; this.processorIdentifier = null; + this.validWriteIdList = null; } public String getDb_name() { @@ -375,6 +386,29 @@ public void setProcessorIdentifierIsSet(boolean value) { } } + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -425,6 +459,14 @@ public void setFieldValue(_Fields 
field, Object value) { } break; + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + } } @@ -448,6 +490,9 @@ public Object getFieldValue(_Fields field) { case PROCESSOR_IDENTIFIER: return getProcessorIdentifier(); + case VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + } throw new IllegalStateException(); } @@ -471,6 +516,8 @@ public boolean isSet(_Fields field) { return isSetProcessorCapabilities(); case PROCESSOR_IDENTIFIER: return isSetProcessorIdentifier(); + case VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); } throw new IllegalStateException(); } @@ -542,6 +589,15 @@ public boolean equals(GetPartitionsByNamesRequest that) { return false; } + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if (!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + return true; } @@ -579,6 +635,11 @@ public int hashCode() { if (present_processorIdentifier) list.add(processorIdentifier); + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + return list.hashCode(); } @@ -650,6 +711,16 @@ public int compareTo(GetPartitionsByNamesRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -721,6 +792,16 @@ public String toString() { } first = false; } + if (isSetValidWriteIdList()) { + if (!first) sb.append(", "); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -842,6 +923,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetPartitionsByName org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 7: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -905,6 +994,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetPartitionsByNam oprot.writeFieldEnd(); } } + if (struct.validWriteIdList != null) { + if (struct.isSetValidWriteIdList()) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -937,7 +1033,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetPartitionsByName if (struct.isSetProcessorIdentifier()) { optionals.set(3); } - oprot.writeBitSet(optionals, 4); + if (struct.isSetValidWriteIdList()) { + optionals.set(4); + } + oprot.writeBitSet(optionals, 5); if 
(struct.isSetNames()) { { oprot.writeI32(struct.names.size()); @@ -962,6 +1061,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetPartitionsByName if (struct.isSetProcessorIdentifier()) { oprot.writeString(struct.processorIdentifier); } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); + } } @Override @@ -971,7 +1073,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetPartitionsByNames struct.setDb_nameIsSet(true); struct.tbl_name = iprot.readString(); struct.setTbl_nameIsSet(true); - BitSet incoming = iprot.readBitSet(4); + BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { org.apache.thrift.protocol.TList _list572 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); @@ -1006,6 +1108,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetPartitionsByNames struct.processorIdentifier = iprot.readString(); struct.setProcessorIdentifierIsSet(true); } + if (incoming.get(4)) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } } } diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsFilterSpec.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsFilterSpec.java index 2f2c3c83c5..d025d5e446 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsFilterSpec.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsFilterSpec.java @@ -444,13 +444,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetPartitionsFilter case 8: // FILTERS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1048 = iprot.readListBegin(); - struct.filters = new ArrayList(_list1048.size); - String _elem1049; - for (int _i1050 = 0; _i1050 < _list1048.size; ++_i1050) + org.apache.thrift.protocol.TList _list1056 = iprot.readListBegin(); + struct.filters = new ArrayList(_list1056.size); + String _elem1057; + for (int _i1058 = 0; _i1058 < _list1056.size; ++_i1058) { - _elem1049 = iprot.readString(); - struct.filters.add(_elem1049); + _elem1057 = iprot.readString(); + struct.filters.add(_elem1057); } iprot.readListEnd(); } @@ -484,9 +484,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetPartitionsFilte oprot.writeFieldBegin(FILTERS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.filters.size())); - for (String _iter1051 : struct.filters) + for (String _iter1059 : struct.filters) { - oprot.writeString(_iter1051); + oprot.writeString(_iter1059); } oprot.writeListEnd(); } @@ -524,9 +524,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetPartitionsFilter if (struct.isSetFilters()) { { oprot.writeI32(struct.filters.size()); - for (String _iter1052 : struct.filters) + for (String _iter1060 : struct.filters) { - oprot.writeString(_iter1052); + oprot.writeString(_iter1060); } } } @@ -542,13 +542,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetPartitionsFilterS } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list1053 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.filters = new 
ArrayList(_list1053.size); - String _elem1054; - for (int _i1055 = 0; _i1055 < _list1053.size; ++_i1055) + org.apache.thrift.protocol.TList _list1061 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.filters = new ArrayList(_list1061.size); + String _elem1062; + for (int _i1063 = 0; _i1063 < _list1061.size; ++_i1063) { - _elem1054 = iprot.readString(); - struct.filters.add(_elem1054); + _elem1062 = iprot.readString(); + struct.filters.add(_elem1062); } } struct.setFiltersIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsProjectionSpec.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsProjectionSpec.java index 8483633de3..d4435721ce 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsProjectionSpec.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsProjectionSpec.java @@ -509,13 +509,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetPartitionsProjec case 1: // FIELD_LIST if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1040 = iprot.readListBegin(); - struct.fieldList = new ArrayList(_list1040.size); - String _elem1041; - for (int _i1042 = 0; _i1042 < _list1040.size; ++_i1042) + org.apache.thrift.protocol.TList _list1048 = iprot.readListBegin(); + struct.fieldList = new ArrayList(_list1048.size); + String _elem1049; + for (int _i1050 = 0; _i1050 < _list1048.size; ++_i1050) { - _elem1041 = iprot.readString(); - struct.fieldList.add(_elem1041); + _elem1049 = iprot.readString(); + struct.fieldList.add(_elem1049); } iprot.readListEnd(); } @@ -557,9 +557,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetPartitionsProje oprot.writeFieldBegin(FIELD_LIST_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.fieldList.size())); - for (String _iter1043 : struct.fieldList) + for (String _iter1051 : struct.fieldList) { - oprot.writeString(_iter1043); + oprot.writeString(_iter1051); } oprot.writeListEnd(); } @@ -606,9 +606,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetPartitionsProjec if (struct.isSetFieldList()) { { oprot.writeI32(struct.fieldList.size()); - for (String _iter1044 : struct.fieldList) + for (String _iter1052 : struct.fieldList) { - oprot.writeString(_iter1044); + oprot.writeString(_iter1052); } } } @@ -626,13 +626,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetPartitionsProject BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1045 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.fieldList = new ArrayList(_list1045.size); - String _elem1046; - for (int _i1047 = 0; _i1047 < _list1045.size; ++_i1047) + org.apache.thrift.protocol.TList _list1053 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.fieldList = new ArrayList(_list1053.size); + String _elem1054; + for (int _i1055 = 0; _i1055 < _list1053.size; ++_i1055) { - _elem1046 = iprot.readString(); - struct.fieldList.add(_elem1046); + _elem1054 = iprot.readString(); + struct.fieldList.add(_elem1054); } 
} struct.setFieldListIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsRequest.java index 256d4f54f6..e0d3436756 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsRequest.java @@ -48,6 +48,7 @@ private static final org.apache.thrift.protocol.TField FILTER_SPEC_FIELD_DESC = new org.apache.thrift.protocol.TField("filterSpec", org.apache.thrift.protocol.TType.STRUCT, (short)8); private static final org.apache.thrift.protocol.TField PROCESSOR_CAPABILITIES_FIELD_DESC = new org.apache.thrift.protocol.TField("processorCapabilities", org.apache.thrift.protocol.TType.LIST, (short)9); private static final org.apache.thrift.protocol.TField PROCESSOR_IDENTIFIER_FIELD_DESC = new org.apache.thrift.protocol.TField("processorIdentifier", org.apache.thrift.protocol.TType.STRING, (short)10); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)11); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -65,6 +66,7 @@ private GetPartitionsFilterSpec filterSpec; // required private List processorCapabilities; // optional private String processorIdentifier; // optional + private String validWriteIdList; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -77,7 +79,8 @@ PROJECTION_SPEC((short)7, "projectionSpec"), FILTER_SPEC((short)8, "filterSpec"), PROCESSOR_CAPABILITIES((short)9, "processorCapabilities"), - PROCESSOR_IDENTIFIER((short)10, "processorIdentifier"); + PROCESSOR_IDENTIFIER((short)10, "processorIdentifier"), + VALID_WRITE_ID_LIST((short)11, "validWriteIdList"); private static final Map byName = new HashMap(); @@ -112,6 +115,8 @@ public static _Fields findByThriftId(int fieldId) { return PROCESSOR_CAPABILITIES; case 10: // PROCESSOR_IDENTIFIER return PROCESSOR_IDENTIFIER; + case 11: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; default: return null; } @@ -154,7 +159,7 @@ public String getFieldName() { // isset id assignments private static final int __WITHAUTH_ISSET_ID = 0; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.CAT_NAME,_Fields.WITH_AUTH,_Fields.USER,_Fields.GROUP_NAMES,_Fields.PROCESSOR_CAPABILITIES,_Fields.PROCESSOR_IDENTIFIER}; + private static final _Fields optionals[] = {_Fields.CAT_NAME,_Fields.WITH_AUTH,_Fields.USER,_Fields.GROUP_NAMES,_Fields.PROCESSOR_CAPABILITIES,_Fields.PROCESSOR_IDENTIFIER,_Fields.VALID_WRITE_ID_LIST}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -180,6 +185,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); tmpMap.put(_Fields.PROCESSOR_IDENTIFIER, new org.apache.thrift.meta_data.FieldMetaData("processorIdentifier", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetPartitionsRequest.class, metaDataMap); } @@ -235,6 +242,9 @@ public GetPartitionsRequest(GetPartitionsRequest other) { if (other.isSetProcessorIdentifier()) { this.processorIdentifier = other.processorIdentifier; } + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } } public GetPartitionsRequest deepCopy() { @@ -254,6 +264,7 @@ public void clear() { this.filterSpec = null; this.processorCapabilities = null; this.processorIdentifier = null; + this.validWriteIdList = null; } public String getCatName() { @@ -515,6 +526,29 @@ public void setProcessorIdentifierIsSet(boolean value) { } } + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case CAT_NAME: @@ 
-597,6 +631,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + } } @@ -632,6 +674,9 @@ public Object getFieldValue(_Fields field) { case PROCESSOR_IDENTIFIER: return getProcessorIdentifier(); + case VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + } throw new IllegalStateException(); } @@ -663,6 +708,8 @@ public boolean isSet(_Fields field) { return isSetProcessorCapabilities(); case PROCESSOR_IDENTIFIER: return isSetProcessorIdentifier(); + case VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); } throw new IllegalStateException(); } @@ -770,6 +817,15 @@ public boolean equals(GetPartitionsRequest that) { return false; } + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if (!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + return true; } @@ -827,6 +883,11 @@ public int hashCode() { if (present_processorIdentifier) list.add(processorIdentifier); + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + return list.hashCode(); } @@ -938,6 +999,16 @@ public int compareTo(GetPartitionsRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -1045,6 +1116,16 @@ public String toString() { } first = false; } + if (isSetValidWriteIdList()) { + if (!first) sb.append(", "); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -1139,13 +1220,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetPartitionsReques case 6: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1064 = iprot.readListBegin(); - struct.groupNames = new ArrayList(_list1064.size); - String _elem1065; - for (int _i1066 = 0; _i1066 < _list1064.size; ++_i1066) + org.apache.thrift.protocol.TList _list1072 = iprot.readListBegin(); + struct.groupNames = new ArrayList(_list1072.size); + String _elem1073; + for (int _i1074 = 0; _i1074 < _list1072.size; ++_i1074) { - _elem1065 = iprot.readString(); - struct.groupNames.add(_elem1065); + _elem1073 = iprot.readString(); + struct.groupNames.add(_elem1073); } iprot.readListEnd(); } @@ -1175,13 +1256,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetPartitionsReques case 9: // PROCESSOR_CAPABILITIES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1067 = iprot.readListBegin(); - struct.processorCapabilities = new ArrayList(_list1067.size); - String _elem1068; - for (int _i1069 = 0; _i1069 < _list1067.size; ++_i1069) + 
org.apache.thrift.protocol.TList _list1075 = iprot.readListBegin(); + struct.processorCapabilities = new ArrayList(_list1075.size); + String _elem1076; + for (int _i1077 = 0; _i1077 < _list1075.size; ++_i1077) { - _elem1068 = iprot.readString(); - struct.processorCapabilities.add(_elem1068); + _elem1076 = iprot.readString(); + struct.processorCapabilities.add(_elem1076); } iprot.readListEnd(); } @@ -1198,6 +1279,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetPartitionsReques org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 11: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -1245,9 +1334,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetPartitionsReque oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.groupNames.size())); - for (String _iter1070 : struct.groupNames) + for (String _iter1078 : struct.groupNames) { - oprot.writeString(_iter1070); + oprot.writeString(_iter1078); } oprot.writeListEnd(); } @@ -1269,9 +1358,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetPartitionsReque oprot.writeFieldBegin(PROCESSOR_CAPABILITIES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.processorCapabilities.size())); - for (String _iter1071 : struct.processorCapabilities) + for (String _iter1079 : struct.processorCapabilities) { - oprot.writeString(_iter1071); + oprot.writeString(_iter1079); } oprot.writeListEnd(); } @@ -1285,6 +1374,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetPartitionsReque oprot.writeFieldEnd(); } } + if (struct.validWriteIdList != null) { + if (struct.isSetValidWriteIdList()) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -1333,7 +1429,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetPartitionsReques if (struct.isSetProcessorIdentifier()) { optionals.set(9); } - oprot.writeBitSet(optionals, 10); + if (struct.isSetValidWriteIdList()) { + optionals.set(10); + } + oprot.writeBitSet(optionals, 11); if (struct.isSetCatName()) { oprot.writeString(struct.catName); } @@ -1352,9 +1451,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetPartitionsReques if (struct.isSetGroupNames()) { { oprot.writeI32(struct.groupNames.size()); - for (String _iter1072 : struct.groupNames) + for (String _iter1080 : struct.groupNames) { - oprot.writeString(_iter1072); + oprot.writeString(_iter1080); } } } @@ -1367,21 +1466,24 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetPartitionsReques if (struct.isSetProcessorCapabilities()) { { oprot.writeI32(struct.processorCapabilities.size()); - for (String _iter1073 : struct.processorCapabilities) + for (String _iter1081 : struct.processorCapabilities) { - oprot.writeString(_iter1073); + oprot.writeString(_iter1081); } } } if (struct.isSetProcessorIdentifier()) { oprot.writeString(struct.processorIdentifier); } + if (struct.isSetValidWriteIdList()) { + 
oprot.writeString(struct.validWriteIdList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, GetPartitionsRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(10); + BitSet incoming = iprot.readBitSet(11); if (incoming.get(0)) { struct.catName = iprot.readString(); struct.setCatNameIsSet(true); @@ -1404,13 +1506,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetPartitionsRequest } if (incoming.get(5)) { { - org.apache.thrift.protocol.TList _list1074 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.groupNames = new ArrayList(_list1074.size); - String _elem1075; - for (int _i1076 = 0; _i1076 < _list1074.size; ++_i1076) + org.apache.thrift.protocol.TList _list1082 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.groupNames = new ArrayList(_list1082.size); + String _elem1083; + for (int _i1084 = 0; _i1084 < _list1082.size; ++_i1084) { - _elem1075 = iprot.readString(); - struct.groupNames.add(_elem1075); + _elem1083 = iprot.readString(); + struct.groupNames.add(_elem1083); } } struct.setGroupNamesIsSet(true); @@ -1427,13 +1529,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetPartitionsRequest } if (incoming.get(8)) { { - org.apache.thrift.protocol.TList _list1077 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.processorCapabilities = new ArrayList(_list1077.size); - String _elem1078; - for (int _i1079 = 0; _i1079 < _list1077.size; ++_i1079) + org.apache.thrift.protocol.TList _list1085 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.processorCapabilities = new ArrayList(_list1085.size); + String _elem1086; + for (int _i1087 = 0; _i1087 < _list1085.size; ++_i1087) { - _elem1078 = iprot.readString(); - struct.processorCapabilities.add(_elem1078); + _elem1086 = iprot.readString(); + struct.processorCapabilities.add(_elem1086); } } struct.setProcessorCapabilitiesIsSet(true); @@ -1442,6 +1544,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetPartitionsRequest struct.processorIdentifier = iprot.readString(); struct.setProcessorIdentifierIsSet(true); } + if (incoming.get(10)) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } } } diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsResponse.java index ad1987b11b..b5e127edd3 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsResponse.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsResponse.java @@ -350,14 +350,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetPartitionsRespon case 1: // PARTITION_SPEC if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1056 = iprot.readListBegin(); - struct.partitionSpec = new ArrayList(_list1056.size); - PartitionSpec _elem1057; - for (int _i1058 = 0; _i1058 < _list1056.size; ++_i1058) + org.apache.thrift.protocol.TList _list1064 = 
iprot.readListBegin(); + struct.partitionSpec = new ArrayList(_list1064.size); + PartitionSpec _elem1065; + for (int _i1066 = 0; _i1066 < _list1064.size; ++_i1066) { - _elem1057 = new PartitionSpec(); - _elem1057.read(iprot); - struct.partitionSpec.add(_elem1057); + _elem1065 = new PartitionSpec(); + _elem1065.read(iprot); + struct.partitionSpec.add(_elem1065); } iprot.readListEnd(); } @@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetPartitionsRespo oprot.writeFieldBegin(PARTITION_SPEC_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitionSpec.size())); - for (PartitionSpec _iter1059 : struct.partitionSpec) + for (PartitionSpec _iter1067 : struct.partitionSpec) { - _iter1059.write(oprot); + _iter1067.write(oprot); } oprot.writeListEnd(); } @@ -416,9 +416,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetPartitionsRespon if (struct.isSetPartitionSpec()) { { oprot.writeI32(struct.partitionSpec.size()); - for (PartitionSpec _iter1060 : struct.partitionSpec) + for (PartitionSpec _iter1068 : struct.partitionSpec) { - _iter1060.write(oprot); + _iter1068.write(oprot); } } } @@ -430,14 +430,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetPartitionsRespons BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1061 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.partitionSpec = new ArrayList(_list1061.size); - PartitionSpec _elem1062; - for (int _i1063 = 0; _i1063 < _list1061.size; ++_i1063) + org.apache.thrift.protocol.TList _list1069 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.partitionSpec = new ArrayList(_list1069.size); + PartitionSpec _elem1070; + for (int _i1071 = 0; _i1071 < _list1069.size; ++_i1071) { - _elem1062 = new PartitionSpec(); - _elem1062.read(iprot); - struct.partitionSpec.add(_elem1062); + _elem1070 = new PartitionSpec(); + _elem1070.read(iprot); + struct.partitionSpec.add(_elem1070); } } struct.setPartitionSpecIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java index f751e397f5..0630929cb5 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java @@ -974,13 +974,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetTableRequest str case 8: // PROCESSOR_CAPABILITIES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list896 = iprot.readListBegin(); - struct.processorCapabilities = new ArrayList(_list896.size); - String _elem897; - for (int _i898 = 0; _i898 < _list896.size; ++_i898) + org.apache.thrift.protocol.TList _list904 = iprot.readListBegin(); + struct.processorCapabilities = new ArrayList(_list904.size); + String _elem905; + for (int _i906 = 0; _i906 < _list904.size; ++_i906) { - _elem897 = iprot.readString(); - struct.processorCapabilities.add(_elem897); + _elem905 = iprot.readString(); + struct.processorCapabilities.add(_elem905); } iprot.readListEnd(); } @@ 
-1051,9 +1051,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetTableRequest st oprot.writeFieldBegin(PROCESSOR_CAPABILITIES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.processorCapabilities.size())); - for (String _iter899 : struct.processorCapabilities) + for (String _iter907 : struct.processorCapabilities) { - oprot.writeString(_iter899); + oprot.writeString(_iter907); } oprot.writeListEnd(); } @@ -1121,9 +1121,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetTableRequest str if (struct.isSetProcessorCapabilities()) { { oprot.writeI32(struct.processorCapabilities.size()); - for (String _iter900 : struct.processorCapabilities) + for (String _iter908 : struct.processorCapabilities) { - oprot.writeString(_iter900); + oprot.writeString(_iter908); } } } @@ -1159,13 +1159,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetTableRequest stru } if (incoming.get(4)) { { - org.apache.thrift.protocol.TList _list901 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.processorCapabilities = new ArrayList(_list901.size); - String _elem902; - for (int _i903 = 0; _i903 < _list901.size; ++_i903) + org.apache.thrift.protocol.TList _list909 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.processorCapabilities = new ArrayList(_list909.size); + String _elem910; + for (int _i911 = 0; _i911 < _list909.size; ++_i911) { - _elem902 = iprot.readString(); - struct.processorCapabilities.add(_elem902); + _elem910 = iprot.readString(); + struct.processorCapabilities.add(_elem910); } } struct.setProcessorCapabilitiesIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesExtRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesExtRequest.java index 3bb90e9817..901956f6cc 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesExtRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesExtRequest.java @@ -885,13 +885,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetTablesExtRequest case 6: // PROCESSOR_CAPABILITIES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list928 = iprot.readListBegin(); - struct.processorCapabilities = new ArrayList(_list928.size); - String _elem929; - for (int _i930 = 0; _i930 < _list928.size; ++_i930) + org.apache.thrift.protocol.TList _list936 = iprot.readListBegin(); + struct.processorCapabilities = new ArrayList(_list936.size); + String _elem937; + for (int _i938 = 0; _i938 < _list936.size; ++_i938) { - _elem929 = iprot.readString(); - struct.processorCapabilities.add(_elem929); + _elem937 = iprot.readString(); + struct.processorCapabilities.add(_elem937); } iprot.readListEnd(); } @@ -949,9 +949,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetTablesExtReques oprot.writeFieldBegin(PROCESSOR_CAPABILITIES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.processorCapabilities.size())); - for (String _iter931 : struct.processorCapabilities) + for (String _iter939 : 
struct.processorCapabilities) { - oprot.writeString(_iter931); + oprot.writeString(_iter939); } oprot.writeListEnd(); } @@ -1003,9 +1003,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetTablesExtRequest if (struct.isSetProcessorCapabilities()) { { oprot.writeI32(struct.processorCapabilities.size()); - for (String _iter932 : struct.processorCapabilities) + for (String _iter940 : struct.processorCapabilities) { - oprot.writeString(_iter932); + oprot.writeString(_iter940); } } } @@ -1032,13 +1032,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetTablesExtRequest } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list933 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.processorCapabilities = new ArrayList(_list933.size); - String _elem934; - for (int _i935 = 0; _i935 < _list933.size; ++_i935) + org.apache.thrift.protocol.TList _list941 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.processorCapabilities = new ArrayList(_list941.size); + String _elem942; + for (int _i943 = 0; _i943 < _list941.size; ++_i943) { - _elem934 = iprot.readString(); - struct.processorCapabilities.add(_elem934); + _elem942 = iprot.readString(); + struct.processorCapabilities.add(_elem942); } } struct.setProcessorCapabilitiesIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesRequest.java index c764543282..b65e54b7b2 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesRequest.java @@ -785,13 +785,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetTablesRequest st case 2: // TBL_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list904 = iprot.readListBegin(); - struct.tblNames = new ArrayList(_list904.size); - String _elem905; - for (int _i906 = 0; _i906 < _list904.size; ++_i906) + org.apache.thrift.protocol.TList _list912 = iprot.readListBegin(); + struct.tblNames = new ArrayList(_list912.size); + String _elem913; + for (int _i914 = 0; _i914 < _list912.size; ++_i914) { - _elem905 = iprot.readString(); - struct.tblNames.add(_elem905); + _elem913 = iprot.readString(); + struct.tblNames.add(_elem913); } iprot.readListEnd(); } @@ -820,13 +820,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetTablesRequest st case 5: // PROCESSOR_CAPABILITIES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list907 = iprot.readListBegin(); - struct.processorCapabilities = new ArrayList(_list907.size); - String _elem908; - for (int _i909 = 0; _i909 < _list907.size; ++_i909) + org.apache.thrift.protocol.TList _list915 = iprot.readListBegin(); + struct.processorCapabilities = new ArrayList(_list915.size); + String _elem916; + for (int _i917 = 0; _i917 < _list915.size; ++_i917) { - _elem908 = iprot.readString(); - struct.processorCapabilities.add(_elem908); + _elem916 = iprot.readString(); + struct.processorCapabilities.add(_elem916); } iprot.readListEnd(); } @@ -866,9 +866,9 @@ public void 
write(org.apache.thrift.protocol.TProtocol oprot, GetTablesRequest s oprot.writeFieldBegin(TBL_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tblNames.size())); - for (String _iter910 : struct.tblNames) + for (String _iter918 : struct.tblNames) { - oprot.writeString(_iter910); + oprot.writeString(_iter918); } oprot.writeListEnd(); } @@ -894,9 +894,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetTablesRequest s oprot.writeFieldBegin(PROCESSOR_CAPABILITIES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.processorCapabilities.size())); - for (String _iter911 : struct.processorCapabilities) + for (String _iter919 : struct.processorCapabilities) { - oprot.writeString(_iter911); + oprot.writeString(_iter919); } oprot.writeListEnd(); } @@ -948,9 +948,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetTablesRequest st if (struct.isSetTblNames()) { { oprot.writeI32(struct.tblNames.size()); - for (String _iter912 : struct.tblNames) + for (String _iter920 : struct.tblNames) { - oprot.writeString(_iter912); + oprot.writeString(_iter920); } } } @@ -963,9 +963,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetTablesRequest st if (struct.isSetProcessorCapabilities()) { { oprot.writeI32(struct.processorCapabilities.size()); - for (String _iter913 : struct.processorCapabilities) + for (String _iter921 : struct.processorCapabilities) { - oprot.writeString(_iter913); + oprot.writeString(_iter921); } } } @@ -982,13 +982,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetTablesRequest str BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list914 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tblNames = new ArrayList(_list914.size); - String _elem915; - for (int _i916 = 0; _i916 < _list914.size; ++_i916) + org.apache.thrift.protocol.TList _list922 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tblNames = new ArrayList(_list922.size); + String _elem923; + for (int _i924 = 0; _i924 < _list922.size; ++_i924) { - _elem915 = iprot.readString(); - struct.tblNames.add(_elem915); + _elem923 = iprot.readString(); + struct.tblNames.add(_elem923); } } struct.setTblNamesIsSet(true); @@ -1004,13 +1004,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetTablesRequest str } if (incoming.get(3)) { { - org.apache.thrift.protocol.TList _list917 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.processorCapabilities = new ArrayList(_list917.size); - String _elem918; - for (int _i919 = 0; _i919 < _list917.size; ++_i919) + org.apache.thrift.protocol.TList _list925 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.processorCapabilities = new ArrayList(_list925.size); + String _elem926; + for (int _i927 = 0; _i927 < _list925.size; ++_i927) { - _elem918 = iprot.readString(); - struct.processorCapabilities.add(_elem918); + _elem926 = iprot.readString(); + struct.processorCapabilities.add(_elem926); } } struct.setProcessorCapabilitiesIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesResult.java 
b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesResult.java index 7d1aff00e5..75e450ea41 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesResult.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesResult.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetTablesResult str case 1: // TABLES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list920 = iprot.readListBegin(); - struct.tables = new ArrayList(_list920.size); - Table _elem921; - for (int _i922 = 0; _i922 < _list920.size; ++_i922) + org.apache.thrift.protocol.TList _list928 = iprot.readListBegin(); + struct.tables = new ArrayList
(_list928.size); + Table _elem929; + for (int _i930 = 0; _i930 < _list928.size; ++_i930) { - _elem921 = new Table(); - _elem921.read(iprot); - struct.tables.add(_elem921); + _elem929 = new Table(); + _elem929.read(iprot); + struct.tables.add(_elem929); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetTablesResult st oprot.writeFieldBegin(TABLES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.tables.size())); - for (Table _iter923 : struct.tables) + for (Table _iter931 : struct.tables) { - _iter923.write(oprot); + _iter931.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetTablesResult str TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.tables.size()); - for (Table _iter924 : struct.tables) + for (Table _iter932 : struct.tables) { - _iter924.write(oprot); + _iter932.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetTablesResult str public void read(org.apache.thrift.protocol.TProtocol prot, GetTablesResult struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list925 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.tables = new ArrayList
(_list925.size); - Table _elem926; - for (int _i927 = 0; _i927 < _list925.size; ++_i927) + org.apache.thrift.protocol.TList _list933 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.tables = new ArrayList
(_list933.size); + Table _elem934; + for (int _i935 = 0; _i935 < _list933.size; ++_i935) { - _elem926 = new Table(); - _elem926.read(iprot); - struct.tables.add(_elem926); + _elem934 = new Table(); + _elem934.read(iprot); + struct.tables.add(_elem934); } } struct.setTablesIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTxnTableWriteIdsResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTxnTableWriteIdsResponse.java new file mode 100644 index 0000000000..32422279bb --- /dev/null +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTxnTableWriteIdsResponse.java @@ -0,0 +1,443 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class GetTxnTableWriteIdsResponse implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetTxnTableWriteIdsResponse"); + + private static final org.apache.thrift.protocol.TField TABLE_WRITE_IDS_FIELD_DESC = new org.apache.thrift.protocol.TField("tableWriteIds", org.apache.thrift.protocol.TType.LIST, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new GetTxnTableWriteIdsResponseStandardSchemeFactory()); + schemes.put(TupleScheme.class, new GetTxnTableWriteIdsResponseTupleSchemeFactory()); + } + + private List tableWriteIds; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + TABLE_WRITE_IDS((short)1, "tableWriteIds"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. 
+ */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // TABLE_WRITE_IDS + return TABLE_WRITE_IDS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.TABLE_WRITE_IDS, new org.apache.thrift.meta_data.FieldMetaData("tableWriteIds", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TableWriteId.class)))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetTxnTableWriteIdsResponse.class, metaDataMap); + } + + public GetTxnTableWriteIdsResponse() { + } + + public GetTxnTableWriteIdsResponse( + List tableWriteIds) + { + this(); + this.tableWriteIds = tableWriteIds; + } + + /** + * Performs a deep copy on other. + */ + public GetTxnTableWriteIdsResponse(GetTxnTableWriteIdsResponse other) { + if (other.isSetTableWriteIds()) { + List __this__tableWriteIds = new ArrayList(other.tableWriteIds.size()); + for (TableWriteId other_element : other.tableWriteIds) { + __this__tableWriteIds.add(new TableWriteId(other_element)); + } + this.tableWriteIds = __this__tableWriteIds; + } + } + + public GetTxnTableWriteIdsResponse deepCopy() { + return new GetTxnTableWriteIdsResponse(this); + } + + @Override + public void clear() { + this.tableWriteIds = null; + } + + public int getTableWriteIdsSize() { + return (this.tableWriteIds == null) ? 0 : this.tableWriteIds.size(); + } + + public java.util.Iterator getTableWriteIdsIterator() { + return (this.tableWriteIds == null) ? 
null : this.tableWriteIds.iterator(); + } + + public void addToTableWriteIds(TableWriteId elem) { + if (this.tableWriteIds == null) { + this.tableWriteIds = new ArrayList(); + } + this.tableWriteIds.add(elem); + } + + public List getTableWriteIds() { + return this.tableWriteIds; + } + + public void setTableWriteIds(List tableWriteIds) { + this.tableWriteIds = tableWriteIds; + } + + public void unsetTableWriteIds() { + this.tableWriteIds = null; + } + + /** Returns true if field tableWriteIds is set (has been assigned a value) and false otherwise */ + public boolean isSetTableWriteIds() { + return this.tableWriteIds != null; + } + + public void setTableWriteIdsIsSet(boolean value) { + if (!value) { + this.tableWriteIds = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case TABLE_WRITE_IDS: + if (value == null) { + unsetTableWriteIds(); + } else { + setTableWriteIds((List)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case TABLE_WRITE_IDS: + return getTableWriteIds(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case TABLE_WRITE_IDS: + return isSetTableWriteIds(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof GetTxnTableWriteIdsResponse) + return this.equals((GetTxnTableWriteIdsResponse)that); + return false; + } + + public boolean equals(GetTxnTableWriteIdsResponse that) { + if (that == null) + return false; + + boolean this_present_tableWriteIds = true && this.isSetTableWriteIds(); + boolean that_present_tableWriteIds = true && that.isSetTableWriteIds(); + if (this_present_tableWriteIds || that_present_tableWriteIds) { + if (!(this_present_tableWriteIds && that_present_tableWriteIds)) + return false; + if (!this.tableWriteIds.equals(that.tableWriteIds)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_tableWriteIds = true && (isSetTableWriteIds()); + list.add(present_tableWriteIds); + if (present_tableWriteIds) + list.add(tableWriteIds); + + return list.hashCode(); + } + + @Override + public int compareTo(GetTxnTableWriteIdsResponse other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetTableWriteIds()).compareTo(other.isSetTableWriteIds()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTableWriteIds()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tableWriteIds, other.tableWriteIds); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new 
StringBuilder("GetTxnTableWriteIdsResponse("); + boolean first = true; + + sb.append("tableWriteIds:"); + if (this.tableWriteIds == null) { + sb.append("null"); + } else { + sb.append(this.tableWriteIds); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetTableWriteIds()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'tableWriteIds' is unset! Struct:" + toString()); + } + + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class GetTxnTableWriteIdsResponseStandardSchemeFactory implements SchemeFactory { + public GetTxnTableWriteIdsResponseStandardScheme getScheme() { + return new GetTxnTableWriteIdsResponseStandardScheme(); + } + } + + private static class GetTxnTableWriteIdsResponseStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, GetTxnTableWriteIdsResponse struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // TABLE_WRITE_IDS + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list674 = iprot.readListBegin(); + struct.tableWriteIds = new ArrayList(_list674.size); + TableWriteId _elem675; + for (int _i676 = 0; _i676 < _list674.size; ++_i676) + { + _elem675 = new TableWriteId(); + _elem675.read(iprot); + struct.tableWriteIds.add(_elem675); + } + iprot.readListEnd(); + } + struct.setTableWriteIdsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, GetTxnTableWriteIdsResponse struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.tableWriteIds != null) { + oprot.writeFieldBegin(TABLE_WRITE_IDS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.tableWriteIds.size())); + for (TableWriteId _iter677 : struct.tableWriteIds) + { + _iter677.write(oprot); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class GetTxnTableWriteIdsResponseTupleSchemeFactory implements SchemeFactory { + public GetTxnTableWriteIdsResponseTupleScheme getScheme() { + return new GetTxnTableWriteIdsResponseTupleScheme(); + } + } + + private static class 
GetTxnTableWriteIdsResponseTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, GetTxnTableWriteIdsResponse struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + { + oprot.writeI32(struct.tableWriteIds.size()); + for (TableWriteId _iter678 : struct.tableWriteIds) + { + _iter678.write(oprot); + } + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, GetTxnTableWriteIdsResponse struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + { + org.apache.thrift.protocol.TList _list679 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.tableWriteIds = new ArrayList(_list679.size); + TableWriteId _elem680; + for (int _i681 = 0; _i681 < _list679.size; ++_i681) + { + _elem680 = new TableWriteId(); + _elem680.read(iprot); + struct.tableWriteIds.add(_elem680); + } + } + struct.setTableWriteIdsIsSet(true); + } + } + +} + diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsResponse.java index 5d4e026c98..c0e4ad0cfd 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsResponse.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsResponse.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetValidWriteIdsRes case 1: // TBL_VALID_WRITE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list674 = iprot.readListBegin(); - struct.tblValidWriteIds = new ArrayList(_list674.size); - TableValidWriteIds _elem675; - for (int _i676 = 0; _i676 < _list674.size; ++_i676) + org.apache.thrift.protocol.TList _list682 = iprot.readListBegin(); + struct.tblValidWriteIds = new ArrayList(_list682.size); + TableValidWriteIds _elem683; + for (int _i684 = 0; _i684 < _list682.size; ++_i684) { - _elem675 = new TableValidWriteIds(); - _elem675.read(iprot); - struct.tblValidWriteIds.add(_elem675); + _elem683 = new TableValidWriteIds(); + _elem683.read(iprot); + struct.tblValidWriteIds.add(_elem683); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetValidWriteIdsRe oprot.writeFieldBegin(TBL_VALID_WRITE_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.tblValidWriteIds.size())); - for (TableValidWriteIds _iter677 : struct.tblValidWriteIds) + for (TableValidWriteIds _iter685 : struct.tblValidWriteIds) { - _iter677.write(oprot); + _iter685.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetValidWriteIdsRes TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.tblValidWriteIds.size()); - for (TableValidWriteIds _iter678 : struct.tblValidWriteIds) + for (TableValidWriteIds _iter686 : struct.tblValidWriteIds) { - _iter678.write(oprot); + _iter686.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetValidWriteIdsRes public void read(org.apache.thrift.protocol.TProtocol prot, 
GetValidWriteIdsResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list679 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.tblValidWriteIds = new ArrayList(_list679.size); - TableValidWriteIds _elem680; - for (int _i681 = 0; _i681 < _list679.size; ++_i681) + org.apache.thrift.protocol.TList _list687 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.tblValidWriteIds = new ArrayList(_list687.size); + TableValidWriteIds _elem688; + for (int _i689 = 0; _i689 < _list687.size; ++_i689) { - _elem680 = new TableValidWriteIds(); - _elem680.read(iprot); - struct.tblValidWriteIds.add(_elem680); + _elem688 = new TableValidWriteIds(); + _elem688.read(iprot); + struct.tblValidWriteIds.add(_elem688); } } struct.setTblValidWriteIdsIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeResponse.java index 7edd0ecc35..cd503879e4 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeResponse.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeResponse.java @@ -453,13 +453,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, HeartbeatTxnRangeRe case 1: // ABORTED if (schemeField.type == org.apache.thrift.protocol.TType.SET) { { - org.apache.thrift.protocol.TSet _set722 = iprot.readSetBegin(); - struct.aborted = new HashSet(2*_set722.size); - long _elem723; - for (int _i724 = 0; _i724 < _set722.size; ++_i724) + org.apache.thrift.protocol.TSet _set730 = iprot.readSetBegin(); + struct.aborted = new HashSet(2*_set730.size); + long _elem731; + for (int _i732 = 0; _i732 < _set730.size; ++_i732) { - _elem723 = iprot.readI64(); - struct.aborted.add(_elem723); + _elem731 = iprot.readI64(); + struct.aborted.add(_elem731); } iprot.readSetEnd(); } @@ -471,13 +471,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, HeartbeatTxnRangeRe case 2: // NOSUCH if (schemeField.type == org.apache.thrift.protocol.TType.SET) { { - org.apache.thrift.protocol.TSet _set725 = iprot.readSetBegin(); - struct.nosuch = new HashSet(2*_set725.size); - long _elem726; - for (int _i727 = 0; _i727 < _set725.size; ++_i727) + org.apache.thrift.protocol.TSet _set733 = iprot.readSetBegin(); + struct.nosuch = new HashSet(2*_set733.size); + long _elem734; + for (int _i735 = 0; _i735 < _set733.size; ++_i735) { - _elem726 = iprot.readI64(); - struct.nosuch.add(_elem726); + _elem734 = iprot.readI64(); + struct.nosuch.add(_elem734); } iprot.readSetEnd(); } @@ -503,9 +503,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, HeartbeatTxnRangeR oprot.writeFieldBegin(ABORTED_FIELD_DESC); { oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, struct.aborted.size())); - for (long _iter728 : struct.aborted) + for (long _iter736 : struct.aborted) { - oprot.writeI64(_iter728); + oprot.writeI64(_iter736); } oprot.writeSetEnd(); } @@ -515,9 +515,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, HeartbeatTxnRangeR oprot.writeFieldBegin(NOSUCH_FIELD_DESC); { oprot.writeSetBegin(new 
org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, struct.nosuch.size())); - for (long _iter729 : struct.nosuch) + for (long _iter737 : struct.nosuch) { - oprot.writeI64(_iter729); + oprot.writeI64(_iter737); } oprot.writeSetEnd(); } @@ -542,16 +542,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, HeartbeatTxnRangeRe TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.aborted.size()); - for (long _iter730 : struct.aborted) + for (long _iter738 : struct.aborted) { - oprot.writeI64(_iter730); + oprot.writeI64(_iter738); } } { oprot.writeI32(struct.nosuch.size()); - for (long _iter731 : struct.nosuch) + for (long _iter739 : struct.nosuch) { - oprot.writeI64(_iter731); + oprot.writeI64(_iter739); } } } @@ -560,24 +560,24 @@ public void write(org.apache.thrift.protocol.TProtocol prot, HeartbeatTxnRangeRe public void read(org.apache.thrift.protocol.TProtocol prot, HeartbeatTxnRangeResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TSet _set732 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.aborted = new HashSet(2*_set732.size); - long _elem733; - for (int _i734 = 0; _i734 < _set732.size; ++_i734) + org.apache.thrift.protocol.TSet _set740 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.aborted = new HashSet(2*_set740.size); + long _elem741; + for (int _i742 = 0; _i742 < _set740.size; ++_i742) { - _elem733 = iprot.readI64(); - struct.aborted.add(_elem733); + _elem741 = iprot.readI64(); + struct.aborted.add(_elem741); } } struct.setAbortedIsSet(true); { - org.apache.thrift.protocol.TSet _set735 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.nosuch = new HashSet(2*_set735.size); - long _elem736; - for (int _i737 = 0; _i737 < _set735.size; ++_i737) + org.apache.thrift.protocol.TSet _set743 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.nosuch = new HashSet(2*_set743.size); + long _elem744; + for (int _i745 = 0; _i745 < _set743.size; ++_i745) { - _elem736 = iprot.readI64(); - struct.nosuch.add(_elem736); + _elem744 = iprot.readI64(); + struct.nosuch.add(_elem744); } } struct.setNosuchIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java index 85049745dd..acf51e4b11 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java @@ -636,13 +636,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, InsertEventRequestD case 2: // FILES_ADDED if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list780 = iprot.readListBegin(); - struct.filesAdded = new ArrayList(_list780.size); - String _elem781; - for (int _i782 = 0; _i782 < _list780.size; ++_i782) + org.apache.thrift.protocol.TList _list788 = iprot.readListBegin(); + struct.filesAdded = new ArrayList(_list788.size); + String _elem789; + for (int _i790 = 0; _i790 < _list788.size; ++_i790) { - 
_elem781 = iprot.readString(); - struct.filesAdded.add(_elem781); + _elem789 = iprot.readString(); + struct.filesAdded.add(_elem789); } iprot.readListEnd(); } @@ -654,13 +654,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, InsertEventRequestD case 3: // FILES_ADDED_CHECKSUM if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list783 = iprot.readListBegin(); - struct.filesAddedChecksum = new ArrayList(_list783.size); - String _elem784; - for (int _i785 = 0; _i785 < _list783.size; ++_i785) + org.apache.thrift.protocol.TList _list791 = iprot.readListBegin(); + struct.filesAddedChecksum = new ArrayList(_list791.size); + String _elem792; + for (int _i793 = 0; _i793 < _list791.size; ++_i793) { - _elem784 = iprot.readString(); - struct.filesAddedChecksum.add(_elem784); + _elem792 = iprot.readString(); + struct.filesAddedChecksum.add(_elem792); } iprot.readListEnd(); } @@ -672,13 +672,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, InsertEventRequestD case 4: // SUB_DIRECTORY_LIST if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list786 = iprot.readListBegin(); - struct.subDirectoryList = new ArrayList(_list786.size); - String _elem787; - for (int _i788 = 0; _i788 < _list786.size; ++_i788) + org.apache.thrift.protocol.TList _list794 = iprot.readListBegin(); + struct.subDirectoryList = new ArrayList(_list794.size); + String _elem795; + for (int _i796 = 0; _i796 < _list794.size; ++_i796) { - _elem787 = iprot.readString(); - struct.subDirectoryList.add(_elem787); + _elem795 = iprot.readString(); + struct.subDirectoryList.add(_elem795); } iprot.readListEnd(); } @@ -709,9 +709,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, InsertEventRequest oprot.writeFieldBegin(FILES_ADDED_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.filesAdded.size())); - for (String _iter789 : struct.filesAdded) + for (String _iter797 : struct.filesAdded) { - oprot.writeString(_iter789); + oprot.writeString(_iter797); } oprot.writeListEnd(); } @@ -722,9 +722,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, InsertEventRequest oprot.writeFieldBegin(FILES_ADDED_CHECKSUM_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.filesAddedChecksum.size())); - for (String _iter790 : struct.filesAddedChecksum) + for (String _iter798 : struct.filesAddedChecksum) { - oprot.writeString(_iter790); + oprot.writeString(_iter798); } oprot.writeListEnd(); } @@ -736,9 +736,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, InsertEventRequest oprot.writeFieldBegin(SUB_DIRECTORY_LIST_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.subDirectoryList.size())); - for (String _iter791 : struct.subDirectoryList) + for (String _iter799 : struct.subDirectoryList) { - oprot.writeString(_iter791); + oprot.writeString(_iter799); } oprot.writeListEnd(); } @@ -764,9 +764,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, InsertEventRequestD TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.filesAdded.size()); - for (String _iter792 : struct.filesAdded) + for (String _iter800 : struct.filesAdded) { - oprot.writeString(_iter792); + oprot.writeString(_iter800); } } BitSet optionals = new BitSet(); @@ -786,18 +786,18 @@ public 
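
The InsertEventRequestData hunks are again renumbering only, but the code they pass through shows the tuple protocol's treatment of optional fields, which matters for the substantive changes further down: required fields are written unconditionally, then a presence bitmap, then only the optional fields whose bits are set. A condensed sketch follows; the bit positions come from the incoming.get(1)/incoming.get(2) guards visible on the read side here, and the bitmap width is an assumption since the writeBitSet call falls outside these hunks:

    // Condensed optional-field write path for InsertEventRequestData under the
    // tuple protocol (bit 0 belongs to another optional field of this struct).
    java.util.BitSet optionals = new java.util.BitSet();
    if (struct.isSetFilesAddedChecksum()) { optionals.set(1); }
    if (struct.isSetSubDirectoryList())   { optionals.set(2); }
    oprot.writeBitSet(optionals, 3); // width assumed; not shown in this hunk
    if (struct.isSetFilesAddedChecksum()) {
      oprot.writeI32(struct.filesAddedChecksum.size());
      for (String checksum : struct.filesAddedChecksum) {
        oprot.writeString(checksum);
      }
    }
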
void write(org.apache.thrift.protocol.TProtocol prot, InsertEventRequestD if (struct.isSetFilesAddedChecksum()) { { oprot.writeI32(struct.filesAddedChecksum.size()); - for (String _iter793 : struct.filesAddedChecksum) + for (String _iter801 : struct.filesAddedChecksum) { - oprot.writeString(_iter793); + oprot.writeString(_iter801); } } } if (struct.isSetSubDirectoryList()) { { oprot.writeI32(struct.subDirectoryList.size()); - for (String _iter794 : struct.subDirectoryList) + for (String _iter802 : struct.subDirectoryList) { - oprot.writeString(_iter794); + oprot.writeString(_iter802); } } } @@ -807,13 +807,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, InsertEventRequestD public void read(org.apache.thrift.protocol.TProtocol prot, InsertEventRequestData struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list795 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.filesAdded = new ArrayList(_list795.size); - String _elem796; - for (int _i797 = 0; _i797 < _list795.size; ++_i797) + org.apache.thrift.protocol.TList _list803 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.filesAdded = new ArrayList(_list803.size); + String _elem804; + for (int _i805 = 0; _i805 < _list803.size; ++_i805) { - _elem796 = iprot.readString(); - struct.filesAdded.add(_elem796); + _elem804 = iprot.readString(); + struct.filesAdded.add(_elem804); } } struct.setFilesAddedIsSet(true); @@ -824,26 +824,26 @@ public void read(org.apache.thrift.protocol.TProtocol prot, InsertEventRequestDa } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list798 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.filesAddedChecksum = new ArrayList(_list798.size); - String _elem799; - for (int _i800 = 0; _i800 < _list798.size; ++_i800) + org.apache.thrift.protocol.TList _list806 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.filesAddedChecksum = new ArrayList(_list806.size); + String _elem807; + for (int _i808 = 0; _i808 < _list806.size; ++_i808) { - _elem799 = iprot.readString(); - struct.filesAddedChecksum.add(_elem799); + _elem807 = iprot.readString(); + struct.filesAddedChecksum.add(_elem807); } } struct.setFilesAddedChecksumIsSet(true); } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list801 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.subDirectoryList = new ArrayList(_list801.size); - String _elem802; - for (int _i803 = 0; _i803 < _list801.size; ++_i803) + org.apache.thrift.protocol.TList _list809 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.subDirectoryList = new ArrayList(_list809.size); + String _elem810; + for (int _i811 = 0; _i811 < _list809.size; ++_i811) { - _elem802 = iprot.readString(); - struct.subDirectoryList.add(_elem802); + _elem810 = iprot.readString(); + struct.subDirectoryList.add(_elem810); } } struct.setSubDirectoryListIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java index 2b0ffbab12..ee21f0843a 100644 --- 
a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java @@ -689,14 +689,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, LockRequest struct) case 1: // COMPONENT if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list706 = iprot.readListBegin(); - struct.component = new ArrayList(_list706.size); - LockComponent _elem707; - for (int _i708 = 0; _i708 < _list706.size; ++_i708) + org.apache.thrift.protocol.TList _list714 = iprot.readListBegin(); + struct.component = new ArrayList(_list714.size); + LockComponent _elem715; + for (int _i716 = 0; _i716 < _list714.size; ++_i716) { - _elem707 = new LockComponent(); - _elem707.read(iprot); - struct.component.add(_elem707); + _elem715 = new LockComponent(); + _elem715.read(iprot); + struct.component.add(_elem715); } iprot.readListEnd(); } @@ -754,9 +754,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, LockRequest struct oprot.writeFieldBegin(COMPONENT_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.component.size())); - for (LockComponent _iter709 : struct.component) + for (LockComponent _iter717 : struct.component) { - _iter709.write(oprot); + _iter717.write(oprot); } oprot.writeListEnd(); } @@ -803,9 +803,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, LockRequest struct) TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.component.size()); - for (LockComponent _iter710 : struct.component) + for (LockComponent _iter718 : struct.component) { - _iter710.write(oprot); + _iter718.write(oprot); } } oprot.writeString(struct.user); @@ -830,14 +830,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, LockRequest struct) public void read(org.apache.thrift.protocol.TProtocol prot, LockRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list711 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.component = new ArrayList(_list711.size); - LockComponent _elem712; - for (int _i713 = 0; _i713 < _list711.size; ++_i713) + org.apache.thrift.protocol.TList _list719 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.component = new ArrayList(_list719.size); + LockComponent _elem720; + for (int _i721 = 0; _i721 < _list719.size; ++_i721) { - _elem712 = new LockComponent(); - _elem712.read(iprot); - struct.component.add(_elem712); + _elem720 = new LockComponent(); + _elem720.read(iprot); + struct.component.add(_elem720); } } struct.setComponentIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventRequest.java index 32b9654d14..d9874562a5 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventRequest.java @@ -525,13 +525,13 @@ public void 
read(org.apache.thrift.protocol.TProtocol iprot, NotificationEventRe case 3: // EVENT_TYPE_SKIP_LIST if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list764 = iprot.readListBegin(); - struct.eventTypeSkipList = new ArrayList(_list764.size); - String _elem765; - for (int _i766 = 0; _i766 < _list764.size; ++_i766) + org.apache.thrift.protocol.TList _list772 = iprot.readListBegin(); + struct.eventTypeSkipList = new ArrayList(_list772.size); + String _elem773; + for (int _i774 = 0; _i774 < _list772.size; ++_i774) { - _elem765 = iprot.readString(); - struct.eventTypeSkipList.add(_elem765); + _elem773 = iprot.readString(); + struct.eventTypeSkipList.add(_elem773); } iprot.readListEnd(); } @@ -566,9 +566,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, NotificationEventR oprot.writeFieldBegin(EVENT_TYPE_SKIP_LIST_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.eventTypeSkipList.size())); - for (String _iter767 : struct.eventTypeSkipList) + for (String _iter775 : struct.eventTypeSkipList) { - oprot.writeString(_iter767); + oprot.writeString(_iter775); } oprot.writeListEnd(); } @@ -607,9 +607,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, NotificationEventRe if (struct.isSetEventTypeSkipList()) { { oprot.writeI32(struct.eventTypeSkipList.size()); - for (String _iter768 : struct.eventTypeSkipList) + for (String _iter776 : struct.eventTypeSkipList) { - oprot.writeString(_iter768); + oprot.writeString(_iter776); } } } @@ -627,13 +627,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, NotificationEventReq } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list769 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.eventTypeSkipList = new ArrayList(_list769.size); - String _elem770; - for (int _i771 = 0; _i771 < _list769.size; ++_i771) + org.apache.thrift.protocol.TList _list777 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.eventTypeSkipList = new ArrayList(_list777.size); + String _elem778; + for (int _i779 = 0; _i779 < _list777.size; ++_i779) { - _elem770 = iprot.readString(); - struct.eventTypeSkipList.add(_elem770); + _elem778 = iprot.readString(); + struct.eventTypeSkipList.add(_elem778); } } struct.setEventTypeSkipListIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java index 580edf8a1d..46cf33a30c 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, NotificationEventRe case 1: // EVENTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list772 = iprot.readListBegin(); - struct.events = new ArrayList(_list772.size); - NotificationEvent _elem773; - for (int _i774 = 0; _i774 < _list772.size; ++_i774) + org.apache.thrift.protocol.TList _list780 = iprot.readListBegin(); + struct.events = new 
ArrayList(_list780.size); + NotificationEvent _elem781; + for (int _i782 = 0; _i782 < _list780.size; ++_i782) { - _elem773 = new NotificationEvent(); - _elem773.read(iprot); - struct.events.add(_elem773); + _elem781 = new NotificationEvent(); + _elem781.read(iprot); + struct.events.add(_elem781); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, NotificationEventR oprot.writeFieldBegin(EVENTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.events.size())); - for (NotificationEvent _iter775 : struct.events) + for (NotificationEvent _iter783 : struct.events) { - _iter775.write(oprot); + _iter783.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, NotificationEventRe TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.events.size()); - for (NotificationEvent _iter776 : struct.events) + for (NotificationEvent _iter784 : struct.events) { - _iter776.write(oprot); + _iter784.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, NotificationEventRe public void read(org.apache.thrift.protocol.TProtocol prot, NotificationEventResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list777 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.events = new ArrayList(_list777.size); - NotificationEvent _elem778; - for (int _i779 = 0; _i779 < _list777.size; ++_i779) + org.apache.thrift.protocol.TList _list785 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.events = new ArrayList(_list785.size); + NotificationEvent _elem786; + for (int _i787 = 0; _i787 < _list785.size; ++_i787) { - _elem778 = new NotificationEvent(); - _elem778.read(iprot); - struct.events.add(_elem778); + _elem786 = new NotificationEvent(); + _elem786.read(iprot); + struct.events.add(_elem786); } } struct.setEventsIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesRequest.java index 4f7f322b91..1a63399563 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesRequest.java @@ -47,6 +47,7 @@ private static final org.apache.thrift.protocol.TField ASCENDING_FIELD_DESC = new org.apache.thrift.protocol.TField("ascending", org.apache.thrift.protocol.TType.BOOL, (short)7); private static final org.apache.thrift.protocol.TField MAX_PARTS_FIELD_DESC = new org.apache.thrift.protocol.TField("maxParts", org.apache.thrift.protocol.TType.I64, (short)8); private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)9); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)10); private static final Map, SchemeFactory> 
schemes = new HashMap, SchemeFactory>(); static { @@ -63,6 +64,7 @@ private boolean ascending; // optional private long maxParts; // optional private String catName; // optional + private String validWriteIdList; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -74,7 +76,8 @@ PARTITION_ORDER((short)6, "partitionOrder"), ASCENDING((short)7, "ascending"), MAX_PARTS((short)8, "maxParts"), - CAT_NAME((short)9, "catName"); + CAT_NAME((short)9, "catName"), + VALID_WRITE_ID_LIST((short)10, "validWriteIdList"); private static final Map byName = new HashMap(); @@ -107,6 +110,8 @@ public static _Fields findByThriftId(int fieldId) { return MAX_PARTS; case 9: // CAT_NAME return CAT_NAME; + case 10: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; default: return null; } @@ -151,7 +156,7 @@ public String getFieldName() { private static final int __ASCENDING_ISSET_ID = 1; private static final int __MAXPARTS_ISSET_ID = 2; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.APPLY_DISTINCT,_Fields.FILTER,_Fields.PARTITION_ORDER,_Fields.ASCENDING,_Fields.MAX_PARTS,_Fields.CAT_NAME}; + private static final _Fields optionals[] = {_Fields.APPLY_DISTINCT,_Fields.FILTER,_Fields.PARTITION_ORDER,_Fields.ASCENDING,_Fields.MAX_PARTS,_Fields.CAT_NAME,_Fields.VALID_WRITE_ID_LIST}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -175,6 +180,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PartitionValuesRequest.class, metaDataMap); } @@ -233,6 +240,9 @@ public PartitionValuesRequest(PartitionValuesRequest other) { if (other.isSetCatName()) { this.catName = other.catName; } + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } } public PartitionValuesRequest deepCopy() { @@ -253,6 +263,7 @@ public void clear() { this.maxParts = -1L; this.catName = null; + this.validWriteIdList = null; } public String getDbName() { @@ -489,6 +500,29 @@ public void setCatNameIsSet(boolean value) { } } + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + 
} + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -563,6 +597,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + } } @@ -595,6 +637,9 @@ public Object getFieldValue(_Fields field) { case CAT_NAME: return getCatName(); + case VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + } throw new IllegalStateException(); } @@ -624,6 +669,8 @@ public boolean isSet(_Fields field) { return isSetMaxParts(); case CAT_NAME: return isSetCatName(); + case VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); } throw new IllegalStateException(); } @@ -722,6 +769,15 @@ public boolean equals(PartitionValuesRequest that) { return false; } + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if (!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + return true; } @@ -774,6 +830,11 @@ public int hashCode() { if (present_catName) list.add(catName); + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + return list.hashCode(); } @@ -875,6 +936,16 @@ public int compareTo(PartitionValuesRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -966,6 +1037,16 @@ public String toString() { } first = false; } + if (isSetValidWriteIdList()) { + if (!first) sb.append(", "); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -1117,6 +1198,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionValuesRequ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 10: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -1195,6 +1284,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionValuesReq oprot.writeFieldEnd(); } } + if (struct.validWriteIdList != null) { + if (struct.isSetValidWriteIdList()) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -1240,7 +1336,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionValuesRequ if (struct.isSetCatName()) { optionals.set(5); } - oprot.writeBitSet(optionals, 6); + if (struct.isSetValidWriteIdList()) { + 
optionals.set(6); + } + oprot.writeBitSet(optionals, 7); if (struct.isSetApplyDistinct()) { oprot.writeBool(struct.applyDistinct); } @@ -1265,6 +1364,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionValuesRequ if (struct.isSetCatName()) { oprot.writeString(struct.catName); } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); + } } @Override @@ -1286,7 +1388,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionValuesReque } } struct.setPartitionKeysIsSet(true); - BitSet incoming = iprot.readBitSet(6); + BitSet incoming = iprot.readBitSet(7); if (incoming.get(0)) { struct.applyDistinct = iprot.readBool(); struct.setApplyDistinctIsSet(true); @@ -1321,6 +1423,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionValuesReque struct.catName = iprot.readString(); struct.setCatNameIsSet(true); } + if (incoming.get(6)) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } } } diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprRequest.java index 0e72625e01..0046f6a5ea 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprRequest.java @@ -44,6 +44,7 @@ private static final org.apache.thrift.protocol.TField DEFAULT_PARTITION_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("defaultPartitionName", org.apache.thrift.protocol.TType.STRING, (short)4); private static final org.apache.thrift.protocol.TField MAX_PARTS_FIELD_DESC = new org.apache.thrift.protocol.TField("maxParts", org.apache.thrift.protocol.TType.I16, (short)5); private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)6); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)7); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -57,6 +58,7 @@ private String defaultPartitionName; // optional private short maxParts; // optional private String catName; // optional + private String validWriteIdList; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
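
PartitionValuesRequest above, and PartitionsByExprRequest below, gain the same optional field: a string validWriteIdList with the next free field id, wired through the _Fields enum, the optionals[] array, metaDataMap, the copy constructor, equals/hashCode/compareTo/toString, and both serialization schemes. A hypothetical caller-side sketch of how the field is meant to be populated; the snapshot lookup is an assumed helper, while writeToString() is the textual encoding ValidWriteIdList already provides:

    // Carry the reader's transactional snapshot in the request so the
    // metastore can answer consistently for ACID tables.
    List<FieldSchema> partitionKeys =
        Arrays.asList(new FieldSchema("ds", "string", null));
    PartitionValuesRequest req =
        new PartitionValuesRequest("db1", "tbl1", partitionKeys);  // required fields
    ValidWriteIdList writeIds = getValidWriteIdsFor("db1", "tbl1"); // assumed helper
    req.setValidWriteIdList(writeIds.writeToString());
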
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -65,7 +67,8 @@ EXPR((short)3, "expr"), DEFAULT_PARTITION_NAME((short)4, "defaultPartitionName"), MAX_PARTS((short)5, "maxParts"), - CAT_NAME((short)6, "catName"); + CAT_NAME((short)6, "catName"), + VALID_WRITE_ID_LIST((short)7, "validWriteIdList"); private static final Map byName = new HashMap(); @@ -92,6 +95,8 @@ public static _Fields findByThriftId(int fieldId) { return MAX_PARTS; case 6: // CAT_NAME return CAT_NAME; + case 7: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; default: return null; } @@ -134,7 +139,7 @@ public String getFieldName() { // isset id assignments private static final int __MAXPARTS_ISSET_ID = 0; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.DEFAULT_PARTITION_NAME,_Fields.MAX_PARTS,_Fields.CAT_NAME}; + private static final _Fields optionals[] = {_Fields.DEFAULT_PARTITION_NAME,_Fields.MAX_PARTS,_Fields.CAT_NAME,_Fields.VALID_WRITE_ID_LIST}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -150,6 +155,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I16))); tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PartitionsByExprRequest.class, metaDataMap); } @@ -191,6 +198,9 @@ public PartitionsByExprRequest(PartitionsByExprRequest other) { if (other.isSetCatName()) { this.catName = other.catName; } + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } } public PartitionsByExprRequest deepCopy() { @@ -206,6 +216,7 @@ public void clear() { this.maxParts = (short)-1; this.catName = null; + this.validWriteIdList = null; } public String getDbName() { @@ -354,6 +365,29 @@ public void setCatNameIsSet(boolean value) { } } + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -404,6 +438,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + } } @@ -427,6 +469,9 @@ public Object getFieldValue(_Fields field) { case CAT_NAME: return getCatName(); + case 
VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + } throw new IllegalStateException(); } @@ -450,6 +495,8 @@ public boolean isSet(_Fields field) { return isSetMaxParts(); case CAT_NAME: return isSetCatName(); + case VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); } throw new IllegalStateException(); } @@ -521,6 +568,15 @@ public boolean equals(PartitionsByExprRequest that) { return false; } + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if (!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + return true; } @@ -558,6 +614,11 @@ public int hashCode() { if (present_catName) list.add(catName); + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + return list.hashCode(); } @@ -629,6 +690,16 @@ public int compareTo(PartitionsByExprRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -698,6 +769,16 @@ public String toString() { } first = false; } + if (isSetValidWriteIdList()) { + if (!first) sb.append(", "); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -803,6 +884,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionsByExprReq org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 7: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -850,6 +939,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionsByExprRe oprot.writeFieldEnd(); } } + if (struct.validWriteIdList != null) { + if (struct.isSetValidWriteIdList()) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -880,7 +976,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionsByExprReq if (struct.isSetCatName()) { optionals.set(2); } - oprot.writeBitSet(optionals, 3); + if (struct.isSetValidWriteIdList()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); if (struct.isSetDefaultPartitionName()) { oprot.writeString(struct.defaultPartitionName); } @@ -890,6 +989,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionsByExprReq if (struct.isSetCatName()) { oprot.writeString(struct.catName); } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); + } } @Override @@ -901,7 +1003,7 @@ public void 
read(org.apache.thrift.protocol.TProtocol prot, PartitionsByExprRequ struct.setTblNameIsSet(true); struct.expr = iprot.readBinary(); struct.setExprIsSet(true); - BitSet incoming = iprot.readBitSet(3); + BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { struct.defaultPartitionName = iprot.readString(); struct.setDefaultPartitionNameIsSet(true); @@ -914,6 +1016,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionsByExprRequ struct.catName = iprot.readString(); struct.setCatNameIsSet(true); } + if (incoming.get(3)) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } } } diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PutFileMetadataRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PutFileMetadataRequest.java index 0cc9a69748..cc631aecd5 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PutFileMetadataRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PutFileMetadataRequest.java @@ -547,13 +547,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PutFileMetadataRequ case 1: // FILE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list856 = iprot.readListBegin(); - struct.fileIds = new ArrayList(_list856.size); - long _elem857; - for (int _i858 = 0; _i858 < _list856.size; ++_i858) + org.apache.thrift.protocol.TList _list864 = iprot.readListBegin(); + struct.fileIds = new ArrayList(_list864.size); + long _elem865; + for (int _i866 = 0; _i866 < _list864.size; ++_i866) { - _elem857 = iprot.readI64(); - struct.fileIds.add(_elem857); + _elem865 = iprot.readI64(); + struct.fileIds.add(_elem865); } iprot.readListEnd(); } @@ -565,13 +565,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PutFileMetadataRequ case 2: // METADATA if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list859 = iprot.readListBegin(); - struct.metadata = new ArrayList(_list859.size); - ByteBuffer _elem860; - for (int _i861 = 0; _i861 < _list859.size; ++_i861) + org.apache.thrift.protocol.TList _list867 = iprot.readListBegin(); + struct.metadata = new ArrayList(_list867.size); + ByteBuffer _elem868; + for (int _i869 = 0; _i869 < _list867.size; ++_i869) { - _elem860 = iprot.readBinary(); - struct.metadata.add(_elem860); + _elem868 = iprot.readBinary(); + struct.metadata.add(_elem868); } iprot.readListEnd(); } @@ -605,9 +605,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PutFileMetadataReq oprot.writeFieldBegin(FILE_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.fileIds.size())); - for (long _iter862 : struct.fileIds) + for (long _iter870 : struct.fileIds) { - oprot.writeI64(_iter862); + oprot.writeI64(_iter870); } oprot.writeListEnd(); } @@ -617,9 +617,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PutFileMetadataReq oprot.writeFieldBegin(METADATA_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.metadata.size())); - for (ByteBuffer _iter863 : struct.metadata) + for (ByteBuffer _iter871 : struct.metadata) { - oprot.writeBinary(_iter863); + 
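
The paired writeBitSet/readBitSet changes in the hunks above are the part of this pattern that is easy to get wrong by hand: the tuple protocol's presence bitmap must have the same width on the write and read paths, so appending an optional field widens both sides in lockstep (here from 3 to 4 for PartitionsByExprRequest, and from 6 to 7 for PartitionValuesRequest earlier). Condensed from the generated TupleScheme:

    // Writer: set the new bit and widen the bitmap.
    if (struct.isSetValidWriteIdList()) {
      optionals.set(3); // after defaultPartitionName(0), maxParts(1), catName(2)
    }
    oprot.writeBitSet(optionals, 4); // was 3
    // Reader: read the same width, then consume the field only if present.
    java.util.BitSet incoming = iprot.readBitSet(4); // was 3
    if (incoming.get(3)) {
      struct.validWriteIdList = iprot.readString();
      struct.setValidWriteIdListIsSet(true);
    }
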
oprot.writeBinary(_iter871); } oprot.writeListEnd(); } @@ -651,16 +651,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PutFileMetadataRequ TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.fileIds.size()); - for (long _iter864 : struct.fileIds) + for (long _iter872 : struct.fileIds) { - oprot.writeI64(_iter864); + oprot.writeI64(_iter872); } } { oprot.writeI32(struct.metadata.size()); - for (ByteBuffer _iter865 : struct.metadata) + for (ByteBuffer _iter873 : struct.metadata) { - oprot.writeBinary(_iter865); + oprot.writeBinary(_iter873); } } BitSet optionals = new BitSet(); @@ -677,24 +677,24 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PutFileMetadataRequ public void read(org.apache.thrift.protocol.TProtocol prot, PutFileMetadataRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list866 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.fileIds = new ArrayList(_list866.size); - long _elem867; - for (int _i868 = 0; _i868 < _list866.size; ++_i868) + org.apache.thrift.protocol.TList _list874 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.fileIds = new ArrayList(_list874.size); + long _elem875; + for (int _i876 = 0; _i876 < _list874.size; ++_i876) { - _elem867 = iprot.readI64(); - struct.fileIds.add(_elem867); + _elem875 = iprot.readI64(); + struct.fileIds.add(_elem875); } } struct.setFileIdsIsSet(true); { - org.apache.thrift.protocol.TList _list869 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.metadata = new ArrayList(_list869.size); - ByteBuffer _elem870; - for (int _i871 = 0; _i871 < _list869.size; ++_i871) + org.apache.thrift.protocol.TList _list877 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.metadata = new ArrayList(_list877.size); + ByteBuffer _elem878; + for (int _i879 = 0; _i879 < _list877.size; ++_i879) { - _elem870 = iprot.readBinary(); - struct.metadata.add(_elem870); + _elem878 = iprot.readBinary(); + struct.metadata.add(_elem878); } } struct.setMetadataIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RenamePartitionRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RenamePartitionRequest.java index dd509a4bc5..8a1cb8f5cc 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RenamePartitionRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RenamePartitionRequest.java @@ -796,13 +796,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, RenamePartitionRequ case 4: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1032 = iprot.readListBegin(); - struct.partVals = new ArrayList(_list1032.size); - String _elem1033; - for (int _i1034 = 0; _i1034 < _list1032.size; ++_i1034) + org.apache.thrift.protocol.TList _list1040 = iprot.readListBegin(); + struct.partVals = new ArrayList(_list1040.size); + String _elem1041; + for (int _i1042 = 0; _i1042 < _list1040.size; ++_i1042) { - _elem1033 = iprot.readString(); - struct.partVals.add(_elem1033); + _elem1041 = 
iprot.readString(); + struct.partVals.add(_elem1041); } iprot.readListEnd(); } @@ -862,9 +862,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, RenamePartitionReq oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partVals.size())); - for (String _iter1035 : struct.partVals) + for (String _iter1043 : struct.partVals) { - oprot.writeString(_iter1035); + oprot.writeString(_iter1043); } oprot.writeListEnd(); } @@ -903,9 +903,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, RenamePartitionRequ oprot.writeString(struct.tableName); { oprot.writeI32(struct.partVals.size()); - for (String _iter1036 : struct.partVals) + for (String _iter1044 : struct.partVals) { - oprot.writeString(_iter1036); + oprot.writeString(_iter1044); } } struct.newPart.write(oprot); @@ -933,13 +933,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, RenamePartitionReque struct.tableName = iprot.readString(); struct.setTableNameIsSet(true); { - org.apache.thrift.protocol.TList _list1037 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partVals = new ArrayList(_list1037.size); - String _elem1038; - for (int _i1039 = 0; _i1039 < _list1037.size; ++_i1039) + org.apache.thrift.protocol.TList _list1045 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partVals = new ArrayList(_list1045.size); + String _elem1046; + for (int _i1047 = 0; _i1047 < _list1045.size; ++_i1047) { - _elem1038 = iprot.readString(); - struct.partVals.add(_elem1038); + _elem1046 = iprot.readString(); + struct.partVals.add(_elem1046); } } struct.setPartValsIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SchemaVersion.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SchemaVersion.java index de8c21a592..3a450ff936 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SchemaVersion.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SchemaVersion.java @@ -1119,14 +1119,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SchemaVersion struc case 4: // COLS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1008 = iprot.readListBegin(); - struct.cols = new ArrayList(_list1008.size); - FieldSchema _elem1009; - for (int _i1010 = 0; _i1010 < _list1008.size; ++_i1010) + org.apache.thrift.protocol.TList _list1016 = iprot.readListBegin(); + struct.cols = new ArrayList(_list1016.size); + FieldSchema _elem1017; + for (int _i1018 = 0; _i1018 < _list1016.size; ++_i1018) { - _elem1009 = new FieldSchema(); - _elem1009.read(iprot); - struct.cols.add(_elem1009); + _elem1017 = new FieldSchema(); + _elem1017.read(iprot); + struct.cols.add(_elem1017); } iprot.readListEnd(); } @@ -1212,9 +1212,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, SchemaVersion stru oprot.writeFieldBegin(COLS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.cols.size())); - for (FieldSchema _iter1011 : struct.cols) + for (FieldSchema _iter1019 : struct.cols) { - _iter1011.write(oprot); + _iter1019.write(oprot); } 
oprot.writeListEnd(); } @@ -1323,9 +1323,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, SchemaVersion struc if (struct.isSetCols()) { { oprot.writeI32(struct.cols.size()); - for (FieldSchema _iter1012 : struct.cols) + for (FieldSchema _iter1020 : struct.cols) { - _iter1012.write(oprot); + _iter1020.write(oprot); } } } @@ -1368,14 +1368,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, SchemaVersion struct } if (incoming.get(3)) { { - org.apache.thrift.protocol.TList _list1013 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.cols = new ArrayList(_list1013.size); - FieldSchema _elem1014; - for (int _i1015 = 0; _i1015 < _list1013.size; ++_i1015) + org.apache.thrift.protocol.TList _list1021 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.cols = new ArrayList(_list1021.size); + FieldSchema _elem1022; + for (int _i1023 = 0; _i1023 < _list1021.size; ++_i1023) { - _elem1014 = new FieldSchema(); - _elem1014.read(iprot); - struct.cols.add(_elem1014); + _elem1022 = new FieldSchema(); + _elem1022.read(iprot); + struct.cols.add(_elem1022); } } struct.setColsIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java index bc5758e943..7c03d5adea 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ShowCompactResponse case 1: // COMPACTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list748 = iprot.readListBegin(); - struct.compacts = new ArrayList(_list748.size); - ShowCompactResponseElement _elem749; - for (int _i750 = 0; _i750 < _list748.size; ++_i750) + org.apache.thrift.protocol.TList _list756 = iprot.readListBegin(); + struct.compacts = new ArrayList(_list756.size); + ShowCompactResponseElement _elem757; + for (int _i758 = 0; _i758 < _list756.size; ++_i758) { - _elem749 = new ShowCompactResponseElement(); - _elem749.read(iprot); - struct.compacts.add(_elem749); + _elem757 = new ShowCompactResponseElement(); + _elem757.read(iprot); + struct.compacts.add(_elem757); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ShowCompactRespons oprot.writeFieldBegin(COMPACTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.compacts.size())); - for (ShowCompactResponseElement _iter751 : struct.compacts) + for (ShowCompactResponseElement _iter759 : struct.compacts) { - _iter751.write(oprot); + _iter759.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ShowCompactResponse TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.compacts.size()); - for (ShowCompactResponseElement _iter752 : struct.compacts) + for (ShowCompactResponseElement _iter760 : struct.compacts) { - _iter752.write(oprot); + _iter760.write(oprot); } } } @@ -425,14 +425,14 @@ public void 
write(org.apache.thrift.protocol.TProtocol prot, ShowCompactResponse public void read(org.apache.thrift.protocol.TProtocol prot, ShowCompactResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list753 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.compacts = new ArrayList(_list753.size); - ShowCompactResponseElement _elem754; - for (int _i755 = 0; _i755 < _list753.size; ++_i755) + org.apache.thrift.protocol.TList _list761 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.compacts = new ArrayList(_list761.size); + ShowCompactResponseElement _elem762; + for (int _i763 = 0; _i763 < _list761.size; ++_i763) { - _elem754 = new ShowCompactResponseElement(); - _elem754.read(iprot); - struct.compacts.add(_elem754); + _elem762 = new ShowCompactResponseElement(); + _elem762.read(iprot); + struct.compacts.add(_elem762); } } struct.setCompactsIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponse.java index d4bf9fac73..14434f4742 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponse.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponse.java @@ -350,14 +350,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ShowLocksResponse s case 1: // LOCKS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list714 = iprot.readListBegin(); - struct.locks = new ArrayList(_list714.size); - ShowLocksResponseElement _elem715; - for (int _i716 = 0; _i716 < _list714.size; ++_i716) + org.apache.thrift.protocol.TList _list722 = iprot.readListBegin(); + struct.locks = new ArrayList(_list722.size); + ShowLocksResponseElement _elem723; + for (int _i724 = 0; _i724 < _list722.size; ++_i724) { - _elem715 = new ShowLocksResponseElement(); - _elem715.read(iprot); - struct.locks.add(_elem715); + _elem723 = new ShowLocksResponseElement(); + _elem723.read(iprot); + struct.locks.add(_elem723); } iprot.readListEnd(); } @@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ShowLocksResponse oprot.writeFieldBegin(LOCKS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.locks.size())); - for (ShowLocksResponseElement _iter717 : struct.locks) + for (ShowLocksResponseElement _iter725 : struct.locks) { - _iter717.write(oprot); + _iter725.write(oprot); } oprot.writeListEnd(); } @@ -416,9 +416,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ShowLocksResponse s if (struct.isSetLocks()) { { oprot.writeI32(struct.locks.size()); - for (ShowLocksResponseElement _iter718 : struct.locks) + for (ShowLocksResponseElement _iter726 : struct.locks) { - _iter718.write(oprot); + _iter726.write(oprot); } } } @@ -430,14 +430,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, ShowLocksResponse st BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list719 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, 
iprot.readI32()); - struct.locks = new ArrayList(_list719.size); - ShowLocksResponseElement _elem720; - for (int _i721 = 0; _i721 < _list719.size; ++_i721) + org.apache.thrift.protocol.TList _list727 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.locks = new ArrayList(_list727.size); + ShowLocksResponseElement _elem728; + for (int _i729 = 0; _i729 < _list727.size; ++_i729) { - _elem720 = new ShowLocksResponseElement(); - _elem720.read(iprot); - struct.locks.add(_elem720); + _elem728 = new ShowLocksResponseElement(); + _elem728.read(iprot); + struct.locks.add(_elem728); } } struct.setLocksIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableWriteId.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableWriteId.java new file mode 100644 index 0000000000..1517721f50 --- /dev/null +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableWriteId.java @@ -0,0 +1,488 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class TableWriteId implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TableWriteId"); + + private static final org.apache.thrift.protocol.TField FULL_TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("fullTableName", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField WRITE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("writeId", org.apache.thrift.protocol.TType.I64, (short)2); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new TableWriteIdStandardSchemeFactory()); + schemes.put(TupleScheme.class, new TableWriteIdTupleSchemeFactory()); + } + + private String fullTableName; // required + private long writeId; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableWriteId.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableWriteId.java
new file mode 100644
index 0000000000..1517721f50
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableWriteId.java
@@ -0,0 +1,488 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class TableWriteId implements org.apache.thrift.TBase<TableWriteId, TableWriteId._Fields>, java.io.Serializable, Cloneable, Comparable<TableWriteId> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TableWriteId");
+
+  private static final org.apache.thrift.protocol.TField FULL_TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("fullTableName", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField WRITE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("writeId", org.apache.thrift.protocol.TType.I64, (short)2);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new TableWriteIdStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new TableWriteIdTupleSchemeFactory());
+  }
+
+  private String fullTableName; // required
+  private long writeId; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    FULL_TABLE_NAME((short)1, "fullTableName"),
+    WRITE_ID((short)2, "writeId");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if its not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // FULL_TABLE_NAME
+          return FULL_TABLE_NAME;
+        case 2: // WRITE_ID
+          return WRITE_ID;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if its not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __WRITEID_ISSET_ID = 0;
+  private byte __isset_bitfield = 0;
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.FULL_TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("fullTableName", org.apache.thrift.TFieldRequirementType.REQUIRED,
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.WRITE_ID, new org.apache.thrift.meta_data.FieldMetaData("writeId", org.apache.thrift.TFieldRequirementType.REQUIRED,
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TableWriteId.class, metaDataMap);
+  }
+
+  public TableWriteId() {
+  }
+
+  public TableWriteId(
+    String fullTableName,
+    long writeId)
+  {
+    this();
+    this.fullTableName = fullTableName;
+    this.writeId = writeId;
+    setWriteIdIsSet(true);
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public TableWriteId(TableWriteId other) {
+    __isset_bitfield = other.__isset_bitfield;
+    if (other.isSetFullTableName()) {
+      this.fullTableName = other.fullTableName;
+    }
+    this.writeId = other.writeId;
+  }
+
+  public TableWriteId deepCopy() {
+    return new TableWriteId(this);
+  }
+
+  @Override
+  public void clear() {
+    this.fullTableName = null;
+    setWriteIdIsSet(false);
+    this.writeId = 0;
+  }
+
+  public String getFullTableName() {
+    return this.fullTableName;
+  }
+
+  public void setFullTableName(String fullTableName) {
+    this.fullTableName = fullTableName;
+  }
+
+  public void unsetFullTableName() {
+    this.fullTableName = null;
+  }
+
+  /** Returns true if field fullTableName is set (has been assigned a value) and false otherwise */
+  public boolean isSetFullTableName() {
+    return this.fullTableName != null;
+  }
+
+  public void setFullTableNameIsSet(boolean value) {
+    if (!value) {
+      this.fullTableName = null;
+    }
+  }
+
+  public long getWriteId() {
+    return this.writeId;
+  }
+
+  public void setWriteId(long writeId) {
+    this.writeId = writeId;
+    setWriteIdIsSet(true);
+  }
+
+  public void unsetWriteId() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __WRITEID_ISSET_ID);
+  }
+
+  /** Returns true if field writeId is set (has been assigned a value) and false otherwise */
+  public boolean isSetWriteId() {
+    return EncodingUtils.testBit(__isset_bitfield, __WRITEID_ISSET_ID);
+  }
+
+  public void setWriteIdIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __WRITEID_ISSET_ID, value);
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case FULL_TABLE_NAME:
+      if (value == null) {
+        unsetFullTableName();
+      } else {
+        setFullTableName((String)value);
+      }
+      break;
+
+    case WRITE_ID:
+      if (value == null) {
+        unsetWriteId();
+      } else {
+        setWriteId((Long)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case FULL_TABLE_NAME:
+      return getFullTableName();
+
+    case WRITE_ID:
+      return getWriteId();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case FULL_TABLE_NAME:
+      return isSetFullTableName();
+    case WRITE_ID:
+      return isSetWriteId();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof TableWriteId)
+      return this.equals((TableWriteId)that);
+    return false;
+  }
+
+  public boolean equals(TableWriteId that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_fullTableName = true && this.isSetFullTableName();
+    boolean that_present_fullTableName = true && that.isSetFullTableName();
+    if (this_present_fullTableName || that_present_fullTableName) {
+      if (!(this_present_fullTableName && that_present_fullTableName))
+        return false;
+      if (!this.fullTableName.equals(that.fullTableName))
+        return false;
+    }
+
+    boolean this_present_writeId = true;
+    boolean that_present_writeId = true;
+    if (this_present_writeId || that_present_writeId) {
+      if (!(this_present_writeId && that_present_writeId))
+        return false;
+      if (this.writeId != that.writeId)
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_fullTableName = true && (isSetFullTableName());
+    list.add(present_fullTableName);
+    if (present_fullTableName)
+      list.add(fullTableName);
+
+    boolean present_writeId = true;
+    list.add(present_writeId);
+    if (present_writeId)
+      list.add(writeId);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(TableWriteId other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetFullTableName()).compareTo(other.isSetFullTableName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetFullTableName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.fullTableName, other.fullTableName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetWriteId()).compareTo(other.isSetWriteId());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetWriteId()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.writeId, other.writeId);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("TableWriteId(");
+    boolean first = true;
+
+    sb.append("fullTableName:");
+    if (this.fullTableName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.fullTableName);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("writeId:");
+    sb.append(this.writeId);
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetFullTableName()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'fullTableName' is unset! Struct:" + toString());
+    }
+
+    if (!isSetWriteId()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'writeId' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class TableWriteIdStandardSchemeFactory implements SchemeFactory {
+    public TableWriteIdStandardScheme getScheme() {
+      return new TableWriteIdStandardScheme();
+    }
+  }
+
+  private static class TableWriteIdStandardScheme extends StandardScheme<TableWriteId> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, TableWriteId struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // FULL_TABLE_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.fullTableName = iprot.readString();
+              struct.setFullTableNameIsSet(true);
+            } else {
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // WRITE_ID
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.writeId = iprot.readI64();
+              struct.setWriteIdIsSet(true);
+            } else {
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, TableWriteId struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.fullTableName != null) {
+        oprot.writeFieldBegin(FULL_TABLE_NAME_FIELD_DESC);
+        oprot.writeString(struct.fullTableName);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldBegin(WRITE_ID_FIELD_DESC);
+      oprot.writeI64(struct.writeId);
+      oprot.writeFieldEnd();
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class TableWriteIdTupleSchemeFactory implements SchemeFactory {
+    public TableWriteIdTupleScheme getScheme() {
+      return new TableWriteIdTupleScheme();
+    }
+  }
+
+  private static class TableWriteIdTupleScheme extends TupleScheme<TableWriteId> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, TableWriteId struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeString(struct.fullTableName);
+      oprot.writeI64(struct.writeId);
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, TableWriteId struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.fullTableName = iprot.readString();
+      struct.setFullTableNameIsSet(true);
+      struct.writeId = iprot.readI64();
+      struct.setWriteIdIsSet(true);
+    }
+  }
+
+}
+
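(Annotation, not part of the patch.) The generated TableWriteId above is a plain required pair of fullTableName and writeId; validate() rejects a half-built instance. A minimal sketch of round-tripping it through a compact-protocol buffer, using only the API shown above (the table-name value is arbitrary):

    // Illustrative sketch -- exercises the generated write/read/validate methods.
    import org.apache.hadoop.hive.metastore.api.TableWriteId;
    import org.apache.thrift.protocol.TCompactProtocol;
    import org.apache.thrift.transport.TMemoryBuffer;

    public class TableWriteIdRoundTrip {
      public static void main(String[] args) throws Exception {
        TableWriteId src = new TableWriteId("default.streamingnobuckets", 4L);
        src.validate();                              // both fields are REQUIRED

        TMemoryBuffer buf = new TMemoryBuffer(128);  // in-memory transport
        src.write(new TCompactProtocol(buf));        // serialize

        TableWriteId dst = new TableWriteId();
        dst.read(new TCompactProtocol(buf));         // deserialize
        System.out.println(src.equals(dst));         // prints true
      }
    }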
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
index f4218ee042..ca2287ea0a 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
@@ -76,13 +76,13 @@
     public Map<String,Type> get_type_all(String name) throws MetaException, org.apache.thrift.TException;

-    public List<FieldSchema> get_fields(String db_name, String table_name) throws MetaException, UnknownTableException, UnknownDBException, org.apache.thrift.TException;
+    public List<FieldSchema> get_fields(String db_name, String table_name, String validWriteIdList) throws MetaException, UnknownTableException, UnknownDBException, org.apache.thrift.TException;

-    public List<FieldSchema> get_fields_with_environment_context(String db_name, String table_name, EnvironmentContext environment_context) throws MetaException, UnknownTableException, UnknownDBException, org.apache.thrift.TException;
+    public List<FieldSchema> get_fields_with_environment_context(String db_name, String table_name, EnvironmentContext environment_context, String validWriteIdList) throws MetaException, UnknownTableException, UnknownDBException, org.apache.thrift.TException;

-    public List<FieldSchema> get_schema(String db_name, String table_name) throws MetaException, UnknownTableException, UnknownDBException, org.apache.thrift.TException;
+    public List<FieldSchema> get_schema(String db_name, String table_name, String validWriteIdList) throws MetaException, UnknownTableException, UnknownDBException, org.apache.thrift.TException;

-    public List<FieldSchema> get_schema_with_environment_context(String db_name, String table_name, EnvironmentContext environment_context) throws MetaException, UnknownTableException, UnknownDBException, org.apache.thrift.TException;
+    public List<FieldSchema> get_schema_with_environment_context(String db_name, String table_name, EnvironmentContext environment_context, String validWriteIdList) throws MetaException, UnknownTableException, UnknownDBException, org.apache.thrift.TException;

     public void create_table(Table tbl) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, org.apache.thrift.TException;

@@ -124,7 +124,7 @@
     public List<String> get_all_tables(String db_name) throws MetaException, org.apache.thrift.TException;

-    public Table get_table(String dbname, String tbl_name) throws MetaException, NoSuchObjectException, org.apache.thrift.TException;
+    public Table get_table(String dbname, String tbl_name, String validWriteIdList) throws MetaException, NoSuchObjectException, org.apache.thrift.TException;

     public List<Table> get_table_objects_by_name(String dbname, List<String> tbl_names) throws org.apache.thrift.TException;

@@ -176,41 +176,41 @@
     public DropPartitionsResult drop_partitions_req(DropPartitionsRequest req) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;

-    public Partition get_partition(String db_name, String tbl_name, List<String> part_vals) throws MetaException, NoSuchObjectException, org.apache.thrift.TException;
+    public Partition get_partition(String db_name, String tbl_name, List<String> part_vals, String validTxnList) throws MetaException, NoSuchObjectException, org.apache.thrift.TException;

     public Partition exchange_partition(Map<String,String> partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException, org.apache.thrift.TException;

     public List<Partition> exchange_partitions(Map<String,String> partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException, org.apache.thrift.TException;

-    public Partition get_partition_with_auth(String db_name, String tbl_name, List<String> part_vals, String user_name, List<String> group_names) throws MetaException, NoSuchObjectException, org.apache.thrift.TException;
+    public Partition get_partition_with_auth(String db_name, String tbl_name, List<String> part_vals, String user_name, List<String> group_names, String validTxnList) throws MetaException, NoSuchObjectException, org.apache.thrift.TException;

-    public Partition get_partition_by_name(String db_name, String tbl_name, String part_name) throws MetaException, NoSuchObjectException, org.apache.thrift.TException;
+    public Partition get_partition_by_name(String db_name, String tbl_name, String part_name, String validTxnList) throws MetaException, NoSuchObjectException, org.apache.thrift.TException;

-    public List<Partition> get_partitions(String db_name, String tbl_name, short max_parts) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+    public List<Partition> get_partitions(String db_name, String tbl_name, short max_parts, String validTxnList) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;

-    public List<Partition> get_partitions_with_auth(String db_name, String tbl_name, short max_parts, String user_name, List<String> group_names) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+    public List<Partition> get_partitions_with_auth(String db_name, String tbl_name, short max_parts, String user_name, List<String> group_names, String validTxnList) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;

-    public List<PartitionSpec> get_partitions_pspec(String db_name, String tbl_name, int max_parts) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+    public List<PartitionSpec> get_partitions_pspec(String db_name, String tbl_name, int max_parts, String validTxnList) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;

-    public List<String> get_partition_names(String db_name, String tbl_name, short max_parts) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+    public List<String> get_partition_names(String db_name, String tbl_name, short max_parts, String validTxnList) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;

     public PartitionValuesResponse get_partition_values(PartitionValuesRequest request) throws MetaException, NoSuchObjectException, org.apache.thrift.TException;

-    public List<Partition> get_partitions_ps(String db_name, String tbl_name, List<String> part_vals, short max_parts) throws MetaException, NoSuchObjectException, org.apache.thrift.TException;
+    public List<Partition> get_partitions_ps(String db_name, String tbl_name, List<String> part_vals, short max_parts, String validTxnList) throws MetaException, NoSuchObjectException, org.apache.thrift.TException;

-    public List<Partition> get_partitions_ps_with_auth(String db_name, String tbl_name, List<String> part_vals, short max_parts, String user_name, List<String> group_names) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+    public List<Partition> get_partitions_ps_with_auth(String db_name, String tbl_name, List<String> part_vals, short max_parts, String user_name, List<String> group_names, String validTxnList) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;

-    public List<String> get_partition_names_ps(String db_name, String tbl_name, List<String> part_vals, short max_parts) throws MetaException, NoSuchObjectException, org.apache.thrift.TException;
+    public List<String> get_partition_names_ps(String db_name, String tbl_name, List<String> part_vals, short max_parts, String validTxnList) throws MetaException, NoSuchObjectException, org.apache.thrift.TException;

-    public List<Partition> get_partitions_by_filter(String db_name, String tbl_name, String filter, short max_parts) throws MetaException, NoSuchObjectException, org.apache.thrift.TException;
+    public List<Partition> get_partitions_by_filter(String db_name, String tbl_name, String filter, short max_parts, String validTxnList) throws MetaException, NoSuchObjectException, org.apache.thrift.TException;

-    public List<PartitionSpec> get_part_specs_by_filter(String db_name, String tbl_name, String filter, int max_parts) throws MetaException, NoSuchObjectException, org.apache.thrift.TException;
+    public List<PartitionSpec> get_part_specs_by_filter(String db_name, String tbl_name, String filter, int max_parts, String validTxnList) throws MetaException, NoSuchObjectException, org.apache.thrift.TException;

     public PartitionsByExprResult get_partitions_by_expr(PartitionsByExprRequest req) throws MetaException, NoSuchObjectException, org.apache.thrift.TException;

-    public int get_num_partitions_by_filter(String db_name, String tbl_name, String filter) throws MetaException, NoSuchObjectException, org.apache.thrift.TException;
+    public int get_num_partitions_by_filter(String db_name, String tbl_name, String filter, String validTxnList) throws MetaException, NoSuchObjectException, org.apache.thrift.TException;

-    public List<Partition> get_partitions_by_names(String db_name, String tbl_name, List<String> names) throws MetaException, NoSuchObjectException, org.apache.thrift.TException;
+    public List<Partition> get_partitions_by_names(String db_name, String tbl_name, List<String> names, String validTxnList) throws MetaException, NoSuchObjectException, org.apache.thrift.TException;

     public GetPartitionsByNamesResult get_partitions_by_names_req(GetPartitionsByNamesRequest req) throws MetaException, NoSuchObjectException, org.apache.thrift.TException;

@@ -260,9 +260,9 @@
     public SetPartitionsStatsResponse update_partition_column_statistics_req(SetPartitionsStatsRequest req) throws NoSuchObjectException, InvalidObjectException, MetaException, InvalidInputException, org.apache.thrift.TException;

-    public ColumnStatistics get_table_column_statistics(String db_name, String tbl_name, String col_name) throws NoSuchObjectException, MetaException, InvalidInputException, InvalidObjectException, org.apache.thrift.TException;
+    public ColumnStatistics get_table_column_statistics(String db_name, String tbl_name, String col_name, String validWriteIdList) throws NoSuchObjectException, MetaException, InvalidInputException, InvalidObjectException, org.apache.thrift.TException;

-    public ColumnStatistics get_partition_column_statistics(String db_name, String tbl_name, String part_name, String col_name) throws NoSuchObjectException, MetaException, InvalidInputException, InvalidObjectException, org.apache.thrift.TException;
+    public ColumnStatistics get_partition_column_statistics(String db_name, String tbl_name, String part_name, String col_name, String validWriteIdList) throws NoSuchObjectException, MetaException, InvalidInputException, InvalidObjectException, org.apache.thrift.TException;

     public TableStatsResult get_table_statistics_req(TableStatsRequest request) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
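(Annotation, not part of the patch.) Every metadata-read method above gains a trailing String carrying the caller's snapshot: validWriteIdList for table-level reads and validTxnList for partition-level reads. A hedged sketch of a caller, assuming an already-connected Iface and that passing null preserves the old "latest metadata" behaviour:

    // Hypothetical caller; 'client' wiring is out of scope here.
    import java.util.List;
    import org.apache.hadoop.hive.metastore.api.Partition;
    import org.apache.hadoop.hive.metastore.api.Table;
    import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
    import org.apache.thrift.TException;

    class SnapshotReads {
      static Table tableAt(ThriftHiveMetastore.Iface client, String db, String tbl,
                           String validWriteIdList) throws TException {
        return client.get_table(db, tbl, validWriteIdList);   // new third argument
      }

      static List<Partition> partitionsAt(ThriftHiveMetastore.Iface client, String db,
                                          String tbl, String validTxnList) throws TException {
        // (short) -1 is the usual "all partitions" convention for max_parts.
        return client.get_partitions(db, tbl, (short) -1, validTxnList);
      }
    }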
@@ -530,13 +530,13 @@
     public void get_type_all(String name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;

-    public void get_fields(String db_name, String table_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void get_fields(String db_name, String table_name, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;

-    public void get_fields_with_environment_context(String db_name, String table_name, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void get_fields_with_environment_context(String db_name, String table_name, EnvironmentContext environment_context, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;

-    public void get_schema(String db_name, String table_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void get_schema(String db_name, String table_name, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;

-    public void get_schema_with_environment_context(String db_name, String table_name, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void get_schema_with_environment_context(String db_name, String table_name, EnvironmentContext environment_context, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;

     public void create_table(Table tbl, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;

@@ -578,7 +578,7 @@
     public void get_all_tables(String db_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;

-    public void get_table(String dbname, String tbl_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void get_table(String dbname, String tbl_name, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;

     public void get_table_objects_by_name(String dbname, List<String> tbl_names, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;

@@ -630,41 +630,41 @@
     public void drop_partitions_req(DropPartitionsRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;

-    public void get_partition(String db_name, String tbl_name, List<String> part_vals, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void get_partition(String db_name, String tbl_name, List<String> part_vals, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;

     public void exchange_partition(Map<String,String> partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;

     public void exchange_partitions(Map<String,String> partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;

-    public void get_partition_with_auth(String db_name, String tbl_name, List<String> part_vals, String user_name, List<String> group_names, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void get_partition_with_auth(String db_name, String tbl_name, List<String> part_vals, String user_name, List<String> group_names, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;

-    public void get_partition_by_name(String db_name, String tbl_name, String part_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void get_partition_by_name(String db_name, String tbl_name, String part_name, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;

-    public void get_partitions(String db_name, String tbl_name, short max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void get_partitions(String db_name, String tbl_name, short max_parts, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;

-    public void get_partitions_with_auth(String db_name, String tbl_name, short max_parts, String user_name, List<String> group_names, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void get_partitions_with_auth(String db_name, String tbl_name, short max_parts, String user_name, List<String> group_names, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;

-    public void get_partitions_pspec(String db_name, String tbl_name, int max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void get_partitions_pspec(String db_name, String tbl_name, int max_parts, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;

-    public void get_partition_names(String db_name, String tbl_name, short max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void get_partition_names(String db_name, String tbl_name, short max_parts, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;

     public void get_partition_values(PartitionValuesRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;

-    public void get_partitions_ps(String db_name, String tbl_name, List<String> part_vals, short max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void get_partitions_ps(String db_name, String tbl_name, List<String> part_vals, short max_parts, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;

-    public void get_partitions_ps_with_auth(String db_name, String tbl_name, List<String> part_vals, short max_parts, String user_name, List<String> group_names, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void get_partitions_ps_with_auth(String db_name, String tbl_name, List<String> part_vals, short max_parts, String user_name, List<String> group_names, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;

-    public void get_partition_names_ps(String db_name, String tbl_name, List<String> part_vals, short max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void get_partition_names_ps(String db_name, String tbl_name, List<String> part_vals, short max_parts, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;

-    public void get_partitions_by_filter(String db_name, String tbl_name, String filter, short max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void get_partitions_by_filter(String db_name, String tbl_name, String filter, short max_parts, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;

-    public void get_part_specs_by_filter(String db_name, String tbl_name, String filter, int max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void get_part_specs_by_filter(String db_name, String tbl_name, String filter, int max_parts, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;

     public void get_partitions_by_expr(PartitionsByExprRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;

-    public void get_num_partitions_by_filter(String db_name, String tbl_name, String filter, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void get_num_partitions_by_filter(String db_name, String tbl_name, String filter, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;

-    public void get_partitions_by_names(String db_name, String tbl_name, List<String> names, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void get_partitions_by_names(String db_name, String tbl_name, List<String> names, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;

     public void get_partitions_by_names_req(GetPartitionsByNamesRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;

@@ -714,9 +714,9 @@
     public void update_partition_column_statistics_req(SetPartitionsStatsRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;

-    public void get_table_column_statistics(String db_name, String tbl_name, String col_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void get_table_column_statistics(String db_name, String tbl_name, String col_name, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;

-    public void get_partition_column_statistics(String db_name, String tbl_name, String part_name, String col_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void get_partition_column_statistics(String db_name, String tbl_name, String part_name, String col_name, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;

     public void get_table_statistics_req(TableStatsRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
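(Annotation, not part of the patch.) The async variants take the same extra String just ahead of the callback. A sketch of the nonblocking client path; host/port, protocol choice, and the null snapshot are all assumptions, not taken from this patch:

    import org.apache.hadoop.hive.metastore.api.Table;
    import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
    import org.apache.thrift.async.AsyncMethodCallback;
    import org.apache.thrift.async.TAsyncClientManager;
    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.transport.TNonblockingSocket;

    class AsyncGetTable {
      public static void main(String[] args) throws Exception {
        ThriftHiveMetastore.AsyncClient client = new ThriftHiveMetastore.AsyncClient(
            new TBinaryProtocol.Factory(), new TAsyncClientManager(),
            new TNonblockingSocket("localhost", 9083));   // assumed metastore endpoint

        // null validWriteIdList assumed to mean "latest metadata", as in the sync path.
        client.get_table("default", "streamingnobuckets", null,
            new AsyncMethodCallback<ThriftHiveMetastore.AsyncClient.get_table_call>() {
              public void onComplete(ThriftHiveMetastore.AsyncClient.get_table_call call) {
                try {
                  Table t = call.getResult();              // unpack the reply
                  System.out.println("table = " + t.getTableName());
                } catch (Exception e) { e.printStackTrace(); }
              }
              public void onError(Exception e) { e.printStackTrace(); }
            });
      }
    }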
@@ -1442,17 +1442,18 @@ public void send_get_type_all(String name) throws org.apache.thrift.TException
       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_type_all failed: unknown result");
     }

-    public List<FieldSchema> get_fields(String db_name, String table_name) throws MetaException, UnknownTableException, UnknownDBException, org.apache.thrift.TException
+    public List<FieldSchema> get_fields(String db_name, String table_name, String validWriteIdList) throws MetaException, UnknownTableException, UnknownDBException, org.apache.thrift.TException
     {
-      send_get_fields(db_name, table_name);
+      send_get_fields(db_name, table_name, validWriteIdList);
       return recv_get_fields();
     }

-    public void send_get_fields(String db_name, String table_name) throws org.apache.thrift.TException
+    public void send_get_fields(String db_name, String table_name, String validWriteIdList) throws org.apache.thrift.TException
     {
       get_fields_args args = new get_fields_args();
       args.setDb_name(db_name);
       args.setTable_name(table_name);
+      args.setValidWriteIdList(validWriteIdList);
       sendBase("get_fields", args);
     }

@@ -1475,18 +1476,19 @@ public void send_get_fields(String db_name, String table_name) throws org.apache
       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_fields failed: unknown result");
     }

-    public List<FieldSchema> get_fields_with_environment_context(String db_name, String table_name, EnvironmentContext environment_context) throws MetaException, UnknownTableException, UnknownDBException, org.apache.thrift.TException
+    public List<FieldSchema> get_fields_with_environment_context(String db_name, String table_name, EnvironmentContext environment_context, String validWriteIdList) throws MetaException, UnknownTableException, UnknownDBException, org.apache.thrift.TException
     {
-      send_get_fields_with_environment_context(db_name, table_name, environment_context);
+      send_get_fields_with_environment_context(db_name, table_name, environment_context, validWriteIdList);
       return recv_get_fields_with_environment_context();
     }

-    public void send_get_fields_with_environment_context(String db_name, String table_name, EnvironmentContext environment_context) throws org.apache.thrift.TException
+    public void send_get_fields_with_environment_context(String db_name, String table_name, EnvironmentContext environment_context, String validWriteIdList) throws org.apache.thrift.TException
     {
       get_fields_with_environment_context_args args = new get_fields_with_environment_context_args();
       args.setDb_name(db_name);
       args.setTable_name(table_name);
       args.setEnvironment_context(environment_context);
+      args.setValidWriteIdList(validWriteIdList);
       sendBase("get_fields_with_environment_context", args);
     }

@@ -1509,17 +1511,18 @@ public void send_get_fields_with_environment_context(String db_name, String tabl
       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_fields_with_environment_context failed: unknown result");
     }

-    public List<FieldSchema> get_schema(String db_name, String table_name) throws MetaException, UnknownTableException, UnknownDBException, org.apache.thrift.TException
+    public List<FieldSchema> get_schema(String db_name, String table_name, String validWriteIdList) throws MetaException, UnknownTableException, UnknownDBException, org.apache.thrift.TException
     {
-      send_get_schema(db_name, table_name);
+      send_get_schema(db_name, table_name, validWriteIdList);
       return recv_get_schema();
     }

-    public void send_get_schema(String db_name, String table_name) throws org.apache.thrift.TException
+    public void send_get_schema(String db_name, String table_name, String validWriteIdList) throws org.apache.thrift.TException
     {
       get_schema_args args = new get_schema_args();
       args.setDb_name(db_name);
       args.setTable_name(table_name);
+      args.setValidWriteIdList(validWriteIdList);
       sendBase("get_schema", args);
     }

@@ -1542,18 +1545,19 @@ public void send_get_schema(String db_name, String table_name) throws org.apache
       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_schema failed: unknown result");
     }

-    public List<FieldSchema> get_schema_with_environment_context(String db_name, String table_name, EnvironmentContext environment_context) throws MetaException, UnknownTableException, UnknownDBException, org.apache.thrift.TException
+    public List<FieldSchema> get_schema_with_environment_context(String db_name, String table_name, EnvironmentContext environment_context, String validWriteIdList) throws MetaException, UnknownTableException, UnknownDBException, org.apache.thrift.TException
     {
-      send_get_schema_with_environment_context(db_name, table_name, environment_context);
+      send_get_schema_with_environment_context(db_name, table_name, environment_context, validWriteIdList);
       return recv_get_schema_with_environment_context();
     }

-    public void send_get_schema_with_environment_context(String db_name, String table_name, EnvironmentContext environment_context) throws org.apache.thrift.TException
+    public void send_get_schema_with_environment_context(String db_name, String table_name, EnvironmentContext environment_context, String validWriteIdList) throws org.apache.thrift.TException
     {
       get_schema_with_environment_context_args args = new get_schema_with_environment_context_args();
       args.setDb_name(db_name);
       args.setTable_name(table_name);
       args.setEnvironment_context(environment_context);
+      args.setValidWriteIdList(validWriteIdList);
       sendBase("get_schema_with_environment_context", args);
     }

@@ -2129,17 +2133,18 @@ public void send_get_all_tables(String db_name) throws org.apache.thrift.TExcept
       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_all_tables failed: unknown result");
     }

-    public Table get_table(String dbname, String tbl_name) throws MetaException, NoSuchObjectException, org.apache.thrift.TException
+    public Table get_table(String dbname, String tbl_name, String validWriteIdList) throws MetaException, NoSuchObjectException, org.apache.thrift.TException
     {
-      send_get_table(dbname, tbl_name);
+      send_get_table(dbname, tbl_name, validWriteIdList);
      return recv_get_table();
     }

-    public void send_get_table(String dbname, String tbl_name) throws org.apache.thrift.TException
+    public void send_get_table(String dbname, String tbl_name, String validWriteIdList) throws org.apache.thrift.TException
     {
       get_table_args args = new get_table_args();
       args.setDbname(dbname);
       args.setTbl_name(tbl_name);
+      args.setValidWriteIdList(validWriteIdList);
       sendBase("get_table", args);
     }

@@ -2942,18 +2947,19 @@ public DropPartitionsResult recv_drop_partitions_req() throws NoSuchObjectExcept
       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "drop_partitions_req failed: unknown result");
     }

-    public Partition get_partition(String db_name, String tbl_name, List<String> part_vals) throws MetaException, NoSuchObjectException, org.apache.thrift.TException
+    public Partition get_partition(String db_name, String tbl_name, List<String> part_vals, String validTxnList) throws MetaException, NoSuchObjectException, org.apache.thrift.TException
     {
-      send_get_partition(db_name, tbl_name, part_vals);
+      send_get_partition(db_name, tbl_name, part_vals, validTxnList);
       return recv_get_partition();
     }

-    public void send_get_partition(String db_name, String tbl_name, List<String> part_vals) throws org.apache.thrift.TException
+    public void send_get_partition(String db_name, String tbl_name, List<String> part_vals, String validTxnList) throws org.apache.thrift.TException
     {
       get_partition_args args = new get_partition_args();
       args.setDb_name(db_name);
       args.setTbl_name(tbl_name);
       args.setPart_vals(part_vals);
+      args.setValidTxnList(validTxnList);
       sendBase("get_partition", args);
     }

@@ -3051,13 +3057,13 @@ public void send_exchange_partitions(Map<String,String> partitionSpecs, String s
       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "exchange_partitions failed: unknown result");
     }

-    public Partition get_partition_with_auth(String db_name, String tbl_name, List<String> part_vals, String user_name, List<String> group_names) throws MetaException, NoSuchObjectException, org.apache.thrift.TException
+    public Partition get_partition_with_auth(String db_name, String tbl_name, List<String> part_vals, String user_name, List<String> group_names, String validTxnList) throws MetaException, NoSuchObjectException, org.apache.thrift.TException
     {
-      send_get_partition_with_auth(db_name, tbl_name, part_vals, user_name, group_names);
+      send_get_partition_with_auth(db_name, tbl_name, part_vals, user_name, group_names, validTxnList);
       return recv_get_partition_with_auth();
     }

-    public void send_get_partition_with_auth(String db_name, String tbl_name, List<String> part_vals, String user_name, List<String> group_names) throws org.apache.thrift.TException
+    public void send_get_partition_with_auth(String db_name, String tbl_name, List<String> part_vals, String user_name, List<String> group_names, String validTxnList) throws org.apache.thrift.TException
     {
       get_partition_with_auth_args args = new get_partition_with_auth_args();
       args.setDb_name(db_name);
@@ -3065,6 +3071,7 @@ public void send_get_partition_with_auth(String db_name, String tbl_name, List
       args.setUser_name(user_name);
       args.setGroup_names(group_names);
+      args.setValidTxnList(validTxnList);
       sendBase("get_partition_with_auth", args);
     }

-    public void send_get_partitions(String db_name, String tbl_name, short max_parts) throws org.apache.thrift.TException
+    public void send_get_partitions(String db_name, String tbl_name, short max_parts, String validTxnList) throws org.apache.thrift.TException
     {
       get_partitions_args args = new get_partitions_args();
       args.setDb_name(db_name);
       args.setTbl_name(tbl_name);
       args.setMax_parts(max_parts);
+      args.setValidTxnList(validTxnList);
       sendBase("get_partitions", args);
     }

@@ -3146,13 +3155,13 @@ public void send_get_partitions(String db_name, String tbl_name, short max_parts
       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_partitions failed: unknown result");
     }

-    public List<Partition> get_partitions_with_auth(String db_name, String tbl_name, short max_parts, String user_name, List<String> group_names) throws NoSuchObjectException, MetaException, org.apache.thrift.TException
+    public List<Partition> get_partitions_with_auth(String db_name, String tbl_name, short max_parts, String user_name, List<String> group_names, String validTxnList) throws NoSuchObjectException, MetaException, org.apache.thrift.TException
     {
-      send_get_partitions_with_auth(db_name, tbl_name, max_parts, user_name, group_names);
+      send_get_partitions_with_auth(db_name, tbl_name, max_parts, user_name, group_names, validTxnList);
       return recv_get_partitions_with_auth();
     }

-    public void send_get_partitions_with_auth(String db_name, String tbl_name, short max_parts, String user_name, List<String> group_names) throws org.apache.thrift.TException
+    public void send_get_partitions_with_auth(String db_name, String tbl_name, short max_parts, String user_name, List<String> group_names, String validTxnList) throws org.apache.thrift.TException
     {
       get_partitions_with_auth_args args = new get_partitions_with_auth_args();
       args.setDb_name(db_name);
@@ -3160,6 +3169,7 @@ public void send_get_partitions_with_auth(String db_name, String tbl_name, short
       args.setMax_parts(max_parts);
       args.setUser_name(user_name);
       args.setGroup_names(group_names);
+      args.setValidTxnList(validTxnList);
       sendBase("get_partitions_with_auth", args);
     }

@@ -3179,18 +3189,19 @@ public void send_get_partitions_with_auth(String db_name, String tbl_name, short
       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_partitions_with_auth failed: unknown result");
     }

-    public List<PartitionSpec> get_partitions_pspec(String db_name, String tbl_name, int max_parts) throws NoSuchObjectException, MetaException, org.apache.thrift.TException
+    public List<PartitionSpec> get_partitions_pspec(String db_name, String tbl_name, int max_parts, String validTxnList) throws NoSuchObjectException, MetaException, org.apache.thrift.TException
     {
-      send_get_partitions_pspec(db_name, tbl_name, max_parts);
+      send_get_partitions_pspec(db_name, tbl_name, max_parts, validTxnList);
       return recv_get_partitions_pspec();
     }

-    public void send_get_partitions_pspec(String db_name, String tbl_name, int max_parts) throws org.apache.thrift.TException
+    public void send_get_partitions_pspec(String db_name, String tbl_name, int max_parts, String validTxnList) throws org.apache.thrift.TException
     {
       get_partitions_pspec_args args = new get_partitions_pspec_args();
       args.setDb_name(db_name);
       args.setTbl_name(tbl_name);
       args.setMax_parts(max_parts);
+      args.setValidTxnList(validTxnList);
       sendBase("get_partitions_pspec", args);
     }

@@ -3210,18 +3221,19 @@ public void send_get_partitions_pspec(String db_name, String tbl_name, int max_p
       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_partitions_pspec failed: unknown result");
     }

-    public List<String> get_partition_names(String db_name, String tbl_name, short max_parts) throws NoSuchObjectException, MetaException, org.apache.thrift.TException
+    public List<String> get_partition_names(String db_name, String tbl_name, short max_parts, String validTxnList) throws NoSuchObjectException, MetaException, org.apache.thrift.TException
     {
-      send_get_partition_names(db_name, tbl_name, max_parts);
+      send_get_partition_names(db_name, tbl_name, max_parts, validTxnList);
       return recv_get_partition_names();
     }

-    public void send_get_partition_names(String db_name, String tbl_name, short max_parts) throws org.apache.thrift.TException
+    public void send_get_partition_names(String db_name, String tbl_name, short max_parts, String validTxnList) throws org.apache.thrift.TException
     {
       get_partition_names_args args = new get_partition_names_args();
       args.setDb_name(db_name);
       args.setTbl_name(tbl_name);
       args.setMax_parts(max_parts);
+      args.setValidTxnList(validTxnList);
       sendBase("get_partition_names", args);
     }

@@ -3270,19 +3282,20 @@ public PartitionValuesResponse recv_get_partition_values() throws MetaException,
       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_partition_values failed: unknown result");
     }

-    public List<Partition> get_partitions_ps(String db_name, String tbl_name, List<String> part_vals, short max_parts) throws MetaException, NoSuchObjectException, org.apache.thrift.TException
+    public List<Partition> get_partitions_ps(String db_name, String tbl_name, List<String> part_vals, short max_parts, String validTxnList) throws MetaException, NoSuchObjectException, org.apache.thrift.TException
     {
-      send_get_partitions_ps(db_name, tbl_name, part_vals, max_parts);
+      send_get_partitions_ps(db_name, tbl_name, part_vals, max_parts, validTxnList);
       return recv_get_partitions_ps();
     }

-    public void send_get_partitions_ps(String db_name, String tbl_name, List<String> part_vals, short max_parts) throws org.apache.thrift.TException
+    public void send_get_partitions_ps(String db_name, String tbl_name, List<String> part_vals, short max_parts, String validTxnList) throws org.apache.thrift.TException
     {
       get_partitions_ps_args args = new get_partitions_ps_args();
       args.setDb_name(db_name);
       args.setTbl_name(tbl_name);
       args.setPart_vals(part_vals);
       args.setMax_parts(max_parts);
+      args.setValidTxnList(validTxnList);
       sendBase("get_partitions_ps", args);
     }

@@ -3302,13 +3315,13 @@ public void send_get_partitions_ps(String db_name, String tbl_name, List
       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_partitions_ps failed: unknown result");
     }

-    public List<Partition> get_partitions_ps_with_auth(String db_name, String tbl_name, List<String> part_vals, short max_parts, String user_name, List<String> group_names) throws NoSuchObjectException, MetaException, org.apache.thrift.TException
+    public List<Partition> get_partitions_ps_with_auth(String db_name, String tbl_name, List<String> part_vals, short max_parts, String user_name, List<String> group_names, String validTxnList) throws NoSuchObjectException, MetaException, org.apache.thrift.TException
     {
-      send_get_partitions_ps_with_auth(db_name, tbl_name, part_vals, max_parts, user_name, group_names);
+      send_get_partitions_ps_with_auth(db_name, tbl_name, part_vals, max_parts, user_name, group_names, validTxnList);
       return recv_get_partitions_ps_with_auth();
     }

-    public void send_get_partitions_ps_with_auth(String db_name, String tbl_name, List<String> part_vals, short max_parts, String user_name, List<String> group_names) throws org.apache.thrift.TException
+    public void send_get_partitions_ps_with_auth(String db_name, String tbl_name, List<String> part_vals, short max_parts, String user_name, List<String> group_names, String validTxnList) throws org.apache.thrift.TException
     {
       get_partitions_ps_with_auth_args args = new get_partitions_ps_with_auth_args();
       args.setDb_name(db_name);
@@ -3317,6 +3330,7 @@ public void send_get_partitions_ps_with_auth(String db_name, String tbl_name, Li
       args.setMax_parts(max_parts);
       args.setUser_name(user_name);
       args.setGroup_names(group_names);
+      args.setValidTxnList(validTxnList);
       sendBase("get_partitions_ps_with_auth", args);
     }

@@ -3336,19 +3350,20 @@ public void send_get_partitions_ps_with_auth(String db_name, String tbl_name, Li
       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_partitions_ps_with_auth failed: unknown result");
     }

-    public List<String> get_partition_names_ps(String db_name, String tbl_name, List<String> part_vals, short max_parts) throws MetaException, NoSuchObjectException, org.apache.thrift.TException
+    public List<String> get_partition_names_ps(String db_name, String tbl_name, List<String> part_vals, short max_parts, String validTxnList) throws MetaException, NoSuchObjectException, org.apache.thrift.TException
     {
-      send_get_partition_names_ps(db_name, tbl_name, part_vals, max_parts);
+      send_get_partition_names_ps(db_name, tbl_name, part_vals, max_parts, validTxnList);
       return recv_get_partition_names_ps();
     }

-    public void send_get_partition_names_ps(String db_name, String tbl_name, List<String> part_vals, short max_parts) throws org.apache.thrift.TException
+    public void send_get_partition_names_ps(String db_name, String tbl_name, List<String> part_vals, short max_parts, String validTxnList) throws org.apache.thrift.TException
     {
       get_partition_names_ps_args args = new get_partition_names_ps_args();
       args.setDb_name(db_name);
       args.setTbl_name(tbl_name);
       args.setPart_vals(part_vals);
       args.setMax_parts(max_parts);
+      args.setValidTxnList(validTxnList);
       sendBase("get_partition_names_ps", args);
     }

@@ -3368,19 +3383,20 @@ public void send_get_partition_names_ps(String db_name, String tbl_name, List
       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_partition_names_ps failed: unknown result");
     }

-    public List<Partition> get_partitions_by_filter(String db_name, String tbl_name, String filter, short max_parts) throws MetaException, NoSuchObjectException, org.apache.thrift.TException
+    public List<Partition> get_partitions_by_filter(String db_name, String tbl_name, String filter, short max_parts, String validTxnList) throws MetaException, NoSuchObjectException, org.apache.thrift.TException
     {
-      send_get_partitions_by_filter(db_name, tbl_name, filter, max_parts);
+      send_get_partitions_by_filter(db_name, tbl_name, filter, max_parts, validTxnList);
       return recv_get_partitions_by_filter();
     }

-    public void send_get_partitions_by_filter(String db_name, String tbl_name, String filter, short max_parts) throws org.apache.thrift.TException
+    public void send_get_partitions_by_filter(String db_name, String tbl_name, String filter, short max_parts, String validTxnList) throws org.apache.thrift.TException
     {
       get_partitions_by_filter_args args = new get_partitions_by_filter_args();
       args.setDb_name(db_name);
       args.setTbl_name(tbl_name);
       args.setFilter(filter);
       args.setMax_parts(max_parts);
+      args.setValidTxnList(validTxnList);
       sendBase("get_partitions_by_filter", args);
     }

@@ -3400,19 +3416,20 @@ public void send_get_partitions_by_filter(String db_name, String tbl_name, Strin
       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_partitions_by_filter failed: unknown result");
     }
-    public List<PartitionSpec> get_part_specs_by_filter(String db_name, String tbl_name, String filter, int max_parts) throws MetaException, NoSuchObjectException, org.apache.thrift.TException
+    public List<PartitionSpec> get_part_specs_by_filter(String db_name, String tbl_name, String filter, int max_parts, String validTxnList) throws MetaException, NoSuchObjectException, org.apache.thrift.TException
     {
-      send_get_part_specs_by_filter(db_name, tbl_name, filter, max_parts);
+      send_get_part_specs_by_filter(db_name, tbl_name, filter, max_parts, validTxnList);
       return recv_get_part_specs_by_filter();
     }

-    public void send_get_part_specs_by_filter(String db_name, String tbl_name, String filter, int max_parts) throws org.apache.thrift.TException
+    public void send_get_part_specs_by_filter(String db_name, String tbl_name, String filter, int max_parts, String validTxnList) throws org.apache.thrift.TException
     {
       get_part_specs_by_filter_args args = new get_part_specs_by_filter_args();
       args.setDb_name(db_name);
       args.setTbl_name(tbl_name);
       args.setFilter(filter);
       args.setMax_parts(max_parts);
+      args.setValidTxnList(validTxnList);
       sendBase("get_part_specs_by_filter", args);
     }

@@ -3461,18 +3478,19 @@ public PartitionsByExprResult recv_get_partitions_by_expr() throws MetaException
       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_partitions_by_expr failed: unknown result");
     }

-    public int get_num_partitions_by_filter(String db_name, String tbl_name, String filter) throws MetaException, NoSuchObjectException, org.apache.thrift.TException
+    public int get_num_partitions_by_filter(String db_name, String tbl_name, String filter, String validTxnList) throws MetaException, NoSuchObjectException, org.apache.thrift.TException
     {
-      send_get_num_partitions_by_filter(db_name, tbl_name, filter);
+      send_get_num_partitions_by_filter(db_name, tbl_name, filter, validTxnList);
       return recv_get_num_partitions_by_filter();
     }

-    public void send_get_num_partitions_by_filter(String db_name, String tbl_name, String filter) throws org.apache.thrift.TException
+    public void send_get_num_partitions_by_filter(String db_name, String tbl_name, String filter, String validTxnList) throws org.apache.thrift.TException
     {
       get_num_partitions_by_filter_args args = new get_num_partitions_by_filter_args();
       args.setDb_name(db_name);
       args.setTbl_name(tbl_name);
       args.setFilter(filter);
+      args.setValidTxnList(validTxnList);
       sendBase("get_num_partitions_by_filter", args);
     }

@@ -3492,18 +3510,19 @@ public int recv_get_num_partitions_by_filter() throws MetaException, NoSuchObjec
       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_num_partitions_by_filter failed: unknown result");
     }

-    public List<Partition> get_partitions_by_names(String db_name, String tbl_name, List<String> names) throws MetaException, NoSuchObjectException, org.apache.thrift.TException
+    public List<Partition> get_partitions_by_names(String db_name, String tbl_name, List<String> names, String validTxnList) throws MetaException, NoSuchObjectException, org.apache.thrift.TException
     {
-      send_get_partitions_by_names(db_name, tbl_name, names);
+      send_get_partitions_by_names(db_name, tbl_name, names, validTxnList);
      return recv_get_partitions_by_names();
     }

-    public void send_get_partitions_by_names(String db_name, String tbl_name, List<String> names) throws org.apache.thrift.TException
+    public void send_get_partitions_by_names(String db_name, String tbl_name, List<String> names, String validTxnList) throws org.apache.thrift.TException
     {
       get_partitions_by_names_args args = new get_partitions_by_names_args();
       args.setDb_name(db_name);
       args.setTbl_name(tbl_name);
       args.setNames(names);
+      args.setValidTxnList(validTxnList);
       sendBase("get_partitions_by_names", args);
     }

@@ -4258,18 +4277,19 @@ public SetPartitionsStatsResponse recv_update_partition_column_statistics_req()
       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "update_partition_column_statistics_req failed: unknown result");
     }

-    public ColumnStatistics get_table_column_statistics(String db_name, String tbl_name, String col_name) throws NoSuchObjectException, MetaException, InvalidInputException, InvalidObjectException, org.apache.thrift.TException
+    public ColumnStatistics get_table_column_statistics(String db_name, String tbl_name, String col_name, String validWriteIdList) throws NoSuchObjectException, MetaException, InvalidInputException, InvalidObjectException, org.apache.thrift.TException
     {
-      send_get_table_column_statistics(db_name, tbl_name, col_name);
+      send_get_table_column_statistics(db_name, tbl_name, col_name, validWriteIdList);
       return recv_get_table_column_statistics();
     }

-    public void send_get_table_column_statistics(String db_name, String tbl_name, String col_name) throws org.apache.thrift.TException
+    public void send_get_table_column_statistics(String db_name, String tbl_name, String col_name, String validWriteIdList) throws org.apache.thrift.TException
     {
       get_table_column_statistics_args args = new get_table_column_statistics_args();
       args.setDb_name(db_name);
       args.setTbl_name(tbl_name);
       args.setCol_name(col_name);
+      args.setValidWriteIdList(validWriteIdList);
       sendBase("get_table_column_statistics", args);
     }

@@ -4295,19 +4315,20 @@ public ColumnStatistics recv_get_table_column_statistics() throws NoSuchObjectEx
       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_table_column_statistics failed: unknown result");
     }

-    public ColumnStatistics get_partition_column_statistics(String db_name, String tbl_name, String part_name, String col_name) throws NoSuchObjectException, MetaException, InvalidInputException, InvalidObjectException, org.apache.thrift.TException
+    public ColumnStatistics get_partition_column_statistics(String db_name, String tbl_name, String part_name, String col_name, String validWriteIdList) throws NoSuchObjectException, MetaException, InvalidInputException, InvalidObjectException, org.apache.thrift.TException
     {
-      send_get_partition_column_statistics(db_name, tbl_name, part_name, col_name);
+      send_get_partition_column_statistics(db_name, tbl_name, part_name, col_name, validWriteIdList);
       return recv_get_partition_column_statistics();
     }

-    public void send_get_partition_column_statistics(String db_name, String tbl_name, String part_name, String col_name) throws org.apache.thrift.TException
+    public void send_get_partition_column_statistics(String db_name, String tbl_name, String part_name, String col_name, String validWriteIdList) throws org.apache.thrift.TException
     {
       get_partition_column_statistics_args args = new get_partition_column_statistics_args();
       args.setDb_name(db_name);
       args.setTbl_name(tbl_name);
       args.setPart_name(part_name);
       args.setCol_name(col_name);
+      args.setValidWriteIdList(validWriteIdList);
       sendBase("get_partition_column_statistics", args);
     }

@@ -7950,9 +7971,9 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa
      }
    }

-    public void get_fields(String db_name, String table_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+    public void get_fields(String db_name, String table_name, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
       checkReady();
-      get_fields_call method_call = new get_fields_call(db_name, table_name, resultHandler, this, ___protocolFactory, ___transport);
+      get_fields_call method_call = new get_fields_call(db_name, table_name, validWriteIdList, resultHandler, this, ___protocolFactory, ___transport);
       this.___currentMethod = method_call;
       ___manager.call(method_call);
     }

@@ -7960,10 +7981,12 @@ public void get_fields(String db_name, String table_name, org.apache.thrift.asyn
     @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_fields_call extends org.apache.thrift.async.TAsyncMethodCall {
       private String db_name;
       private String table_name;
-      public get_fields_call(String db_name, String table_name, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+      private String validWriteIdList;
+      public get_fields_call(String db_name, String table_name, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
         super(client, protocolFactory, transport, resultHandler, false);
         this.db_name = db_name;
         this.table_name = table_name;
+        this.validWriteIdList = validWriteIdList;
       }

       public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
@@ -7971,6 +7994,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa
         get_fields_args args = new get_fields_args();
         args.setDb_name(db_name);
         args.setTable_name(table_name);
+        args.setValidWriteIdList(validWriteIdList);
         args.write(prot);
         prot.writeMessageEnd();
       }

@@ -7985,9 +8009,9 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa
      }
    }

-    public void get_fields_with_environment_context(String db_name, String table_name, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+    public void get_fields_with_environment_context(String db_name, String table_name, EnvironmentContext environment_context, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
       checkReady();
-      get_fields_with_environment_context_call method_call = new get_fields_with_environment_context_call(db_name, table_name, environment_context, resultHandler, this, ___protocolFactory, ___transport);
+      get_fields_with_environment_context_call method_call = new get_fields_with_environment_context_call(db_name, table_name, environment_context, validWriteIdList, resultHandler, this, ___protocolFactory, ___transport);
       this.___currentMethod = method_call;
       ___manager.call(method_call);
     }

@@ -7996,11 +8020,13 @@ public void get_fields_with_environment_context(String db_name, String table_nam
       private String db_name;
       private String table_name;
       private EnvironmentContext environment_context;
-      public get_fields_with_environment_context_call(String db_name, String table_name, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+      private String validWriteIdList;
+      public get_fields_with_environment_context_call(String db_name, String table_name, EnvironmentContext environment_context, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
         super(client, protocolFactory, transport, resultHandler, false);
         this.db_name = db_name;
         this.table_name = table_name;
         this.environment_context = environment_context;
+        this.validWriteIdList = validWriteIdList;
       }

       public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
@@ -8009,6 +8035,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa
         args.setDb_name(db_name);
         args.setTable_name(table_name);
         args.setEnvironment_context(environment_context);
+        args.setValidWriteIdList(validWriteIdList);
         args.write(prot);
         prot.writeMessageEnd();
       }

@@ -8023,9 +8050,9 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa
      }
    }

-    public void get_schema(String db_name, String table_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+    public void get_schema(String db_name, String table_name, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
       checkReady();
-      get_schema_call method_call = new get_schema_call(db_name, table_name, resultHandler, this, ___protocolFactory, ___transport);
+      get_schema_call method_call = new get_schema_call(db_name, table_name, validWriteIdList, resultHandler, this, ___protocolFactory, ___transport);
       this.___currentMethod = method_call;
       ___manager.call(method_call);
     }

@@ -8033,10 +8060,12 @@ public void get_schema(String db_name, String table_name, org.apache.thrift.asyn
     @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_schema_call extends org.apache.thrift.async.TAsyncMethodCall {
       private String db_name;
       private String table_name;
-      public get_schema_call(String db_name, String table_name, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+      private String validWriteIdList;
+      public get_schema_call(String db_name, String table_name, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
         super(client, protocolFactory, transport, resultHandler, false);
         this.db_name = db_name;
         this.table_name = table_name;
+        this.validWriteIdList = validWriteIdList;
       }

       public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
@@ -8044,6 +8073,7 @@ public void
write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa get_schema_args args = new get_schema_args(); args.setDb_name(db_name); args.setTable_name(table_name); + args.setValidWriteIdList(validWriteIdList); args.write(prot); prot.writeMessageEnd(); } @@ -8058,9 +8088,9 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa } } - public void get_schema_with_environment_context(String db_name, String table_name, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void get_schema_with_environment_context(String db_name, String table_name, EnvironmentContext environment_context, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - get_schema_with_environment_context_call method_call = new get_schema_with_environment_context_call(db_name, table_name, environment_context, resultHandler, this, ___protocolFactory, ___transport); + get_schema_with_environment_context_call method_call = new get_schema_with_environment_context_call(db_name, table_name, environment_context, validWriteIdList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -8069,11 +8099,13 @@ public void get_schema_with_environment_context(String db_name, String table_nam private String db_name; private String table_name; private EnvironmentContext environment_context; - public get_schema_with_environment_context_call(String db_name, String table_name, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String validWriteIdList; + public get_schema_with_environment_context_call(String db_name, String table_name, EnvironmentContext environment_context, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.db_name = db_name; this.table_name = table_name; this.environment_context = environment_context; + this.validWriteIdList = validWriteIdList; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { @@ -8082,6 +8114,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa args.setDb_name(db_name); args.setTable_name(table_name); args.setEnvironment_context(environment_context); + args.setValidWriteIdList(validWriteIdList); args.write(prot); prot.writeMessageEnd(); } @@ -8790,9 +8823,9 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa } } - public void get_table(String dbname, String tbl_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void get_table(String dbname, String tbl_name, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - get_table_call method_call = new get_table_call(dbname, tbl_name, resultHandler, this, 
___protocolFactory, ___transport); + get_table_call method_call = new get_table_call(dbname, tbl_name, validWriteIdList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -8800,10 +8833,12 @@ public void get_table(String dbname, String tbl_name, org.apache.thrift.async.As @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_table_call extends org.apache.thrift.async.TAsyncMethodCall { private String dbname; private String tbl_name; - public get_table_call(String dbname, String tbl_name, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String validWriteIdList; + public get_table_call(String dbname, String tbl_name, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.dbname = dbname; this.tbl_name = tbl_name; + this.validWriteIdList = validWriteIdList; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { @@ -8811,6 +8846,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa get_table_args args = new get_table_args(); args.setDbname(dbname); args.setTbl_name(tbl_name); + args.setValidWriteIdList(validWriteIdList); args.write(prot); prot.writeMessageEnd(); } @@ -9745,9 +9781,9 @@ public DropPartitionsResult getResult() throws NoSuchObjectException, MetaExcept } } - public void get_partition(String db_name, String tbl_name, List<String> part_vals, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void get_partition(String db_name, String tbl_name, List<String> part_vals, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - get_partition_call method_call = new get_partition_call(db_name, tbl_name, part_vals, resultHandler, this, ___protocolFactory, ___transport); + get_partition_call method_call = new get_partition_call(db_name, tbl_name, part_vals, validTxnList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -9756,11 +9792,13 @@ public void get_partition(String db_name, String tbl_name, List<String> part_val private String db_name; private String tbl_name; private List<String> part_vals; - public get_partition_call(String db_name, String tbl_name, List<String> part_vals, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String validTxnList; + public get_partition_call(String db_name, String tbl_name, List<String> part_vals, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory,
org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.db_name = db_name; this.tbl_name = tbl_name; this.part_vals = part_vals; + this.validTxnList = validTxnList; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { @@ -9769,6 +9807,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa args.setDb_name(db_name); args.setTbl_name(tbl_name); args.setPart_vals(part_vals); + args.setValidTxnList(validTxnList); args.write(prot); prot.writeMessageEnd(); } @@ -9871,9 +9910,9 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa } } - public void get_partition_with_auth(String db_name, String tbl_name, List<String> part_vals, String user_name, List<String> group_names, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void get_partition_with_auth(String db_name, String tbl_name, List<String> part_vals, String user_name, List<String> group_names, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - get_partition_with_auth_call method_call = new get_partition_with_auth_call(db_name, tbl_name, part_vals, user_name, group_names, resultHandler, this, ___protocolFactory, ___transport); + get_partition_with_auth_call method_call = new get_partition_with_auth_call(db_name, tbl_name, part_vals, user_name, group_names, validTxnList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -9884,13 +9923,15 @@ public void get_partition_with_auth(String db_name, String tbl_name, List part_vals; private String user_name; private List<String> group_names; - public get_partition_with_auth_call(String db_name, String tbl_name, List<String> part_vals, String user_name, List<String> group_names, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String validTxnList; + public get_partition_with_auth_call(String db_name, String tbl_name, List<String> part_vals, String user_name, List<String> group_names, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.db_name = db_name; this.tbl_name = tbl_name; this.part_vals = part_vals; this.user_name = user_name; this.group_names = group_names; + this.validTxnList = validTxnList; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { @@ -9901,6 +9942,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa args.setPart_vals(part_vals); args.setUser_name(user_name); args.setGroup_names(group_names); + args.setValidTxnList(validTxnList); args.write(prot); prot.writeMessageEnd(); } @@ -9915,9 +9957,9 @@ public Partition getResult() throws MetaException, NoSuchObjectException, org.ap } } - public void get_partition_by_name(String db_name, String tbl_name, String part_name, org.apache.thrift.async.AsyncMethodCallback
resultHandler) throws org.apache.thrift.TException { + public void get_partition_by_name(String db_name, String tbl_name, String part_name, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - get_partition_by_name_call method_call = new get_partition_by_name_call(db_name, tbl_name, part_name, resultHandler, this, ___protocolFactory, ___transport); + get_partition_by_name_call method_call = new get_partition_by_name_call(db_name, tbl_name, part_name, validTxnList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -9926,11 +9968,13 @@ public void get_partition_by_name(String db_name, String tbl_name, String part_n private String db_name; private String tbl_name; private String part_name; - public get_partition_by_name_call(String db_name, String tbl_name, String part_name, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String validTxnList; + public get_partition_by_name_call(String db_name, String tbl_name, String part_name, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.db_name = db_name; this.tbl_name = tbl_name; this.part_name = part_name; + this.validTxnList = validTxnList; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { @@ -9939,6 +9983,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa args.setDb_name(db_name); args.setTbl_name(tbl_name); args.setPart_name(part_name); + args.setValidTxnList(validTxnList); args.write(prot); prot.writeMessageEnd(); } @@ -9953,9 +9998,9 @@ public Partition getResult() throws MetaException, NoSuchObjectException, org.ap } } - public void get_partitions(String db_name, String tbl_name, short max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void get_partitions(String db_name, String tbl_name, short max_parts, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - get_partitions_call method_call = new get_partitions_call(db_name, tbl_name, max_parts, resultHandler, this, ___protocolFactory, ___transport); + get_partitions_call method_call = new get_partitions_call(db_name, tbl_name, max_parts, validTxnList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -9964,11 +10009,13 @@ public void get_partitions(String db_name, String tbl_name, short max_parts, org private String db_name; private String tbl_name; private short max_parts; - public get_partitions_call(String db_name, String tbl_name, short max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { 
+ private String validTxnList; + public get_partitions_call(String db_name, String tbl_name, short max_parts, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.db_name = db_name; this.tbl_name = tbl_name; this.max_parts = max_parts; + this.validTxnList = validTxnList; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { @@ -9977,6 +10024,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa args.setDb_name(db_name); args.setTbl_name(tbl_name); args.setMax_parts(max_parts); + args.setValidTxnList(validTxnList); args.write(prot); prot.writeMessageEnd(); } @@ -9991,9 +10039,9 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa } } - public void get_partitions_with_auth(String db_name, String tbl_name, short max_parts, String user_name, List<String> group_names, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void get_partitions_with_auth(String db_name, String tbl_name, short max_parts, String user_name, List<String> group_names, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - get_partitions_with_auth_call method_call = new get_partitions_with_auth_call(db_name, tbl_name, max_parts, user_name, group_names, resultHandler, this, ___protocolFactory, ___transport); + get_partitions_with_auth_call method_call = new get_partitions_with_auth_call(db_name, tbl_name, max_parts, user_name, group_names, validTxnList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -10004,13 +10052,15 @@ public void get_partitions_with_auth(String db_name, String tbl_name, short max_ private short max_parts; private String user_name; private List<String> group_names; - public get_partitions_with_auth_call(String db_name, String tbl_name, short max_parts, String user_name, List<String> group_names, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String validTxnList; + public get_partitions_with_auth_call(String db_name, String tbl_name, short max_parts, String user_name, List<String> group_names, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.db_name = db_name; this.tbl_name = tbl_name; this.max_parts = max_parts; this.user_name = user_name; this.group_names = group_names; + this.validTxnList = validTxnList; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { @@ -10021,6 +10071,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa args.setMax_parts(max_parts); args.setUser_name(user_name);
args.setGroup_names(group_names); + args.setValidTxnList(validTxnList); args.write(prot); prot.writeMessageEnd(); } @@ -10035,9 +10086,9 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa } } - public void get_partitions_pspec(String db_name, String tbl_name, int max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void get_partitions_pspec(String db_name, String tbl_name, int max_parts, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - get_partitions_pspec_call method_call = new get_partitions_pspec_call(db_name, tbl_name, max_parts, resultHandler, this, ___protocolFactory, ___transport); + get_partitions_pspec_call method_call = new get_partitions_pspec_call(db_name, tbl_name, max_parts, validTxnList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -10046,11 +10097,13 @@ public void get_partitions_pspec(String db_name, String tbl_name, int max_parts, private String db_name; private String tbl_name; private int max_parts; - public get_partitions_pspec_call(String db_name, String tbl_name, int max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String validTxnList; + public get_partitions_pspec_call(String db_name, String tbl_name, int max_parts, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.db_name = db_name; this.tbl_name = tbl_name; this.max_parts = max_parts; + this.validTxnList = validTxnList; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { @@ -10059,6 +10112,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa args.setDb_name(db_name); args.setTbl_name(tbl_name); args.setMax_parts(max_parts); + args.setValidTxnList(validTxnList); args.write(prot); prot.writeMessageEnd(); } @@ -10073,9 +10127,9 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa } } - public void get_partition_names(String db_name, String tbl_name, short max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void get_partition_names(String db_name, String tbl_name, short max_parts, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - get_partition_names_call method_call = new get_partition_names_call(db_name, tbl_name, max_parts, resultHandler, this, ___protocolFactory, ___transport); + get_partition_names_call method_call = new get_partition_names_call(db_name, tbl_name, max_parts, validTxnList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -10084,11 +10138,13 @@ public void get_partition_names(String db_name, String tbl_name, short max_parts private String db_name; private String 
tbl_name; private short max_parts; - public get_partition_names_call(String db_name, String tbl_name, short max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String validTxnList; + public get_partition_names_call(String db_name, String tbl_name, short max_parts, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.db_name = db_name; this.tbl_name = tbl_name; this.max_parts = max_parts; + this.validTxnList = validTxnList; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { @@ -10097,6 +10153,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa args.setDb_name(db_name); args.setTbl_name(tbl_name); args.setMax_parts(max_parts); + args.setValidTxnList(validTxnList); args.write(prot); prot.writeMessageEnd(); } @@ -10143,9 +10200,9 @@ public PartitionValuesResponse getResult() throws MetaException, NoSuchObjectExc } } - public void get_partitions_ps(String db_name, String tbl_name, List<String> part_vals, short max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void get_partitions_ps(String db_name, String tbl_name, List<String> part_vals, short max_parts, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - get_partitions_ps_call method_call = new get_partitions_ps_call(db_name, tbl_name, part_vals, max_parts, resultHandler, this, ___protocolFactory, ___transport); + get_partitions_ps_call method_call = new get_partitions_ps_call(db_name, tbl_name, part_vals, max_parts, validTxnList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -10155,12 +10212,14 @@ public void get_partitions_ps(String db_name, String tbl_name, List part private String tbl_name; private List<String> part_vals; private short max_parts; - public get_partitions_ps_call(String db_name, String tbl_name, List<String> part_vals, short max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String validTxnList; + public get_partitions_ps_call(String db_name, String tbl_name, List<String> part_vals, short max_parts, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.db_name = db_name; this.tbl_name = tbl_name; this.part_vals = part_vals; this.max_parts = max_parts; + this.validTxnList = validTxnList; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { @@ 
-10170,6 +10229,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa args.setTbl_name(tbl_name); args.setPart_vals(part_vals); args.setMax_parts(max_parts); + args.setValidTxnList(validTxnList); args.write(prot); prot.writeMessageEnd(); } @@ -10184,9 +10244,9 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa } } - public void get_partitions_ps_with_auth(String db_name, String tbl_name, List<String> part_vals, short max_parts, String user_name, List<String> group_names, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void get_partitions_ps_with_auth(String db_name, String tbl_name, List<String> part_vals, short max_parts, String user_name, List<String> group_names, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - get_partitions_ps_with_auth_call method_call = new get_partitions_ps_with_auth_call(db_name, tbl_name, part_vals, max_parts, user_name, group_names, resultHandler, this, ___protocolFactory, ___transport); + get_partitions_ps_with_auth_call method_call = new get_partitions_ps_with_auth_call(db_name, tbl_name, part_vals, max_parts, user_name, group_names, validTxnList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -10198,7 +10258,8 @@ public void get_partitions_ps_with_auth(String db_name, String tbl_name, List group_names; - public get_partitions_ps_with_auth_call(String db_name, String tbl_name, List<String> part_vals, short max_parts, String user_name, List<String> group_names, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String validTxnList; + public get_partitions_ps_with_auth_call(String db_name, String tbl_name, List<String> part_vals, short max_parts, String user_name, List<String> group_names, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.db_name = db_name; this.tbl_name = tbl_name; @@ -10206,6 +10267,7 @@ public get_partitions_ps_with_auth_call(String db_name, String tbl_name, List part_vals, short max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void get_partition_names_ps(String db_name, String tbl_name, List<String> part_vals, short max_parts, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - get_partition_names_ps_call method_call = new get_partition_names_ps_call(db_name, tbl_name, part_vals, max_parts, resultHandler, this, ___protocolFactory, ___transport); + get_partition_names_ps_call method_call = new get_partition_names_ps_call(db_name, tbl_name, part_vals, max_parts, validTxnList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -10243,12 +10306,14 @@ public void get_partition_names_ps(String db_name, String tbl_name, List private String tbl_name; private List<String> part_vals; private short
max_parts; - public get_partition_names_ps_call(String db_name, String tbl_name, List<String> part_vals, short max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String validTxnList; + public get_partition_names_ps_call(String db_name, String tbl_name, List<String> part_vals, short max_parts, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.db_name = db_name; this.tbl_name = tbl_name; this.part_vals = part_vals; this.max_parts = max_parts; + this.validTxnList = validTxnList; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { @@ -10258,6 +10323,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa args.setTbl_name(tbl_name); args.setPart_vals(part_vals); args.setMax_parts(max_parts); + args.setValidTxnList(validTxnList); args.write(prot); prot.writeMessageEnd(); } @@ -10272,9 +10338,9 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa } } - public void get_partitions_by_filter(String db_name, String tbl_name, String filter, short max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void get_partitions_by_filter(String db_name, String tbl_name, String filter, short max_parts, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - get_partitions_by_filter_call method_call = new get_partitions_by_filter_call(db_name, tbl_name, filter, max_parts, resultHandler, this, ___protocolFactory, ___transport); + get_partitions_by_filter_call method_call = new get_partitions_by_filter_call(db_name, tbl_name, filter, max_parts, validTxnList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -10284,12 +10350,14 @@ public void get_partitions_by_filter(String db_name, String tbl_name, String fil private String tbl_name; private String filter; private short max_parts; - public get_partitions_by_filter_call(String db_name, String tbl_name, String filter, short max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String validTxnList; + public get_partitions_by_filter_call(String db_name, String tbl_name, String filter, short max_parts, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.db_name = db_name; this.tbl_name = tbl_name; this.filter = filter; this.max_parts = max_parts; + this.validTxnList = validTxnList; } public void
write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { @@ -10299,6 +10367,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa args.setTbl_name(tbl_name); args.setFilter(filter); args.setMax_parts(max_parts); + args.setValidTxnList(validTxnList); args.write(prot); prot.writeMessageEnd(); } @@ -10313,9 +10382,9 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa } } - public void get_part_specs_by_filter(String db_name, String tbl_name, String filter, int max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void get_part_specs_by_filter(String db_name, String tbl_name, String filter, int max_parts, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - get_part_specs_by_filter_call method_call = new get_part_specs_by_filter_call(db_name, tbl_name, filter, max_parts, resultHandler, this, ___protocolFactory, ___transport); + get_part_specs_by_filter_call method_call = new get_part_specs_by_filter_call(db_name, tbl_name, filter, max_parts, validTxnList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -10325,12 +10394,14 @@ public void get_part_specs_by_filter(String db_name, String tbl_name, String fil private String tbl_name; private String filter; private int max_parts; - public get_part_specs_by_filter_call(String db_name, String tbl_name, String filter, int max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String validTxnList; + public get_part_specs_by_filter_call(String db_name, String tbl_name, String filter, int max_parts, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.db_name = db_name; this.tbl_name = tbl_name; this.filter = filter; this.max_parts = max_parts; + this.validTxnList = validTxnList; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { @@ -10340,6 +10411,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa args.setTbl_name(tbl_name); args.setFilter(filter); args.setMax_parts(max_parts); + args.setValidTxnList(validTxnList); args.write(prot); prot.writeMessageEnd(); } @@ -10386,9 +10458,9 @@ public PartitionsByExprResult getResult() throws MetaException, NoSuchObjectExce } } - public void get_num_partitions_by_filter(String db_name, String tbl_name, String filter, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void get_num_partitions_by_filter(String db_name, String tbl_name, String filter, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - get_num_partitions_by_filter_call method_call = new get_num_partitions_by_filter_call(db_name, tbl_name, filter, resultHandler, this, ___protocolFactory, 
___transport); + get_num_partitions_by_filter_call method_call = new get_num_partitions_by_filter_call(db_name, tbl_name, filter, validTxnList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -10397,11 +10469,13 @@ public void get_num_partitions_by_filter(String db_name, String tbl_name, String private String db_name; private String tbl_name; private String filter; - public get_num_partitions_by_filter_call(String db_name, String tbl_name, String filter, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String validTxnList; + public get_num_partitions_by_filter_call(String db_name, String tbl_name, String filter, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.db_name = db_name; this.tbl_name = tbl_name; this.filter = filter; + this.validTxnList = validTxnList; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { @@ -10410,6 +10484,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa args.setDb_name(db_name); args.setTbl_name(tbl_name); args.setFilter(filter); + args.setValidTxnList(validTxnList); args.write(prot); prot.writeMessageEnd(); } @@ -10424,9 +10499,9 @@ public int getResult() throws MetaException, NoSuchObjectException, org.apache.t } } - public void get_partitions_by_names(String db_name, String tbl_name, List<String> names, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void get_partitions_by_names(String db_name, String tbl_name, List<String> names, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - get_partitions_by_names_call method_call = new get_partitions_by_names_call(db_name, tbl_name, names, resultHandler, this, ___protocolFactory, ___transport); + get_partitions_by_names_call method_call = new get_partitions_by_names_call(db_name, tbl_name, names, validTxnList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -10435,11 +10510,13 @@ public void get_partitions_by_names(String db_name, String tbl_name, List names; - public get_partitions_by_names_call(String db_name, String tbl_name, List<String> names, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String validTxnList; + public get_partitions_by_names_call(String db_name, String tbl_name, List<String> names, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory,
transport, resultHandler, false); this.db_name = db_name; this.tbl_name = tbl_name; this.names = names; + this.validTxnList = validTxnList; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { @@ -10448,6 +10525,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa args.setDb_name(db_name); args.setTbl_name(tbl_name); args.setNames(names); + args.setValidTxnList(validTxnList); args.write(prot); prot.writeMessageEnd(); } @@ -11293,9 +11371,9 @@ public SetPartitionsStatsResponse getResult() throws NoSuchObjectException, Inva } } - public void get_table_column_statistics(String db_name, String tbl_name, String col_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void get_table_column_statistics(String db_name, String tbl_name, String col_name, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - get_table_column_statistics_call method_call = new get_table_column_statistics_call(db_name, tbl_name, col_name, resultHandler, this, ___protocolFactory, ___transport); + get_table_column_statistics_call method_call = new get_table_column_statistics_call(db_name, tbl_name, col_name, validWriteIdList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -11304,11 +11382,13 @@ public void get_table_column_statistics(String db_name, String tbl_name, String private String db_name; private String tbl_name; private String col_name; - public get_table_column_statistics_call(String db_name, String tbl_name, String col_name, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String validWriteIdList; + public get_table_column_statistics_call(String db_name, String tbl_name, String col_name, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.db_name = db_name; this.tbl_name = tbl_name; this.col_name = col_name; + this.validWriteIdList = validWriteIdList; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { @@ -11317,6 +11397,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa args.setDb_name(db_name); args.setTbl_name(tbl_name); args.setCol_name(col_name); + args.setValidWriteIdList(validWriteIdList); args.write(prot); prot.writeMessageEnd(); } @@ -11331,9 +11412,9 @@ public ColumnStatistics getResult() throws NoSuchObjectException, MetaException, } } - public void get_partition_column_statistics(String db_name, String tbl_name, String part_name, String col_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void get_partition_column_statistics(String db_name, String tbl_name, String part_name, String col_name, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); 
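// On the async side the snapshot rides in the *_call object and is serialized by
// write_args, as the hunks above show. A rough caller-side sketch, assuming libthrift's
// generic AsyncMethodCallback and a table-level write-id list already published to the
// conf under ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY (both are assumptions about
// the caller's environment, not guaranteed by this patch):
//
//   String writeIds = conf.get(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY);
//   asyncClient.get_table_column_statistics("default", "web_logs", "ip", writeIds,
//       new AsyncMethodCallback<ColumnStatistics>() {
//         public void onComplete(ColumnStatistics stats) { /* stats consistent with writeIds */ }
//         public void onError(Exception e) { /* surface the failure */ }
//       });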
- get_partition_column_statistics_call method_call = new get_partition_column_statistics_call(db_name, tbl_name, part_name, col_name, resultHandler, this, ___protocolFactory, ___transport); + get_partition_column_statistics_call method_call = new get_partition_column_statistics_call(db_name, tbl_name, part_name, col_name, validWriteIdList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -11343,12 +11424,14 @@ public void get_partition_column_statistics(String db_name, String tbl_name, Str private String tbl_name; private String part_name; private String col_name; - public get_partition_column_statistics_call(String db_name, String tbl_name, String part_name, String col_name, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String validWriteIdList; + public get_partition_column_statistics_call(String db_name, String tbl_name, String part_name, String col_name, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.db_name = db_name; this.tbl_name = tbl_name; this.part_name = part_name; this.col_name = col_name; + this.validWriteIdList = validWriteIdList; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { @@ -11358,6 +11441,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa args.setTbl_name(tbl_name); args.setPart_name(part_name); args.setCol_name(col_name); + args.setValidWriteIdList(validWriteIdList); args.write(prot); prot.writeMessageEnd(); } @@ -15796,7 +15880,7 @@ protected boolean isOneway() { public get_fields_result getResult(I iface, get_fields_args args) throws org.apache.thrift.TException { get_fields_result result = new get_fields_result(); try { - result.success = iface.get_fields(args.db_name, args.table_name); + result.success = iface.get_fields(args.db_name, args.table_name, args.validWriteIdList); } catch (MetaException o1) { result.o1 = o1; } catch (UnknownTableException o2) { @@ -15824,7 +15908,7 @@ protected boolean isOneway() { public get_fields_with_environment_context_result getResult(I iface, get_fields_with_environment_context_args args) throws org.apache.thrift.TException { get_fields_with_environment_context_result result = new get_fields_with_environment_context_result(); try { - result.success = iface.get_fields_with_environment_context(args.db_name, args.table_name, args.environment_context); + result.success = iface.get_fields_with_environment_context(args.db_name, args.table_name, args.environment_context, args.validWriteIdList); } catch (MetaException o1) { result.o1 = o1; } catch (UnknownTableException o2) { @@ -15852,7 +15936,7 @@ protected boolean isOneway() { public get_schema_result getResult(I iface, get_schema_args args) throws org.apache.thrift.TException { get_schema_result result = new get_schema_result(); try { - result.success = iface.get_schema(args.db_name, args.table_name); + result.success = iface.get_schema(args.db_name, args.table_name, 
args.validWriteIdList); } catch (MetaException o1) { result.o1 = o1; } catch (UnknownTableException o2) { @@ -15880,7 +15964,7 @@ protected boolean isOneway() { public get_schema_with_environment_context_result getResult(I iface, get_schema_with_environment_context_args args) throws org.apache.thrift.TException { get_schema_with_environment_context_result result = new get_schema_with_environment_context_result(); try { - result.success = iface.get_schema_with_environment_context(args.db_name, args.table_name, args.environment_context); + result.success = iface.get_schema_with_environment_context(args.db_name, args.table_name, args.environment_context, args.validWriteIdList); } catch (MetaException o1) { result.o1 = o1; } catch (UnknownTableException o2) { @@ -16424,7 +16508,7 @@ protected boolean isOneway() { public get_table_result getResult(I iface, get_table_args args) throws org.apache.thrift.TException { get_table_result result = new get_table_result(); try { - result.success = iface.get_table(args.dbname, args.tbl_name); + result.success = iface.get_table(args.dbname, args.tbl_name, args.validWriteIdList); } catch (MetaException o1) { result.o1 = o1; } catch (NoSuchObjectException o2) { @@ -17124,7 +17208,7 @@ protected boolean isOneway() { public get_partition_result getResult(I iface, get_partition_args args) throws org.apache.thrift.TException { get_partition_result result = new get_partition_result(); try { - result.success = iface.get_partition(args.db_name, args.tbl_name, args.part_vals); + result.success = iface.get_partition(args.db_name, args.tbl_name, args.part_vals, args.validTxnList); } catch (MetaException o1) { result.o1 = o1; } catch (NoSuchObjectException o2) { @@ -17210,7 +17294,7 @@ protected boolean isOneway() { public get_partition_with_auth_result getResult(I iface, get_partition_with_auth_args args) throws org.apache.thrift.TException { get_partition_with_auth_result result = new get_partition_with_auth_result(); try { - result.success = iface.get_partition_with_auth(args.db_name, args.tbl_name, args.part_vals, args.user_name, args.group_names); + result.success = iface.get_partition_with_auth(args.db_name, args.tbl_name, args.part_vals, args.user_name, args.group_names, args.validTxnList); } catch (MetaException o1) { result.o1 = o1; } catch (NoSuchObjectException o2) { @@ -17236,7 +17320,7 @@ protected boolean isOneway() { public get_partition_by_name_result getResult(I iface, get_partition_by_name_args args) throws org.apache.thrift.TException { get_partition_by_name_result result = new get_partition_by_name_result(); try { - result.success = iface.get_partition_by_name(args.db_name, args.tbl_name, args.part_name); + result.success = iface.get_partition_by_name(args.db_name, args.tbl_name, args.part_name, args.validTxnList); } catch (MetaException o1) { result.o1 = o1; } catch (NoSuchObjectException o2) { @@ -17262,7 +17346,7 @@ protected boolean isOneway() { public get_partitions_result getResult(I iface, get_partitions_args args) throws org.apache.thrift.TException { get_partitions_result result = new get_partitions_result(); try { - result.success = iface.get_partitions(args.db_name, args.tbl_name, args.max_parts); + result.success = iface.get_partitions(args.db_name, args.tbl_name, args.max_parts, args.validTxnList); } catch (NoSuchObjectException o1) { result.o1 = o1; } catch (MetaException o2) { @@ -17288,7 +17372,7 @@ protected boolean isOneway() { public get_partitions_with_auth_result getResult(I iface, get_partitions_with_auth_args args) throws 
org.apache.thrift.TException { get_partitions_with_auth_result result = new get_partitions_with_auth_result(); try { - result.success = iface.get_partitions_with_auth(args.db_name, args.tbl_name, args.max_parts, args.user_name, args.group_names); + result.success = iface.get_partitions_with_auth(args.db_name, args.tbl_name, args.max_parts, args.user_name, args.group_names, args.validTxnList); } catch (NoSuchObjectException o1) { result.o1 = o1; } catch (MetaException o2) { @@ -17314,7 +17398,7 @@ protected boolean isOneway() { public get_partitions_pspec_result getResult(I iface, get_partitions_pspec_args args) throws org.apache.thrift.TException { get_partitions_pspec_result result = new get_partitions_pspec_result(); try { - result.success = iface.get_partitions_pspec(args.db_name, args.tbl_name, args.max_parts); + result.success = iface.get_partitions_pspec(args.db_name, args.tbl_name, args.max_parts, args.validTxnList); } catch (NoSuchObjectException o1) { result.o1 = o1; } catch (MetaException o2) { @@ -17340,7 +17424,7 @@ protected boolean isOneway() { public get_partition_names_result getResult(I iface, get_partition_names_args args) throws org.apache.thrift.TException { get_partition_names_result result = new get_partition_names_result(); try { - result.success = iface.get_partition_names(args.db_name, args.tbl_name, args.max_parts); + result.success = iface.get_partition_names(args.db_name, args.tbl_name, args.max_parts, args.validTxnList); } catch (NoSuchObjectException o1) { result.o1 = o1; } catch (MetaException o2) { @@ -17392,7 +17476,7 @@ protected boolean isOneway() { public get_partitions_ps_result getResult(I iface, get_partitions_ps_args args) throws org.apache.thrift.TException { get_partitions_ps_result result = new get_partitions_ps_result(); try { - result.success = iface.get_partitions_ps(args.db_name, args.tbl_name, args.part_vals, args.max_parts); + result.success = iface.get_partitions_ps(args.db_name, args.tbl_name, args.part_vals, args.max_parts, args.validTxnList); } catch (MetaException o1) { result.o1 = o1; } catch (NoSuchObjectException o2) { @@ -17418,7 +17502,7 @@ protected boolean isOneway() { public get_partitions_ps_with_auth_result getResult(I iface, get_partitions_ps_with_auth_args args) throws org.apache.thrift.TException { get_partitions_ps_with_auth_result result = new get_partitions_ps_with_auth_result(); try { - result.success = iface.get_partitions_ps_with_auth(args.db_name, args.tbl_name, args.part_vals, args.max_parts, args.user_name, args.group_names); + result.success = iface.get_partitions_ps_with_auth(args.db_name, args.tbl_name, args.part_vals, args.max_parts, args.user_name, args.group_names, args.validTxnList); } catch (NoSuchObjectException o1) { result.o1 = o1; } catch (MetaException o2) { @@ -17444,7 +17528,7 @@ protected boolean isOneway() { public get_partition_names_ps_result getResult(I iface, get_partition_names_ps_args args) throws org.apache.thrift.TException { get_partition_names_ps_result result = new get_partition_names_ps_result(); try { - result.success = iface.get_partition_names_ps(args.db_name, args.tbl_name, args.part_vals, args.max_parts); + result.success = iface.get_partition_names_ps(args.db_name, args.tbl_name, args.part_vals, args.max_parts, args.validTxnList); } catch (MetaException o1) { result.o1 = o1; } catch (NoSuchObjectException o2) { @@ -17470,7 +17554,7 @@ protected boolean isOneway() { public get_partitions_by_filter_result getResult(I iface, get_partitions_by_filter_args args) throws 
org.apache.thrift.TException { get_partitions_by_filter_result result = new get_partitions_by_filter_result(); try { - result.success = iface.get_partitions_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts); + result.success = iface.get_partitions_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts, args.validTxnList); } catch (MetaException o1) { result.o1 = o1; } catch (NoSuchObjectException o2) { @@ -17496,7 +17580,7 @@ protected boolean isOneway() { public get_part_specs_by_filter_result getResult(I iface, get_part_specs_by_filter_args args) throws org.apache.thrift.TException { get_part_specs_by_filter_result result = new get_part_specs_by_filter_result(); try { - result.success = iface.get_part_specs_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts); + result.success = iface.get_part_specs_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts, args.validTxnList); } catch (MetaException o1) { result.o1 = o1; } catch (NoSuchObjectException o2) { @@ -17548,7 +17632,7 @@ protected boolean isOneway() { public get_num_partitions_by_filter_result getResult(I iface, get_num_partitions_by_filter_args args) throws org.apache.thrift.TException { get_num_partitions_by_filter_result result = new get_num_partitions_by_filter_result(); try { - result.success = iface.get_num_partitions_by_filter(args.db_name, args.tbl_name, args.filter); + result.success = iface.get_num_partitions_by_filter(args.db_name, args.tbl_name, args.filter, args.validTxnList); result.setSuccessIsSet(true); } catch (MetaException o1) { result.o1 = o1; @@ -17575,7 +17659,7 @@ protected boolean isOneway() { public get_partitions_by_names_result getResult(I iface, get_partitions_by_names_args args) throws org.apache.thrift.TException { get_partitions_by_names_result result = new get_partitions_by_names_result(); try { - result.success = iface.get_partitions_by_names(args.db_name, args.tbl_name, args.names); + result.success = iface.get_partitions_by_names(args.db_name, args.tbl_name, args.names, args.validTxnList); } catch (MetaException o1) { result.o1 = o1; } catch (NoSuchObjectException o2) { @@ -18253,7 +18337,7 @@ protected boolean isOneway() { public get_table_column_statistics_result getResult(I iface, get_table_column_statistics_args args) throws org.apache.thrift.TException { get_table_column_statistics_result result = new get_table_column_statistics_result(); try { - result.success = iface.get_table_column_statistics(args.db_name, args.tbl_name, args.col_name); + result.success = iface.get_table_column_statistics(args.db_name, args.tbl_name, args.col_name, args.validWriteIdList); } catch (NoSuchObjectException o1) { result.o1 = o1; } catch (MetaException o2) { @@ -18283,7 +18367,7 @@ protected boolean isOneway() { public get_partition_column_statistics_result getResult(I iface, get_partition_column_statistics_args args) throws org.apache.thrift.TException { get_partition_column_statistics_result result = new get_partition_column_statistics_result(); try { - result.success = iface.get_partition_column_statistics(args.db_name, args.tbl_name, args.part_name, args.col_name); + result.success = iface.get_partition_column_statistics(args.db_name, args.tbl_name, args.part_name, args.col_name, args.validWriteIdList); } catch (NoSuchObjectException o1) { result.o1 = o1; } catch (MetaException o2) { @@ -22435,7 +22519,7 @@ protected boolean isOneway() { } public void start(I iface, get_fields_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) 
throws TException { - iface.get_fields(args.db_name, args.table_name,resultHandler); + iface.get_fields(args.db_name, args.table_name, args.validWriteIdList,resultHandler); } } @@ -22502,7 +22586,7 @@ protected boolean isOneway() { } public void start(I iface, get_fields_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { - iface.get_fields_with_environment_context(args.db_name, args.table_name, args.environment_context,resultHandler); + iface.get_fields_with_environment_context(args.db_name, args.table_name, args.environment_context, args.validWriteIdList,resultHandler); } } @@ -22569,7 +22653,7 @@ protected boolean isOneway() { } public void start(I iface, get_schema_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { - iface.get_schema(args.db_name, args.table_name,resultHandler); + iface.get_schema(args.db_name, args.table_name, args.validWriteIdList,resultHandler); } } @@ -22636,7 +22720,7 @@ protected boolean isOneway() { } public void start(I iface, get_schema_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { - iface.get_schema_with_environment_context(args.db_name, args.table_name, args.environment_context,resultHandler); + iface.get_schema_with_environment_context(args.db_name, args.table_name, args.environment_context, args.validWriteIdList,resultHandler); } } @@ -23915,7 +23999,7 @@ protected boolean isOneway() { } public void start(I iface, get_table_args args, org.apache.thrift.async.AsyncMethodCallback
resultHandler) throws TException { - iface.get_table(args.dbname, args.tbl_name,resultHandler); + iface.get_table(args.dbname, args.tbl_name, args.validWriteIdList,resultHandler); } } @@ -25578,7 +25662,7 @@ protected boolean isOneway() { } public void start(I iface, get_partition_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.get_partition(args.db_name, args.tbl_name, args.part_vals,resultHandler); + iface.get_partition(args.db_name, args.tbl_name, args.part_vals, args.validTxnList,resultHandler); } } @@ -25784,7 +25868,7 @@ protected boolean isOneway() { } public void start(I iface, get_partition_with_auth_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.get_partition_with_auth(args.db_name, args.tbl_name, args.part_vals, args.user_name, args.group_names,resultHandler); + iface.get_partition_with_auth(args.db_name, args.tbl_name, args.part_vals, args.user_name, args.group_names, args.validTxnList,resultHandler); } } @@ -25846,7 +25930,7 @@ protected boolean isOneway() { } public void start(I iface, get_partition_by_name_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.get_partition_by_name(args.db_name, args.tbl_name, args.part_name,resultHandler); + iface.get_partition_by_name(args.db_name, args.tbl_name, args.part_name, args.validTxnList,resultHandler); } } @@ -25908,7 +25992,7 @@ protected boolean isOneway() { } public void start(I iface, get_partitions_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { - iface.get_partitions(args.db_name, args.tbl_name, args.max_parts,resultHandler); + iface.get_partitions(args.db_name, args.tbl_name, args.max_parts, args.validTxnList,resultHandler); } } @@ -25970,7 +26054,7 @@ protected boolean isOneway() { } public void start(I iface, get_partitions_with_auth_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { - iface.get_partitions_with_auth(args.db_name, args.tbl_name, args.max_parts, args.user_name, args.group_names,resultHandler); + iface.get_partitions_with_auth(args.db_name, args.tbl_name, args.max_parts, args.user_name, args.group_names, args.validTxnList,resultHandler); } } @@ -26032,7 +26116,7 @@ protected boolean isOneway() { } public void start(I iface, get_partitions_pspec_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { - iface.get_partitions_pspec(args.db_name, args.tbl_name, args.max_parts,resultHandler); + iface.get_partitions_pspec(args.db_name, args.tbl_name, args.max_parts, args.validTxnList,resultHandler); } } @@ -26094,7 +26178,7 @@ protected boolean isOneway() { } public void start(I iface, get_partition_names_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { - iface.get_partition_names(args.db_name, args.tbl_name, args.max_parts,resultHandler); + iface.get_partition_names(args.db_name, args.tbl_name, args.max_parts, args.validTxnList,resultHandler); } } @@ -26218,7 +26302,7 @@ protected boolean isOneway() { } public void start(I iface, get_partitions_ps_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { - iface.get_partitions_ps(args.db_name, args.tbl_name, args.part_vals, args.max_parts,resultHandler); + iface.get_partitions_ps(args.db_name, args.tbl_name, args.part_vals, args.max_parts, args.validTxnList,resultHandler); } } @@ -26280,7 +26364,7 @@ protected boolean isOneway() { } 
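
Each of these AsyncProcessor start() methods now forwards the extra snapshot argument from the deserialized args struct into the AsyncIface, mirroring the synchronous getResult() changes earlier in the patch. A minimal server-side sketch of what an implementation receives under the widened signature; fetchPartitionsForSnapshot is a hypothetical helper, not a Hive API:

    import java.util.List;
    import org.apache.hadoop.hive.metastore.api.Partition;
    import org.apache.thrift.TException;
    import org.apache.thrift.async.AsyncMethodCallback;

    // One method of a hypothetical AsyncIface implementation.
    public void get_partitions(String dbName, String tblName, short maxParts,
        String validTxnList, // serialized snapshot; null preserves old behavior
        AsyncMethodCallback<List<Partition>> resultHandler) throws TException {
      try {
        // fetchPartitionsForSnapshot stands in for a snapshot-aware lookup.
        resultHandler.onComplete(
            fetchPartitionsForSnapshot(dbName, tblName, maxParts, validTxnList));
      } catch (Exception e) {
        resultHandler.onError(e);
      }
    }
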
public void start(I iface, get_partitions_ps_with_auth_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { - iface.get_partitions_ps_with_auth(args.db_name, args.tbl_name, args.part_vals, args.max_parts, args.user_name, args.group_names,resultHandler); + iface.get_partitions_ps_with_auth(args.db_name, args.tbl_name, args.part_vals, args.max_parts, args.user_name, args.group_names, args.validTxnList,resultHandler); } } @@ -26342,7 +26426,7 @@ protected boolean isOneway() { } public void start(I iface, get_partition_names_ps_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { - iface.get_partition_names_ps(args.db_name, args.tbl_name, args.part_vals, args.max_parts,resultHandler); + iface.get_partition_names_ps(args.db_name, args.tbl_name, args.part_vals, args.max_parts, args.validTxnList,resultHandler); } } @@ -26404,7 +26488,7 @@ protected boolean isOneway() { } public void start(I iface, get_partitions_by_filter_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { - iface.get_partitions_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts,resultHandler); + iface.get_partitions_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts, args.validTxnList,resultHandler); } } @@ -26466,7 +26550,7 @@ protected boolean isOneway() { } public void start(I iface, get_part_specs_by_filter_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { - iface.get_part_specs_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts,resultHandler); + iface.get_part_specs_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts, args.validTxnList,resultHandler); } } @@ -26591,7 +26675,7 @@ protected boolean isOneway() { } public void start(I iface, get_num_partitions_by_filter_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.get_num_partitions_by_filter(args.db_name, args.tbl_name, args.filter,resultHandler); + iface.get_num_partitions_by_filter(args.db_name, args.tbl_name, args.filter, args.validTxnList,resultHandler); } } @@ -26653,7 +26737,7 @@ protected boolean isOneway() { } public void start(I iface, get_partitions_by_names_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { - iface.get_partitions_by_names(args.db_name, args.tbl_name, args.names,resultHandler); + iface.get_partitions_by_names(args.db_name, args.tbl_name, args.names, args.validTxnList,resultHandler); } } @@ -28271,7 +28355,7 @@ protected boolean isOneway() { } public void start(I iface, get_table_column_statistics_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.get_table_column_statistics(args.db_name, args.tbl_name, args.col_name,resultHandler); + iface.get_table_column_statistics(args.db_name, args.tbl_name, args.col_name, args.validWriteIdList,resultHandler); } } @@ -28343,7 +28427,7 @@ protected boolean isOneway() { } public void start(I iface, get_partition_column_statistics_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.get_partition_column_statistics(args.db_name, args.tbl_name, args.part_name, args.col_name,resultHandler); + iface.get_partition_column_statistics(args.db_name, args.tbl_name, args.part_name, args.col_name, args.validWriteIdList,resultHandler); } } @@ -44894,13 +44978,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, 
get_databases_resul case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1080 = iprot.readListBegin(); - struct.success = new ArrayList(_list1080.size); - String _elem1081; - for (int _i1082 = 0; _i1082 < _list1080.size; ++_i1082) + org.apache.thrift.protocol.TList _list1088 = iprot.readListBegin(); + struct.success = new ArrayList(_list1088.size); + String _elem1089; + for (int _i1090 = 0; _i1090 < _list1088.size; ++_i1090) { - _elem1081 = iprot.readString(); - struct.success.add(_elem1081); + _elem1089 = iprot.readString(); + struct.success.add(_elem1089); } iprot.readListEnd(); } @@ -44935,9 +45019,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_databases_resu oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1083 : struct.success) + for (String _iter1091 : struct.success) { - oprot.writeString(_iter1083); + oprot.writeString(_iter1091); } oprot.writeListEnd(); } @@ -44976,9 +45060,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_databases_resul if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1084 : struct.success) + for (String _iter1092 : struct.success) { - oprot.writeString(_iter1084); + oprot.writeString(_iter1092); } } } @@ -44993,13 +45077,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_databases_result BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1085 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1085.size); - String _elem1086; - for (int _i1087 = 0; _i1087 < _list1085.size; ++_i1087) + org.apache.thrift.protocol.TList _list1093 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1093.size); + String _elem1094; + for (int _i1095 = 0; _i1095 < _list1093.size; ++_i1095) { - _elem1086 = iprot.readString(); - struct.success.add(_elem1086); + _elem1094 = iprot.readString(); + struct.success.add(_elem1094); } } struct.setSuccessIsSet(true); @@ -45653,13 +45737,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_databases_r case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1088 = iprot.readListBegin(); - struct.success = new ArrayList(_list1088.size); - String _elem1089; - for (int _i1090 = 0; _i1090 < _list1088.size; ++_i1090) + org.apache.thrift.protocol.TList _list1096 = iprot.readListBegin(); + struct.success = new ArrayList(_list1096.size); + String _elem1097; + for (int _i1098 = 0; _i1098 < _list1096.size; ++_i1098) { - _elem1089 = iprot.readString(); - struct.success.add(_elem1089); + _elem1097 = iprot.readString(); + struct.success.add(_elem1097); } iprot.readListEnd(); } @@ -45694,9 +45778,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_databases_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1091 : struct.success) + for (String _iter1099 : struct.success) { - oprot.writeString(_iter1091); + oprot.writeString(_iter1099); } oprot.writeListEnd(); } @@ -45735,9 +45819,9 @@ 
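
The _listNNNN/_elemNNNN/_iterNNNN renames in these hunks carry no behavioral change: the Thrift compiler numbers its generated temporaries globally, so the parameters added upstream shifted every later index by eight (e.g. _list1080 to _list1088). Both sides of each hunk compile to the same standard list-read idiom; a hand-written equivalent, assuming a TProtocol iprot positioned at the list field:

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.thrift.protocol.TList;

    // Equivalent of the generated loop, with the temporaries named plainly.
    TList header = iprot.readListBegin();
    List<String> success = new ArrayList<String>(header.size);
    for (int i = 0; i < header.size; ++i) {
      success.add(iprot.readString());
    }
    iprot.readListEnd();
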
public void write(org.apache.thrift.protocol.TProtocol prot, get_all_databases_r if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1092 : struct.success) + for (String _iter1100 : struct.success) { - oprot.writeString(_iter1092); + oprot.writeString(_iter1100); } } } @@ -45752,13 +45836,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_databases_re BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1093 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1093.size); - String _elem1094; - for (int _i1095 = 0; _i1095 < _list1093.size; ++_i1095) + org.apache.thrift.protocol.TList _list1101 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1101.size); + String _elem1102; + for (int _i1103 = 0; _i1103 < _list1101.size; ++_i1103) { - _elem1094 = iprot.readString(); - struct.success.add(_elem1094); + _elem1102 = iprot.readString(); + struct.success.add(_elem1102); } } struct.setSuccessIsSet(true); @@ -50365,16 +50449,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_type_all_result case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1096 = iprot.readMapBegin(); - struct.success = new HashMap(2*_map1096.size); - String _key1097; - Type _val1098; - for (int _i1099 = 0; _i1099 < _map1096.size; ++_i1099) + org.apache.thrift.protocol.TMap _map1104 = iprot.readMapBegin(); + struct.success = new HashMap(2*_map1104.size); + String _key1105; + Type _val1106; + for (int _i1107 = 0; _i1107 < _map1104.size; ++_i1107) { - _key1097 = iprot.readString(); - _val1098 = new Type(); - _val1098.read(iprot); - struct.success.put(_key1097, _val1098); + _key1105 = iprot.readString(); + _val1106 = new Type(); + _val1106.read(iprot); + struct.success.put(_key1105, _val1106); } iprot.readMapEnd(); } @@ -50409,10 +50493,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_type_all_resul oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Map.Entry _iter1100 : struct.success.entrySet()) + for (Map.Entry _iter1108 : struct.success.entrySet()) { - oprot.writeString(_iter1100.getKey()); - _iter1100.getValue().write(oprot); + oprot.writeString(_iter1108.getKey()); + _iter1108.getValue().write(oprot); } oprot.writeMapEnd(); } @@ -50451,10 +50535,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_type_all_result if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Map.Entry _iter1101 : struct.success.entrySet()) + for (Map.Entry _iter1109 : struct.success.entrySet()) { - oprot.writeString(_iter1101.getKey()); - _iter1101.getValue().write(oprot); + oprot.writeString(_iter1109.getKey()); + _iter1109.getValue().write(oprot); } } } @@ -50469,16 +50553,16 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_type_all_result BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map1102 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new HashMap(2*_map1102.size); - String 
_key1103; - Type _val1104; - for (int _i1105 = 0; _i1105 < _map1102.size; ++_i1105) + org.apache.thrift.protocol.TMap _map1110 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new HashMap(2*_map1110.size); + String _key1111; + Type _val1112; + for (int _i1113 = 0; _i1113 < _map1110.size; ++_i1113) { - _key1103 = iprot.readString(); - _val1104 = new Type(); - _val1104.read(iprot); - struct.success.put(_key1103, _val1104); + _key1111 = iprot.readString(); + _val1112 = new Type(); + _val1112.read(iprot); + struct.success.put(_key1111, _val1112); } } struct.setSuccessIsSet(true); @@ -50498,6 +50582,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_type_all_result private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("table_name", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)3); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -50507,11 +50592,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_type_all_result private String db_name; // required private String table_name; // required + private String validWriteIdList; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "db_name"), - TABLE_NAME((short)2, "table_name"); + TABLE_NAME((short)2, "table_name"), + VALID_WRITE_ID_LIST((short)3, "validWriteIdList"); private static final Map byName = new HashMap(); @@ -50530,6 +50617,8 @@ public static _Fields findByThriftId(int fieldId) { return DB_NAME; case 2: // TABLE_NAME return TABLE_NAME; + case 3: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; default: return null; } @@ -50577,6 +50666,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("table_name", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_fields_args.class, metaDataMap); } @@ -50586,11 +50677,13 @@ public get_fields_args() { public get_fields_args( String db_name, - String table_name) + String table_name, + String validWriteIdList) { this(); this.db_name = db_name; this.table_name = table_name; + this.validWriteIdList = validWriteIdList; } /** @@ -50603,6 +50696,9 @@ public get_fields_args(get_fields_args other) { if (other.isSetTable_name()) { this.table_name = other.table_name; } + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } } public get_fields_args deepCopy() { @@ -50613,6 +50709,7 @@ public get_fields_args deepCopy() { public void clear() { this.db_name = null; this.table_name = null; + this.validWriteIdList = null; } public String getDb_name() { @@ -50661,6 +50758,29 @@ public void setTable_nameIsSet(boolean value) { } } + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -50679,6 +50799,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + } } @@ -50690,6 +50818,9 @@ public Object getFieldValue(_Fields field) { case TABLE_NAME: return getTable_name(); + case VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + } throw new IllegalStateException(); } @@ -50705,6 +50836,8 @@ public boolean isSet(_Fields field) { return isSetDb_name(); case TABLE_NAME: return isSetTable_name(); + case VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); } throw new IllegalStateException(); } @@ -50740,6 +50873,15 @@ public boolean equals(get_fields_args that) { return false; } + boolean 
this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if (!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + return true; } @@ -50757,6 +50899,11 @@ public int hashCode() { if (present_table_name) list.add(table_name); + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + return list.hashCode(); } @@ -50788,6 +50935,16 @@ public int compareTo(get_fields_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -50823,6 +50980,14 @@ public String toString() { sb.append(this.table_name); } first = false; + if (!first) sb.append(", "); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -50882,6 +51047,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_fields_args str org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 3: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -50905,6 +51078,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_fields_args st oprot.writeString(struct.table_name); oprot.writeFieldEnd(); } + if (struct.validWriteIdList != null) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -50929,19 +51107,25 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_fields_args str if (struct.isSetTable_name()) { optionals.set(1); } - oprot.writeBitSet(optionals, 2); + if (struct.isSetValidWriteIdList()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); if (struct.isSetDb_name()) { oprot.writeString(struct.db_name); } if (struct.isSetTable_name()) { oprot.writeString(struct.table_name); } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, get_fields_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(2); + BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); @@ -50950,6 +51134,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_fields_args stru struct.table_name = iprot.readString(); struct.setTable_nameIsSet(true); } + if (incoming.get(2)) { + 
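
In the tuple scheme the new field costs one extra bit: the writer declares a three-bit optionals set instead of two, and the reader requests the same width. Because TTupleProtocol puts no field identifiers on the wire, writer and reader must be regenerated together; a sketch of the write side paired with the read hunk here:

    import java.util.BitSet;

    // Writer side of the tuple-scheme handshake after this patch.
    BitSet optionals = new BitSet();
    if (struct.isSetDb_name())          optionals.set(0);
    if (struct.isSetTable_name())       optionals.set(1);
    if (struct.isSetValidWriteIdList()) optionals.set(2);
    oprot.writeBitSet(optionals, 3); // was writeBitSet(optionals, 2) before
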
struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } } } @@ -51513,14 +51701,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_fields_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1106 = iprot.readListBegin(); - struct.success = new ArrayList(_list1106.size); - FieldSchema _elem1107; - for (int _i1108 = 0; _i1108 < _list1106.size; ++_i1108) + org.apache.thrift.protocol.TList _list1114 = iprot.readListBegin(); + struct.success = new ArrayList(_list1114.size); + FieldSchema _elem1115; + for (int _i1116 = 0; _i1116 < _list1114.size; ++_i1116) { - _elem1107 = new FieldSchema(); - _elem1107.read(iprot); - struct.success.add(_elem1107); + _elem1115 = new FieldSchema(); + _elem1115.read(iprot); + struct.success.add(_elem1115); } iprot.readListEnd(); } @@ -51573,9 +51761,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_fields_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter1109 : struct.success) + for (FieldSchema _iter1117 : struct.success) { - _iter1109.write(oprot); + _iter1117.write(oprot); } oprot.writeListEnd(); } @@ -51630,9 +51818,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_fields_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter1110 : struct.success) + for (FieldSchema _iter1118 : struct.success) { - _iter1110.write(oprot); + _iter1118.write(oprot); } } } @@ -51653,14 +51841,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_fields_result st BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1111 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1111.size); - FieldSchema _elem1112; - for (int _i1113 = 0; _i1113 < _list1111.size; ++_i1113) + org.apache.thrift.protocol.TList _list1119 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1119.size); + FieldSchema _elem1120; + for (int _i1121 = 0; _i1121 < _list1119.size; ++_i1121) { - _elem1112 = new FieldSchema(); - _elem1112.read(iprot); - struct.success.add(_elem1112); + _elem1120 = new FieldSchema(); + _elem1120.read(iprot); + struct.success.add(_elem1120); } } struct.setSuccessIsSet(true); @@ -51691,6 +51879,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_fields_result st private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("table_name", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField ENVIRONMENT_CONTEXT_FIELD_DESC = new org.apache.thrift.protocol.TField("environment_context", org.apache.thrift.protocol.TType.STRUCT, (short)3); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)4); private static final Map, SchemeFactory> schemes = new HashMap, 
SchemeFactory>(); static { @@ -51701,12 +51890,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_fields_result st private String db_name; // required private String table_name; // required private EnvironmentContext environment_context; // required + private String validWriteIdList; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "db_name"), TABLE_NAME((short)2, "table_name"), - ENVIRONMENT_CONTEXT((short)3, "environment_context"); + ENVIRONMENT_CONTEXT((short)3, "environment_context"), + VALID_WRITE_ID_LIST((short)4, "validWriteIdList"); private static final Map byName = new HashMap(); @@ -51727,6 +51918,8 @@ public static _Fields findByThriftId(int fieldId) { return TABLE_NAME; case 3: // ENVIRONMENT_CONTEXT return ENVIRONMENT_CONTEXT; + case 4: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; default: return null; } @@ -51776,6 +51969,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.ENVIRONMENT_CONTEXT, new org.apache.thrift.meta_data.FieldMetaData("environment_context", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, EnvironmentContext.class))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_fields_with_environment_context_args.class, metaDataMap); } @@ -51786,12 +51981,14 @@ public get_fields_with_environment_context_args() { public get_fields_with_environment_context_args( String db_name, String table_name, - EnvironmentContext environment_context) + EnvironmentContext environment_context, + String validWriteIdList) { this(); this.db_name = db_name; this.table_name = table_name; this.environment_context = environment_context; + this.validWriteIdList = validWriteIdList; } /** @@ -51807,6 +52004,9 @@ public get_fields_with_environment_context_args(get_fields_with_environment_cont if (other.isSetEnvironment_context()) { this.environment_context = new EnvironmentContext(other.environment_context); } + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } } public get_fields_with_environment_context_args deepCopy() { @@ -51818,6 +52018,7 @@ public void clear() { this.db_name = null; this.table_name = null; this.environment_context = null; + this.validWriteIdList = null; } public String getDb_name() { @@ -51889,6 +52090,29 @@ public void setEnvironment_contextIsSet(boolean value) { } } + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + public void 
setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -51915,6 +52139,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + } } @@ -51929,6 +52161,9 @@ public Object getFieldValue(_Fields field) { case ENVIRONMENT_CONTEXT: return getEnvironment_context(); + case VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + } throw new IllegalStateException(); } @@ -51946,6 +52181,8 @@ public boolean isSet(_Fields field) { return isSetTable_name(); case ENVIRONMENT_CONTEXT: return isSetEnvironment_context(); + case VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); } throw new IllegalStateException(); } @@ -51990,6 +52227,15 @@ public boolean equals(get_fields_with_environment_context_args that) { return false; } + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if (!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + return true; } @@ -52012,6 +52258,11 @@ public int hashCode() { if (present_environment_context) list.add(environment_context); + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + return list.hashCode(); } @@ -52053,6 +52304,16 @@ public int compareTo(get_fields_with_environment_context_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -52096,6 +52357,14 @@ public String toString() { sb.append(this.environment_context); } first = false; + if (!first) sb.append(", "); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -52167,6 +52436,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_fields_with_env org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 4: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -52195,6 +52472,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_fields_with_en struct.environment_context.write(oprot); oprot.writeFieldEnd(); } + if (struct.validWriteIdList != null) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -52222,7 +52504,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_fields_with_env if 
(struct.isSetEnvironment_context()) { optionals.set(2); } - oprot.writeBitSet(optionals, 3); + if (struct.isSetValidWriteIdList()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); if (struct.isSetDb_name()) { oprot.writeString(struct.db_name); } @@ -52232,12 +52517,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_fields_with_env if (struct.isSetEnvironment_context()) { struct.environment_context.write(oprot); } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, get_fields_with_environment_context_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(3); + BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); @@ -52251,6 +52539,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_fields_with_envi struct.environment_context.read(iprot); struct.setEnvironment_contextIsSet(true); } + if (incoming.get(3)) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } } } @@ -52814,14 +53106,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_fields_with_env case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1114 = iprot.readListBegin(); - struct.success = new ArrayList(_list1114.size); - FieldSchema _elem1115; - for (int _i1116 = 0; _i1116 < _list1114.size; ++_i1116) + org.apache.thrift.protocol.TList _list1122 = iprot.readListBegin(); + struct.success = new ArrayList(_list1122.size); + FieldSchema _elem1123; + for (int _i1124 = 0; _i1124 < _list1122.size; ++_i1124) { - _elem1115 = new FieldSchema(); - _elem1115.read(iprot); - struct.success.add(_elem1115); + _elem1123 = new FieldSchema(); + _elem1123.read(iprot); + struct.success.add(_elem1123); } iprot.readListEnd(); } @@ -52874,9 +53166,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_fields_with_en oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter1117 : struct.success) + for (FieldSchema _iter1125 : struct.success) { - _iter1117.write(oprot); + _iter1125.write(oprot); } oprot.writeListEnd(); } @@ -52931,9 +53223,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_fields_with_env if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter1118 : struct.success) + for (FieldSchema _iter1126 : struct.success) { - _iter1118.write(oprot); + _iter1126.write(oprot); } } } @@ -52954,14 +53246,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_fields_with_envi BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1119 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1119.size); - FieldSchema _elem1120; - for (int _i1121 = 0; _i1121 < _list1119.size; ++_i1121) + org.apache.thrift.protocol.TList _list1127 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1127.size); + FieldSchema _elem1128; + for (int _i1129 = 0; _i1129 < _list1127.size; ++_i1129) { - _elem1120 = 
new FieldSchema(); - _elem1120.read(iprot); - struct.success.add(_elem1120); + _elem1128 = new FieldSchema(); + _elem1128.read(iprot); + struct.success.add(_elem1128); } } struct.setSuccessIsSet(true); @@ -52991,6 +53283,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_fields_with_envi private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("table_name", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)3); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -53000,11 +53293,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_fields_with_envi private String db_name; // required private String table_name; // required + private String validWriteIdList; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "db_name"), - TABLE_NAME((short)2, "table_name"); + TABLE_NAME((short)2, "table_name"), + VALID_WRITE_ID_LIST((short)3, "validWriteIdList"); private static final Map byName = new HashMap(); @@ -53023,6 +53318,8 @@ public static _Fields findByThriftId(int fieldId) { return DB_NAME; case 2: // TABLE_NAME return TABLE_NAME; + case 3: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; default: return null; } @@ -53070,6 +53367,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("table_name", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_schema_args.class, metaDataMap); } @@ -53079,11 +53378,13 @@ public get_schema_args() { public get_schema_args( String db_name, - String table_name) + String table_name, + String validWriteIdList) { this(); this.db_name = db_name; this.table_name = table_name; + this.validWriteIdList = validWriteIdList; } /** @@ -53096,6 +53397,9 @@ public get_schema_args(get_schema_args other) { if (other.isSetTable_name()) { this.table_name = other.table_name; } + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } } public get_schema_args deepCopy() { @@ -53106,6 +53410,7 @@ public get_schema_args deepCopy() { public void clear() { this.db_name = null; this.table_name = null; + this.validWriteIdList = null; } public String getDb_name() { @@ -53154,6 +53459,29 @@ public void setTable_nameIsSet(boolean value) { } } + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + 
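
On the caller side the widened args surface as one extra String parameter on the generated client methods; a usage sketch, assuming the regenerated synchronous Client mirrors the three-field get_schema_args (host and port are illustrative):

    import java.util.List;
    import org.apache.hadoop.hive.metastore.api.FieldSchema;
    import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.transport.TSocket;

    // Inside a method; transport exceptions elided for brevity.
    TSocket transport = new TSocket("localhost", 9083);
    transport.open();
    ThriftHiveMetastore.Client client =
        new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));
    // null keeps the pre-patch behavior; a serialized ValidWriteIdList string
    // scopes the schema answer to a transactional snapshot.
    List<FieldSchema> cols = client.get_schema("default", "acid_tbl", null);
    transport.close();
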
this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -53172,6 +53500,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + } } @@ -53183,6 +53519,9 @@ public Object getFieldValue(_Fields field) { case TABLE_NAME: return getTable_name(); + case VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + } throw new IllegalStateException(); } @@ -53198,6 +53537,8 @@ public boolean isSet(_Fields field) { return isSetDb_name(); case TABLE_NAME: return isSetTable_name(); + case VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); } throw new IllegalStateException(); } @@ -53233,6 +53574,15 @@ public boolean equals(get_schema_args that) { return false; } + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if (!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + return true; } @@ -53250,6 +53600,11 @@ public int hashCode() { if (present_table_name) list.add(table_name); + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + return list.hashCode(); } @@ -53281,6 +53636,16 @@ public int compareTo(get_schema_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -53316,6 +53681,14 @@ public String toString() { sb.append(this.table_name); } first = false; + if (!first) sb.append(", "); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -53375,6 +53748,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_schema_args str org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 3: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -53398,6 +53779,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_schema_args st oprot.writeString(struct.table_name); oprot.writeFieldEnd(); } + if 
(struct.validWriteIdList != null) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -53422,19 +53808,25 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_schema_args str if (struct.isSetTable_name()) { optionals.set(1); } - oprot.writeBitSet(optionals, 2); + if (struct.isSetValidWriteIdList()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); if (struct.isSetDb_name()) { oprot.writeString(struct.db_name); } if (struct.isSetTable_name()) { oprot.writeString(struct.table_name); } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(2); + BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); @@ -53443,6 +53835,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_args stru struct.table_name = iprot.readString(); struct.setTable_nameIsSet(true); } + if (incoming.get(2)) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } } } @@ -54006,14 +54402,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_schema_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1122 = iprot.readListBegin(); - struct.success = new ArrayList(_list1122.size); - FieldSchema _elem1123; - for (int _i1124 = 0; _i1124 < _list1122.size; ++_i1124) + org.apache.thrift.protocol.TList _list1130 = iprot.readListBegin(); + struct.success = new ArrayList(_list1130.size); + FieldSchema _elem1131; + for (int _i1132 = 0; _i1132 < _list1130.size; ++_i1132) { - _elem1123 = new FieldSchema(); - _elem1123.read(iprot); - struct.success.add(_elem1123); + _elem1131 = new FieldSchema(); + _elem1131.read(iprot); + struct.success.add(_elem1131); } iprot.readListEnd(); } @@ -54066,9 +54462,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_schema_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter1125 : struct.success) + for (FieldSchema _iter1133 : struct.success) { - _iter1125.write(oprot); + _iter1133.write(oprot); } oprot.writeListEnd(); } @@ -54123,9 +54519,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_schema_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter1126 : struct.success) + for (FieldSchema _iter1134 : struct.success) { - _iter1126.write(oprot); + _iter1134.write(oprot); } } } @@ -54146,14 +54542,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_result st BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1127 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1127.size); - FieldSchema _elem1128; - for (int _i1129 = 0; _i1129 < _list1127.size; ++_i1129) + org.apache.thrift.protocol.TList _list1135 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, 
iprot.readI32()); + struct.success = new ArrayList(_list1135.size); + FieldSchema _elem1136; + for (int _i1137 = 0; _i1137 < _list1135.size; ++_i1137) { - _elem1128 = new FieldSchema(); - _elem1128.read(iprot); - struct.success.add(_elem1128); + _elem1136 = new FieldSchema(); + _elem1136.read(iprot); + struct.success.add(_elem1136); } } struct.setSuccessIsSet(true); @@ -54184,6 +54580,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_result st private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("table_name", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField ENVIRONMENT_CONTEXT_FIELD_DESC = new org.apache.thrift.protocol.TField("environment_context", org.apache.thrift.protocol.TType.STRUCT, (short)3); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)4); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -54194,12 +54591,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_result st private String db_name; // required private String table_name; // required private EnvironmentContext environment_context; // required + private String validWriteIdList; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "db_name"), TABLE_NAME((short)2, "table_name"), - ENVIRONMENT_CONTEXT((short)3, "environment_context"); + ENVIRONMENT_CONTEXT((short)3, "environment_context"), + VALID_WRITE_ID_LIST((short)4, "validWriteIdList"); private static final Map byName = new HashMap(); @@ -54220,6 +54619,8 @@ public static _Fields findByThriftId(int fieldId) { return TABLE_NAME; case 3: // ENVIRONMENT_CONTEXT return ENVIRONMENT_CONTEXT; + case 4: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; default: return null; } @@ -54269,6 +54670,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.ENVIRONMENT_CONTEXT, new org.apache.thrift.meta_data.FieldMetaData("environment_context", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, EnvironmentContext.class))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_schema_with_environment_context_args.class, metaDataMap); } @@ -54279,12 +54682,14 @@ public get_schema_with_environment_context_args() { public get_schema_with_environment_context_args( String db_name, String table_name, - EnvironmentContext environment_context) + EnvironmentContext environment_context, + String validWriteIdList) { this(); this.db_name = db_name; this.table_name = table_name; this.environment_context = environment_context; 
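
Beyond construction, each of these args structs repeats the same null-safe handling of the optional snapshot in its copy constructor, equals, hashCode, and compareTo: an unset field is distinct from any set value. The recurring equality idiom, extracted from the hunks:

    // Unset on either side short-circuits; both set compares the strings.
    boolean thisSet = this.isSetValidWriteIdList();
    boolean thatSet = that.isSetValidWriteIdList();
    if (thisSet || thatSet) {
      if (!(thisSet && thatSet)) return false; // exactly one side set
      if (!this.validWriteIdList.equals(that.validWriteIdList)) return false;
    }
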
+ this.validWriteIdList = validWriteIdList; } /** @@ -54300,6 +54705,9 @@ public get_schema_with_environment_context_args(get_schema_with_environment_cont if (other.isSetEnvironment_context()) { this.environment_context = new EnvironmentContext(other.environment_context); } + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } } public get_schema_with_environment_context_args deepCopy() { @@ -54311,6 +54719,7 @@ public void clear() { this.db_name = null; this.table_name = null; this.environment_context = null; + this.validWriteIdList = null; } public String getDb_name() { @@ -54382,6 +54791,29 @@ public void setEnvironment_contextIsSet(boolean value) { } } + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -54408,6 +54840,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + } } @@ -54422,6 +54862,9 @@ public Object getFieldValue(_Fields field) { case ENVIRONMENT_CONTEXT: return getEnvironment_context(); + case VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + } throw new IllegalStateException(); } @@ -54439,6 +54882,8 @@ public boolean isSet(_Fields field) { return isSetTable_name(); case ENVIRONMENT_CONTEXT: return isSetEnvironment_context(); + case VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); } throw new IllegalStateException(); } @@ -54483,6 +54928,15 @@ public boolean equals(get_schema_with_environment_context_args that) { return false; } + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if (!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + return true; } @@ -54505,6 +54959,11 @@ public int hashCode() { if (present_environment_context) list.add(environment_context); + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + return list.hashCode(); } @@ -54546,6 +55005,16 @@ public int compareTo(get_schema_with_environment_context_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -54589,6 +55058,14 @@ public String toString() { sb.append(this.environment_context); } first = false; + if (!first) sb.append(", 
"); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -54660,6 +55137,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_schema_with_env org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 4: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -54688,6 +55173,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_schema_with_en struct.environment_context.write(oprot); oprot.writeFieldEnd(); } + if (struct.validWriteIdList != null) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -54715,7 +55205,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_schema_with_env if (struct.isSetEnvironment_context()) { optionals.set(2); } - oprot.writeBitSet(optionals, 3); + if (struct.isSetValidWriteIdList()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); if (struct.isSetDb_name()) { oprot.writeString(struct.db_name); } @@ -54725,12 +55218,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_schema_with_env if (struct.isSetEnvironment_context()) { struct.environment_context.write(oprot); } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_with_environment_context_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(3); + BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); @@ -54744,6 +55240,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_with_envi struct.environment_context.read(iprot); struct.setEnvironment_contextIsSet(true); } + if (incoming.get(3)) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } } } @@ -55307,14 +55807,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_schema_with_env case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1130 = iprot.readListBegin(); - struct.success = new ArrayList(_list1130.size); - FieldSchema _elem1131; - for (int _i1132 = 0; _i1132 < _list1130.size; ++_i1132) + org.apache.thrift.protocol.TList _list1138 = iprot.readListBegin(); + struct.success = new ArrayList(_list1138.size); + FieldSchema _elem1139; + for (int _i1140 = 0; _i1140 < _list1138.size; ++_i1140) { - _elem1131 = new FieldSchema(); - _elem1131.read(iprot); - struct.success.add(_elem1131); + _elem1139 = new FieldSchema(); + _elem1139.read(iprot); + struct.success.add(_elem1139); } iprot.readListEnd(); } @@ -55367,9 +55867,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_schema_with_en oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, 
struct.success.size())); - for (FieldSchema _iter1133 : struct.success) + for (FieldSchema _iter1141 : struct.success) { - _iter1133.write(oprot); + _iter1141.write(oprot); } oprot.writeListEnd(); } @@ -55424,9 +55924,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_schema_with_env if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter1134 : struct.success) + for (FieldSchema _iter1142 : struct.success) { - _iter1134.write(oprot); + _iter1142.write(oprot); } } } @@ -55447,14 +55947,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_with_envi BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1135 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1135.size); - FieldSchema _elem1136; - for (int _i1137 = 0; _i1137 < _list1135.size; ++_i1137) + org.apache.thrift.protocol.TList _list1143 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1143.size); + FieldSchema _elem1144; + for (int _i1145 = 0; _i1145 < _list1143.size; ++_i1145) { - _elem1136 = new FieldSchema(); - _elem1136.read(iprot); - struct.success.add(_elem1136); + _elem1144 = new FieldSchema(); + _elem1144.read(iprot); + struct.success.add(_elem1144); } } struct.setSuccessIsSet(true); @@ -58583,14 +59083,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 2: // PRIMARY_KEYS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1138 = iprot.readListBegin(); - struct.primaryKeys = new ArrayList(_list1138.size); - SQLPrimaryKey _elem1139; - for (int _i1140 = 0; _i1140 < _list1138.size; ++_i1140) + org.apache.thrift.protocol.TList _list1146 = iprot.readListBegin(); + struct.primaryKeys = new ArrayList(_list1146.size); + SQLPrimaryKey _elem1147; + for (int _i1148 = 0; _i1148 < _list1146.size; ++_i1148) { - _elem1139 = new SQLPrimaryKey(); - _elem1139.read(iprot); - struct.primaryKeys.add(_elem1139); + _elem1147 = new SQLPrimaryKey(); + _elem1147.read(iprot); + struct.primaryKeys.add(_elem1147); } iprot.readListEnd(); } @@ -58602,14 +59102,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 3: // FOREIGN_KEYS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1141 = iprot.readListBegin(); - struct.foreignKeys = new ArrayList(_list1141.size); - SQLForeignKey _elem1142; - for (int _i1143 = 0; _i1143 < _list1141.size; ++_i1143) + org.apache.thrift.protocol.TList _list1149 = iprot.readListBegin(); + struct.foreignKeys = new ArrayList(_list1149.size); + SQLForeignKey _elem1150; + for (int _i1151 = 0; _i1151 < _list1149.size; ++_i1151) { - _elem1142 = new SQLForeignKey(); - _elem1142.read(iprot); - struct.foreignKeys.add(_elem1142); + _elem1150 = new SQLForeignKey(); + _elem1150.read(iprot); + struct.foreignKeys.add(_elem1150); } iprot.readListEnd(); } @@ -58621,14 +59121,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 4: // UNIQUE_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1144 = iprot.readListBegin(); - struct.uniqueConstraints = new ArrayList(_list1144.size); - SQLUniqueConstraint _elem1145; - for (int _i1146 = 0; _i1146 < 
_list1144.size; ++_i1146) + org.apache.thrift.protocol.TList _list1152 = iprot.readListBegin(); + struct.uniqueConstraints = new ArrayList(_list1152.size); + SQLUniqueConstraint _elem1153; + for (int _i1154 = 0; _i1154 < _list1152.size; ++_i1154) { - _elem1145 = new SQLUniqueConstraint(); - _elem1145.read(iprot); - struct.uniqueConstraints.add(_elem1145); + _elem1153 = new SQLUniqueConstraint(); + _elem1153.read(iprot); + struct.uniqueConstraints.add(_elem1153); } iprot.readListEnd(); } @@ -58640,14 +59140,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 5: // NOT_NULL_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1147 = iprot.readListBegin(); - struct.notNullConstraints = new ArrayList(_list1147.size); - SQLNotNullConstraint _elem1148; - for (int _i1149 = 0; _i1149 < _list1147.size; ++_i1149) + org.apache.thrift.protocol.TList _list1155 = iprot.readListBegin(); + struct.notNullConstraints = new ArrayList(_list1155.size); + SQLNotNullConstraint _elem1156; + for (int _i1157 = 0; _i1157 < _list1155.size; ++_i1157) { - _elem1148 = new SQLNotNullConstraint(); - _elem1148.read(iprot); - struct.notNullConstraints.add(_elem1148); + _elem1156 = new SQLNotNullConstraint(); + _elem1156.read(iprot); + struct.notNullConstraints.add(_elem1156); } iprot.readListEnd(); } @@ -58659,14 +59159,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 6: // DEFAULT_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1150 = iprot.readListBegin(); - struct.defaultConstraints = new ArrayList(_list1150.size); - SQLDefaultConstraint _elem1151; - for (int _i1152 = 0; _i1152 < _list1150.size; ++_i1152) + org.apache.thrift.protocol.TList _list1158 = iprot.readListBegin(); + struct.defaultConstraints = new ArrayList(_list1158.size); + SQLDefaultConstraint _elem1159; + for (int _i1160 = 0; _i1160 < _list1158.size; ++_i1160) { - _elem1151 = new SQLDefaultConstraint(); - _elem1151.read(iprot); - struct.defaultConstraints.add(_elem1151); + _elem1159 = new SQLDefaultConstraint(); + _elem1159.read(iprot); + struct.defaultConstraints.add(_elem1159); } iprot.readListEnd(); } @@ -58678,14 +59178,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 7: // CHECK_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1153 = iprot.readListBegin(); - struct.checkConstraints = new ArrayList(_list1153.size); - SQLCheckConstraint _elem1154; - for (int _i1155 = 0; _i1155 < _list1153.size; ++_i1155) + org.apache.thrift.protocol.TList _list1161 = iprot.readListBegin(); + struct.checkConstraints = new ArrayList(_list1161.size); + SQLCheckConstraint _elem1162; + for (int _i1163 = 0; _i1163 < _list1161.size; ++_i1163) { - _elem1154 = new SQLCheckConstraint(); - _elem1154.read(iprot); - struct.checkConstraints.add(_elem1154); + _elem1162 = new SQLCheckConstraint(); + _elem1162.read(iprot); + struct.checkConstraints.add(_elem1162); } iprot.readListEnd(); } @@ -58716,9 +59216,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(PRIMARY_KEYS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.primaryKeys.size())); - for (SQLPrimaryKey _iter1156 : struct.primaryKeys) + for (SQLPrimaryKey _iter1164 : 
struct.primaryKeys) { - _iter1156.write(oprot); + _iter1164.write(oprot); } oprot.writeListEnd(); } @@ -58728,9 +59228,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(FOREIGN_KEYS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.foreignKeys.size())); - for (SQLForeignKey _iter1157 : struct.foreignKeys) + for (SQLForeignKey _iter1165 : struct.foreignKeys) { - _iter1157.write(oprot); + _iter1165.write(oprot); } oprot.writeListEnd(); } @@ -58740,9 +59240,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(UNIQUE_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.uniqueConstraints.size())); - for (SQLUniqueConstraint _iter1158 : struct.uniqueConstraints) + for (SQLUniqueConstraint _iter1166 : struct.uniqueConstraints) { - _iter1158.write(oprot); + _iter1166.write(oprot); } oprot.writeListEnd(); } @@ -58752,9 +59252,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(NOT_NULL_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.notNullConstraints.size())); - for (SQLNotNullConstraint _iter1159 : struct.notNullConstraints) + for (SQLNotNullConstraint _iter1167 : struct.notNullConstraints) { - _iter1159.write(oprot); + _iter1167.write(oprot); } oprot.writeListEnd(); } @@ -58764,9 +59264,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(DEFAULT_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.defaultConstraints.size())); - for (SQLDefaultConstraint _iter1160 : struct.defaultConstraints) + for (SQLDefaultConstraint _iter1168 : struct.defaultConstraints) { - _iter1160.write(oprot); + _iter1168.write(oprot); } oprot.writeListEnd(); } @@ -58776,9 +59276,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(CHECK_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.checkConstraints.size())); - for (SQLCheckConstraint _iter1161 : struct.checkConstraints) + for (SQLCheckConstraint _iter1169 : struct.checkConstraints) { - _iter1161.write(oprot); + _iter1169.write(oprot); } oprot.writeListEnd(); } @@ -58830,54 +59330,54 @@ public void write(org.apache.thrift.protocol.TProtocol prot, create_table_with_c if (struct.isSetPrimaryKeys()) { { oprot.writeI32(struct.primaryKeys.size()); - for (SQLPrimaryKey _iter1162 : struct.primaryKeys) + for (SQLPrimaryKey _iter1170 : struct.primaryKeys) { - _iter1162.write(oprot); + _iter1170.write(oprot); } } } if (struct.isSetForeignKeys()) { { oprot.writeI32(struct.foreignKeys.size()); - for (SQLForeignKey _iter1163 : struct.foreignKeys) + for (SQLForeignKey _iter1171 : struct.foreignKeys) { - _iter1163.write(oprot); + _iter1171.write(oprot); } } } if (struct.isSetUniqueConstraints()) { { oprot.writeI32(struct.uniqueConstraints.size()); - for (SQLUniqueConstraint _iter1164 : struct.uniqueConstraints) + for (SQLUniqueConstraint _iter1172 : struct.uniqueConstraints) { - _iter1164.write(oprot); + _iter1172.write(oprot); } } } if (struct.isSetNotNullConstraints()) { { 
oprot.writeI32(struct.notNullConstraints.size()); - for (SQLNotNullConstraint _iter1165 : struct.notNullConstraints) + for (SQLNotNullConstraint _iter1173 : struct.notNullConstraints) { - _iter1165.write(oprot); + _iter1173.write(oprot); } } } if (struct.isSetDefaultConstraints()) { { oprot.writeI32(struct.defaultConstraints.size()); - for (SQLDefaultConstraint _iter1166 : struct.defaultConstraints) + for (SQLDefaultConstraint _iter1174 : struct.defaultConstraints) { - _iter1166.write(oprot); + _iter1174.write(oprot); } } } if (struct.isSetCheckConstraints()) { { oprot.writeI32(struct.checkConstraints.size()); - for (SQLCheckConstraint _iter1167 : struct.checkConstraints) + for (SQLCheckConstraint _iter1175 : struct.checkConstraints) { - _iter1167.write(oprot); + _iter1175.write(oprot); } } } @@ -58894,84 +59394,84 @@ public void read(org.apache.thrift.protocol.TProtocol prot, create_table_with_co } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list1168 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.primaryKeys = new ArrayList(_list1168.size); - SQLPrimaryKey _elem1169; - for (int _i1170 = 0; _i1170 < _list1168.size; ++_i1170) + org.apache.thrift.protocol.TList _list1176 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.primaryKeys = new ArrayList(_list1176.size); + SQLPrimaryKey _elem1177; + for (int _i1178 = 0; _i1178 < _list1176.size; ++_i1178) { - _elem1169 = new SQLPrimaryKey(); - _elem1169.read(iprot); - struct.primaryKeys.add(_elem1169); + _elem1177 = new SQLPrimaryKey(); + _elem1177.read(iprot); + struct.primaryKeys.add(_elem1177); } } struct.setPrimaryKeysIsSet(true); } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1171 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.foreignKeys = new ArrayList(_list1171.size); - SQLForeignKey _elem1172; - for (int _i1173 = 0; _i1173 < _list1171.size; ++_i1173) + org.apache.thrift.protocol.TList _list1179 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.foreignKeys = new ArrayList(_list1179.size); + SQLForeignKey _elem1180; + for (int _i1181 = 0; _i1181 < _list1179.size; ++_i1181) { - _elem1172 = new SQLForeignKey(); - _elem1172.read(iprot); - struct.foreignKeys.add(_elem1172); + _elem1180 = new SQLForeignKey(); + _elem1180.read(iprot); + struct.foreignKeys.add(_elem1180); } } struct.setForeignKeysIsSet(true); } if (incoming.get(3)) { { - org.apache.thrift.protocol.TList _list1174 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.uniqueConstraints = new ArrayList(_list1174.size); - SQLUniqueConstraint _elem1175; - for (int _i1176 = 0; _i1176 < _list1174.size; ++_i1176) + org.apache.thrift.protocol.TList _list1182 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.uniqueConstraints = new ArrayList(_list1182.size); + SQLUniqueConstraint _elem1183; + for (int _i1184 = 0; _i1184 < _list1182.size; ++_i1184) { - _elem1175 = new SQLUniqueConstraint(); - _elem1175.read(iprot); - struct.uniqueConstraints.add(_elem1175); + _elem1183 = new SQLUniqueConstraint(); + _elem1183.read(iprot); + struct.uniqueConstraints.add(_elem1183); } } struct.setUniqueConstraintsIsSet(true); } if (incoming.get(4)) { { - org.apache.thrift.protocol.TList _list1177 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.notNullConstraints = new ArrayList(_list1177.size); - SQLNotNullConstraint _elem1178; - for (int _i1179 = 0; _i1179 < _list1177.size; ++_i1179) + org.apache.thrift.protocol.TList _list1185 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.notNullConstraints = new ArrayList(_list1185.size); + SQLNotNullConstraint _elem1186; + for (int _i1187 = 0; _i1187 < _list1185.size; ++_i1187) { - _elem1178 = new SQLNotNullConstraint(); - _elem1178.read(iprot); - struct.notNullConstraints.add(_elem1178); + _elem1186 = new SQLNotNullConstraint(); + _elem1186.read(iprot); + struct.notNullConstraints.add(_elem1186); } } struct.setNotNullConstraintsIsSet(true); } if (incoming.get(5)) { { - org.apache.thrift.protocol.TList _list1180 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.defaultConstraints = new ArrayList(_list1180.size); - SQLDefaultConstraint _elem1181; - for (int _i1182 = 0; _i1182 < _list1180.size; ++_i1182) + org.apache.thrift.protocol.TList _list1188 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.defaultConstraints = new ArrayList(_list1188.size); + SQLDefaultConstraint _elem1189; + for (int _i1190 = 0; _i1190 < _list1188.size; ++_i1190) { - _elem1181 = new SQLDefaultConstraint(); - _elem1181.read(iprot); - struct.defaultConstraints.add(_elem1181); + _elem1189 = new SQLDefaultConstraint(); + _elem1189.read(iprot); + struct.defaultConstraints.add(_elem1189); } } struct.setDefaultConstraintsIsSet(true); } if (incoming.get(6)) { { - org.apache.thrift.protocol.TList _list1183 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.checkConstraints = new ArrayList(_list1183.size); - SQLCheckConstraint _elem1184; - for (int _i1185 = 0; _i1185 < _list1183.size; ++_i1185) + org.apache.thrift.protocol.TList _list1191 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.checkConstraints = new ArrayList(_list1191.size); + SQLCheckConstraint _elem1192; + for (int _i1193 = 0; _i1193 < _list1191.size; ++_i1193) { - _elem1184 = new SQLCheckConstraint(); - _elem1184.read(iprot); - struct.checkConstraints.add(_elem1184); + _elem1192 = new SQLCheckConstraint(); + _elem1192.read(iprot); + struct.checkConstraints.add(_elem1192); } } struct.setCheckConstraintsIsSet(true); @@ -68121,13 +68621,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, truncate_table_args case 3: // PART_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1186 = iprot.readListBegin(); - struct.partNames = new ArrayList(_list1186.size); - String _elem1187; - for (int _i1188 = 0; _i1188 < _list1186.size; ++_i1188) + org.apache.thrift.protocol.TList _list1194 = iprot.readListBegin(); + struct.partNames = new ArrayList(_list1194.size); + String _elem1195; + for (int _i1196 = 0; _i1196 < _list1194.size; ++_i1196) { - _elem1187 = iprot.readString(); - struct.partNames.add(_elem1187); + _elem1195 = iprot.readString(); + struct.partNames.add(_elem1195); } iprot.readListEnd(); } @@ -68163,9 +68663,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, truncate_table_arg oprot.writeFieldBegin(PART_NAMES_FIELD_DESC); { oprot.writeListBegin(new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partNames.size())); - for (String _iter1189 : struct.partNames) + for (String _iter1197 : struct.partNames) { - oprot.writeString(_iter1189); + oprot.writeString(_iter1197); } oprot.writeListEnd(); } @@ -68208,9 +68708,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, truncate_table_args if (struct.isSetPartNames()) { { oprot.writeI32(struct.partNames.size()); - for (String _iter1190 : struct.partNames) + for (String _iter1198 : struct.partNames) { - oprot.writeString(_iter1190); + oprot.writeString(_iter1198); } } } @@ -68230,13 +68730,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, truncate_table_args } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1191 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partNames = new ArrayList(_list1191.size); - String _elem1192; - for (int _i1193 = 0; _i1193 < _list1191.size; ++_i1193) + org.apache.thrift.protocol.TList _list1199 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partNames = new ArrayList(_list1199.size); + String _elem1200; + for (int _i1201 = 0; _i1201 < _list1199.size; ++_i1201) { - _elem1192 = iprot.readString(); - struct.partNames.add(_elem1192); + _elem1200 = iprot.readString(); + struct.partNames.add(_elem1200); } } struct.setPartNamesIsSet(true); @@ -70293,13 +70793,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1194 = iprot.readListBegin(); - struct.success = new ArrayList(_list1194.size); - String _elem1195; - for (int _i1196 = 0; _i1196 < _list1194.size; ++_i1196) + org.apache.thrift.protocol.TList _list1202 = iprot.readListBegin(); + struct.success = new ArrayList(_list1202.size); + String _elem1203; + for (int _i1204 = 0; _i1204 < _list1202.size; ++_i1204) { - _elem1195 = iprot.readString(); - struct.success.add(_elem1195); + _elem1203 = iprot.readString(); + struct.success.add(_elem1203); } iprot.readListEnd(); } @@ -70334,9 +70834,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_tables_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1197 : struct.success) + for (String _iter1205 : struct.success) { - oprot.writeString(_iter1197); + oprot.writeString(_iter1205); } oprot.writeListEnd(); } @@ -70375,9 +70875,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_tables_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1198 : struct.success) + for (String _iter1206 : struct.success) { - oprot.writeString(_iter1198); + oprot.writeString(_iter1206); } } } @@ -70392,13 +70892,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_tables_result st BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1199 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1199.size); - String _elem1200; - for (int _i1201 = 0; _i1201 < _list1199.size; ++_i1201) + org.apache.thrift.protocol.TList _list1207 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1207.size); + String _elem1208; + for (int _i1209 = 0; _i1209 < _list1207.size; ++_i1209) { - _elem1200 = iprot.readString(); - struct.success.add(_elem1200); + _elem1208 = iprot.readString(); + struct.success.add(_elem1208); } } struct.setSuccessIsSet(true); @@ -71372,13 +71872,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_by_type_ case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1202 = iprot.readListBegin(); - struct.success = new ArrayList(_list1202.size); - String _elem1203; - for (int _i1204 = 0; _i1204 < _list1202.size; ++_i1204) + org.apache.thrift.protocol.TList _list1210 = iprot.readListBegin(); + struct.success = new ArrayList(_list1210.size); + String _elem1211; + for (int _i1212 = 0; _i1212 < _list1210.size; ++_i1212) { - _elem1203 = iprot.readString(); - struct.success.add(_elem1203); + _elem1211 = iprot.readString(); + struct.success.add(_elem1211); } iprot.readListEnd(); } @@ -71413,9 +71913,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_tables_by_type oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1205 : struct.success) + for (String _iter1213 : struct.success) { - oprot.writeString(_iter1205); + oprot.writeString(_iter1213); } oprot.writeListEnd(); } @@ -71454,9 +71954,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_tables_by_type_ if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1206 : struct.success) + for (String _iter1214 : struct.success) { - oprot.writeString(_iter1206); + oprot.writeString(_iter1214); } } } @@ -71471,13 +71971,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_tables_by_type_r BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1207 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1207.size); - String _elem1208; - for (int _i1209 = 0; _i1209 < _list1207.size; ++_i1209) + org.apache.thrift.protocol.TList _list1215 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1215.size); + String _elem1216; + for (int _i1217 = 0; _i1217 < _list1215.size; ++_i1217) { - _elem1208 = iprot.readString(); - struct.success.add(_elem1208); + _elem1216 = iprot.readString(); + struct.success.add(_elem1216); } } struct.setSuccessIsSet(true); @@ -72134,14 +72634,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_materialize case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1210 = iprot.readListBegin(); - struct.success = new ArrayList
<Table>(_list1210.size); - Table _elem1211; - for (int _i1212 = 0; _i1212 < _list1210.size; ++_i1212) + org.apache.thrift.protocol.TList _list1218 = iprot.readListBegin(); + struct.success = new ArrayList<Table>(_list1218.size); + Table _elem1219; + for (int _i1220 = 0; _i1220 < _list1218.size; ++_i1220) { - _elem1211 = new Table(); - _elem1211.read(iprot); - struct.success.add(_elem1211); + _elem1219 = new Table(); + _elem1219.read(iprot); + struct.success.add(_elem1219); } iprot.readListEnd(); } @@ -72176,9 +72676,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_materializ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Table _iter1213 : struct.success) + for (Table _iter1221 : struct.success) { - _iter1213.write(oprot); + _iter1221.write(oprot); } oprot.writeListEnd(); } @@ -72217,9 +72717,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_materialize if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Table _iter1214 : struct.success) + for (Table _iter1222 : struct.success) { - _iter1214.write(oprot); + _iter1222.write(oprot); } } } @@ -72234,14 +72734,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_materialized BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1215 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList<Table>(_list1215.size); - Table _elem1216; - for (int _i1217 = 0; _i1217 < _list1215.size; ++_i1217) + org.apache.thrift.protocol.TList _list1223 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList<Table>
(_list1223.size); + Table _elem1224; + for (int _i1225 = 0; _i1225 < _list1223.size; ++_i1225) { - _elem1216 = new Table(); - _elem1216.read(iprot); - struct.success.add(_elem1216); + _elem1224 = new Table(); + _elem1224.read(iprot); + struct.success.add(_elem1224); } } struct.setSuccessIsSet(true); @@ -73007,13 +73507,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_materialized_vi case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1218 = iprot.readListBegin(); - struct.success = new ArrayList(_list1218.size); - String _elem1219; - for (int _i1220 = 0; _i1220 < _list1218.size; ++_i1220) + org.apache.thrift.protocol.TList _list1226 = iprot.readListBegin(); + struct.success = new ArrayList(_list1226.size); + String _elem1227; + for (int _i1228 = 0; _i1228 < _list1226.size; ++_i1228) { - _elem1219 = iprot.readString(); - struct.success.add(_elem1219); + _elem1227 = iprot.readString(); + struct.success.add(_elem1227); } iprot.readListEnd(); } @@ -73048,9 +73548,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_materialized_v oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1221 : struct.success) + for (String _iter1229 : struct.success) { - oprot.writeString(_iter1221); + oprot.writeString(_iter1229); } oprot.writeListEnd(); } @@ -73089,9 +73589,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_materialized_vi if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1222 : struct.success) + for (String _iter1230 : struct.success) { - oprot.writeString(_iter1222); + oprot.writeString(_iter1230); } } } @@ -73106,13 +73606,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_materialized_vie BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1223 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1223.size); - String _elem1224; - for (int _i1225 = 0; _i1225 < _list1223.size; ++_i1225) + org.apache.thrift.protocol.TList _list1231 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1231.size); + String _elem1232; + for (int _i1233 = 0; _i1233 < _list1231.size; ++_i1233) { - _elem1224 = iprot.readString(); - struct.success.add(_elem1224); + _elem1232 = iprot.readString(); + struct.success.add(_elem1232); } } struct.setSuccessIsSet(true); @@ -73617,13 +74117,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_meta_args case 3: // TBL_TYPES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1226 = iprot.readListBegin(); - struct.tbl_types = new ArrayList(_list1226.size); - String _elem1227; - for (int _i1228 = 0; _i1228 < _list1226.size; ++_i1228) + org.apache.thrift.protocol.TList _list1234 = iprot.readListBegin(); + struct.tbl_types = new ArrayList(_list1234.size); + String _elem1235; + for (int _i1236 = 0; _i1236 < _list1234.size; ++_i1236) { - _elem1227 = iprot.readString(); - struct.tbl_types.add(_elem1227); + _elem1235 = iprot.readString(); + struct.tbl_types.add(_elem1235); } iprot.readListEnd(); } @@ -73659,9 +74159,9 @@ public void write(org.apache.thrift.protocol.TProtocol 
oprot, get_table_meta_arg oprot.writeFieldBegin(TBL_TYPES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_types.size())); - for (String _iter1229 : struct.tbl_types) + for (String _iter1237 : struct.tbl_types) { - oprot.writeString(_iter1229); + oprot.writeString(_iter1237); } oprot.writeListEnd(); } @@ -73704,9 +74204,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_meta_args if (struct.isSetTbl_types()) { { oprot.writeI32(struct.tbl_types.size()); - for (String _iter1230 : struct.tbl_types) + for (String _iter1238 : struct.tbl_types) { - oprot.writeString(_iter1230); + oprot.writeString(_iter1238); } } } @@ -73726,13 +74226,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_meta_args } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1231 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tbl_types = new ArrayList(_list1231.size); - String _elem1232; - for (int _i1233 = 0; _i1233 < _list1231.size; ++_i1233) + org.apache.thrift.protocol.TList _list1239 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tbl_types = new ArrayList(_list1239.size); + String _elem1240; + for (int _i1241 = 0; _i1241 < _list1239.size; ++_i1241) { - _elem1232 = iprot.readString(); - struct.tbl_types.add(_elem1232); + _elem1240 = iprot.readString(); + struct.tbl_types.add(_elem1240); } } struct.setTbl_typesIsSet(true); @@ -74138,14 +74638,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_meta_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1234 = iprot.readListBegin(); - struct.success = new ArrayList(_list1234.size); - TableMeta _elem1235; - for (int _i1236 = 0; _i1236 < _list1234.size; ++_i1236) + org.apache.thrift.protocol.TList _list1242 = iprot.readListBegin(); + struct.success = new ArrayList(_list1242.size); + TableMeta _elem1243; + for (int _i1244 = 0; _i1244 < _list1242.size; ++_i1244) { - _elem1235 = new TableMeta(); - _elem1235.read(iprot); - struct.success.add(_elem1235); + _elem1243 = new TableMeta(); + _elem1243.read(iprot); + struct.success.add(_elem1243); } iprot.readListEnd(); } @@ -74180,9 +74680,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_meta_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (TableMeta _iter1237 : struct.success) + for (TableMeta _iter1245 : struct.success) { - _iter1237.write(oprot); + _iter1245.write(oprot); } oprot.writeListEnd(); } @@ -74221,9 +74721,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_meta_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (TableMeta _iter1238 : struct.success) + for (TableMeta _iter1246 : struct.success) { - _iter1238.write(oprot); + _iter1246.write(oprot); } } } @@ -74238,14 +74738,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_meta_resul BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1239 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1239.size); - TableMeta _elem1240; - for (int _i1241 = 0; 
_i1241 < _list1239.size; ++_i1241) + org.apache.thrift.protocol.TList _list1247 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1247.size); + TableMeta _elem1248; + for (int _i1249 = 0; _i1249 < _list1247.size; ++_i1249) { - _elem1240 = new TableMeta(); - _elem1240.read(iprot); - struct.success.add(_elem1240); + _elem1248 = new TableMeta(); + _elem1248.read(iprot); + struct.success.add(_elem1248); } } struct.setSuccessIsSet(true); @@ -75011,13 +75511,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_tables_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1242 = iprot.readListBegin(); - struct.success = new ArrayList(_list1242.size); - String _elem1243; - for (int _i1244 = 0; _i1244 < _list1242.size; ++_i1244) + org.apache.thrift.protocol.TList _list1250 = iprot.readListBegin(); + struct.success = new ArrayList(_list1250.size); + String _elem1251; + for (int _i1252 = 0; _i1252 < _list1250.size; ++_i1252) { - _elem1243 = iprot.readString(); - struct.success.add(_elem1243); + _elem1251 = iprot.readString(); + struct.success.add(_elem1251); } iprot.readListEnd(); } @@ -75052,9 +75552,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_tables_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1245 : struct.success) + for (String _iter1253 : struct.success) { - oprot.writeString(_iter1245); + oprot.writeString(_iter1253); } oprot.writeListEnd(); } @@ -75093,9 +75593,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_tables_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1246 : struct.success) + for (String _iter1254 : struct.success) { - oprot.writeString(_iter1246); + oprot.writeString(_iter1254); } } } @@ -75110,13 +75610,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_tables_resul BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1247 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1247.size); - String _elem1248; - for (int _i1249 = 0; _i1249 < _list1247.size; ++_i1249) + org.apache.thrift.protocol.TList _list1255 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1255.size); + String _elem1256; + for (int _i1257 = 0; _i1257 < _list1255.size; ++_i1257) { - _elem1248 = iprot.readString(); - struct.success.add(_elem1248); + _elem1256 = iprot.readString(); + struct.success.add(_elem1256); } } struct.setSuccessIsSet(true); @@ -75136,6 +75636,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_tables_resul private static final org.apache.thrift.protocol.TField DBNAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbname", org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", 
org.apache.thrift.protocol.TType.STRING, (short)3); private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>(); static { @@ -75145,11 +75646,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_tables_resul private String dbname; // required private String tbl_name; // required + private String validWriteIdList; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DBNAME((short)1, "dbname"), - TBL_NAME((short)2, "tbl_name"); + TBL_NAME((short)2, "tbl_name"), + VALID_WRITE_ID_LIST((short)3, "validWriteIdList"); private static final Map<String, _Fields> byName = new HashMap<String, _Fields>(); @@ -75168,6 +75671,8 @@ public static _Fields findByThriftId(int fieldId) { return DBNAME; case 2: // TBL_NAME return TBL_NAME; + case 3: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; default: return null; } @@ -75215,6 +75720,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tbl_name", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_table_args.class, metaDataMap); } @@ -75224,11 +75731,13 @@ public get_table_args() { } public get_table_args( String dbname, - String tbl_name) + String tbl_name, + String validWriteIdList) { this(); this.dbname = dbname; this.tbl_name = tbl_name; + this.validWriteIdList = validWriteIdList; } /** @@ -75241,6 +75750,9 @@ public get_table_args(get_table_args other) { if (other.isSetTbl_name()) { this.tbl_name = other.tbl_name; } + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } } public get_table_args deepCopy() { @@ -75251,6 +75763,7 @@ public get_table_args deepCopy() { public void clear() { this.dbname = null; this.tbl_name = null; + this.validWriteIdList = null; } public String getDbname() { @@ -75299,6 +75812,29 @@ public void setTbl_nameIsSet(boolean value) { } } + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DBNAME: @@ -75317,6 +75853,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + } } @@ -75328,6 +75872,9 @@ public Object getFieldValue(_Fields field) { case TBL_NAME: return getTbl_name(); + case 
VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + } throw new IllegalStateException(); } @@ -75343,6 +75890,8 @@ public boolean isSet(_Fields field) { return isSetDbname(); case TBL_NAME: return isSetTbl_name(); + case VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); } throw new IllegalStateException(); } @@ -75378,6 +75927,15 @@ public boolean equals(get_table_args that) { return false; } + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if (!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + return true; } @@ -75395,6 +75953,11 @@ public int hashCode() { if (present_tbl_name) list.add(tbl_name); + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + return list.hashCode(); } @@ -75426,6 +75989,16 @@ public int compareTo(get_table_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -75461,6 +76034,14 @@ public String toString() { sb.append(this.tbl_name); } first = false; + if (!first) sb.append(", "); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -75520,6 +76101,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_args stru org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 3: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -75543,6 +76132,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_args str oprot.writeString(struct.tbl_name); oprot.writeFieldEnd(); } + if (struct.validWriteIdList != null) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -75567,19 +76161,25 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_args stru if (struct.isSetTbl_name()) { optionals.set(1); } - oprot.writeBitSet(optionals, 2); + if (struct.isSetValidWriteIdList()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); if (struct.isSetDbname()) { oprot.writeString(struct.dbname); } if (struct.isSetTbl_name()) { oprot.writeString(struct.tbl_name); } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, get_table_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - 
BitSet incoming = iprot.readBitSet(2); + BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { struct.dbname = iprot.readString(); struct.setDbnameIsSet(true); @@ -75588,6 +76188,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_args struc struct.tbl_name = iprot.readString(); struct.setTbl_nameIsSet(true); } + if (incoming.get(2)) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } } } @@ -76569,13 +77173,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_objects_b case 2: // TBL_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1250 = iprot.readListBegin(); - struct.tbl_names = new ArrayList(_list1250.size); - String _elem1251; - for (int _i1252 = 0; _i1252 < _list1250.size; ++_i1252) + org.apache.thrift.protocol.TList _list1258 = iprot.readListBegin(); + struct.tbl_names = new ArrayList(_list1258.size); + String _elem1259; + for (int _i1260 = 0; _i1260 < _list1258.size; ++_i1260) { - _elem1251 = iprot.readString(); - struct.tbl_names.add(_elem1251); + _elem1259 = iprot.readString(); + struct.tbl_names.add(_elem1259); } iprot.readListEnd(); } @@ -76606,9 +77210,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_objects_ oprot.writeFieldBegin(TBL_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_names.size())); - for (String _iter1253 : struct.tbl_names) + for (String _iter1261 : struct.tbl_names) { - oprot.writeString(_iter1253); + oprot.writeString(_iter1261); } oprot.writeListEnd(); } @@ -76645,9 +77249,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_objects_b if (struct.isSetTbl_names()) { { oprot.writeI32(struct.tbl_names.size()); - for (String _iter1254 : struct.tbl_names) + for (String _iter1262 : struct.tbl_names) { - oprot.writeString(_iter1254); + oprot.writeString(_iter1262); } } } @@ -76663,13 +77267,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_objects_by } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list1255 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tbl_names = new ArrayList(_list1255.size); - String _elem1256; - for (int _i1257 = 0; _i1257 < _list1255.size; ++_i1257) + org.apache.thrift.protocol.TList _list1263 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tbl_names = new ArrayList(_list1263.size); + String _elem1264; + for (int _i1265 = 0; _i1265 < _list1263.size; ++_i1265) { - _elem1256 = iprot.readString(); - struct.tbl_names.add(_elem1256); + _elem1264 = iprot.readString(); + struct.tbl_names.add(_elem1264); } } struct.setTbl_namesIsSet(true); @@ -76994,14 +77598,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_objects_b case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1258 = iprot.readListBegin(); - struct.success = new ArrayList
<Table>(_list1258.size); - Table _elem1259; - for (int _i1260 = 0; _i1260 < _list1258.size; ++_i1260) + org.apache.thrift.protocol.TList _list1266 = iprot.readListBegin(); + struct.success = new ArrayList<Table>(_list1266.size); + Table _elem1267; + for (int _i1268 = 0; _i1268 < _list1266.size; ++_i1268) { - _elem1259 = new Table(); - _elem1259.read(iprot); - struct.success.add(_elem1259); + _elem1267 = new Table(); + _elem1267.read(iprot); + struct.success.add(_elem1267); } iprot.readListEnd(); } @@ -77027,9 +77631,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_objects_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Table _iter1261 : struct.success) + for (Table _iter1269 : struct.success) { - _iter1261.write(oprot); + _iter1269.write(oprot); } oprot.writeListEnd(); } @@ -77060,9 +77664,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_objects_b if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Table _iter1262 : struct.success) + for (Table _iter1270 : struct.success) { - _iter1262.write(oprot); + _iter1270.write(oprot); } } } @@ -77074,14 +77678,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_objects_by BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1263 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList<Table>(_list1263.size); - Table _elem1264; - for (int _i1265 = 0; _i1265 < _list1263.size; ++_i1265) + org.apache.thrift.protocol.TList _list1271 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList<Table>
(_list1271.size); + Table _elem1272; + for (int _i1273 = 0; _i1273 < _list1271.size; ++_i1273) { - _elem1264 = new Table(); - _elem1264.read(iprot); - struct.success.add(_elem1264); + _elem1272 = new Table(); + _elem1272.read(iprot); + struct.success.add(_elem1272); } } struct.setSuccessIsSet(true); @@ -77850,14 +78454,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_ext_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1266 = iprot.readListBegin(); - struct.success = new ArrayList(_list1266.size); - ExtendedTableInfo _elem1267; - for (int _i1268 = 0; _i1268 < _list1266.size; ++_i1268) + org.apache.thrift.protocol.TList _list1274 = iprot.readListBegin(); + struct.success = new ArrayList(_list1274.size); + ExtendedTableInfo _elem1275; + for (int _i1276 = 0; _i1276 < _list1274.size; ++_i1276) { - _elem1267 = new ExtendedTableInfo(); - _elem1267.read(iprot); - struct.success.add(_elem1267); + _elem1275 = new ExtendedTableInfo(); + _elem1275.read(iprot); + struct.success.add(_elem1275); } iprot.readListEnd(); } @@ -77892,9 +78496,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_tables_ext_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (ExtendedTableInfo _iter1269 : struct.success) + for (ExtendedTableInfo _iter1277 : struct.success) { - _iter1269.write(oprot); + _iter1277.write(oprot); } oprot.writeListEnd(); } @@ -77933,9 +78537,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_tables_ext_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (ExtendedTableInfo _iter1270 : struct.success) + for (ExtendedTableInfo _iter1278 : struct.success) { - _iter1270.write(oprot); + _iter1278.write(oprot); } } } @@ -77950,14 +78554,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_tables_ext_resul BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1271 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1271.size); - ExtendedTableInfo _elem1272; - for (int _i1273 = 0; _i1273 < _list1271.size; ++_i1273) + org.apache.thrift.protocol.TList _list1279 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1279.size); + ExtendedTableInfo _elem1280; + for (int _i1281 = 0; _i1281 < _list1279.size; ++_i1281) { - _elem1272 = new ExtendedTableInfo(); - _elem1272.read(iprot); - struct.success.add(_elem1272); + _elem1280 = new ExtendedTableInfo(); + _elem1280.read(iprot); + struct.success.add(_elem1280); } } struct.setSuccessIsSet(true); @@ -83470,13 +84074,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_names_by_ case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1274 = iprot.readListBegin(); - struct.success = new ArrayList(_list1274.size); - String _elem1275; - for (int _i1276 = 0; _i1276 < _list1274.size; ++_i1276) + org.apache.thrift.protocol.TList _list1282 = iprot.readListBegin(); + struct.success = new ArrayList(_list1282.size); + String _elem1283; + for (int _i1284 = 0; _i1284 < _list1282.size; ++_i1284) { - _elem1275 = iprot.readString(); - 
struct.success.add(_elem1275); + _elem1283 = iprot.readString(); + struct.success.add(_elem1283); } iprot.readListEnd(); } @@ -83529,9 +84133,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_names_by oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1277 : struct.success) + for (String _iter1285 : struct.success) { - oprot.writeString(_iter1277); + oprot.writeString(_iter1285); } oprot.writeListEnd(); } @@ -83586,9 +84190,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_names_by_ if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1278 : struct.success) + for (String _iter1286 : struct.success) { - oprot.writeString(_iter1278); + oprot.writeString(_iter1286); } } } @@ -83609,13 +84213,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_names_by_f BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1279 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1279.size); - String _elem1280; - for (int _i1281 = 0; _i1281 < _list1279.size; ++_i1281) + org.apache.thrift.protocol.TList _list1287 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1287.size); + String _elem1288; + for (int _i1289 = 0; _i1289 < _list1287.size; ++_i1289) { - _elem1280 = iprot.readString(); - struct.success.add(_elem1280); + _elem1288 = iprot.readString(); + struct.success.add(_elem1288); } } struct.setSuccessIsSet(true); @@ -90412,14 +91016,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_partitions_args case 1: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1282 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list1282.size); - Partition _elem1283; - for (int _i1284 = 0; _i1284 < _list1282.size; ++_i1284) + org.apache.thrift.protocol.TList _list1290 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list1290.size); + Partition _elem1291; + for (int _i1292 = 0; _i1292 < _list1290.size; ++_i1292) { - _elem1283 = new Partition(); - _elem1283.read(iprot); - struct.new_parts.add(_elem1283); + _elem1291 = new Partition(); + _elem1291.read(iprot); + struct.new_parts.add(_elem1291); } iprot.readListEnd(); } @@ -90445,9 +91049,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, add_partitions_arg oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter1285 : struct.new_parts) + for (Partition _iter1293 : struct.new_parts) { - _iter1285.write(oprot); + _iter1293.write(oprot); } oprot.writeListEnd(); } @@ -90478,9 +91082,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, add_partitions_args if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter1286 : struct.new_parts) + for (Partition _iter1294 : struct.new_parts) { - _iter1286.write(oprot); + _iter1294.write(oprot); } } } @@ -90492,14 +91096,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_partitions_args BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - 
org.apache.thrift.protocol.TList _list1287 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list1287.size); - Partition _elem1288; - for (int _i1289 = 0; _i1289 < _list1287.size; ++_i1289) + org.apache.thrift.protocol.TList _list1295 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list1295.size); + Partition _elem1296; + for (int _i1297 = 0; _i1297 < _list1295.size; ++_i1297) { - _elem1288 = new Partition(); - _elem1288.read(iprot); - struct.new_parts.add(_elem1288); + _elem1296 = new Partition(); + _elem1296.read(iprot); + struct.new_parts.add(_elem1296); } } struct.setNew_partsIsSet(true); @@ -91500,14 +92104,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_partitions_pspe case 1: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1290 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list1290.size); - PartitionSpec _elem1291; - for (int _i1292 = 0; _i1292 < _list1290.size; ++_i1292) + org.apache.thrift.protocol.TList _list1298 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list1298.size); + PartitionSpec _elem1299; + for (int _i1300 = 0; _i1300 < _list1298.size; ++_i1300) { - _elem1291 = new PartitionSpec(); - _elem1291.read(iprot); - struct.new_parts.add(_elem1291); + _elem1299 = new PartitionSpec(); + _elem1299.read(iprot); + struct.new_parts.add(_elem1299); } iprot.readListEnd(); } @@ -91533,9 +92137,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, add_partitions_psp oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (PartitionSpec _iter1293 : struct.new_parts) + for (PartitionSpec _iter1301 : struct.new_parts) { - _iter1293.write(oprot); + _iter1301.write(oprot); } oprot.writeListEnd(); } @@ -91566,9 +92170,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, add_partitions_pspe if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (PartitionSpec _iter1294 : struct.new_parts) + for (PartitionSpec _iter1302 : struct.new_parts) { - _iter1294.write(oprot); + _iter1302.write(oprot); } } } @@ -91580,14 +92184,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_partitions_pspec BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1295 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list1295.size); - PartitionSpec _elem1296; - for (int _i1297 = 0; _i1297 < _list1295.size; ++_i1297) + org.apache.thrift.protocol.TList _list1303 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list1303.size); + PartitionSpec _elem1304; + for (int _i1305 = 0; _i1305 < _list1303.size; ++_i1305) { - _elem1296 = new PartitionSpec(); - _elem1296.read(iprot); - struct.new_parts.add(_elem1296); + _elem1304 = new PartitionSpec(); + _elem1304.read(iprot); + struct.new_parts.add(_elem1304); } } struct.setNew_partsIsSet(true); @@ -92763,13 +93367,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, append_partition_ar case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - 
org.apache.thrift.protocol.TList _list1298 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1298.size); - String _elem1299; - for (int _i1300 = 0; _i1300 < _list1298.size; ++_i1300) + org.apache.thrift.protocol.TList _list1306 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1306.size); + String _elem1307; + for (int _i1308 = 0; _i1308 < _list1306.size; ++_i1308) { - _elem1299 = iprot.readString(); - struct.part_vals.add(_elem1299); + _elem1307 = iprot.readString(); + struct.part_vals.add(_elem1307); } iprot.readListEnd(); } @@ -92805,9 +93409,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, append_partition_a oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1301 : struct.part_vals) + for (String _iter1309 : struct.part_vals) { - oprot.writeString(_iter1301); + oprot.writeString(_iter1309); } oprot.writeListEnd(); } @@ -92850,9 +93454,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, append_partition_ar if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1302 : struct.part_vals) + for (String _iter1310 : struct.part_vals) { - oprot.writeString(_iter1302); + oprot.writeString(_iter1310); } } } @@ -92872,13 +93476,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, append_partition_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1303 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1303.size); - String _elem1304; - for (int _i1305 = 0; _i1305 < _list1303.size; ++_i1305) + org.apache.thrift.protocol.TList _list1311 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1311.size); + String _elem1312; + for (int _i1313 = 0; _i1313 < _list1311.size; ++_i1313) { - _elem1304 = iprot.readString(); - struct.part_vals.add(_elem1304); + _elem1312 = iprot.readString(); + struct.part_vals.add(_elem1312); } } struct.setPart_valsIsSet(true); @@ -95187,13 +95791,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, append_partition_wi case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1306 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1306.size); - String _elem1307; - for (int _i1308 = 0; _i1308 < _list1306.size; ++_i1308) + org.apache.thrift.protocol.TList _list1314 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1314.size); + String _elem1315; + for (int _i1316 = 0; _i1316 < _list1314.size; ++_i1316) { - _elem1307 = iprot.readString(); - struct.part_vals.add(_elem1307); + _elem1315 = iprot.readString(); + struct.part_vals.add(_elem1315); } iprot.readListEnd(); } @@ -95238,9 +95842,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, append_partition_w oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1309 : struct.part_vals) + for (String _iter1317 : struct.part_vals) { - oprot.writeString(_iter1309); + oprot.writeString(_iter1317); } oprot.writeListEnd(); } @@ -95291,9 +95895,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, append_partition_wi if 
(struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1310 : struct.part_vals) + for (String _iter1318 : struct.part_vals) { - oprot.writeString(_iter1310); + oprot.writeString(_iter1318); } } } @@ -95316,13 +95920,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, append_partition_wit } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1311 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1311.size); - String _elem1312; - for (int _i1313 = 0; _i1313 < _list1311.size; ++_i1313) + org.apache.thrift.protocol.TList _list1319 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1319.size); + String _elem1320; + for (int _i1321 = 0; _i1321 < _list1319.size; ++_i1321) { - _elem1312 = iprot.readString(); - struct.part_vals.add(_elem1312); + _elem1320 = iprot.readString(); + struct.part_vals.add(_elem1320); } } struct.setPart_valsIsSet(true); @@ -99192,13 +99796,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_partition_args case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1314 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1314.size); - String _elem1315; - for (int _i1316 = 0; _i1316 < _list1314.size; ++_i1316) + org.apache.thrift.protocol.TList _list1322 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1322.size); + String _elem1323; + for (int _i1324 = 0; _i1324 < _list1322.size; ++_i1324) { - _elem1315 = iprot.readString(); - struct.part_vals.add(_elem1315); + _elem1323 = iprot.readString(); + struct.part_vals.add(_elem1323); } iprot.readListEnd(); } @@ -99242,9 +99846,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, drop_partition_arg oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1317 : struct.part_vals) + for (String _iter1325 : struct.part_vals) { - oprot.writeString(_iter1317); + oprot.writeString(_iter1325); } oprot.writeListEnd(); } @@ -99293,9 +99897,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, drop_partition_args if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1318 : struct.part_vals) + for (String _iter1326 : struct.part_vals) { - oprot.writeString(_iter1318); + oprot.writeString(_iter1326); } } } @@ -99318,13 +99922,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, drop_partition_args } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1319 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1319.size); - String _elem1320; - for (int _i1321 = 0; _i1321 < _list1319.size; ++_i1321) + org.apache.thrift.protocol.TList _list1327 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1327.size); + String _elem1328; + for (int _i1329 = 0; _i1329 < _list1327.size; ++_i1329) { - _elem1320 = iprot.readString(); - struct.part_vals.add(_elem1320); + _elem1328 = iprot.readString(); + struct.part_vals.add(_elem1328); } } struct.setPart_valsIsSet(true); @@ -100563,13 +101167,13 @@ public void 
read(org.apache.thrift.protocol.TProtocol iprot, drop_partition_with case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1322 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1322.size); - String _elem1323; - for (int _i1324 = 0; _i1324 < _list1322.size; ++_i1324) + org.apache.thrift.protocol.TList _list1330 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1330.size); + String _elem1331; + for (int _i1332 = 0; _i1332 < _list1330.size; ++_i1332) { - _elem1323 = iprot.readString(); - struct.part_vals.add(_elem1323); + _elem1331 = iprot.readString(); + struct.part_vals.add(_elem1331); } iprot.readListEnd(); } @@ -100622,9 +101226,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, drop_partition_wit oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1325 : struct.part_vals) + for (String _iter1333 : struct.part_vals) { - oprot.writeString(_iter1325); + oprot.writeString(_iter1333); } oprot.writeListEnd(); } @@ -100681,9 +101285,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, drop_partition_with if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1326 : struct.part_vals) + for (String _iter1334 : struct.part_vals) { - oprot.writeString(_iter1326); + oprot.writeString(_iter1334); } } } @@ -100709,13 +101313,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, drop_partition_with_ } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1327 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1327.size); - String _elem1328; - for (int _i1329 = 0; _i1329 < _list1327.size; ++_i1329) + org.apache.thrift.protocol.TList _list1335 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1335.size); + String _elem1336; + for (int _i1337 = 0; _i1337 < _list1335.size; ++_i1337) { - _elem1328 = iprot.readString(); - struct.part_vals.add(_elem1328); + _elem1336 = iprot.readString(); + struct.part_vals.add(_elem1336); } } struct.setPart_valsIsSet(true); @@ -104833,6 +105437,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, drop_partitions_req_ private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField PART_VALS_FIELD_DESC = new org.apache.thrift.protocol.TField("part_vals", org.apache.thrift.protocol.TType.LIST, (short)3); + private static final org.apache.thrift.protocol.TField VALID_TXN_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validTxnList", org.apache.thrift.protocol.TType.STRING, (short)4); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -104843,12 +105448,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, drop_partitions_req_ private String db_name; // required private String tbl_name; // required private List part_vals; // required + private String validTxnList; 
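/*
 * The hunks above and below are regenerated Thrift code: the _list/_elem/_iter
 * temporaries are renumbered mechanically (e.g. _list1330 becomes _list1338), and a
 * new string field, validTxnList, is threaded through get_partition_args as field
 * id 4 (see VALID_TXN_LIST_FIELD_DESC above). A minimal sketch of the IDL change
 * that would produce this output, assuming the service is defined in
 * hive_metastore.thrift (not shown in this hunk):
 *
 *   Partition get_partition(1:string db_name, 2:string tbl_name,
 *                           3:list<string> part_vals, 4:string validTxnList)
 *     throws(1:MetaException o1, 2:NoSuchObjectException o2)
 *
 * The same edit pattern repeats below for get_partition_with_auth (field id 6),
 * get_partition_by_name, and get_partitions (field id 4 in each).
 */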
// required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "db_name"), TBL_NAME((short)2, "tbl_name"), - PART_VALS((short)3, "part_vals"); + PART_VALS((short)3, "part_vals"), + VALID_TXN_LIST((short)4, "validTxnList"); private static final Map byName = new HashMap(); @@ -104869,6 +105476,8 @@ public static _Fields findByThriftId(int fieldId) { return TBL_NAME; case 3: // PART_VALS return PART_VALS; + case 4: // VALID_TXN_LIST + return VALID_TXN_LIST; default: return null; } @@ -104919,6 +105528,8 @@ public String getFieldName() { tmpMap.put(_Fields.PART_VALS, new org.apache.thrift.meta_data.FieldMetaData("part_vals", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); + tmpMap.put(_Fields.VALID_TXN_LIST, new org.apache.thrift.meta_data.FieldMetaData("validTxnList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_partition_args.class, metaDataMap); } @@ -104929,12 +105540,14 @@ public get_partition_args() { public get_partition_args( String db_name, String tbl_name, - List part_vals) + List part_vals, + String validTxnList) { this(); this.db_name = db_name; this.tbl_name = tbl_name; this.part_vals = part_vals; + this.validTxnList = validTxnList; } /** @@ -104951,6 +105564,9 @@ public get_partition_args(get_partition_args other) { List __this__part_vals = new ArrayList(other.part_vals); this.part_vals = __this__part_vals; } + if (other.isSetValidTxnList()) { + this.validTxnList = other.validTxnList; + } } public get_partition_args deepCopy() { @@ -104962,6 +105578,7 @@ public void clear() { this.db_name = null; this.tbl_name = null; this.part_vals = null; + this.validTxnList = null; } public String getDb_name() { @@ -105048,6 +105665,29 @@ public void setPart_valsIsSet(boolean value) { } } + public String getValidTxnList() { + return this.validTxnList; + } + + public void setValidTxnList(String validTxnList) { + this.validTxnList = validTxnList; + } + + public void unsetValidTxnList() { + this.validTxnList = null; + } + + /** Returns true if field validTxnList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidTxnList() { + return this.validTxnList != null; + } + + public void setValidTxnListIsSet(boolean value) { + if (!value) { + this.validTxnList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -105074,6 +105714,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_TXN_LIST: + if (value == null) { + unsetValidTxnList(); + } else { + setValidTxnList((String)value); + } + break; + } } @@ -105088,6 +105736,9 @@ public Object getFieldValue(_Fields field) { case PART_VALS: return getPart_vals(); + case VALID_TXN_LIST: + return getValidTxnList(); + } throw new IllegalStateException(); } @@ -105105,6 +105756,8 @@ public boolean isSet(_Fields field) { return isSetTbl_name(); case PART_VALS: return isSetPart_vals(); + case VALID_TXN_LIST: + return isSetValidTxnList(); } throw new IllegalStateException(); } @@ -105149,6 +105802,15 @@ public 
boolean equals(get_partition_args that) { return false; } + boolean this_present_validTxnList = true && this.isSetValidTxnList(); + boolean that_present_validTxnList = true && that.isSetValidTxnList(); + if (this_present_validTxnList || that_present_validTxnList) { + if (!(this_present_validTxnList && that_present_validTxnList)) + return false; + if (!this.validTxnList.equals(that.validTxnList)) + return false; + } + return true; } @@ -105171,6 +105833,11 @@ public int hashCode() { if (present_part_vals) list.add(part_vals); + boolean present_validTxnList = true && (isSetValidTxnList()); + list.add(present_validTxnList); + if (present_validTxnList) + list.add(validTxnList); + return list.hashCode(); } @@ -105212,6 +105879,16 @@ public int compareTo(get_partition_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidTxnList()).compareTo(other.isSetValidTxnList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidTxnList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validTxnList, other.validTxnList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -105255,6 +105932,14 @@ public String toString() { sb.append(this.part_vals); } first = false; + if (!first) sb.append(", "); + sb.append("validTxnList:"); + if (this.validTxnList == null) { + sb.append("null"); + } else { + sb.append(this.validTxnList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -105317,13 +106002,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_args case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1330 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1330.size); - String _elem1331; - for (int _i1332 = 0; _i1332 < _list1330.size; ++_i1332) + org.apache.thrift.protocol.TList _list1338 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1338.size); + String _elem1339; + for (int _i1340 = 0; _i1340 < _list1338.size; ++_i1340) { - _elem1331 = iprot.readString(); - struct.part_vals.add(_elem1331); + _elem1339 = iprot.readString(); + struct.part_vals.add(_elem1339); } iprot.readListEnd(); } @@ -105332,6 +106017,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_args org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 4: // VALID_TXN_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validTxnList = iprot.readString(); + struct.setValidTxnListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -105359,14 +106052,19 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_args oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1333 : struct.part_vals) + for (String _iter1341 : struct.part_vals) { - oprot.writeString(_iter1333); + oprot.writeString(_iter1341); } oprot.writeListEnd(); } oprot.writeFieldEnd(); } + if (struct.validTxnList != null) { + oprot.writeFieldBegin(VALID_TXN_LIST_FIELD_DESC); + oprot.writeString(struct.validTxnList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -105394,7 +106092,10 @@ public void 
write(org.apache.thrift.protocol.TProtocol prot, get_partition_args if (struct.isSetPart_vals()) { optionals.set(2); } - oprot.writeBitSet(optionals, 3); + if (struct.isSetValidTxnList()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); if (struct.isSetDb_name()) { oprot.writeString(struct.db_name); } @@ -105404,18 +106105,21 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_args if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1334 : struct.part_vals) + for (String _iter1342 : struct.part_vals) { - oprot.writeString(_iter1334); + oprot.writeString(_iter1342); } } } + if (struct.isSetValidTxnList()) { + oprot.writeString(struct.validTxnList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(3); + BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); @@ -105426,17 +106130,21 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_args s } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1335 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1335.size); - String _elem1336; - for (int _i1337 = 0; _i1337 < _list1335.size; ++_i1337) + org.apache.thrift.protocol.TList _list1343 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1343.size); + String _elem1344; + for (int _i1345 = 0; _i1345 < _list1343.size; ++_i1345) { - _elem1336 = iprot.readString(); - struct.part_vals.add(_elem1336); + _elem1344 = iprot.readString(); + struct.part_vals.add(_elem1344); } } struct.setPart_valsIsSet(true); } + if (incoming.get(3)) { + struct.validTxnList = iprot.readString(); + struct.setValidTxnListIsSet(true); + } } } @@ -106650,15 +107358,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partition_ case 1: // PARTITION_SPECS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1338 = iprot.readMapBegin(); - struct.partitionSpecs = new HashMap(2*_map1338.size); - String _key1339; - String _val1340; - for (int _i1341 = 0; _i1341 < _map1338.size; ++_i1341) + org.apache.thrift.protocol.TMap _map1346 = iprot.readMapBegin(); + struct.partitionSpecs = new HashMap(2*_map1346.size); + String _key1347; + String _val1348; + for (int _i1349 = 0; _i1349 < _map1346.size; ++_i1349) { - _key1339 = iprot.readString(); - _val1340 = iprot.readString(); - struct.partitionSpecs.put(_key1339, _val1340); + _key1347 = iprot.readString(); + _val1348 = iprot.readString(); + struct.partitionSpecs.put(_key1347, _val1348); } iprot.readMapEnd(); } @@ -106716,10 +107424,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition oprot.writeFieldBegin(PARTITION_SPECS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.partitionSpecs.size())); - for (Map.Entry _iter1342 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter1350 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter1342.getKey()); - oprot.writeString(_iter1342.getValue()); + oprot.writeString(_iter1350.getKey()); + 
oprot.writeString(_iter1350.getValue()); } oprot.writeMapEnd(); } @@ -106782,10 +107490,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partition_ if (struct.isSetPartitionSpecs()) { { oprot.writeI32(struct.partitionSpecs.size()); - for (Map.Entry _iter1343 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter1351 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter1343.getKey()); - oprot.writeString(_iter1343.getValue()); + oprot.writeString(_iter1351.getKey()); + oprot.writeString(_iter1351.getValue()); } } } @@ -106809,15 +107517,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partition_a BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map1344 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionSpecs = new HashMap(2*_map1344.size); - String _key1345; - String _val1346; - for (int _i1347 = 0; _i1347 < _map1344.size; ++_i1347) + org.apache.thrift.protocol.TMap _map1352 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionSpecs = new HashMap(2*_map1352.size); + String _key1353; + String _val1354; + for (int _i1355 = 0; _i1355 < _map1352.size; ++_i1355) { - _key1345 = iprot.readString(); - _val1346 = iprot.readString(); - struct.partitionSpecs.put(_key1345, _val1346); + _key1353 = iprot.readString(); + _val1354 = iprot.readString(); + struct.partitionSpecs.put(_key1353, _val1354); } } struct.setPartitionSpecsIsSet(true); @@ -108263,15 +108971,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partitions case 1: // PARTITION_SPECS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1348 = iprot.readMapBegin(); - struct.partitionSpecs = new HashMap(2*_map1348.size); - String _key1349; - String _val1350; - for (int _i1351 = 0; _i1351 < _map1348.size; ++_i1351) + org.apache.thrift.protocol.TMap _map1356 = iprot.readMapBegin(); + struct.partitionSpecs = new HashMap(2*_map1356.size); + String _key1357; + String _val1358; + for (int _i1359 = 0; _i1359 < _map1356.size; ++_i1359) { - _key1349 = iprot.readString(); - _val1350 = iprot.readString(); - struct.partitionSpecs.put(_key1349, _val1350); + _key1357 = iprot.readString(); + _val1358 = iprot.readString(); + struct.partitionSpecs.put(_key1357, _val1358); } iprot.readMapEnd(); } @@ -108329,10 +109037,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition oprot.writeFieldBegin(PARTITION_SPECS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.partitionSpecs.size())); - for (Map.Entry _iter1352 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter1360 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter1352.getKey()); - oprot.writeString(_iter1352.getValue()); + oprot.writeString(_iter1360.getKey()); + oprot.writeString(_iter1360.getValue()); } oprot.writeMapEnd(); } @@ -108395,10 +109103,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partitions if (struct.isSetPartitionSpecs()) { { oprot.writeI32(struct.partitionSpecs.size()); - for (Map.Entry _iter1353 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter1361 : struct.partitionSpecs.entrySet()) { - 
oprot.writeString(_iter1353.getKey()); - oprot.writeString(_iter1353.getValue()); + oprot.writeString(_iter1361.getKey()); + oprot.writeString(_iter1361.getValue()); } } } @@ -108422,15 +109130,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partitions_ BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map1354 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionSpecs = new HashMap(2*_map1354.size); - String _key1355; - String _val1356; - for (int _i1357 = 0; _i1357 < _map1354.size; ++_i1357) + org.apache.thrift.protocol.TMap _map1362 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionSpecs = new HashMap(2*_map1362.size); + String _key1363; + String _val1364; + for (int _i1365 = 0; _i1365 < _map1362.size; ++_i1365) { - _key1355 = iprot.readString(); - _val1356 = iprot.readString(); - struct.partitionSpecs.put(_key1355, _val1356); + _key1363 = iprot.readString(); + _val1364 = iprot.readString(); + struct.partitionSpecs.put(_key1363, _val1364); } } struct.setPartitionSpecsIsSet(true); @@ -109095,14 +109803,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partitions case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1358 = iprot.readListBegin(); - struct.success = new ArrayList(_list1358.size); - Partition _elem1359; - for (int _i1360 = 0; _i1360 < _list1358.size; ++_i1360) + org.apache.thrift.protocol.TList _list1366 = iprot.readListBegin(); + struct.success = new ArrayList(_list1366.size); + Partition _elem1367; + for (int _i1368 = 0; _i1368 < _list1366.size; ++_i1368) { - _elem1359 = new Partition(); - _elem1359.read(iprot); - struct.success.add(_elem1359); + _elem1367 = new Partition(); + _elem1367.read(iprot); + struct.success.add(_elem1367); } iprot.readListEnd(); } @@ -109164,9 +109872,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1361 : struct.success) + for (Partition _iter1369 : struct.success) { - _iter1361.write(oprot); + _iter1369.write(oprot); } oprot.writeListEnd(); } @@ -109229,9 +109937,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partitions if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1362 : struct.success) + for (Partition _iter1370 : struct.success) { - _iter1362.write(oprot); + _iter1370.write(oprot); } } } @@ -109255,14 +109963,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partitions_ BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1363 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1363.size); - Partition _elem1364; - for (int _i1365 = 0; _i1365 < _list1363.size; ++_i1365) + org.apache.thrift.protocol.TList _list1371 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1371.size); + Partition _elem1372; + for (int _i1373 = 0; _i1373 < 
_list1371.size; ++_i1373) { - _elem1364 = new Partition(); - _elem1364.read(iprot); - struct.success.add(_elem1364); + _elem1372 = new Partition(); + _elem1372.read(iprot); + struct.success.add(_elem1372); } } struct.setSuccessIsSet(true); @@ -109300,6 +110008,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partitions_ private static final org.apache.thrift.protocol.TField PART_VALS_FIELD_DESC = new org.apache.thrift.protocol.TField("part_vals", org.apache.thrift.protocol.TType.LIST, (short)3); private static final org.apache.thrift.protocol.TField USER_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("user_name", org.apache.thrift.protocol.TType.STRING, (short)4); private static final org.apache.thrift.protocol.TField GROUP_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("group_names", org.apache.thrift.protocol.TType.LIST, (short)5); + private static final org.apache.thrift.protocol.TField VALID_TXN_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validTxnList", org.apache.thrift.protocol.TType.STRING, (short)6); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -109312,6 +110021,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partitions_ private List part_vals; // required private String user_name; // required private List group_names; // required + private String validTxnList; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -109319,7 +110029,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partitions_ TBL_NAME((short)2, "tbl_name"), PART_VALS((short)3, "part_vals"), USER_NAME((short)4, "user_name"), - GROUP_NAMES((short)5, "group_names"); + GROUP_NAMES((short)5, "group_names"), + VALID_TXN_LIST((short)6, "validTxnList"); private static final Map byName = new HashMap(); @@ -109344,6 +110055,8 @@ public static _Fields findByThriftId(int fieldId) { return USER_NAME; case 5: // GROUP_NAMES return GROUP_NAMES; + case 6: // VALID_TXN_LIST + return VALID_TXN_LIST; default: return null; } @@ -109399,6 +110112,8 @@ public String getFieldName() { tmpMap.put(_Fields.GROUP_NAMES, new org.apache.thrift.meta_data.FieldMetaData("group_names", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); + tmpMap.put(_Fields.VALID_TXN_LIST, new org.apache.thrift.meta_data.FieldMetaData("validTxnList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_partition_with_auth_args.class, metaDataMap); } @@ -109411,7 +110126,8 @@ public get_partition_with_auth_args( String tbl_name, List part_vals, String user_name, - List group_names) + List group_names, + String validTxnList) { this(); this.db_name = db_name; @@ -109419,6 +110135,7 @@ public get_partition_with_auth_args( this.part_vals = part_vals; this.user_name = user_name; this.group_names = group_names; + this.validTxnList = validTxnList; } /** @@ -109442,6 +110159,9 @@ public get_partition_with_auth_args(get_partition_with_auth_args other) { List __this__group_names = new 
ArrayList(other.group_names); this.group_names = __this__group_names; } + if (other.isSetValidTxnList()) { + this.validTxnList = other.validTxnList; + } } public get_partition_with_auth_args deepCopy() { @@ -109455,6 +110175,7 @@ public void clear() { this.part_vals = null; this.user_name = null; this.group_names = null; + this.validTxnList = null; } public String getDb_name() { @@ -109602,6 +110323,29 @@ public void setGroup_namesIsSet(boolean value) { } } + public String getValidTxnList() { + return this.validTxnList; + } + + public void setValidTxnList(String validTxnList) { + this.validTxnList = validTxnList; + } + + public void unsetValidTxnList() { + this.validTxnList = null; + } + + /** Returns true if field validTxnList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidTxnList() { + return this.validTxnList != null; + } + + public void setValidTxnListIsSet(boolean value) { + if (!value) { + this.validTxnList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -109644,6 +110388,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_TXN_LIST: + if (value == null) { + unsetValidTxnList(); + } else { + setValidTxnList((String)value); + } + break; + } } @@ -109664,6 +110416,9 @@ public Object getFieldValue(_Fields field) { case GROUP_NAMES: return getGroup_names(); + case VALID_TXN_LIST: + return getValidTxnList(); + } throw new IllegalStateException(); } @@ -109685,6 +110440,8 @@ public boolean isSet(_Fields field) { return isSetUser_name(); case GROUP_NAMES: return isSetGroup_names(); + case VALID_TXN_LIST: + return isSetValidTxnList(); } throw new IllegalStateException(); } @@ -109747,6 +110504,15 @@ public boolean equals(get_partition_with_auth_args that) { return false; } + boolean this_present_validTxnList = true && this.isSetValidTxnList(); + boolean that_present_validTxnList = true && that.isSetValidTxnList(); + if (this_present_validTxnList || that_present_validTxnList) { + if (!(this_present_validTxnList && that_present_validTxnList)) + return false; + if (!this.validTxnList.equals(that.validTxnList)) + return false; + } + return true; } @@ -109779,6 +110545,11 @@ public int hashCode() { if (present_group_names) list.add(group_names); + boolean present_validTxnList = true && (isSetValidTxnList()); + list.add(present_validTxnList); + if (present_validTxnList) + list.add(validTxnList); + return list.hashCode(); } @@ -109840,6 +110611,16 @@ public int compareTo(get_partition_with_auth_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidTxnList()).compareTo(other.isSetValidTxnList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidTxnList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validTxnList, other.validTxnList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -109899,6 +110680,14 @@ public String toString() { sb.append(this.group_names); } first = false; + if (!first) sb.append(", "); + sb.append("validTxnList:"); + if (this.validTxnList == null) { + sb.append("null"); + } else { + sb.append(this.validTxnList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -109961,13 +110750,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_with_ case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1366 = iprot.readListBegin(); - 
struct.part_vals = new ArrayList(_list1366.size); - String _elem1367; - for (int _i1368 = 0; _i1368 < _list1366.size; ++_i1368) + org.apache.thrift.protocol.TList _list1374 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1374.size); + String _elem1375; + for (int _i1376 = 0; _i1376 < _list1374.size; ++_i1376) { - _elem1367 = iprot.readString(); - struct.part_vals.add(_elem1367); + _elem1375 = iprot.readString(); + struct.part_vals.add(_elem1375); } iprot.readListEnd(); } @@ -109987,13 +110776,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_with_ case 5: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1369 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1369.size); - String _elem1370; - for (int _i1371 = 0; _i1371 < _list1369.size; ++_i1371) + org.apache.thrift.protocol.TList _list1377 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1377.size); + String _elem1378; + for (int _i1379 = 0; _i1379 < _list1377.size; ++_i1379) { - _elem1370 = iprot.readString(); - struct.group_names.add(_elem1370); + _elem1378 = iprot.readString(); + struct.group_names.add(_elem1378); } iprot.readListEnd(); } @@ -110002,6 +110791,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_with_ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 6: // VALID_TXN_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validTxnList = iprot.readString(); + struct.setValidTxnListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -110029,9 +110826,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_with oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1372 : struct.part_vals) + for (String _iter1380 : struct.part_vals) { - oprot.writeString(_iter1372); + oprot.writeString(_iter1380); } oprot.writeListEnd(); } @@ -110046,14 +110843,19 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_with oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1373 : struct.group_names) + for (String _iter1381 : struct.group_names) { - oprot.writeString(_iter1373); + oprot.writeString(_iter1381); } oprot.writeListEnd(); } oprot.writeFieldEnd(); } + if (struct.validTxnList != null) { + oprot.writeFieldBegin(VALID_TXN_LIST_FIELD_DESC); + oprot.writeString(struct.validTxnList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -110087,7 +110889,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_with_ if (struct.isSetGroup_names()) { optionals.set(4); } - oprot.writeBitSet(optionals, 5); + if (struct.isSetValidTxnList()) { + optionals.set(5); + } + oprot.writeBitSet(optionals, 6); if (struct.isSetDb_name()) { oprot.writeString(struct.db_name); } @@ -110097,9 +110902,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_with_ if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1374 : 
struct.part_vals) + for (String _iter1382 : struct.part_vals) { - oprot.writeString(_iter1374); + oprot.writeString(_iter1382); } } } @@ -110109,18 +110914,21 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_with_ if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1375 : struct.group_names) + for (String _iter1383 : struct.group_names) { - oprot.writeString(_iter1375); + oprot.writeString(_iter1383); } } } + if (struct.isSetValidTxnList()) { + oprot.writeString(struct.validTxnList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_with_auth_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(5); + BitSet incoming = iprot.readBitSet(6); if (incoming.get(0)) { struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); @@ -110131,13 +110939,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_with_a } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1376 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1376.size); - String _elem1377; - for (int _i1378 = 0; _i1378 < _list1376.size; ++_i1378) + org.apache.thrift.protocol.TList _list1384 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1384.size); + String _elem1385; + for (int _i1386 = 0; _i1386 < _list1384.size; ++_i1386) { - _elem1377 = iprot.readString(); - struct.part_vals.add(_elem1377); + _elem1385 = iprot.readString(); + struct.part_vals.add(_elem1385); } } struct.setPart_valsIsSet(true); @@ -110148,17 +110956,21 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_with_a } if (incoming.get(4)) { { - org.apache.thrift.protocol.TList _list1379 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1379.size); - String _elem1380; - for (int _i1381 = 0; _i1381 < _list1379.size; ++_i1381) + org.apache.thrift.protocol.TList _list1387 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1387.size); + String _elem1388; + for (int _i1389 = 0; _i1389 < _list1387.size; ++_i1389) { - _elem1380 = iprot.readString(); - struct.group_names.add(_elem1380); + _elem1388 = iprot.readString(); + struct.group_names.add(_elem1388); } } struct.setGroup_namesIsSet(true); } + if (incoming.get(5)) { + struct.validTxnList = iprot.readString(); + struct.setValidTxnListIsSet(true); + } } } @@ -110745,6 +111557,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_with_a private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField PART_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("part_name", org.apache.thrift.protocol.TType.STRING, (short)3); + private static final org.apache.thrift.protocol.TField VALID_TXN_LIST_FIELD_DESC = new 
org.apache.thrift.protocol.TField("validTxnList", org.apache.thrift.protocol.TType.STRING, (short)4); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -110755,12 +111568,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_with_a private String db_name; // required private String tbl_name; // required private String part_name; // required + private String validTxnList; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "db_name"), TBL_NAME((short)2, "tbl_name"), - PART_NAME((short)3, "part_name"); + PART_NAME((short)3, "part_name"), + VALID_TXN_LIST((short)4, "validTxnList"); private static final Map byName = new HashMap(); @@ -110781,6 +111596,8 @@ public static _Fields findByThriftId(int fieldId) { return TBL_NAME; case 3: // PART_NAME return PART_NAME; + case 4: // VALID_TXN_LIST + return VALID_TXN_LIST; default: return null; } @@ -110830,6 +111647,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.PART_NAME, new org.apache.thrift.meta_data.FieldMetaData("part_name", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.VALID_TXN_LIST, new org.apache.thrift.meta_data.FieldMetaData("validTxnList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_partition_by_name_args.class, metaDataMap); } @@ -110840,12 +111659,14 @@ public get_partition_by_name_args() { public get_partition_by_name_args( String db_name, String tbl_name, - String part_name) + String part_name, + String validTxnList) { this(); this.db_name = db_name; this.tbl_name = tbl_name; this.part_name = part_name; + this.validTxnList = validTxnList; } /** @@ -110861,6 +111682,9 @@ public get_partition_by_name_args(get_partition_by_name_args other) { if (other.isSetPart_name()) { this.part_name = other.part_name; } + if (other.isSetValidTxnList()) { + this.validTxnList = other.validTxnList; + } } public get_partition_by_name_args deepCopy() { @@ -110872,6 +111696,7 @@ public void clear() { this.db_name = null; this.tbl_name = null; this.part_name = null; + this.validTxnList = null; } public String getDb_name() { @@ -110943,6 +111768,29 @@ public void setPart_nameIsSet(boolean value) { } } + public String getValidTxnList() { + return this.validTxnList; + } + + public void setValidTxnList(String validTxnList) { + this.validTxnList = validTxnList; + } + + public void unsetValidTxnList() { + this.validTxnList = null; + } + + /** Returns true if field validTxnList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidTxnList() { + return this.validTxnList != null; + } + + public void setValidTxnListIsSet(boolean value) { + if (!value) { + this.validTxnList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -110969,6 +111817,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_TXN_LIST: + if (value == null) { + unsetValidTxnList(); + } else { + setValidTxnList((String)value); + } 
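/*
 * Besides the typed accessors, the generated _Fields machinery exposes the new
 * field through setFieldValue/getFieldValue. A small usage sketch; txnsString is
 * a hypothetical caller-supplied value (e.g. the writeToString() form of a
 * ValidReadTxnList from the client's transaction state):
 *
 *   get_partition_by_name_args args =
 *       new get_partition_by_name_args(db, tbl, partName, txnsString);
 *   args.setFieldValue(get_partition_by_name_args._Fields.VALID_TXN_LIST,
 *                      txnsString);   // equivalent generic path
 *   assert args.isSetValidTxnList();
 *
 * Passing null leaves the field unset and unserialized, so existing callers of
 * the three-argument form keep their wire behaviour.
 */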
+ break; + } } @@ -110983,6 +111839,9 @@ public Object getFieldValue(_Fields field) { case PART_NAME: return getPart_name(); + case VALID_TXN_LIST: + return getValidTxnList(); + } throw new IllegalStateException(); } @@ -111000,6 +111859,8 @@ public boolean isSet(_Fields field) { return isSetTbl_name(); case PART_NAME: return isSetPart_name(); + case VALID_TXN_LIST: + return isSetValidTxnList(); } throw new IllegalStateException(); } @@ -111044,6 +111905,15 @@ public boolean equals(get_partition_by_name_args that) { return false; } + boolean this_present_validTxnList = true && this.isSetValidTxnList(); + boolean that_present_validTxnList = true && that.isSetValidTxnList(); + if (this_present_validTxnList || that_present_validTxnList) { + if (!(this_present_validTxnList && that_present_validTxnList)) + return false; + if (!this.validTxnList.equals(that.validTxnList)) + return false; + } + return true; } @@ -111066,6 +111936,11 @@ public int hashCode() { if (present_part_name) list.add(part_name); + boolean present_validTxnList = true && (isSetValidTxnList()); + list.add(present_validTxnList); + if (present_validTxnList) + list.add(validTxnList); + return list.hashCode(); } @@ -111107,6 +111982,16 @@ public int compareTo(get_partition_by_name_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidTxnList()).compareTo(other.isSetValidTxnList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidTxnList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validTxnList, other.validTxnList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -111150,6 +112035,14 @@ public String toString() { sb.append(this.part_name); } first = false; + if (!first) sb.append(", "); + sb.append("validTxnList:"); + if (this.validTxnList == null) { + sb.append("null"); + } else { + sb.append(this.validTxnList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -111217,6 +112110,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_by_na org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 4: // VALID_TXN_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validTxnList = iprot.readString(); + struct.setValidTxnListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -111245,6 +112146,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_by_n oprot.writeString(struct.part_name); oprot.writeFieldEnd(); } + if (struct.validTxnList != null) { + oprot.writeFieldBegin(VALID_TXN_LIST_FIELD_DESC); + oprot.writeString(struct.validTxnList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -111272,7 +112178,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_by_na if (struct.isSetPart_name()) { optionals.set(2); } - oprot.writeBitSet(optionals, 3); + if (struct.isSetValidTxnList()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); if (struct.isSetDb_name()) { oprot.writeString(struct.db_name); } @@ -111282,12 +112191,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_by_na if (struct.isSetPart_name()) { oprot.writeString(struct.part_name); } + if (struct.isSetValidTxnList()) { + oprot.writeString(struct.validTxnList); + } } @Override public void 
read(org.apache.thrift.protocol.TProtocol prot, get_partition_by_name_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(3); + BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); @@ -111300,6 +112212,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_by_nam struct.part_name = iprot.readString(); struct.setPart_nameIsSet(true); } + if (incoming.get(3)) { + struct.validTxnList = iprot.readString(); + struct.setValidTxnListIsSet(true); + } } } @@ -111886,6 +112802,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_by_nam private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField MAX_PARTS_FIELD_DESC = new org.apache.thrift.protocol.TField("max_parts", org.apache.thrift.protocol.TType.I16, (short)3); + private static final org.apache.thrift.protocol.TField VALID_TXN_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validTxnList", org.apache.thrift.protocol.TType.STRING, (short)4); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -111896,12 +112813,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_by_nam private String db_name; // required private String tbl_name; // required private short max_parts; // required + private String validTxnList; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "db_name"), TBL_NAME((short)2, "tbl_name"), - MAX_PARTS((short)3, "max_parts"); + MAX_PARTS((short)3, "max_parts"), + VALID_TXN_LIST((short)4, "validTxnList"); private static final Map byName = new HashMap(); @@ -111922,6 +112841,8 @@ public static _Fields findByThriftId(int fieldId) { return TBL_NAME; case 3: // MAX_PARTS return MAX_PARTS; + case 4: // VALID_TXN_LIST + return VALID_TXN_LIST; default: return null; } @@ -111973,6 +112894,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.MAX_PARTS, new org.apache.thrift.meta_data.FieldMetaData("max_parts", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I16))); + tmpMap.put(_Fields.VALID_TXN_LIST, new org.apache.thrift.meta_data.FieldMetaData("validTxnList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_partitions_args.class, metaDataMap); } @@ -111985,13 +112908,15 @@ public get_partitions_args() { public get_partitions_args( String db_name, String tbl_name, - short max_parts) + short max_parts, + String validTxnList) { this(); this.db_name = db_name; this.tbl_name = tbl_name; this.max_parts = max_parts; setMax_partsIsSet(true); + this.validTxnList = validTxnList; } /** @@ -112006,6 +112931,9 @@ public get_partitions_args(get_partitions_args other) { this.tbl_name = other.tbl_name; } this.max_parts = other.max_parts; + if (other.isSetValidTxnList()) { + this.validTxnList = other.validTxnList; + } } public get_partitions_args deepCopy() { @@ -112018,6 +112946,7 @@ public void clear() { this.tbl_name = null; this.max_parts = (short)-1; + this.validTxnList = null; } public String getDb_name() { @@ -112088,6 +113017,29 @@ public void setMax_partsIsSet(boolean value) { __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MAX_PARTS_ISSET_ID, value); } + public String getValidTxnList() { + return this.validTxnList; + } + + public void setValidTxnList(String validTxnList) { + this.validTxnList = validTxnList; + } + + public void unsetValidTxnList() { + this.validTxnList = null; + } + + /** Returns true if field validTxnList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidTxnList() { + return this.validTxnList != null; + } + + public void setValidTxnListIsSet(boolean value) { + if (!value) { + this.validTxnList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -112114,6 +113066,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_TXN_LIST: + if (value == null) { + unsetValidTxnList(); + } else { + setValidTxnList((String)value); + } + break; + } } @@ -112128,6 +113088,9 @@ public Object getFieldValue(_Fields field) { case MAX_PARTS: return getMax_parts(); + case VALID_TXN_LIST: + return getValidTxnList(); + } throw new IllegalStateException(); } @@ -112145,6 +113108,8 @@ public boolean isSet(_Fields field) { return isSetTbl_name(); case MAX_PARTS: return isSetMax_parts(); + case VALID_TXN_LIST: + return isSetValidTxnList(); } throw new IllegalStateException(); } @@ -112189,6 +113154,15 @@ public boolean equals(get_partitions_args that) { 
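/*
 * Each struct that gains validTxnList gets the same three consistency updates:
 * equals() compares isSet-ness before comparing the value, hashCode() folds an
 * isSet flag plus the value into the list-based hash, and compareTo() orders by
 * Boolean.valueOf(isSet...) before delegating to TBaseHelper.compareTo. Keeping
 * all three in sync preserves the usual contract for the generated type:
 *
 *   a.equals(b)          implies  a.hashCode() == b.hashCode()
 *   a.compareTo(b) == 0  exactly when a.equals(b), field by field
 */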
return false; } + boolean this_present_validTxnList = true && this.isSetValidTxnList(); + boolean that_present_validTxnList = true && that.isSetValidTxnList(); + if (this_present_validTxnList || that_present_validTxnList) { + if (!(this_present_validTxnList && that_present_validTxnList)) + return false; + if (!this.validTxnList.equals(that.validTxnList)) + return false; + } + return true; } @@ -112211,6 +113185,11 @@ public int hashCode() { if (present_max_parts) list.add(max_parts); + boolean present_validTxnList = true && (isSetValidTxnList()); + list.add(present_validTxnList); + if (present_validTxnList) + list.add(validTxnList); + return list.hashCode(); } @@ -112252,6 +113231,16 @@ public int compareTo(get_partitions_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidTxnList()).compareTo(other.isSetValidTxnList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidTxnList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validTxnList, other.validTxnList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -112291,6 +113280,14 @@ public String toString() { sb.append("max_parts:"); sb.append(this.max_parts); first = false; + if (!first) sb.append(", "); + sb.append("validTxnList:"); + if (this.validTxnList == null) { + sb.append("null"); + } else { + sb.append(this.validTxnList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -112360,6 +113357,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_args org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 4: // VALID_TXN_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validTxnList = iprot.readString(); + struct.setValidTxnListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -112386,6 +113391,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_arg oprot.writeFieldBegin(MAX_PARTS_FIELD_DESC); oprot.writeI16(struct.max_parts); oprot.writeFieldEnd(); + if (struct.validTxnList != null) { + oprot.writeFieldBegin(VALID_TXN_LIST_FIELD_DESC); + oprot.writeString(struct.validTxnList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -112413,7 +113423,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_args if (struct.isSetMax_parts()) { optionals.set(2); } - oprot.writeBitSet(optionals, 3); + if (struct.isSetValidTxnList()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); if (struct.isSetDb_name()) { oprot.writeString(struct.db_name); } @@ -112423,12 +113436,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_args if (struct.isSetMax_parts()) { oprot.writeI16(struct.max_parts); } + if (struct.isSetValidTxnList()) { + oprot.writeString(struct.validTxnList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(3); + BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); @@ -112441,6 +113457,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_args struct.max_parts = iprot.readI16(); 
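// NOTE (reviewer sketch): two serialization paths change for each struct in
// this section. With the StandardScheme (TBinaryProtocol/TCompactProtocol)
// the new field is written only when non-null, and an old peer skips unknown
// field id 4, so the addition is wire-compatible in both directions. With the
// TupleScheme the optionals BitSet widens from 3 to 4 bits, so TTupleProtocol
// keeps its usual constraint that both ends run the same generated code. A
// minimal caller-side sketch (hypothetical db/table names; assumes a
// ThriftHiveMetastore.Client over TBinaryProtocol and a ValidTxnList obtained
// elsewhere, e.g. from HiveTxnManager#getValidTxns):
//
//   org.apache.hadoop.hive.common.ValidTxnList txns = ...;
//   List<Partition> parts = client.get_partitions(
//       "default", "web_logs", (short) -1, txns.writeToString());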
struct.setMax_partsIsSet(true); } + if (incoming.get(3)) { + struct.validTxnList = iprot.readString(); + struct.setValidTxnListIsSet(true); + } } } @@ -112923,14 +113943,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1382 = iprot.readListBegin(); - struct.success = new ArrayList(_list1382.size); - Partition _elem1383; - for (int _i1384 = 0; _i1384 < _list1382.size; ++_i1384) + org.apache.thrift.protocol.TList _list1390 = iprot.readListBegin(); + struct.success = new ArrayList(_list1390.size); + Partition _elem1391; + for (int _i1392 = 0; _i1392 < _list1390.size; ++_i1392) { - _elem1383 = new Partition(); - _elem1383.read(iprot); - struct.success.add(_elem1383); + _elem1391 = new Partition(); + _elem1391.read(iprot); + struct.success.add(_elem1391); } iprot.readListEnd(); } @@ -112974,9 +113994,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1385 : struct.success) + for (Partition _iter1393 : struct.success) { - _iter1385.write(oprot); + _iter1393.write(oprot); } oprot.writeListEnd(); } @@ -113023,9 +114043,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1386 : struct.success) + for (Partition _iter1394 : struct.success) { - _iter1386.write(oprot); + _iter1394.write(oprot); } } } @@ -113043,14 +114063,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_resul BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1387 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1387.size); - Partition _elem1388; - for (int _i1389 = 0; _i1389 < _list1387.size; ++_i1389) + org.apache.thrift.protocol.TList _list1395 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1395.size); + Partition _elem1396; + for (int _i1397 = 0; _i1397 < _list1395.size; ++_i1397) { - _elem1388 = new Partition(); - _elem1388.read(iprot); - struct.success.add(_elem1388); + _elem1396 = new Partition(); + _elem1396.read(iprot); + struct.success.add(_elem1396); } } struct.setSuccessIsSet(true); @@ -113078,6 +114098,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_resul private static final org.apache.thrift.protocol.TField MAX_PARTS_FIELD_DESC = new org.apache.thrift.protocol.TField("max_parts", org.apache.thrift.protocol.TType.I16, (short)3); private static final org.apache.thrift.protocol.TField USER_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("user_name", org.apache.thrift.protocol.TType.STRING, (short)4); private static final org.apache.thrift.protocol.TField GROUP_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("group_names", org.apache.thrift.protocol.TType.LIST, (short)5); + private static final org.apache.thrift.protocol.TField VALID_TXN_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validTxnList", org.apache.thrift.protocol.TType.STRING, (short)6); private static final Map, SchemeFactory> schemes = new 
HashMap, SchemeFactory>(); static { @@ -113090,6 +114111,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_resul private short max_parts; // required private String user_name; // required private List group_names; // required + private String validTxnList; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -113097,7 +114119,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_resul TBL_NAME((short)2, "tbl_name"), MAX_PARTS((short)3, "max_parts"), USER_NAME((short)4, "user_name"), - GROUP_NAMES((short)5, "group_names"); + GROUP_NAMES((short)5, "group_names"), + VALID_TXN_LIST((short)6, "validTxnList"); private static final Map byName = new HashMap(); @@ -113122,6 +114145,8 @@ public static _Fields findByThriftId(int fieldId) { return USER_NAME; case 5: // GROUP_NAMES return GROUP_NAMES; + case 6: // VALID_TXN_LIST + return VALID_TXN_LIST; default: return null; } @@ -113178,6 +114203,8 @@ public String getFieldName() { tmpMap.put(_Fields.GROUP_NAMES, new org.apache.thrift.meta_data.FieldMetaData("group_names", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); + tmpMap.put(_Fields.VALID_TXN_LIST, new org.apache.thrift.meta_data.FieldMetaData("validTxnList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_partitions_with_auth_args.class, metaDataMap); } @@ -113192,7 +114219,8 @@ public get_partitions_with_auth_args( String tbl_name, short max_parts, String user_name, - List group_names) + List group_names, + String validTxnList) { this(); this.db_name = db_name; @@ -113201,6 +114229,7 @@ public get_partitions_with_auth_args( setMax_partsIsSet(true); this.user_name = user_name; this.group_names = group_names; + this.validTxnList = validTxnList; } /** @@ -113222,6 +114251,9 @@ public get_partitions_with_auth_args(get_partitions_with_auth_args other) { List __this__group_names = new ArrayList(other.group_names); this.group_names = __this__group_names; } + if (other.isSetValidTxnList()) { + this.validTxnList = other.validTxnList; + } } public get_partitions_with_auth_args deepCopy() { @@ -113236,6 +114268,7 @@ public void clear() { this.user_name = null; this.group_names = null; + this.validTxnList = null; } public String getDb_name() { @@ -113367,6 +114400,29 @@ public void setGroup_namesIsSet(boolean value) { } } + public String getValidTxnList() { + return this.validTxnList; + } + + public void setValidTxnList(String validTxnList) { + this.validTxnList = validTxnList; + } + + public void unsetValidTxnList() { + this.validTxnList = null; + } + + /** Returns true if field validTxnList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidTxnList() { + return this.validTxnList != null; + } + + public void setValidTxnListIsSet(boolean value) { + if (!value) { + this.validTxnList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -113409,6 +114465,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case 
VALID_TXN_LIST: + if (value == null) { + unsetValidTxnList(); + } else { + setValidTxnList((String)value); + } + break; + } } @@ -113429,6 +114493,9 @@ public Object getFieldValue(_Fields field) { case GROUP_NAMES: return getGroup_names(); + case VALID_TXN_LIST: + return getValidTxnList(); + } throw new IllegalStateException(); } @@ -113450,6 +114517,8 @@ public boolean isSet(_Fields field) { return isSetUser_name(); case GROUP_NAMES: return isSetGroup_names(); + case VALID_TXN_LIST: + return isSetValidTxnList(); } throw new IllegalStateException(); } @@ -113512,6 +114581,15 @@ public boolean equals(get_partitions_with_auth_args that) { return false; } + boolean this_present_validTxnList = true && this.isSetValidTxnList(); + boolean that_present_validTxnList = true && that.isSetValidTxnList(); + if (this_present_validTxnList || that_present_validTxnList) { + if (!(this_present_validTxnList && that_present_validTxnList)) + return false; + if (!this.validTxnList.equals(that.validTxnList)) + return false; + } + return true; } @@ -113544,6 +114622,11 @@ public int hashCode() { if (present_group_names) list.add(group_names); + boolean present_validTxnList = true && (isSetValidTxnList()); + list.add(present_validTxnList); + if (present_validTxnList) + list.add(validTxnList); + return list.hashCode(); } @@ -113605,6 +114688,16 @@ public int compareTo(get_partitions_with_auth_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidTxnList()).compareTo(other.isSetValidTxnList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidTxnList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validTxnList, other.validTxnList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -113660,6 +114753,14 @@ public String toString() { sb.append(this.group_names); } first = false; + if (!first) sb.append(", "); + sb.append("validTxnList:"); + if (this.validTxnList == null) { + sb.append("null"); + } else { + sb.append(this.validTxnList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -113740,13 +114841,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_with case 5: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1390 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1390.size); - String _elem1391; - for (int _i1392 = 0; _i1392 < _list1390.size; ++_i1392) + org.apache.thrift.protocol.TList _list1398 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1398.size); + String _elem1399; + for (int _i1400 = 0; _i1400 < _list1398.size; ++_i1400) { - _elem1391 = iprot.readString(); - struct.group_names.add(_elem1391); + _elem1399 = iprot.readString(); + struct.group_names.add(_elem1399); } iprot.readListEnd(); } @@ -113755,6 +114856,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_with org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 6: // VALID_TXN_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validTxnList = iprot.readString(); + struct.setValidTxnListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -113790,14 +114899,19 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_wit 
oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1393 : struct.group_names) + for (String _iter1401 : struct.group_names) { - oprot.writeString(_iter1393); + oprot.writeString(_iter1401); } oprot.writeListEnd(); } oprot.writeFieldEnd(); } + if (struct.validTxnList != null) { + oprot.writeFieldBegin(VALID_TXN_LIST_FIELD_DESC); + oprot.writeString(struct.validTxnList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -113831,7 +114945,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_with if (struct.isSetGroup_names()) { optionals.set(4); } - oprot.writeBitSet(optionals, 5); + if (struct.isSetValidTxnList()) { + optionals.set(5); + } + oprot.writeBitSet(optionals, 6); if (struct.isSetDb_name()) { oprot.writeString(struct.db_name); } @@ -113847,18 +114964,21 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_with if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1394 : struct.group_names) + for (String _iter1402 : struct.group_names) { - oprot.writeString(_iter1394); + oprot.writeString(_iter1402); } } } + if (struct.isSetValidTxnList()) { + oprot.writeString(struct.validTxnList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_with_auth_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(5); + BitSet incoming = iprot.readBitSet(6); if (incoming.get(0)) { struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); @@ -113877,17 +114997,21 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_with_ } if (incoming.get(4)) { { - org.apache.thrift.protocol.TList _list1395 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1395.size); - String _elem1396; - for (int _i1397 = 0; _i1397 < _list1395.size; ++_i1397) + org.apache.thrift.protocol.TList _list1403 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1403.size); + String _elem1404; + for (int _i1405 = 0; _i1405 < _list1403.size; ++_i1405) { - _elem1396 = iprot.readString(); - struct.group_names.add(_elem1396); + _elem1404 = iprot.readString(); + struct.group_names.add(_elem1404); } } struct.setGroup_namesIsSet(true); } + if (incoming.get(5)) { + struct.validTxnList = iprot.readString(); + struct.setValidTxnListIsSet(true); + } } } @@ -114370,14 +115494,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_with case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1398 = iprot.readListBegin(); - struct.success = new ArrayList(_list1398.size); - Partition _elem1399; - for (int _i1400 = 0; _i1400 < _list1398.size; ++_i1400) + org.apache.thrift.protocol.TList _list1406 = iprot.readListBegin(); + struct.success = new ArrayList(_list1406.size); + Partition _elem1407; + for (int _i1408 = 0; _i1408 < _list1406.size; ++_i1408) { - _elem1399 = new Partition(); - _elem1399.read(iprot); - struct.success.add(_elem1399); + _elem1407 = new Partition(); + _elem1407.read(iprot); + struct.success.add(_elem1407); } iprot.readListEnd(); } @@ -114421,9 
+115545,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_wit oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1401 : struct.success) + for (Partition _iter1409 : struct.success) { - _iter1401.write(oprot); + _iter1409.write(oprot); } oprot.writeListEnd(); } @@ -114470,9 +115594,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_with if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1402 : struct.success) + for (Partition _iter1410 : struct.success) { - _iter1402.write(oprot); + _iter1410.write(oprot); } } } @@ -114490,14 +115614,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_with_ BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1403 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1403.size); - Partition _elem1404; - for (int _i1405 = 0; _i1405 < _list1403.size; ++_i1405) + org.apache.thrift.protocol.TList _list1411 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1411.size); + Partition _elem1412; + for (int _i1413 = 0; _i1413 < _list1411.size; ++_i1413) { - _elem1404 = new Partition(); - _elem1404.read(iprot); - struct.success.add(_elem1404); + _elem1412 = new Partition(); + _elem1412.read(iprot); + struct.success.add(_elem1412); } } struct.setSuccessIsSet(true); @@ -114523,6 +115647,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_with_ private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField MAX_PARTS_FIELD_DESC = new org.apache.thrift.protocol.TField("max_parts", org.apache.thrift.protocol.TType.I32, (short)3); + private static final org.apache.thrift.protocol.TField VALID_TXN_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validTxnList", org.apache.thrift.protocol.TType.STRING, (short)4); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -114533,12 +115658,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_with_ private String db_name; // required private String tbl_name; // required private int max_parts; // required + private String validTxnList; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
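 *
 * NOTE (reviewer summary): from here the section repeats, verbatim except for
 * struct names and field ids, the validTxnList plumbing shown above for
 * get_partitions_args and get_partitions_with_auth_args: one _Fields
 * constant, one metaDataMap entry, one extra constructor parameter, the
 * accessor quartet
 *
 *   String getValidTxnList();  void setValidTxnList(String validTxnList);
 *   void unsetValidTxnList();  boolean isSetValidTxnList();
 *
 * plus matching branches in equals/hashCode/compareTo/toString and a new case
 * in both the StandardScheme and TupleScheme readers and writers. The structs
 * touched below are get_partitions_pspec_args, get_partition_names_args,
 * get_partitions_ps_args and get_partitions_ps_with_auth_args.
 *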
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "db_name"), TBL_NAME((short)2, "tbl_name"), - MAX_PARTS((short)3, "max_parts"); + MAX_PARTS((short)3, "max_parts"), + VALID_TXN_LIST((short)4, "validTxnList"); private static final Map byName = new HashMap(); @@ -114559,6 +115686,8 @@ public static _Fields findByThriftId(int fieldId) { return TBL_NAME; case 3: // MAX_PARTS return MAX_PARTS; + case 4: // VALID_TXN_LIST + return VALID_TXN_LIST; default: return null; } @@ -114610,6 +115739,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.MAX_PARTS, new org.apache.thrift.meta_data.FieldMetaData("max_parts", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); + tmpMap.put(_Fields.VALID_TXN_LIST, new org.apache.thrift.meta_data.FieldMetaData("validTxnList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_partitions_pspec_args.class, metaDataMap); } @@ -114622,13 +115753,15 @@ public get_partitions_pspec_args() { public get_partitions_pspec_args( String db_name, String tbl_name, - int max_parts) + int max_parts, + String validTxnList) { this(); this.db_name = db_name; this.tbl_name = tbl_name; this.max_parts = max_parts; setMax_partsIsSet(true); + this.validTxnList = validTxnList; } /** @@ -114643,6 +115776,9 @@ public get_partitions_pspec_args(get_partitions_pspec_args other) { this.tbl_name = other.tbl_name; } this.max_parts = other.max_parts; + if (other.isSetValidTxnList()) { + this.validTxnList = other.validTxnList; + } } public get_partitions_pspec_args deepCopy() { @@ -114655,6 +115791,7 @@ public void clear() { this.tbl_name = null; this.max_parts = -1; + this.validTxnList = null; } public String getDb_name() { @@ -114725,6 +115862,29 @@ public void setMax_partsIsSet(boolean value) { __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MAX_PARTS_ISSET_ID, value); } + public String getValidTxnList() { + return this.validTxnList; + } + + public void setValidTxnList(String validTxnList) { + this.validTxnList = validTxnList; + } + + public void unsetValidTxnList() { + this.validTxnList = null; + } + + /** Returns true if field validTxnList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidTxnList() { + return this.validTxnList != null; + } + + public void setValidTxnListIsSet(boolean value) { + if (!value) { + this.validTxnList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -114751,6 +115911,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_TXN_LIST: + if (value == null) { + unsetValidTxnList(); + } else { + setValidTxnList((String)value); + } + break; + } } @@ -114765,6 +115933,9 @@ public Object getFieldValue(_Fields field) { case MAX_PARTS: return getMax_parts(); + case VALID_TXN_LIST: + return getValidTxnList(); + } throw new IllegalStateException(); } @@ -114782,6 +115953,8 @@ public boolean isSet(_Fields field) { return isSetTbl_name(); case MAX_PARTS: return isSetMax_parts(); + case VALID_TXN_LIST: + return isSetValidTxnList(); } throw new IllegalStateException(); } @@ -114826,6 +115999,15 @@ public boolean 
equals(get_partitions_pspec_args that) { return false; } + boolean this_present_validTxnList = true && this.isSetValidTxnList(); + boolean that_present_validTxnList = true && that.isSetValidTxnList(); + if (this_present_validTxnList || that_present_validTxnList) { + if (!(this_present_validTxnList && that_present_validTxnList)) + return false; + if (!this.validTxnList.equals(that.validTxnList)) + return false; + } + return true; } @@ -114848,6 +116030,11 @@ public int hashCode() { if (present_max_parts) list.add(max_parts); + boolean present_validTxnList = true && (isSetValidTxnList()); + list.add(present_validTxnList); + if (present_validTxnList) + list.add(validTxnList); + return list.hashCode(); } @@ -114889,6 +116076,16 @@ public int compareTo(get_partitions_pspec_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidTxnList()).compareTo(other.isSetValidTxnList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidTxnList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validTxnList, other.validTxnList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -114928,6 +116125,14 @@ public String toString() { sb.append("max_parts:"); sb.append(this.max_parts); first = false; + if (!first) sb.append(", "); + sb.append("validTxnList:"); + if (this.validTxnList == null) { + sb.append("null"); + } else { + sb.append(this.validTxnList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -114997,6 +116202,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_pspe org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 4: // VALID_TXN_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validTxnList = iprot.readString(); + struct.setValidTxnListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -115023,6 +116236,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_psp oprot.writeFieldBegin(MAX_PARTS_FIELD_DESC); oprot.writeI32(struct.max_parts); oprot.writeFieldEnd(); + if (struct.validTxnList != null) { + oprot.writeFieldBegin(VALID_TXN_LIST_FIELD_DESC); + oprot.writeString(struct.validTxnList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -115050,7 +116268,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_pspe if (struct.isSetMax_parts()) { optionals.set(2); } - oprot.writeBitSet(optionals, 3); + if (struct.isSetValidTxnList()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); if (struct.isSetDb_name()) { oprot.writeString(struct.db_name); } @@ -115060,12 +116281,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_pspe if (struct.isSetMax_parts()) { oprot.writeI32(struct.max_parts); } + if (struct.isSetValidTxnList()) { + oprot.writeString(struct.validTxnList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_pspec_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(3); + BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); @@ -115078,6 +116302,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, 
get_partitions_pspec struct.max_parts = iprot.readI32(); struct.setMax_partsIsSet(true); } + if (incoming.get(3)) { + struct.validTxnList = iprot.readString(); + struct.setValidTxnListIsSet(true); + } } } @@ -115560,14 +116788,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_pspe case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1406 = iprot.readListBegin(); - struct.success = new ArrayList(_list1406.size); - PartitionSpec _elem1407; - for (int _i1408 = 0; _i1408 < _list1406.size; ++_i1408) + org.apache.thrift.protocol.TList _list1414 = iprot.readListBegin(); + struct.success = new ArrayList(_list1414.size); + PartitionSpec _elem1415; + for (int _i1416 = 0; _i1416 < _list1414.size; ++_i1416) { - _elem1407 = new PartitionSpec(); - _elem1407.read(iprot); - struct.success.add(_elem1407); + _elem1415 = new PartitionSpec(); + _elem1415.read(iprot); + struct.success.add(_elem1415); } iprot.readListEnd(); } @@ -115611,9 +116839,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_psp oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (PartitionSpec _iter1409 : struct.success) + for (PartitionSpec _iter1417 : struct.success) { - _iter1409.write(oprot); + _iter1417.write(oprot); } oprot.writeListEnd(); } @@ -115660,9 +116888,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_pspe if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (PartitionSpec _iter1410 : struct.success) + for (PartitionSpec _iter1418 : struct.success) { - _iter1410.write(oprot); + _iter1418.write(oprot); } } } @@ -115680,14 +116908,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_pspec BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1411 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1411.size); - PartitionSpec _elem1412; - for (int _i1413 = 0; _i1413 < _list1411.size; ++_i1413) + org.apache.thrift.protocol.TList _list1419 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1419.size); + PartitionSpec _elem1420; + for (int _i1421 = 0; _i1421 < _list1419.size; ++_i1421) { - _elem1412 = new PartitionSpec(); - _elem1412.read(iprot); - struct.success.add(_elem1412); + _elem1420 = new PartitionSpec(); + _elem1420.read(iprot); + struct.success.add(_elem1420); } } struct.setSuccessIsSet(true); @@ -115713,6 +116941,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_pspec private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField MAX_PARTS_FIELD_DESC = new org.apache.thrift.protocol.TField("max_parts", org.apache.thrift.protocol.TType.I16, (short)3); + private static final org.apache.thrift.protocol.TField VALID_TXN_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validTxnList", 
org.apache.thrift.protocol.TType.STRING, (short)4); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -115723,12 +116952,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_pspec private String db_name; // required private String tbl_name; // required private short max_parts; // required + private String validTxnList; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "db_name"), TBL_NAME((short)2, "tbl_name"), - MAX_PARTS((short)3, "max_parts"); + MAX_PARTS((short)3, "max_parts"), + VALID_TXN_LIST((short)4, "validTxnList"); private static final Map byName = new HashMap(); @@ -115749,6 +116980,8 @@ public static _Fields findByThriftId(int fieldId) { return TBL_NAME; case 3: // MAX_PARTS return MAX_PARTS; + case 4: // VALID_TXN_LIST + return VALID_TXN_LIST; default: return null; } @@ -115800,6 +117033,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.MAX_PARTS, new org.apache.thrift.meta_data.FieldMetaData("max_parts", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I16))); + tmpMap.put(_Fields.VALID_TXN_LIST, new org.apache.thrift.meta_data.FieldMetaData("validTxnList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_partition_names_args.class, metaDataMap); } @@ -115812,13 +117047,15 @@ public get_partition_names_args() { public get_partition_names_args( String db_name, String tbl_name, - short max_parts) + short max_parts, + String validTxnList) { this(); this.db_name = db_name; this.tbl_name = tbl_name; this.max_parts = max_parts; setMax_partsIsSet(true); + this.validTxnList = validTxnList; } /** @@ -115833,6 +117070,9 @@ public get_partition_names_args(get_partition_names_args other) { this.tbl_name = other.tbl_name; } this.max_parts = other.max_parts; + if (other.isSetValidTxnList()) { + this.validTxnList = other.validTxnList; + } } public get_partition_names_args deepCopy() { @@ -115845,6 +117085,7 @@ public void clear() { this.tbl_name = null; this.max_parts = (short)-1; + this.validTxnList = null; } public String getDb_name() { @@ -115915,6 +117156,29 @@ public void setMax_partsIsSet(boolean value) { __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MAX_PARTS_ISSET_ID, value); } + public String getValidTxnList() { + return this.validTxnList; + } + + public void setValidTxnList(String validTxnList) { + this.validTxnList = validTxnList; + } + + public void unsetValidTxnList() { + this.validTxnList = null; + } + + /** Returns true if field validTxnList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidTxnList() { + return this.validTxnList != null; + } + + public void setValidTxnListIsSet(boolean value) { + if (!value) { + this.validTxnList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -115941,6 +117205,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_TXN_LIST: + if (value == null) { + unsetValidTxnList(); + } else { + 
setValidTxnList((String)value); + } + break; + } } @@ -115955,6 +117227,9 @@ public Object getFieldValue(_Fields field) { case MAX_PARTS: return getMax_parts(); + case VALID_TXN_LIST: + return getValidTxnList(); + } throw new IllegalStateException(); } @@ -115972,6 +117247,8 @@ public boolean isSet(_Fields field) { return isSetTbl_name(); case MAX_PARTS: return isSetMax_parts(); + case VALID_TXN_LIST: + return isSetValidTxnList(); } throw new IllegalStateException(); } @@ -116016,6 +117293,15 @@ public boolean equals(get_partition_names_args that) { return false; } + boolean this_present_validTxnList = true && this.isSetValidTxnList(); + boolean that_present_validTxnList = true && that.isSetValidTxnList(); + if (this_present_validTxnList || that_present_validTxnList) { + if (!(this_present_validTxnList && that_present_validTxnList)) + return false; + if (!this.validTxnList.equals(that.validTxnList)) + return false; + } + return true; } @@ -116038,6 +117324,11 @@ public int hashCode() { if (present_max_parts) list.add(max_parts); + boolean present_validTxnList = true && (isSetValidTxnList()); + list.add(present_validTxnList); + if (present_validTxnList) + list.add(validTxnList); + return list.hashCode(); } @@ -116079,6 +117370,16 @@ public int compareTo(get_partition_names_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidTxnList()).compareTo(other.isSetValidTxnList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidTxnList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validTxnList, other.validTxnList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -116118,6 +117419,14 @@ public String toString() { sb.append("max_parts:"); sb.append(this.max_parts); first = false; + if (!first) sb.append(", "); + sb.append("validTxnList:"); + if (this.validTxnList == null) { + sb.append("null"); + } else { + sb.append(this.validTxnList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -116187,6 +117496,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 4: // VALID_TXN_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validTxnList = iprot.readString(); + struct.setValidTxnListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -116213,6 +117530,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(MAX_PARTS_FIELD_DESC); oprot.writeI16(struct.max_parts); oprot.writeFieldEnd(); + if (struct.validTxnList != null) { + oprot.writeFieldBegin(VALID_TXN_LIST_FIELD_DESC); + oprot.writeString(struct.validTxnList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -116240,7 +117562,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetMax_parts()) { optionals.set(2); } - oprot.writeBitSet(optionals, 3); + if (struct.isSetValidTxnList()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); if (struct.isSetDb_name()) { oprot.writeString(struct.db_name); } @@ -116250,12 +117575,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetMax_parts()) { oprot.writeI16(struct.max_parts); } + if 
(struct.isSetValidTxnList()) { + oprot.writeString(struct.validTxnList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(3); + BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); @@ -116268,6 +117596,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ struct.max_parts = iprot.readI16(); struct.setMax_partsIsSet(true); } + if (incoming.get(3)) { + struct.validTxnList = iprot.readString(); + struct.setValidTxnListIsSet(true); + } } } @@ -116747,13 +118079,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1414 = iprot.readListBegin(); - struct.success = new ArrayList(_list1414.size); - String _elem1415; - for (int _i1416 = 0; _i1416 < _list1414.size; ++_i1416) + org.apache.thrift.protocol.TList _list1422 = iprot.readListBegin(); + struct.success = new ArrayList(_list1422.size); + String _elem1423; + for (int _i1424 = 0; _i1424 < _list1422.size; ++_i1424) { - _elem1415 = iprot.readString(); - struct.success.add(_elem1415); + _elem1423 = iprot.readString(); + struct.success.add(_elem1423); } iprot.readListEnd(); } @@ -116797,9 +118129,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1417 : struct.success) + for (String _iter1425 : struct.success) { - oprot.writeString(_iter1417); + oprot.writeString(_iter1425); } oprot.writeListEnd(); } @@ -116846,9 +118178,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1418 : struct.success) + for (String _iter1426 : struct.success) { - oprot.writeString(_iter1418); + oprot.writeString(_iter1426); } } } @@ -116866,13 +118198,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1419 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1419.size); - String _elem1420; - for (int _i1421 = 0; _i1421 < _list1419.size; ++_i1421) + org.apache.thrift.protocol.TList _list1427 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1427.size); + String _elem1428; + for (int _i1429 = 0; _i1429 < _list1427.size; ++_i1429) { - _elem1420 = iprot.readString(); - struct.success.add(_elem1420); + _elem1428 = iprot.readString(); + struct.success.add(_elem1428); } } struct.setSuccessIsSet(true); @@ -117837,6 +119169,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_values private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField PART_VALS_FIELD_DESC = new 
org.apache.thrift.protocol.TField("part_vals", org.apache.thrift.protocol.TType.LIST, (short)3); private static final org.apache.thrift.protocol.TField MAX_PARTS_FIELD_DESC = new org.apache.thrift.protocol.TField("max_parts", org.apache.thrift.protocol.TType.I16, (short)4); + private static final org.apache.thrift.protocol.TField VALID_TXN_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validTxnList", org.apache.thrift.protocol.TType.STRING, (short)5); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -117848,13 +119181,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_values private String tbl_name; // required private List part_vals; // required private short max_parts; // required + private String validTxnList; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "db_name"), TBL_NAME((short)2, "tbl_name"), PART_VALS((short)3, "part_vals"), - MAX_PARTS((short)4, "max_parts"); + MAX_PARTS((short)4, "max_parts"), + VALID_TXN_LIST((short)5, "validTxnList"); private static final Map byName = new HashMap(); @@ -117877,6 +119212,8 @@ public static _Fields findByThriftId(int fieldId) { return PART_VALS; case 4: // MAX_PARTS return MAX_PARTS; + case 5: // VALID_TXN_LIST + return VALID_TXN_LIST; default: return null; } @@ -117931,6 +119268,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); tmpMap.put(_Fields.MAX_PARTS, new org.apache.thrift.meta_data.FieldMetaData("max_parts", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I16))); + tmpMap.put(_Fields.VALID_TXN_LIST, new org.apache.thrift.meta_data.FieldMetaData("validTxnList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_partitions_ps_args.class, metaDataMap); } @@ -117944,7 +119283,8 @@ public get_partitions_ps_args( String db_name, String tbl_name, List part_vals, - short max_parts) + short max_parts, + String validTxnList) { this(); this.db_name = db_name; @@ -117952,6 +119292,7 @@ public get_partitions_ps_args( this.part_vals = part_vals; this.max_parts = max_parts; setMax_partsIsSet(true); + this.validTxnList = validTxnList; } /** @@ -117970,6 +119311,9 @@ public get_partitions_ps_args(get_partitions_ps_args other) { this.part_vals = __this__part_vals; } this.max_parts = other.max_parts; + if (other.isSetValidTxnList()) { + this.validTxnList = other.validTxnList; + } } public get_partitions_ps_args deepCopy() { @@ -117983,6 +119327,7 @@ public void clear() { this.part_vals = null; this.max_parts = (short)-1; + this.validTxnList = null; } public String getDb_name() { @@ -118091,6 +119436,29 @@ public void setMax_partsIsSet(boolean value) { __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MAX_PARTS_ISSET_ID, value); } + public String getValidTxnList() { + return this.validTxnList; + } + + public void setValidTxnList(String validTxnList) { + this.validTxnList = validTxnList; + } + + public void unsetValidTxnList() { + this.validTxnList = null; + } + + /** Returns true if field validTxnList is set (has been assigned a 
value) and false otherwise */ + public boolean isSetValidTxnList() { + return this.validTxnList != null; + } + + public void setValidTxnListIsSet(boolean value) { + if (!value) { + this.validTxnList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -118125,6 +119493,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_TXN_LIST: + if (value == null) { + unsetValidTxnList(); + } else { + setValidTxnList((String)value); + } + break; + } } @@ -118142,6 +119518,9 @@ public Object getFieldValue(_Fields field) { case MAX_PARTS: return getMax_parts(); + case VALID_TXN_LIST: + return getValidTxnList(); + } throw new IllegalStateException(); } @@ -118161,6 +119540,8 @@ public boolean isSet(_Fields field) { return isSetPart_vals(); case MAX_PARTS: return isSetMax_parts(); + case VALID_TXN_LIST: + return isSetValidTxnList(); } throw new IllegalStateException(); } @@ -118214,6 +119595,15 @@ public boolean equals(get_partitions_ps_args that) { return false; } + boolean this_present_validTxnList = true && this.isSetValidTxnList(); + boolean that_present_validTxnList = true && that.isSetValidTxnList(); + if (this_present_validTxnList || that_present_validTxnList) { + if (!(this_present_validTxnList && that_present_validTxnList)) + return false; + if (!this.validTxnList.equals(that.validTxnList)) + return false; + } + return true; } @@ -118241,6 +119631,11 @@ public int hashCode() { if (present_max_parts) list.add(max_parts); + boolean present_validTxnList = true && (isSetValidTxnList()); + list.add(present_validTxnList); + if (present_validTxnList) + list.add(validTxnList); + return list.hashCode(); } @@ -118292,6 +119687,16 @@ public int compareTo(get_partitions_ps_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidTxnList()).compareTo(other.isSetValidTxnList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidTxnList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validTxnList, other.validTxnList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -118339,6 +119744,14 @@ public String toString() { sb.append("max_parts:"); sb.append(this.max_parts); first = false; + if (!first) sb.append(", "); + sb.append("validTxnList:"); + if (this.validTxnList == null) { + sb.append("null"); + } else { + sb.append(this.validTxnList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -118403,13 +119816,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_a case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1422 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1422.size); - String _elem1423; - for (int _i1424 = 0; _i1424 < _list1422.size; ++_i1424) + org.apache.thrift.protocol.TList _list1430 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1430.size); + String _elem1431; + for (int _i1432 = 0; _i1432 < _list1430.size; ++_i1432) { - _elem1423 = iprot.readString(); - struct.part_vals.add(_elem1423); + _elem1431 = iprot.readString(); + struct.part_vals.add(_elem1431); } iprot.readListEnd(); } @@ -118426,6 +119839,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_a org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 5: // VALID_TXN_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { 
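// NOTE (reviewer sketch): this branch follows the stock generated read
// pattern -- consume the value only when the wire type matches, otherwise
// skip it -- which is what lets peers built before this field exchange
// structs with peers built after it. For context, the dispatch loop these
// cases live in has the usual generated shape (reconstructed from the other
// readers in this file):
//
//   while (true) {
//     schemeField = iprot.readFieldBegin();
//     if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
//       break;
//     }
//     switch (schemeField.id) {
//       case 5: // VALID_TXN_LIST, as in the added lines below
//         ...
//         break;
//       default:
//         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
//     }
//     iprot.readFieldEnd();
//   }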
+ struct.validTxnList = iprot.readString(); + struct.setValidTxnListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -118453,9 +119874,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1425 : struct.part_vals) + for (String _iter1433 : struct.part_vals) { - oprot.writeString(_iter1425); + oprot.writeString(_iter1433); } oprot.writeListEnd(); } @@ -118464,6 +119885,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(MAX_PARTS_FIELD_DESC); oprot.writeI16(struct.max_parts); oprot.writeFieldEnd(); + if (struct.validTxnList != null) { + oprot.writeFieldBegin(VALID_TXN_LIST_FIELD_DESC); + oprot.writeString(struct.validTxnList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -118494,7 +119920,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_a if (struct.isSetMax_parts()) { optionals.set(3); } - oprot.writeBitSet(optionals, 4); + if (struct.isSetValidTxnList()) { + optionals.set(4); + } + oprot.writeBitSet(optionals, 5); if (struct.isSetDb_name()) { oprot.writeString(struct.db_name); } @@ -118504,21 +119933,24 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_a if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1426 : struct.part_vals) + for (String _iter1434 : struct.part_vals) { - oprot.writeString(_iter1426); + oprot.writeString(_iter1434); } } } if (struct.isSetMax_parts()) { oprot.writeI16(struct.max_parts); } + if (struct.isSetValidTxnList()) { + oprot.writeString(struct.validTxnList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(4); + BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); @@ -118529,13 +119961,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_ar } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1427 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1427.size); - String _elem1428; - for (int _i1429 = 0; _i1429 < _list1427.size; ++_i1429) + org.apache.thrift.protocol.TList _list1435 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1435.size); + String _elem1436; + for (int _i1437 = 0; _i1437 < _list1435.size; ++_i1437) { - _elem1428 = iprot.readString(); - struct.part_vals.add(_elem1428); + _elem1436 = iprot.readString(); + struct.part_vals.add(_elem1436); } } struct.setPart_valsIsSet(true); @@ -118544,6 +119976,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_ar struct.max_parts = iprot.readI16(); struct.setMax_partsIsSet(true); } + if (incoming.get(4)) { + struct.validTxnList = iprot.readString(); + struct.setValidTxnListIsSet(true); + } } } @@ -119026,14 +120462,14 @@ public void 
read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_r case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1430 = iprot.readListBegin(); - struct.success = new ArrayList(_list1430.size); - Partition _elem1431; - for (int _i1432 = 0; _i1432 < _list1430.size; ++_i1432) + org.apache.thrift.protocol.TList _list1438 = iprot.readListBegin(); + struct.success = new ArrayList(_list1438.size); + Partition _elem1439; + for (int _i1440 = 0; _i1440 < _list1438.size; ++_i1440) { - _elem1431 = new Partition(); - _elem1431.read(iprot); - struct.success.add(_elem1431); + _elem1439 = new Partition(); + _elem1439.read(iprot); + struct.success.add(_elem1439); } iprot.readListEnd(); } @@ -119077,9 +120513,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1433 : struct.success) + for (Partition _iter1441 : struct.success) { - _iter1433.write(oprot); + _iter1441.write(oprot); } oprot.writeListEnd(); } @@ -119126,9 +120562,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_r if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1434 : struct.success) + for (Partition _iter1442 : struct.success) { - _iter1434.write(oprot); + _iter1442.write(oprot); } } } @@ -119146,14 +120582,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_re BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1435 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1435.size); - Partition _elem1436; - for (int _i1437 = 0; _i1437 < _list1435.size; ++_i1437) + org.apache.thrift.protocol.TList _list1443 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1443.size); + Partition _elem1444; + for (int _i1445 = 0; _i1445 < _list1443.size; ++_i1445) { - _elem1436 = new Partition(); - _elem1436.read(iprot); - struct.success.add(_elem1436); + _elem1444 = new Partition(); + _elem1444.read(iprot); + struct.success.add(_elem1444); } } struct.setSuccessIsSet(true); @@ -119182,6 +120618,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_re private static final org.apache.thrift.protocol.TField MAX_PARTS_FIELD_DESC = new org.apache.thrift.protocol.TField("max_parts", org.apache.thrift.protocol.TType.I16, (short)4); private static final org.apache.thrift.protocol.TField USER_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("user_name", org.apache.thrift.protocol.TType.STRING, (short)5); private static final org.apache.thrift.protocol.TField GROUP_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("group_names", org.apache.thrift.protocol.TType.LIST, (short)6); + private static final org.apache.thrift.protocol.TField VALID_TXN_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validTxnList", org.apache.thrift.protocol.TType.STRING, (short)7); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -119195,6 +120632,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_re private short max_parts; // required private 
String user_name; // required private List group_names; // required + private String validTxnList; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -119203,7 +120641,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_re PART_VALS((short)3, "part_vals"), MAX_PARTS((short)4, "max_parts"), USER_NAME((short)5, "user_name"), - GROUP_NAMES((short)6, "group_names"); + GROUP_NAMES((short)6, "group_names"), + VALID_TXN_LIST((short)7, "validTxnList"); private static final Map byName = new HashMap(); @@ -119230,6 +120669,8 @@ public static _Fields findByThriftId(int fieldId) { return USER_NAME; case 6: // GROUP_NAMES return GROUP_NAMES; + case 7: // VALID_TXN_LIST + return VALID_TXN_LIST; default: return null; } @@ -119289,6 +120730,8 @@ public String getFieldName() { tmpMap.put(_Fields.GROUP_NAMES, new org.apache.thrift.meta_data.FieldMetaData("group_names", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); + tmpMap.put(_Fields.VALID_TXN_LIST, new org.apache.thrift.meta_data.FieldMetaData("validTxnList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_partitions_ps_with_auth_args.class, metaDataMap); } @@ -119304,7 +120747,8 @@ public get_partitions_ps_with_auth_args( List part_vals, short max_parts, String user_name, - List group_names) + List group_names, + String validTxnList) { this(); this.db_name = db_name; @@ -119314,6 +120758,7 @@ public get_partitions_ps_with_auth_args( setMax_partsIsSet(true); this.user_name = user_name; this.group_names = group_names; + this.validTxnList = validTxnList; } /** @@ -119339,6 +120784,9 @@ public get_partitions_ps_with_auth_args(get_partitions_ps_with_auth_args other) List __this__group_names = new ArrayList(other.group_names); this.group_names = __this__group_names; } + if (other.isSetValidTxnList()) { + this.validTxnList = other.validTxnList; + } } public get_partitions_ps_with_auth_args deepCopy() { @@ -119354,6 +120802,7 @@ public void clear() { this.user_name = null; this.group_names = null; + this.validTxnList = null; } public String getDb_name() { @@ -119523,6 +120972,29 @@ public void setGroup_namesIsSet(boolean value) { } } + public String getValidTxnList() { + return this.validTxnList; + } + + public void setValidTxnList(String validTxnList) { + this.validTxnList = validTxnList; + } + + public void unsetValidTxnList() { + this.validTxnList = null; + } + + /** Returns true if field validTxnList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidTxnList() { + return this.validTxnList != null; + } + + public void setValidTxnListIsSet(boolean value) { + if (!value) { + this.validTxnList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -119573,6 +121045,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_TXN_LIST: + if (value == null) { + unsetValidTxnList(); + } else { + setValidTxnList((String)value); + } + break; + } } @@ -119596,6 +121076,9 @@ public Object 
getFieldValue(_Fields field) { case GROUP_NAMES: return getGroup_names(); + case VALID_TXN_LIST: + return getValidTxnList(); + } throw new IllegalStateException(); } @@ -119619,6 +121102,8 @@ public boolean isSet(_Fields field) { return isSetUser_name(); case GROUP_NAMES: return isSetGroup_names(); + case VALID_TXN_LIST: + return isSetValidTxnList(); } throw new IllegalStateException(); } @@ -119690,6 +121175,15 @@ public boolean equals(get_partitions_ps_with_auth_args that) { return false; } + boolean this_present_validTxnList = true && this.isSetValidTxnList(); + boolean that_present_validTxnList = true && that.isSetValidTxnList(); + if (this_present_validTxnList || that_present_validTxnList) { + if (!(this_present_validTxnList && that_present_validTxnList)) + return false; + if (!this.validTxnList.equals(that.validTxnList)) + return false; + } + return true; } @@ -119727,6 +121221,11 @@ public int hashCode() { if (present_group_names) list.add(group_names); + boolean present_validTxnList = true && (isSetValidTxnList()); + list.add(present_validTxnList); + if (present_validTxnList) + list.add(validTxnList); + return list.hashCode(); } @@ -119798,6 +121297,16 @@ public int compareTo(get_partitions_ps_with_auth_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidTxnList()).compareTo(other.isSetValidTxnList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidTxnList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validTxnList, other.validTxnList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -119861,6 +121370,14 @@ public String toString() { sb.append(this.group_names); } first = false; + if (!first) sb.append(", "); + sb.append("validTxnList:"); + if (this.validTxnList == null) { + sb.append("null"); + } else { + sb.append(this.validTxnList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -119925,13 +121442,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1438 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1438.size); - String _elem1439; - for (int _i1440 = 0; _i1440 < _list1438.size; ++_i1440) + org.apache.thrift.protocol.TList _list1446 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1446.size); + String _elem1447; + for (int _i1448 = 0; _i1448 < _list1446.size; ++_i1448) { - _elem1439 = iprot.readString(); - struct.part_vals.add(_elem1439); + _elem1447 = iprot.readString(); + struct.part_vals.add(_elem1447); } iprot.readListEnd(); } @@ -119959,13 +121476,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w case 6: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1441 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1441.size); - String _elem1442; - for (int _i1443 = 0; _i1443 < _list1441.size; ++_i1443) + org.apache.thrift.protocol.TList _list1449 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1449.size); + String _elem1450; + for (int _i1451 = 0; _i1451 < _list1449.size; ++_i1451) { - _elem1442 = iprot.readString(); - struct.group_names.add(_elem1442); + _elem1450 = iprot.readString(); + struct.group_names.add(_elem1450); } iprot.readListEnd(); } @@ -119974,6 +121491,14 @@ public void 
read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 7: // VALID_TXN_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validTxnList = iprot.readString(); + struct.setValidTxnListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -120001,9 +121526,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1444 : struct.part_vals) + for (String _iter1452 : struct.part_vals) { - oprot.writeString(_iter1444); + oprot.writeString(_iter1452); } oprot.writeListEnd(); } @@ -120021,14 +121546,19 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1445 : struct.group_names) + for (String _iter1453 : struct.group_names) { - oprot.writeString(_iter1445); + oprot.writeString(_iter1453); } oprot.writeListEnd(); } oprot.writeFieldEnd(); } + if (struct.validTxnList != null) { + oprot.writeFieldBegin(VALID_TXN_LIST_FIELD_DESC); + oprot.writeString(struct.validTxnList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -120065,7 +121595,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w if (struct.isSetGroup_names()) { optionals.set(5); } - oprot.writeBitSet(optionals, 6); + if (struct.isSetValidTxnList()) { + optionals.set(6); + } + oprot.writeBitSet(optionals, 7); if (struct.isSetDb_name()) { oprot.writeString(struct.db_name); } @@ -120075,9 +121608,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1446 : struct.part_vals) + for (String _iter1454 : struct.part_vals) { - oprot.writeString(_iter1446); + oprot.writeString(_iter1454); } } } @@ -120090,18 +121623,21 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1447 : struct.group_names) + for (String _iter1455 : struct.group_names) { - oprot.writeString(_iter1447); + oprot.writeString(_iter1455); } } } + if (struct.isSetValidTxnList()) { + oprot.writeString(struct.validTxnList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_with_auth_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(6); + BitSet incoming = iprot.readBitSet(7); if (incoming.get(0)) { struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); @@ -120112,13 +121648,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1448 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1448.size); - String _elem1449; - for (int _i1450 = 0; _i1450 
< _list1448.size; ++_i1450) + org.apache.thrift.protocol.TList _list1456 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1456.size); + String _elem1457; + for (int _i1458 = 0; _i1458 < _list1456.size; ++_i1458) { - _elem1449 = iprot.readString(); - struct.part_vals.add(_elem1449); + _elem1457 = iprot.readString(); + struct.part_vals.add(_elem1457); } } struct.setPart_valsIsSet(true); @@ -120133,17 +121669,21 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi } if (incoming.get(5)) { { - org.apache.thrift.protocol.TList _list1451 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1451.size); - String _elem1452; - for (int _i1453 = 0; _i1453 < _list1451.size; ++_i1453) + org.apache.thrift.protocol.TList _list1459 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1459.size); + String _elem1460; + for (int _i1461 = 0; _i1461 < _list1459.size; ++_i1461) { - _elem1452 = iprot.readString(); - struct.group_names.add(_elem1452); + _elem1460 = iprot.readString(); + struct.group_names.add(_elem1460); } } struct.setGroup_namesIsSet(true); } + if (incoming.get(6)) { + struct.validTxnList = iprot.readString(); + struct.setValidTxnListIsSet(true); + } } } @@ -120626,14 +122166,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1454 = iprot.readListBegin(); - struct.success = new ArrayList(_list1454.size); - Partition _elem1455; - for (int _i1456 = 0; _i1456 < _list1454.size; ++_i1456) + org.apache.thrift.protocol.TList _list1462 = iprot.readListBegin(); + struct.success = new ArrayList(_list1462.size); + Partition _elem1463; + for (int _i1464 = 0; _i1464 < _list1462.size; ++_i1464) { - _elem1455 = new Partition(); - _elem1455.read(iprot); - struct.success.add(_elem1455); + _elem1463 = new Partition(); + _elem1463.read(iprot); + struct.success.add(_elem1463); } iprot.readListEnd(); } @@ -120677,9 +122217,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1457 : struct.success) + for (Partition _iter1465 : struct.success) { - _iter1457.write(oprot); + _iter1465.write(oprot); } oprot.writeListEnd(); } @@ -120726,9 +122266,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1458 : struct.success) + for (Partition _iter1466 : struct.success) { - _iter1458.write(oprot); + _iter1466.write(oprot); } } } @@ -120746,14 +122286,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1459 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1459.size); - Partition _elem1460; - for (int _i1461 = 0; _i1461 < _list1459.size; ++_i1461) + org.apache.thrift.protocol.TList _list1467 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1467.size); + Partition _elem1468; + for (int _i1469 = 0; _i1469 < _list1467.size; ++_i1469) { - _elem1460 = new Partition(); - _elem1460.read(iprot); - struct.success.add(_elem1460); + _elem1468 = new Partition(); + _elem1468.read(iprot); + struct.success.add(_elem1468); } } struct.setSuccessIsSet(true); @@ -120780,6 +122320,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField PART_VALS_FIELD_DESC = new org.apache.thrift.protocol.TField("part_vals", org.apache.thrift.protocol.TType.LIST, (short)3); private static final org.apache.thrift.protocol.TField MAX_PARTS_FIELD_DESC = new org.apache.thrift.protocol.TField("max_parts", org.apache.thrift.protocol.TType.I16, (short)4); + private static final org.apache.thrift.protocol.TField VALID_TXN_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validTxnList", org.apache.thrift.protocol.TType.STRING, (short)5); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -120791,13 +122332,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi private String tbl_name; // required private List part_vals; // required private short max_parts; // required + private String validTxnList; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "db_name"), TBL_NAME((short)2, "tbl_name"), PART_VALS((short)3, "part_vals"), - MAX_PARTS((short)4, "max_parts"); + MAX_PARTS((short)4, "max_parts"), + VALID_TXN_LIST((short)5, "validTxnList"); private static final Map byName = new HashMap(); @@ -120820,6 +122363,8 @@ public static _Fields findByThriftId(int fieldId) { return PART_VALS; case 4: // MAX_PARTS return MAX_PARTS; + case 5: // VALID_TXN_LIST + return VALID_TXN_LIST; default: return null; } @@ -120874,6 +122419,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); tmpMap.put(_Fields.MAX_PARTS, new org.apache.thrift.meta_data.FieldMetaData("max_parts", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I16))); + tmpMap.put(_Fields.VALID_TXN_LIST, new org.apache.thrift.meta_data.FieldMetaData("validTxnList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_partition_names_ps_args.class, metaDataMap); } @@ -120887,7 +122434,8 @@ public get_partition_names_ps_args( String db_name, String tbl_name, List part_vals, - short max_parts) + short max_parts, + String validTxnList) { this(); this.db_name = db_name; @@ -120895,6 +122443,7 @@ public get_partition_names_ps_args( this.part_vals = part_vals; this.max_parts = max_parts; setMax_partsIsSet(true); + this.validTxnList = validTxnList; } /** @@ -120913,6 +122462,9 @@ public get_partition_names_ps_args(get_partition_names_ps_args other) 
{ this.part_vals = __this__part_vals; } this.max_parts = other.max_parts; + if (other.isSetValidTxnList()) { + this.validTxnList = other.validTxnList; + } } public get_partition_names_ps_args deepCopy() { @@ -120926,6 +122478,7 @@ public void clear() { this.part_vals = null; this.max_parts = (short)-1; + this.validTxnList = null; } public String getDb_name() { @@ -121034,6 +122587,29 @@ public void setMax_partsIsSet(boolean value) { __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MAX_PARTS_ISSET_ID, value); } + public String getValidTxnList() { + return this.validTxnList; + } + + public void setValidTxnList(String validTxnList) { + this.validTxnList = validTxnList; + } + + public void unsetValidTxnList() { + this.validTxnList = null; + } + + /** Returns true if field validTxnList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidTxnList() { + return this.validTxnList != null; + } + + public void setValidTxnListIsSet(boolean value) { + if (!value) { + this.validTxnList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -121068,6 +122644,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_TXN_LIST: + if (value == null) { + unsetValidTxnList(); + } else { + setValidTxnList((String)value); + } + break; + } } @@ -121085,6 +122669,9 @@ public Object getFieldValue(_Fields field) { case MAX_PARTS: return getMax_parts(); + case VALID_TXN_LIST: + return getValidTxnList(); + } throw new IllegalStateException(); } @@ -121104,6 +122691,8 @@ public boolean isSet(_Fields field) { return isSetPart_vals(); case MAX_PARTS: return isSetMax_parts(); + case VALID_TXN_LIST: + return isSetValidTxnList(); } throw new IllegalStateException(); } @@ -121157,6 +122746,15 @@ public boolean equals(get_partition_names_ps_args that) { return false; } + boolean this_present_validTxnList = true && this.isSetValidTxnList(); + boolean that_present_validTxnList = true && that.isSetValidTxnList(); + if (this_present_validTxnList || that_present_validTxnList) { + if (!(this_present_validTxnList && that_present_validTxnList)) + return false; + if (!this.validTxnList.equals(that.validTxnList)) + return false; + } + return true; } @@ -121184,6 +122782,11 @@ public int hashCode() { if (present_max_parts) list.add(max_parts); + boolean present_validTxnList = true && (isSetValidTxnList()); + list.add(present_validTxnList); + if (present_validTxnList) + list.add(validTxnList); + return list.hashCode(); } @@ -121235,6 +122838,16 @@ public int compareTo(get_partition_names_ps_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidTxnList()).compareTo(other.isSetValidTxnList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidTxnList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validTxnList, other.validTxnList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -121282,6 +122895,14 @@ public String toString() { sb.append("max_parts:"); sb.append(this.max_parts); first = false; + if (!first) sb.append(", "); + sb.append("validTxnList:"); + if (this.validTxnList == null) { + sb.append("null"); + } else { + sb.append(this.validTxnList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -121346,13 +122967,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - 
org.apache.thrift.protocol.TList _list1462 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1462.size); - String _elem1463; - for (int _i1464 = 0; _i1464 < _list1462.size; ++_i1464) + org.apache.thrift.protocol.TList _list1470 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1470.size); + String _elem1471; + for (int _i1472 = 0; _i1472 < _list1470.size; ++_i1472) { - _elem1463 = iprot.readString(); - struct.part_vals.add(_elem1463); + _elem1471 = iprot.readString(); + struct.part_vals.add(_elem1471); } iprot.readListEnd(); } @@ -121369,6 +122990,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 5: // VALID_TXN_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validTxnList = iprot.readString(); + struct.setValidTxnListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -121396,9 +123025,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1465 : struct.part_vals) + for (String _iter1473 : struct.part_vals) { - oprot.writeString(_iter1465); + oprot.writeString(_iter1473); } oprot.writeListEnd(); } @@ -121407,6 +123036,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(MAX_PARTS_FIELD_DESC); oprot.writeI16(struct.max_parts); oprot.writeFieldEnd(); + if (struct.validTxnList != null) { + oprot.writeFieldBegin(VALID_TXN_LIST_FIELD_DESC); + oprot.writeString(struct.validTxnList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -121437,7 +123071,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetMax_parts()) { optionals.set(3); } - oprot.writeBitSet(optionals, 4); + if (struct.isSetValidTxnList()) { + optionals.set(4); + } + oprot.writeBitSet(optionals, 5); if (struct.isSetDb_name()) { oprot.writeString(struct.db_name); } @@ -121447,21 +123084,24 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1466 : struct.part_vals) + for (String _iter1474 : struct.part_vals) { - oprot.writeString(_iter1466); + oprot.writeString(_iter1474); } } } if (struct.isSetMax_parts()) { oprot.writeI16(struct.max_parts); } + if (struct.isSetValidTxnList()) { + oprot.writeString(struct.validTxnList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ps_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(4); + BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); @@ -121472,13 +123112,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1467 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1467.size); - String _elem1468; - 
for (int _i1469 = 0; _i1469 < _list1467.size; ++_i1469) + org.apache.thrift.protocol.TList _list1475 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1475.size); + String _elem1476; + for (int _i1477 = 0; _i1477 < _list1475.size; ++_i1477) { - _elem1468 = iprot.readString(); - struct.part_vals.add(_elem1468); + _elem1476 = iprot.readString(); + struct.part_vals.add(_elem1476); } } struct.setPart_valsIsSet(true); @@ -121487,6 +123127,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ struct.max_parts = iprot.readI16(); struct.setMax_partsIsSet(true); } + if (incoming.get(4)) { + struct.validTxnList = iprot.readString(); + struct.setValidTxnListIsSet(true); + } } } @@ -121966,13 +123610,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1470 = iprot.readListBegin(); - struct.success = new ArrayList(_list1470.size); - String _elem1471; - for (int _i1472 = 0; _i1472 < _list1470.size; ++_i1472) + org.apache.thrift.protocol.TList _list1478 = iprot.readListBegin(); + struct.success = new ArrayList(_list1478.size); + String _elem1479; + for (int _i1480 = 0; _i1480 < _list1478.size; ++_i1480) { - _elem1471 = iprot.readString(); - struct.success.add(_elem1471); + _elem1479 = iprot.readString(); + struct.success.add(_elem1479); } iprot.readListEnd(); } @@ -122016,9 +123660,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1473 : struct.success) + for (String _iter1481 : struct.success) { - oprot.writeString(_iter1473); + oprot.writeString(_iter1481); } oprot.writeListEnd(); } @@ -122065,9 +123709,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1474 : struct.success) + for (String _iter1482 : struct.success) { - oprot.writeString(_iter1474); + oprot.writeString(_iter1482); } } } @@ -122085,13 +123729,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1475 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1475.size); - String _elem1476; - for (int _i1477 = 0; _i1477 < _list1475.size; ++_i1477) + org.apache.thrift.protocol.TList _list1483 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1483.size); + String _elem1484; + for (int _i1485 = 0; _i1485 < _list1483.size; ++_i1485) { - _elem1476 = iprot.readString(); - struct.success.add(_elem1476); + _elem1484 = iprot.readString(); + struct.success.add(_elem1484); } } struct.setSuccessIsSet(true); @@ -122118,6 +123762,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); private static final 
org.apache.thrift.protocol.TField FILTER_FIELD_DESC = new org.apache.thrift.protocol.TField("filter", org.apache.thrift.protocol.TType.STRING, (short)3); private static final org.apache.thrift.protocol.TField MAX_PARTS_FIELD_DESC = new org.apache.thrift.protocol.TField("max_parts", org.apache.thrift.protocol.TType.I16, (short)4); + private static final org.apache.thrift.protocol.TField VALID_TXN_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validTxnList", org.apache.thrift.protocol.TType.STRING, (short)5); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -122129,13 +123774,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ private String tbl_name; // required private String filter; // required private short max_parts; // required + private String validTxnList; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "db_name"), TBL_NAME((short)2, "tbl_name"), FILTER((short)3, "filter"), - MAX_PARTS((short)4, "max_parts"); + MAX_PARTS((short)4, "max_parts"), + VALID_TXN_LIST((short)5, "validTxnList"); private static final Map byName = new HashMap(); @@ -122158,6 +123805,8 @@ public static _Fields findByThriftId(int fieldId) { return FILTER; case 4: // MAX_PARTS return MAX_PARTS; + case 5: // VALID_TXN_LIST + return VALID_TXN_LIST; default: return null; } @@ -122211,6 +123860,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.MAX_PARTS, new org.apache.thrift.meta_data.FieldMetaData("max_parts", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I16))); + tmpMap.put(_Fields.VALID_TXN_LIST, new org.apache.thrift.meta_data.FieldMetaData("validTxnList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_partitions_by_filter_args.class, metaDataMap); } @@ -122224,7 +123875,8 @@ public get_partitions_by_filter_args( String db_name, String tbl_name, String filter, - short max_parts) + short max_parts, + String validTxnList) { this(); this.db_name = db_name; @@ -122232,6 +123884,7 @@ public get_partitions_by_filter_args( this.filter = filter; this.max_parts = max_parts; setMax_partsIsSet(true); + this.validTxnList = validTxnList; } /** @@ -122249,6 +123902,9 @@ public get_partitions_by_filter_args(get_partitions_by_filter_args other) { this.filter = other.filter; } this.max_parts = other.max_parts; + if (other.isSetValidTxnList()) { + this.validTxnList = other.validTxnList; + } } public get_partitions_by_filter_args deepCopy() { @@ -122262,6 +123918,7 @@ public void clear() { this.filter = null; this.max_parts = (short)-1; + this.validTxnList = null; } public String getDb_name() { @@ -122355,6 +124012,29 @@ public void setMax_partsIsSet(boolean value) { __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MAX_PARTS_ISSET_ID, value); } + public String getValidTxnList() { + return this.validTxnList; + } + + public void setValidTxnList(String validTxnList) { + this.validTxnList = validTxnList; + } + + public void unsetValidTxnList() { + this.validTxnList = null; + } + + 
/** Returns true if field validTxnList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidTxnList() { + return this.validTxnList != null; + } + + public void setValidTxnListIsSet(boolean value) { + if (!value) { + this.validTxnList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -122389,6 +124069,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_TXN_LIST: + if (value == null) { + unsetValidTxnList(); + } else { + setValidTxnList((String)value); + } + break; + } } @@ -122406,6 +124094,9 @@ public Object getFieldValue(_Fields field) { case MAX_PARTS: return getMax_parts(); + case VALID_TXN_LIST: + return getValidTxnList(); + } throw new IllegalStateException(); } @@ -122425,6 +124116,8 @@ public boolean isSet(_Fields field) { return isSetFilter(); case MAX_PARTS: return isSetMax_parts(); + case VALID_TXN_LIST: + return isSetValidTxnList(); } throw new IllegalStateException(); } @@ -122478,6 +124171,15 @@ public boolean equals(get_partitions_by_filter_args that) { return false; } + boolean this_present_validTxnList = true && this.isSetValidTxnList(); + boolean that_present_validTxnList = true && that.isSetValidTxnList(); + if (this_present_validTxnList || that_present_validTxnList) { + if (!(this_present_validTxnList && that_present_validTxnList)) + return false; + if (!this.validTxnList.equals(that.validTxnList)) + return false; + } + return true; } @@ -122505,6 +124207,11 @@ public int hashCode() { if (present_max_parts) list.add(max_parts); + boolean present_validTxnList = true && (isSetValidTxnList()); + list.add(present_validTxnList); + if (present_validTxnList) + list.add(validTxnList); + return list.hashCode(); } @@ -122556,6 +124263,16 @@ public int compareTo(get_partitions_by_filter_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidTxnList()).compareTo(other.isSetValidTxnList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidTxnList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validTxnList, other.validTxnList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -122603,6 +124320,14 @@ public String toString() { sb.append("max_parts:"); sb.append(this.max_parts); first = false; + if (!first) sb.append(", "); + sb.append("validTxnList:"); + if (this.validTxnList == null) { + sb.append("null"); + } else { + sb.append(this.validTxnList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -122680,6 +124405,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_f org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 5: // VALID_TXN_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validTxnList = iprot.readString(); + struct.setValidTxnListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -122711,6 +124444,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(MAX_PARTS_FIELD_DESC); oprot.writeI16(struct.max_parts); oprot.writeFieldEnd(); + if (struct.validTxnList != null) { + oprot.writeFieldBegin(VALID_TXN_LIST_FIELD_DESC); + oprot.writeString(struct.validTxnList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } 
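(Every args struct touched in these hunks follows the same mechanical recipe for appending the optional validTxnList string: a TField descriptor at the next free field id, a VALID_TXN_LIST entry in the _Fields enum and metaDataMap, get/set/unset/isSet accessors, clauses in equals/hashCode/compareTo/toString, a STRING case in the StandardScheme reader — unknown ids are still skipped, so older peers stay compatible — and one extra slot in the TupleScheme presence bitset, as in the hunk that follows, where writeBitSet grows from 4 to 5 bits. A minimal, self-contained sketch of that tuple-encoding idea is below; it is plain Java, not the libthrift API, and the field names and sample ValidTxnList string are illustrative only.)

    import java.util.ArrayList;
    import java.util.BitSet;
    import java.util.List;

    /**
     * Toy model of Thrift's tuple scheme: a presence BitSet followed by the
     * set fields in declaration order. Appending a field only claims a new
     * high bit, so frames written before the change still decode correctly.
     * Illustrative sketch only, not the generated metastore code.
     */
    final class TupleEncodingSketch {
      private final List<Object> frame = new ArrayList<>();

      void writeArgs(String dbName, Short maxParts, String validTxnList) {
        BitSet optionals = new BitSet();
        if (dbName != null)       optionals.set(0);
        if (maxParts != null)     optionals.set(1);
        if (validTxnList != null) optionals.set(2); // appended field -> next bit
        frame.add(optionals);                       // bitset widened by one slot
        if (dbName != null)       frame.add(dbName);
        if (maxParts != null)     frame.add(maxParts);
        if (validTxnList != null) frame.add(validTxnList);
      }

      Object[] readArgs() {
        int i = 0;
        BitSet incoming = (BitSet) frame.get(i++);
        String db   = incoming.get(0) ? (String) frame.get(i++) : null;
        Short  max  = incoming.get(1) ? (Short)  frame.get(i++) : null;
        String txns = incoming.get(2) ? (String) frame.get(i++) : null; // unset for old writers
        return new Object[] { db, max, txns };
      }

      public static void main(String[] args) {
        TupleEncodingSketch s = new TupleEncodingSketch();
        // "8:9223372036854775807::" is a sample ValidTxnList serialization
        s.writeArgs("default", (short) -1, "8:9223372036854775807::");
        System.out.println(java.util.Arrays.toString(s.readArgs()));
      }
    }

(Because presence is tracked per field and appended fields take the highest bit, a peer built before this patch simply writes a shorter bitset and the new field reads as unset — which is why the generated readers below guard validTxnList with a bare incoming.get(n) check rather than requiring it.)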
@@ -122741,7 +124479,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_f if (struct.isSetMax_parts()) { optionals.set(3); } - oprot.writeBitSet(optionals, 4); + if (struct.isSetValidTxnList()) { + optionals.set(4); + } + oprot.writeBitSet(optionals, 5); if (struct.isSetDb_name()) { oprot.writeString(struct.db_name); } @@ -122754,12 +124495,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_f if (struct.isSetMax_parts()) { oprot.writeI16(struct.max_parts); } + if (struct.isSetValidTxnList()) { + oprot.writeString(struct.validTxnList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_filter_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(4); + BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); @@ -122776,6 +124520,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_fi struct.max_parts = iprot.readI16(); struct.setMax_partsIsSet(true); } + if (incoming.get(4)) { + struct.validTxnList = iprot.readString(); + struct.setValidTxnListIsSet(true); + } } } @@ -123258,14 +125006,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_f case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1478 = iprot.readListBegin(); - struct.success = new ArrayList(_list1478.size); - Partition _elem1479; - for (int _i1480 = 0; _i1480 < _list1478.size; ++_i1480) + org.apache.thrift.protocol.TList _list1486 = iprot.readListBegin(); + struct.success = new ArrayList(_list1486.size); + Partition _elem1487; + for (int _i1488 = 0; _i1488 < _list1486.size; ++_i1488) { - _elem1479 = new Partition(); - _elem1479.read(iprot); - struct.success.add(_elem1479); + _elem1487 = new Partition(); + _elem1487.read(iprot); + struct.success.add(_elem1487); } iprot.readListEnd(); } @@ -123309,9 +125057,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1481 : struct.success) + for (Partition _iter1489 : struct.success) { - _iter1481.write(oprot); + _iter1489.write(oprot); } oprot.writeListEnd(); } @@ -123358,9 +125106,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_f if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1482 : struct.success) + for (Partition _iter1490 : struct.success) { - _iter1482.write(oprot); + _iter1490.write(oprot); } } } @@ -123378,14 +125126,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_fi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1483 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1483.size); - Partition _elem1484; - for (int _i1485 = 0; _i1485 < _list1483.size; ++_i1485) + org.apache.thrift.protocol.TList _list1491 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1491.size); + Partition _elem1492; + for (int _i1493 = 0; _i1493 < 
_list1491.size; ++_i1493) { - _elem1484 = new Partition(); - _elem1484.read(iprot); - struct.success.add(_elem1484); + _elem1492 = new Partition(); + _elem1492.read(iprot); + struct.success.add(_elem1492); } } struct.setSuccessIsSet(true); @@ -123412,6 +125160,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_fi private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField FILTER_FIELD_DESC = new org.apache.thrift.protocol.TField("filter", org.apache.thrift.protocol.TType.STRING, (short)3); private static final org.apache.thrift.protocol.TField MAX_PARTS_FIELD_DESC = new org.apache.thrift.protocol.TField("max_parts", org.apache.thrift.protocol.TType.I32, (short)4); + private static final org.apache.thrift.protocol.TField VALID_TXN_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validTxnList", org.apache.thrift.protocol.TType.STRING, (short)5); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -123423,13 +125172,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_fi private String tbl_name; // required private String filter; // required private int max_parts; // required + private String validTxnList; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "db_name"), TBL_NAME((short)2, "tbl_name"), FILTER((short)3, "filter"), - MAX_PARTS((short)4, "max_parts"); + MAX_PARTS((short)4, "max_parts"), + VALID_TXN_LIST((short)5, "validTxnList"); private static final Map byName = new HashMap(); @@ -123452,6 +125203,8 @@ public static _Fields findByThriftId(int fieldId) { return FILTER; case 4: // MAX_PARTS return MAX_PARTS; + case 5: // VALID_TXN_LIST + return VALID_TXN_LIST; default: return null; } @@ -123505,6 +125258,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.MAX_PARTS, new org.apache.thrift.meta_data.FieldMetaData("max_parts", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); + tmpMap.put(_Fields.VALID_TXN_LIST, new org.apache.thrift.meta_data.FieldMetaData("validTxnList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_part_specs_by_filter_args.class, metaDataMap); } @@ -123518,7 +125273,8 @@ public get_part_specs_by_filter_args( String db_name, String tbl_name, String filter, - int max_parts) + int max_parts, + String validTxnList) { this(); this.db_name = db_name; @@ -123526,6 +125282,7 @@ public get_part_specs_by_filter_args( this.filter = filter; this.max_parts = max_parts; setMax_partsIsSet(true); + this.validTxnList = validTxnList; } /** @@ -123543,6 +125300,9 @@ public get_part_specs_by_filter_args(get_part_specs_by_filter_args other) { this.filter = other.filter; } this.max_parts = other.max_parts; + if (other.isSetValidTxnList()) { + this.validTxnList = other.validTxnList; + } } public get_part_specs_by_filter_args deepCopy() { @@ -123556,6 
+125316,7 @@ public void clear() { this.filter = null; this.max_parts = -1; + this.validTxnList = null; } public String getDb_name() { @@ -123649,6 +125410,29 @@ public void setMax_partsIsSet(boolean value) { __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MAX_PARTS_ISSET_ID, value); } + public String getValidTxnList() { + return this.validTxnList; + } + + public void setValidTxnList(String validTxnList) { + this.validTxnList = validTxnList; + } + + public void unsetValidTxnList() { + this.validTxnList = null; + } + + /** Returns true if field validTxnList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidTxnList() { + return this.validTxnList != null; + } + + public void setValidTxnListIsSet(boolean value) { + if (!value) { + this.validTxnList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -123683,6 +125467,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_TXN_LIST: + if (value == null) { + unsetValidTxnList(); + } else { + setValidTxnList((String)value); + } + break; + } } @@ -123700,6 +125492,9 @@ public Object getFieldValue(_Fields field) { case MAX_PARTS: return getMax_parts(); + case VALID_TXN_LIST: + return getValidTxnList(); + } throw new IllegalStateException(); } @@ -123719,6 +125514,8 @@ public boolean isSet(_Fields field) { return isSetFilter(); case MAX_PARTS: return isSetMax_parts(); + case VALID_TXN_LIST: + return isSetValidTxnList(); } throw new IllegalStateException(); } @@ -123772,6 +125569,15 @@ public boolean equals(get_part_specs_by_filter_args that) { return false; } + boolean this_present_validTxnList = true && this.isSetValidTxnList(); + boolean that_present_validTxnList = true && that.isSetValidTxnList(); + if (this_present_validTxnList || that_present_validTxnList) { + if (!(this_present_validTxnList && that_present_validTxnList)) + return false; + if (!this.validTxnList.equals(that.validTxnList)) + return false; + } + return true; } @@ -123799,6 +125605,11 @@ public int hashCode() { if (present_max_parts) list.add(max_parts); + boolean present_validTxnList = true && (isSetValidTxnList()); + list.add(present_validTxnList); + if (present_validTxnList) + list.add(validTxnList); + return list.hashCode(); } @@ -123850,6 +125661,16 @@ public int compareTo(get_part_specs_by_filter_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidTxnList()).compareTo(other.isSetValidTxnList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidTxnList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validTxnList, other.validTxnList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -123897,6 +125718,14 @@ public String toString() { sb.append("max_parts:"); sb.append(this.max_parts); first = false; + if (!first) sb.append(", "); + sb.append("validTxnList:"); + if (this.validTxnList == null) { + sb.append("null"); + } else { + sb.append(this.validTxnList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -123974,6 +125803,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_part_specs_by_f org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 5: // VALID_TXN_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validTxnList = iprot.readString(); + struct.setValidTxnListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, 
schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -124005,6 +125842,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_part_specs_by_ oprot.writeFieldBegin(MAX_PARTS_FIELD_DESC); oprot.writeI32(struct.max_parts); oprot.writeFieldEnd(); + if (struct.validTxnList != null) { + oprot.writeFieldBegin(VALID_TXN_LIST_FIELD_DESC); + oprot.writeString(struct.validTxnList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -124035,7 +125877,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_f if (struct.isSetMax_parts()) { optionals.set(3); } - oprot.writeBitSet(optionals, 4); + if (struct.isSetValidTxnList()) { + optionals.set(4); + } + oprot.writeBitSet(optionals, 5); if (struct.isSetDb_name()) { oprot.writeString(struct.db_name); } @@ -124048,12 +125893,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_f if (struct.isSetMax_parts()) { oprot.writeI32(struct.max_parts); } + if (struct.isSetValidTxnList()) { + oprot.writeString(struct.validTxnList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_filter_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(4); + BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); @@ -124070,6 +125918,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_fi struct.max_parts = iprot.readI32(); struct.setMax_partsIsSet(true); } + if (incoming.get(4)) { + struct.validTxnList = iprot.readString(); + struct.setValidTxnListIsSet(true); + } } } @@ -124552,14 +126404,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_part_specs_by_f case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1486 = iprot.readListBegin(); - struct.success = new ArrayList(_list1486.size); - PartitionSpec _elem1487; - for (int _i1488 = 0; _i1488 < _list1486.size; ++_i1488) + org.apache.thrift.protocol.TList _list1494 = iprot.readListBegin(); + struct.success = new ArrayList(_list1494.size); + PartitionSpec _elem1495; + for (int _i1496 = 0; _i1496 < _list1494.size; ++_i1496) { - _elem1487 = new PartitionSpec(); - _elem1487.read(iprot); - struct.success.add(_elem1487); + _elem1495 = new PartitionSpec(); + _elem1495.read(iprot); + struct.success.add(_elem1495); } iprot.readListEnd(); } @@ -124603,9 +126455,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_part_specs_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (PartitionSpec _iter1489 : struct.success) + for (PartitionSpec _iter1497 : struct.success) { - _iter1489.write(oprot); + _iter1497.write(oprot); } oprot.writeListEnd(); } @@ -124652,9 +126504,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_f if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (PartitionSpec _iter1490 : struct.success) + for (PartitionSpec _iter1498 : struct.success) { - _iter1490.write(oprot); + _iter1498.write(oprot); } } } @@ -124672,14 +126524,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_fi BitSet incoming = 
iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1491 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1491.size); - PartitionSpec _elem1492; - for (int _i1493 = 0; _i1493 < _list1491.size; ++_i1493) + org.apache.thrift.protocol.TList _list1499 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1499.size); + PartitionSpec _elem1500; + for (int _i1501 = 0; _i1501 < _list1499.size; ++_i1501) { - _elem1492 = new PartitionSpec(); - _elem1492.read(iprot); - struct.success.add(_elem1492); + _elem1500 = new PartitionSpec(); + _elem1500.read(iprot); + struct.success.add(_elem1500); } } struct.setSuccessIsSet(true); @@ -125643,6 +127495,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_ex private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField FILTER_FIELD_DESC = new org.apache.thrift.protocol.TField("filter", org.apache.thrift.protocol.TType.STRING, (short)3); + private static final org.apache.thrift.protocol.TField VALID_TXN_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validTxnList", org.apache.thrift.protocol.TType.STRING, (short)4); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -125653,12 +127506,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_ex private String db_name; // required private String tbl_name; // required private String filter; // required + private String validTxnList; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "db_name"), TBL_NAME((short)2, "tbl_name"), - FILTER((short)3, "filter"); + FILTER((short)3, "filter"), + VALID_TXN_LIST((short)4, "validTxnList"); private static final Map byName = new HashMap(); @@ -125679,6 +127534,8 @@ public static _Fields findByThriftId(int fieldId) { return TBL_NAME; case 3: // FILTER return FILTER; + case 4: // VALID_TXN_LIST + return VALID_TXN_LIST; default: return null; } @@ -125728,6 +127585,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.FILTER, new org.apache.thrift.meta_data.FieldMetaData("filter", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.VALID_TXN_LIST, new org.apache.thrift.meta_data.FieldMetaData("validTxnList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_num_partitions_by_filter_args.class, metaDataMap); } @@ -125738,12 +127597,14 @@ public get_num_partitions_by_filter_args() { public get_num_partitions_by_filter_args( String db_name, String tbl_name, - String filter) + String filter, + String validTxnList) { this(); this.db_name = db_name; this.tbl_name = tbl_name; this.filter = filter; + this.validTxnList = validTxnList; } /** @@ -125759,6 +127620,9 @@ public get_num_partitions_by_filter_args(get_num_partitions_by_filter_args other if (other.isSetFilter()) { this.filter = other.filter; } + if (other.isSetValidTxnList()) { + this.validTxnList = other.validTxnList; + } } public get_num_partitions_by_filter_args deepCopy() { @@ -125770,6 +127634,7 @@ public void clear() { this.db_name = null; this.tbl_name = null; this.filter = null; + this.validTxnList = null; } public String getDb_name() { @@ -125841,6 +127706,29 @@ public void setFilterIsSet(boolean value) { } } + public String getValidTxnList() { + return this.validTxnList; + } + + public void setValidTxnList(String validTxnList) { + this.validTxnList = validTxnList; + } + + public void unsetValidTxnList() { + this.validTxnList = null; + } + + /** Returns true if field validTxnList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidTxnList() { + return this.validTxnList != null; + } + + public void setValidTxnListIsSet(boolean value) { + if (!value) { + this.validTxnList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -125867,6 +127755,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_TXN_LIST: + if (value == null) { + unsetValidTxnList(); + } else { + setValidTxnList((String)value); + } + break; + } } @@ -125881,6 +127777,9 @@ public Object getFieldValue(_Fields field) { case FILTER: return getFilter(); + case VALID_TXN_LIST: + return getValidTxnList(); + } throw new IllegalStateException(); } @@ -125898,6 +127797,8 @@ public boolean isSet(_Fields field) { return isSetTbl_name(); case FILTER: return isSetFilter(); + case VALID_TXN_LIST: + return isSetValidTxnList(); } throw new IllegalStateException(); } @@ -125942,6 +127843,15 @@ public boolean equals(get_num_partitions_by_filter_args that) { return false; } + boolean this_present_validTxnList = true 
&& this.isSetValidTxnList(); + boolean that_present_validTxnList = true && that.isSetValidTxnList(); + if (this_present_validTxnList || that_present_validTxnList) { + if (!(this_present_validTxnList && that_present_validTxnList)) + return false; + if (!this.validTxnList.equals(that.validTxnList)) + return false; + } + return true; } @@ -125964,6 +127874,11 @@ public int hashCode() { if (present_filter) list.add(filter); + boolean present_validTxnList = true && (isSetValidTxnList()); + list.add(present_validTxnList); + if (present_validTxnList) + list.add(validTxnList); + return list.hashCode(); } @@ -126005,6 +127920,16 @@ public int compareTo(get_num_partitions_by_filter_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidTxnList()).compareTo(other.isSetValidTxnList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidTxnList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validTxnList, other.validTxnList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -126048,6 +127973,14 @@ public String toString() { sb.append(this.filter); } first = false; + if (!first) sb.append(", "); + sb.append("validTxnList:"); + if (this.validTxnList == null) { + sb.append("null"); + } else { + sb.append(this.validTxnList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -126115,6 +128048,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_num_partitions_ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 4: // VALID_TXN_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validTxnList = iprot.readString(); + struct.setValidTxnListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -126143,6 +128084,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_num_partitions oprot.writeString(struct.filter); oprot.writeFieldEnd(); } + if (struct.validTxnList != null) { + oprot.writeFieldBegin(VALID_TXN_LIST_FIELD_DESC); + oprot.writeString(struct.validTxnList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -126170,7 +128116,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_num_partitions_ if (struct.isSetFilter()) { optionals.set(2); } - oprot.writeBitSet(optionals, 3); + if (struct.isSetValidTxnList()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); if (struct.isSetDb_name()) { oprot.writeString(struct.db_name); } @@ -126180,12 +128129,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_num_partitions_ if (struct.isSetFilter()) { oprot.writeString(struct.filter); } + if (struct.isSetValidTxnList()) { + oprot.writeString(struct.validTxnList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, get_num_partitions_by_filter_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(3); + BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); @@ -126198,6 +128150,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_num_partitions_b struct.filter = iprot.readString(); struct.setFilterIsSet(true); } + if (incoming.get(3)) { + struct.validTxnList = iprot.readString(); + 
struct.setValidTxnListIsSet(true); + } } } @@ -126779,6 +128735,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_num_partitions_b private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("names", org.apache.thrift.protocol.TType.LIST, (short)3); + private static final org.apache.thrift.protocol.TField VALID_TXN_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validTxnList", org.apache.thrift.protocol.TType.STRING, (short)4); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -126789,12 +128746,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_num_partitions_b private String db_name; // required private String tbl_name; // required private List names; // required + private String validTxnList; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "db_name"), TBL_NAME((short)2, "tbl_name"), - NAMES((short)3, "names"); + NAMES((short)3, "names"), + VALID_TXN_LIST((short)4, "validTxnList"); private static final Map byName = new HashMap(); @@ -126815,6 +128774,8 @@ public static _Fields findByThriftId(int fieldId) { return TBL_NAME; case 3: // NAMES return NAMES; + case 4: // VALID_TXN_LIST + return VALID_TXN_LIST; default: return null; } @@ -126865,6 +128826,8 @@ public String getFieldName() { tmpMap.put(_Fields.NAMES, new org.apache.thrift.meta_data.FieldMetaData("names", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); + tmpMap.put(_Fields.VALID_TXN_LIST, new org.apache.thrift.meta_data.FieldMetaData("validTxnList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_partitions_by_names_args.class, metaDataMap); } @@ -126875,12 +128838,14 @@ public get_partitions_by_names_args() { public get_partitions_by_names_args( String db_name, String tbl_name, - List names) + List names, + String validTxnList) { this(); this.db_name = db_name; this.tbl_name = tbl_name; this.names = names; + this.validTxnList = validTxnList; } /** @@ -126897,6 +128862,9 @@ public get_partitions_by_names_args(get_partitions_by_names_args other) { List __this__names = new ArrayList(other.names); this.names = __this__names; } + if (other.isSetValidTxnList()) { + this.validTxnList = other.validTxnList; + } } public get_partitions_by_names_args deepCopy() { @@ -126908,6 +128876,7 @@ public void clear() { this.db_name = null; this.tbl_name = null; this.names = null; + this.validTxnList = null; } public String getDb_name() { @@ -126994,6 +128963,29 @@ public void setNamesIsSet(boolean value) { } } + public String getValidTxnList() { + return this.validTxnList; + } + + public void 
setValidTxnList(String validTxnList) { + this.validTxnList = validTxnList; + } + + public void unsetValidTxnList() { + this.validTxnList = null; + } + + /** Returns true if field validTxnList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidTxnList() { + return this.validTxnList != null; + } + + public void setValidTxnListIsSet(boolean value) { + if (!value) { + this.validTxnList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -127020,6 +129012,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_TXN_LIST: + if (value == null) { + unsetValidTxnList(); + } else { + setValidTxnList((String)value); + } + break; + } } @@ -127034,6 +129034,9 @@ public Object getFieldValue(_Fields field) { case NAMES: return getNames(); + case VALID_TXN_LIST: + return getValidTxnList(); + } throw new IllegalStateException(); } @@ -127051,6 +129054,8 @@ public boolean isSet(_Fields field) { return isSetTbl_name(); case NAMES: return isSetNames(); + case VALID_TXN_LIST: + return isSetValidTxnList(); } throw new IllegalStateException(); } @@ -127095,6 +129100,15 @@ public boolean equals(get_partitions_by_names_args that) { return false; } + boolean this_present_validTxnList = true && this.isSetValidTxnList(); + boolean that_present_validTxnList = true && that.isSetValidTxnList(); + if (this_present_validTxnList || that_present_validTxnList) { + if (!(this_present_validTxnList && that_present_validTxnList)) + return false; + if (!this.validTxnList.equals(that.validTxnList)) + return false; + } + return true; } @@ -127117,6 +129131,11 @@ public int hashCode() { if (present_names) list.add(names); + boolean present_validTxnList = true && (isSetValidTxnList()); + list.add(present_validTxnList); + if (present_validTxnList) + list.add(validTxnList); + return list.hashCode(); } @@ -127158,6 +129177,16 @@ public int compareTo(get_partitions_by_names_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidTxnList()).compareTo(other.isSetValidTxnList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidTxnList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validTxnList, other.validTxnList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -127201,6 +129230,14 @@ public String toString() { sb.append(this.names); } first = false; + if (!first) sb.append(", "); + sb.append("validTxnList:"); + if (this.validTxnList == null) { + sb.append("null"); + } else { + sb.append(this.validTxnList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -127263,13 +129300,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_n case 3: // NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1494 = iprot.readListBegin(); - struct.names = new ArrayList(_list1494.size); - String _elem1495; - for (int _i1496 = 0; _i1496 < _list1494.size; ++_i1496) + org.apache.thrift.protocol.TList _list1502 = iprot.readListBegin(); + struct.names = new ArrayList(_list1502.size); + String _elem1503; + for (int _i1504 = 0; _i1504 < _list1502.size; ++_i1504) { - _elem1495 = iprot.readString(); - struct.names.add(_elem1495); + _elem1503 = iprot.readString(); + struct.names.add(_elem1503); } iprot.readListEnd(); } @@ -127278,6 +129315,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_n 
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 4: // VALID_TXN_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validTxnList = iprot.readString(); + struct.setValidTxnListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -127305,14 +129350,19 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.names.size())); - for (String _iter1497 : struct.names) + for (String _iter1505 : struct.names) { - oprot.writeString(_iter1497); + oprot.writeString(_iter1505); } oprot.writeListEnd(); } oprot.writeFieldEnd(); } + if (struct.validTxnList != null) { + oprot.writeFieldBegin(VALID_TXN_LIST_FIELD_DESC); + oprot.writeString(struct.validTxnList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -127340,7 +129390,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_n if (struct.isSetNames()) { optionals.set(2); } - oprot.writeBitSet(optionals, 3); + if (struct.isSetValidTxnList()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); if (struct.isSetDb_name()) { oprot.writeString(struct.db_name); } @@ -127350,18 +129403,21 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_n if (struct.isSetNames()) { { oprot.writeI32(struct.names.size()); - for (String _iter1498 : struct.names) + for (String _iter1506 : struct.names) { - oprot.writeString(_iter1498); + oprot.writeString(_iter1506); } } } + if (struct.isSetValidTxnList()) { + oprot.writeString(struct.validTxnList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_names_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(3); + BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); @@ -127372,17 +129428,21 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_na } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1499 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.names = new ArrayList(_list1499.size); - String _elem1500; - for (int _i1501 = 0; _i1501 < _list1499.size; ++_i1501) + org.apache.thrift.protocol.TList _list1507 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.names = new ArrayList(_list1507.size); + String _elem1508; + for (int _i1509 = 0; _i1509 < _list1507.size; ++_i1509) { - _elem1500 = iprot.readString(); - struct.names.add(_elem1500); + _elem1508 = iprot.readString(); + struct.names.add(_elem1508); } } struct.setNamesIsSet(true); } + if (incoming.get(3)) { + struct.validTxnList = iprot.readString(); + struct.setValidTxnListIsSet(true); + } } } @@ -127865,14 +129925,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_n case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1502 = iprot.readListBegin(); - struct.success = new ArrayList(_list1502.size); - Partition _elem1503; - for (int 
_i1504 = 0; _i1504 < _list1502.size; ++_i1504) + org.apache.thrift.protocol.TList _list1510 = iprot.readListBegin(); + struct.success = new ArrayList(_list1510.size); + Partition _elem1511; + for (int _i1512 = 0; _i1512 < _list1510.size; ++_i1512) { - _elem1503 = new Partition(); - _elem1503.read(iprot); - struct.success.add(_elem1503); + _elem1511 = new Partition(); + _elem1511.read(iprot); + struct.success.add(_elem1511); } iprot.readListEnd(); } @@ -127916,9 +129976,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1505 : struct.success) + for (Partition _iter1513 : struct.success) { - _iter1505.write(oprot); + _iter1513.write(oprot); } oprot.writeListEnd(); } @@ -127965,9 +130025,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_n if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1506 : struct.success) + for (Partition _iter1514 : struct.success) { - _iter1506.write(oprot); + _iter1514.write(oprot); } } } @@ -127985,14 +130045,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_na BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1507 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1507.size); - Partition _elem1508; - for (int _i1509 = 0; _i1509 < _list1507.size; ++_i1509) + org.apache.thrift.protocol.TList _list1515 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1515.size); + Partition _elem1516; + for (int _i1517 = 0; _i1517 < _list1515.size; ++_i1517) { - _elem1508 = new Partition(); - _elem1508.read(iprot); - struct.success.add(_elem1508); + _elem1516 = new Partition(); + _elem1516.read(iprot); + struct.success.add(_elem1516); } } struct.setSuccessIsSet(true); @@ -130480,14 +132540,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_partitions_ar case 3: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1510 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list1510.size); - Partition _elem1511; - for (int _i1512 = 0; _i1512 < _list1510.size; ++_i1512) + org.apache.thrift.protocol.TList _list1518 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list1518.size); + Partition _elem1519; + for (int _i1520 = 0; _i1520 < _list1518.size; ++_i1520) { - _elem1511 = new Partition(); - _elem1511.read(iprot); - struct.new_parts.add(_elem1511); + _elem1519 = new Partition(); + _elem1519.read(iprot); + struct.new_parts.add(_elem1519); } iprot.readListEnd(); } @@ -130523,9 +132583,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partitions_a oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter1513 : struct.new_parts) + for (Partition _iter1521 : struct.new_parts) { - _iter1513.write(oprot); + _iter1521.write(oprot); } oprot.writeListEnd(); } @@ -130568,9 +132628,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_partitions_ar if 
(struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter1514 : struct.new_parts) + for (Partition _iter1522 : struct.new_parts) { - _iter1514.write(oprot); + _iter1522.write(oprot); } } } @@ -130590,14 +132650,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1515 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list1515.size); - Partition _elem1516; - for (int _i1517 = 0; _i1517 < _list1515.size; ++_i1517) + org.apache.thrift.protocol.TList _list1523 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list1523.size); + Partition _elem1524; + for (int _i1525 = 0; _i1525 < _list1523.size; ++_i1525) { - _elem1516 = new Partition(); - _elem1516.read(iprot); - struct.new_parts.add(_elem1516); + _elem1524 = new Partition(); + _elem1524.read(iprot); + struct.new_parts.add(_elem1524); } } struct.setNew_partsIsSet(true); @@ -131650,14 +133710,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_partitions_wi case 3: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1518 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list1518.size); - Partition _elem1519; - for (int _i1520 = 0; _i1520 < _list1518.size; ++_i1520) + org.apache.thrift.protocol.TList _list1526 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list1526.size); + Partition _elem1527; + for (int _i1528 = 0; _i1528 < _list1526.size; ++_i1528) { - _elem1519 = new Partition(); - _elem1519.read(iprot); - struct.new_parts.add(_elem1519); + _elem1527 = new Partition(); + _elem1527.read(iprot); + struct.new_parts.add(_elem1527); } iprot.readListEnd(); } @@ -131702,9 +133762,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partitions_w oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter1521 : struct.new_parts) + for (Partition _iter1529 : struct.new_parts) { - _iter1521.write(oprot); + _iter1529.write(oprot); } oprot.writeListEnd(); } @@ -131755,9 +133815,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_partitions_wi if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter1522 : struct.new_parts) + for (Partition _iter1530 : struct.new_parts) { - _iter1522.write(oprot); + _iter1530.write(oprot); } } } @@ -131780,14 +133840,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_wit } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1523 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list1523.size); - Partition _elem1524; - for (int _i1525 = 0; _i1525 < _list1523.size; ++_i1525) + org.apache.thrift.protocol.TList _list1531 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list1531.size); + Partition _elem1532; + for (int _i1533 = 0; _i1533 < _list1531.size; ++_i1533) { - _elem1524 = new Partition(); - _elem1524.read(iprot); - struct.new_parts.add(_elem1524); + _elem1532 = new Partition(); + 
_elem1532.read(iprot); + struct.new_parts.add(_elem1532); } } struct.setNew_partsIsSet(true); @@ -134926,13 +136986,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, rename_partition_ar case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1526 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1526.size); - String _elem1527; - for (int _i1528 = 0; _i1528 < _list1526.size; ++_i1528) + org.apache.thrift.protocol.TList _list1534 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1534.size); + String _elem1535; + for (int _i1536 = 0; _i1536 < _list1534.size; ++_i1536) { - _elem1527 = iprot.readString(); - struct.part_vals.add(_elem1527); + _elem1535 = iprot.readString(); + struct.part_vals.add(_elem1535); } iprot.readListEnd(); } @@ -134977,9 +137037,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, rename_partition_a oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1529 : struct.part_vals) + for (String _iter1537 : struct.part_vals) { - oprot.writeString(_iter1529); + oprot.writeString(_iter1537); } oprot.writeListEnd(); } @@ -135030,9 +137090,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, rename_partition_ar if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1530 : struct.part_vals) + for (String _iter1538 : struct.part_vals) { - oprot.writeString(_iter1530); + oprot.writeString(_iter1538); } } } @@ -135055,13 +137115,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, rename_partition_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1531 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1531.size); - String _elem1532; - for (int _i1533 = 0; _i1533 < _list1531.size; ++_i1533) + org.apache.thrift.protocol.TList _list1539 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1539.size); + String _elem1540; + for (int _i1541 = 0; _i1541 < _list1539.size; ++_i1541) { - _elem1532 = iprot.readString(); - struct.part_vals.add(_elem1532); + _elem1540 = iprot.readString(); + struct.part_vals.add(_elem1540); } } struct.setPart_valsIsSet(true); @@ -136873,13 +138933,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_has_ case 1: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1534 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1534.size); - String _elem1535; - for (int _i1536 = 0; _i1536 < _list1534.size; ++_i1536) + org.apache.thrift.protocol.TList _list1542 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1542.size); + String _elem1543; + for (int _i1544 = 0; _i1544 < _list1542.size; ++_i1544) { - _elem1535 = iprot.readString(); - struct.part_vals.add(_elem1535); + _elem1543 = iprot.readString(); + struct.part_vals.add(_elem1543); } iprot.readListEnd(); } @@ -136913,9 +138973,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_has oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, 
struct.part_vals.size())); - for (String _iter1537 : struct.part_vals) + for (String _iter1545 : struct.part_vals) { - oprot.writeString(_iter1537); + oprot.writeString(_iter1545); } oprot.writeListEnd(); } @@ -136952,9 +139012,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_has_ if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1538 : struct.part_vals) + for (String _iter1546 : struct.part_vals) { - oprot.writeString(_iter1538); + oprot.writeString(_iter1546); } } } @@ -136969,13 +139029,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_has_v BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1539 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1539.size); - String _elem1540; - for (int _i1541 = 0; _i1541 < _list1539.size; ++_i1541) + org.apache.thrift.protocol.TList _list1547 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1547.size); + String _elem1548; + for (int _i1549 = 0; _i1549 < _list1547.size; ++_i1549) { - _elem1540 = iprot.readString(); - struct.part_vals.add(_elem1540); + _elem1548 = iprot.readString(); + struct.part_vals.add(_elem1548); } } struct.setPart_valsIsSet(true); @@ -139130,13 +141190,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_to_v case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1542 = iprot.readListBegin(); - struct.success = new ArrayList(_list1542.size); - String _elem1543; - for (int _i1544 = 0; _i1544 < _list1542.size; ++_i1544) + org.apache.thrift.protocol.TList _list1550 = iprot.readListBegin(); + struct.success = new ArrayList(_list1550.size); + String _elem1551; + for (int _i1552 = 0; _i1552 < _list1550.size; ++_i1552) { - _elem1543 = iprot.readString(); - struct.success.add(_elem1543); + _elem1551 = iprot.readString(); + struct.success.add(_elem1551); } iprot.readListEnd(); } @@ -139171,9 +141231,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_to_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1545 : struct.success) + for (String _iter1553 : struct.success) { - oprot.writeString(_iter1545); + oprot.writeString(_iter1553); } oprot.writeListEnd(); } @@ -139212,9 +141272,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_to_v if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1546 : struct.success) + for (String _iter1554 : struct.success) { - oprot.writeString(_iter1546); + oprot.writeString(_iter1554); } } } @@ -139229,13 +141289,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_to_va BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1547 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1547.size); - String _elem1548; - for (int _i1549 = 0; _i1549 < _list1547.size; ++_i1549) + org.apache.thrift.protocol.TList _list1555 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, 
iprot.readI32()); + struct.success = new ArrayList(_list1555.size); + String _elem1556; + for (int _i1557 = 0; _i1557 < _list1555.size; ++_i1557) { - _elem1548 = iprot.readString(); - struct.success.add(_elem1548); + _elem1556 = iprot.readString(); + struct.success.add(_elem1556); } } struct.setSuccessIsSet(true); @@ -139998,15 +142058,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_to_s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1550 = iprot.readMapBegin(); - struct.success = new HashMap(2*_map1550.size); - String _key1551; - String _val1552; - for (int _i1553 = 0; _i1553 < _map1550.size; ++_i1553) + org.apache.thrift.protocol.TMap _map1558 = iprot.readMapBegin(); + struct.success = new HashMap(2*_map1558.size); + String _key1559; + String _val1560; + for (int _i1561 = 0; _i1561 < _map1558.size; ++_i1561) { - _key1551 = iprot.readString(); - _val1552 = iprot.readString(); - struct.success.put(_key1551, _val1552); + _key1559 = iprot.readString(); + _val1560 = iprot.readString(); + struct.success.put(_key1559, _val1560); } iprot.readMapEnd(); } @@ -140041,10 +142101,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_to_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (Map.Entry _iter1554 : struct.success.entrySet()) + for (Map.Entry _iter1562 : struct.success.entrySet()) { - oprot.writeString(_iter1554.getKey()); - oprot.writeString(_iter1554.getValue()); + oprot.writeString(_iter1562.getKey()); + oprot.writeString(_iter1562.getValue()); } oprot.writeMapEnd(); } @@ -140083,10 +142143,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_to_s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Map.Entry _iter1555 : struct.success.entrySet()) + for (Map.Entry _iter1563 : struct.success.entrySet()) { - oprot.writeString(_iter1555.getKey()); - oprot.writeString(_iter1555.getValue()); + oprot.writeString(_iter1563.getKey()); + oprot.writeString(_iter1563.getValue()); } } } @@ -140101,15 +142161,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_to_sp BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map1556 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new HashMap(2*_map1556.size); - String _key1557; - String _val1558; - for (int _i1559 = 0; _i1559 < _map1556.size; ++_i1559) + org.apache.thrift.protocol.TMap _map1564 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new HashMap(2*_map1564.size); + String _key1565; + String _val1566; + for (int _i1567 = 0; _i1567 < _map1564.size; ++_i1567) { - _key1557 = iprot.readString(); - _val1558 = iprot.readString(); - struct.success.put(_key1557, _val1558); + _key1565 = iprot.readString(); + _val1566 = iprot.readString(); + struct.success.put(_key1565, _val1566); } } struct.setSuccessIsSet(true); @@ -140704,15 +142764,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, markPartitionForEve case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - 
org.apache.thrift.protocol.TMap _map1560 = iprot.readMapBegin(); - struct.part_vals = new HashMap(2*_map1560.size); - String _key1561; - String _val1562; - for (int _i1563 = 0; _i1563 < _map1560.size; ++_i1563) + org.apache.thrift.protocol.TMap _map1568 = iprot.readMapBegin(); + struct.part_vals = new HashMap(2*_map1568.size); + String _key1569; + String _val1570; + for (int _i1571 = 0; _i1571 < _map1568.size; ++_i1571) { - _key1561 = iprot.readString(); - _val1562 = iprot.readString(); - struct.part_vals.put(_key1561, _val1562); + _key1569 = iprot.readString(); + _val1570 = iprot.readString(); + struct.part_vals.put(_key1569, _val1570); } iprot.readMapEnd(); } @@ -140756,10 +142816,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, markPartitionForEv oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (Map.Entry _iter1564 : struct.part_vals.entrySet()) + for (Map.Entry _iter1572 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1564.getKey()); - oprot.writeString(_iter1564.getValue()); + oprot.writeString(_iter1572.getKey()); + oprot.writeString(_iter1572.getValue()); } oprot.writeMapEnd(); } @@ -140810,10 +142870,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, markPartitionForEve if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (Map.Entry _iter1565 : struct.part_vals.entrySet()) + for (Map.Entry _iter1573 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1565.getKey()); - oprot.writeString(_iter1565.getValue()); + oprot.writeString(_iter1573.getKey()); + oprot.writeString(_iter1573.getValue()); } } } @@ -140836,15 +142896,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, markPartitionForEven } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map1566 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new HashMap(2*_map1566.size); - String _key1567; - String _val1568; - for (int _i1569 = 0; _i1569 < _map1566.size; ++_i1569) + org.apache.thrift.protocol.TMap _map1574 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new HashMap(2*_map1574.size); + String _key1575; + String _val1576; + for (int _i1577 = 0; _i1577 < _map1574.size; ++_i1577) { - _key1567 = iprot.readString(); - _val1568 = iprot.readString(); - struct.part_vals.put(_key1567, _val1568); + _key1575 = iprot.readString(); + _val1576 = iprot.readString(); + struct.part_vals.put(_key1575, _val1576); } } struct.setPart_valsIsSet(true); @@ -142328,15 +144388,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, isPartitionMarkedFo case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1570 = iprot.readMapBegin(); - struct.part_vals = new HashMap(2*_map1570.size); - String _key1571; - String _val1572; - for (int _i1573 = 0; _i1573 < _map1570.size; ++_i1573) + org.apache.thrift.protocol.TMap _map1578 = iprot.readMapBegin(); + struct.part_vals = new HashMap(2*_map1578.size); + String _key1579; + String _val1580; + for (int _i1581 = 0; _i1581 < _map1578.size; ++_i1581) { - _key1571 = iprot.readString(); - _val1572 = iprot.readString(); - struct.part_vals.put(_key1571, 
_val1572); + _key1579 = iprot.readString(); + _val1580 = iprot.readString(); + struct.part_vals.put(_key1579, _val1580); } iprot.readMapEnd(); } @@ -142380,10 +144440,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, isPartitionMarkedF oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (Map.Entry _iter1574 : struct.part_vals.entrySet()) + for (Map.Entry _iter1582 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1574.getKey()); - oprot.writeString(_iter1574.getValue()); + oprot.writeString(_iter1582.getKey()); + oprot.writeString(_iter1582.getValue()); } oprot.writeMapEnd(); } @@ -142434,10 +144494,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, isPartitionMarkedFo if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (Map.Entry _iter1575 : struct.part_vals.entrySet()) + for (Map.Entry _iter1583 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1575.getKey()); - oprot.writeString(_iter1575.getValue()); + oprot.writeString(_iter1583.getKey()); + oprot.writeString(_iter1583.getValue()); } } } @@ -142460,15 +144520,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, isPartitionMarkedFor } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map1576 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new HashMap(2*_map1576.size); - String _key1577; - String _val1578; - for (int _i1579 = 0; _i1579 < _map1576.size; ++_i1579) + org.apache.thrift.protocol.TMap _map1584 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new HashMap(2*_map1584.size); + String _key1585; + String _val1586; + for (int _i1587 = 0; _i1587 < _map1584.size; ++_i1587) { - _key1577 = iprot.readString(); - _val1578 = iprot.readString(); - struct.part_vals.put(_key1577, _val1578); + _key1585 = iprot.readString(); + _val1586 = iprot.readString(); + struct.part_vals.put(_key1585, _val1586); } } struct.setPart_valsIsSet(true); @@ -153700,6 +155760,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, update_partition_col private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField COL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("col_name", org.apache.thrift.protocol.TType.STRING, (short)3); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)4); private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>(); static { @@ -153710,12 +155771,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, update_partition_col private String db_name; // required private String tbl_name; // required private String col_name; // required + private String validWriteIdList; // required /** The set of fields this struct contains, along with
convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "db_name"), TBL_NAME((short)2, "tbl_name"), - COL_NAME((short)3, "col_name"); + COL_NAME((short)3, "col_name"), + VALID_WRITE_ID_LIST((short)4, "validWriteIdList"); private static final Map byName = new HashMap(); @@ -153736,6 +155799,8 @@ public static _Fields findByThriftId(int fieldId) { return TBL_NAME; case 3: // COL_NAME return COL_NAME; + case 4: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; default: return null; } @@ -153785,6 +155850,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.COL_NAME, new org.apache.thrift.meta_data.FieldMetaData("col_name", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_table_column_statistics_args.class, metaDataMap); } @@ -153795,12 +155862,14 @@ public get_table_column_statistics_args() { public get_table_column_statistics_args( String db_name, String tbl_name, - String col_name) + String col_name, + String validWriteIdList) { this(); this.db_name = db_name; this.tbl_name = tbl_name; this.col_name = col_name; + this.validWriteIdList = validWriteIdList; } /** @@ -153816,6 +155885,9 @@ public get_table_column_statistics_args(get_table_column_statistics_args other) if (other.isSetCol_name()) { this.col_name = other.col_name; } + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } } public get_table_column_statistics_args deepCopy() { @@ -153827,6 +155899,7 @@ public void clear() { this.db_name = null; this.tbl_name = null; this.col_name = null; + this.validWriteIdList = null; } public String getDb_name() { @@ -153898,6 +155971,29 @@ public void setCol_nameIsSet(boolean value) { } } + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -153924,6 +156020,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + } } @@ -153938,6 +156042,9 @@ public Object getFieldValue(_Fields field) { case COL_NAME: return getCol_name(); + case VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + } throw new IllegalStateException(); } @@ -153955,6 +156062,8 @@ public boolean isSet(_Fields field) { return isSetTbl_name(); case COL_NAME: return isSetCol_name(); + case 
VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); } throw new IllegalStateException(); } @@ -153999,6 +156108,15 @@ public boolean equals(get_table_column_statistics_args that) { return false; } + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if (!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + return true; } @@ -154021,6 +156139,11 @@ public int hashCode() { if (present_col_name) list.add(col_name); + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + return list.hashCode(); } @@ -154062,6 +156185,16 @@ public int compareTo(get_table_column_statistics_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -154105,6 +156238,14 @@ public String toString() { sb.append(this.col_name); } first = false; + if (!first) sb.append(", "); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -154172,6 +156313,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_column_st org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 4: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -154200,6 +156349,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_column_s oprot.writeString(struct.col_name); oprot.writeFieldEnd(); } + if (struct.validWriteIdList != null) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -154227,7 +156381,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_column_st if (struct.isSetCol_name()) { optionals.set(2); } - oprot.writeBitSet(optionals, 3); + if (struct.isSetValidWriteIdList()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); if (struct.isSetDb_name()) { oprot.writeString(struct.db_name); } @@ -154237,12 +156394,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_column_st if (struct.isSetCol_name()) { oprot.writeString(struct.col_name); } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, get_table_column_statistics_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(3); + 
BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); @@ -154255,6 +156415,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_column_sta struct.col_name = iprot.readString(); struct.setCol_nameIsSet(true); } + if (incoming.get(3)) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } } } @@ -155054,6 +157218,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_column_sta private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField PART_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("part_name", org.apache.thrift.protocol.TType.STRING, (short)3); private static final org.apache.thrift.protocol.TField COL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("col_name", org.apache.thrift.protocol.TType.STRING, (short)4); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)5); private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>(); static { @@ -155065,13 +157230,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_column_sta private String db_name; // required private String tbl_name; // required private String part_name; // required private String col_name; // required + private String validWriteIdList; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "db_name"), TBL_NAME((short)2, "tbl_name"), PART_NAME((short)3, "part_name"), - COL_NAME((short)4, "col_name"); + COL_NAME((short)4, "col_name"), + VALID_WRITE_ID_LIST((short)5, "validWriteIdList"); private static final Map byName = new HashMap(); @@ -155094,6 +157261,8 @@ public static _Fields findByThriftId(int fieldId) { return PART_NAME; case 4: // COL_NAME return COL_NAME; + case 5: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; default: return null; } @@ -155145,6 +157314,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.COL_NAME, new org.apache.thrift.meta_data.FieldMetaData("col_name", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_partition_column_statistics_args.class, metaDataMap); } @@ -155156,13 +157327,15 @@ public get_partition_column_statistics_args( String db_name, String tbl_name, String part_name, - String col_name) + String col_name, + String validWriteIdList) { this(); this.db_name = db_name; this.tbl_name = tbl_name; this.part_name = part_name; this.col_name = col_name; + this.validWriteIdList = validWriteIdList; } /** @@ -155181,6 +157354,9 @@ public
get_partition_column_statistics_args(get_partition_column_statistics_args if (other.isSetCol_name()) { this.col_name = other.col_name; } + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } } public get_partition_column_statistics_args deepCopy() { @@ -155193,6 +157369,7 @@ public void clear() { this.tbl_name = null; this.part_name = null; this.col_name = null; + this.validWriteIdList = null; } public String getDb_name() { @@ -155287,6 +157464,29 @@ public void setCol_nameIsSet(boolean value) { } } + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -155321,6 +157521,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + } } @@ -155338,6 +157546,9 @@ public Object getFieldValue(_Fields field) { case COL_NAME: return getCol_name(); + case VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + } throw new IllegalStateException(); } @@ -155357,6 +157568,8 @@ public boolean isSet(_Fields field) { return isSetPart_name(); case COL_NAME: return isSetCol_name(); + case VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); } throw new IllegalStateException(); } @@ -155410,6 +157623,15 @@ public boolean equals(get_partition_column_statistics_args that) { return false; } + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if (!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + return true; } @@ -155437,6 +157659,11 @@ public int hashCode() { if (present_col_name) list.add(col_name); + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + return list.hashCode(); } @@ -155488,6 +157715,16 @@ public int compareTo(get_partition_column_statistics_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -155539,6 +157776,14 @@ public String toString() { sb.append(this.col_name); } first = false; + if (!first) sb.append(", "); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -155614,6 +157859,14 @@ 
public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_colum org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 5: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -155647,6 +157900,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_colu oprot.writeString(struct.col_name); oprot.writeFieldEnd(); } + if (struct.validWriteIdList != null) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -155677,7 +157935,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_colum if (struct.isSetCol_name()) { optionals.set(3); } - oprot.writeBitSet(optionals, 4); + if (struct.isSetValidWriteIdList()) { + optionals.set(4); + } + oprot.writeBitSet(optionals, 5); if (struct.isSetDb_name()) { oprot.writeString(struct.db_name); } @@ -155690,12 +157951,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_colum if (struct.isSetCol_name()) { oprot.writeString(struct.col_name); } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_column_statistics_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(4); + BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); @@ -155712,6 +157976,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_column struct.col_name = iprot.readString(); struct.setCol_nameIsSet(true); } + if (incoming.get(4)) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } } } @@ -167124,13 +169392,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_functions_resul case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1580 = iprot.readListBegin(); - struct.success = new ArrayList(_list1580.size); - String _elem1581; - for (int _i1582 = 0; _i1582 < _list1580.size; ++_i1582) + org.apache.thrift.protocol.TList _list1588 = iprot.readListBegin(); + struct.success = new ArrayList(_list1588.size); + String _elem1589; + for (int _i1590 = 0; _i1590 < _list1588.size; ++_i1590) { - _elem1581 = iprot.readString(); - struct.success.add(_elem1581); + _elem1589 = iprot.readString(); + struct.success.add(_elem1589); } iprot.readListEnd(); } @@ -167165,9 +169433,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_functions_resu oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1583 : struct.success) + for (String _iter1591 : struct.success) { - oprot.writeString(_iter1583); + oprot.writeString(_iter1591); } oprot.writeListEnd(); } @@ -167206,9 +169474,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_functions_resul if 
(struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1584 : struct.success) + for (String _iter1592 : struct.success) { - oprot.writeString(_iter1584); + oprot.writeString(_iter1592); } } } @@ -167223,13 +169491,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_functions_result BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1585 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1585.size); - String _elem1586; - for (int _i1587 = 0; _i1587 < _list1585.size; ++_i1587) + org.apache.thrift.protocol.TList _list1593 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1593.size); + String _elem1594; + for (int _i1595 = 0; _i1595 < _list1593.size; ++_i1595) { - _elem1586 = iprot.readString(); - struct.success.add(_elem1586); + _elem1594 = iprot.readString(); + struct.success.add(_elem1594); } } struct.setSuccessIsSet(true); @@ -171284,13 +173552,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_role_names_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1588 = iprot.readListBegin(); - struct.success = new ArrayList(_list1588.size); - String _elem1589; - for (int _i1590 = 0; _i1590 < _list1588.size; ++_i1590) + org.apache.thrift.protocol.TList _list1596 = iprot.readListBegin(); + struct.success = new ArrayList(_list1596.size); + String _elem1597; + for (int _i1598 = 0; _i1598 < _list1596.size; ++_i1598) { - _elem1589 = iprot.readString(); - struct.success.add(_elem1589); + _elem1597 = iprot.readString(); + struct.success.add(_elem1597); } iprot.readListEnd(); } @@ -171325,9 +173593,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_role_names_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1591 : struct.success) + for (String _iter1599 : struct.success) { - oprot.writeString(_iter1591); + oprot.writeString(_iter1599); } oprot.writeListEnd(); } @@ -171366,9 +173634,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_role_names_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1592 : struct.success) + for (String _iter1600 : struct.success) { - oprot.writeString(_iter1592); + oprot.writeString(_iter1600); } } } @@ -171383,13 +173651,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_role_names_resul BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1593 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1593.size); - String _elem1594; - for (int _i1595 = 0; _i1595 < _list1593.size; ++_i1595) + org.apache.thrift.protocol.TList _list1601 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1601.size); + String _elem1602; + for (int _i1603 = 0; _i1603 < _list1601.size; ++_i1603) { - _elem1594 = iprot.readString(); - struct.success.add(_elem1594); + _elem1602 = iprot.readString(); + struct.success.add(_elem1602); } } struct.setSuccessIsSet(true); @@ -174680,14 +176948,14 
@@ public void read(org.apache.thrift.protocol.TProtocol iprot, list_roles_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1596 = iprot.readListBegin(); - struct.success = new ArrayList(_list1596.size); - Role _elem1597; - for (int _i1598 = 0; _i1598 < _list1596.size; ++_i1598) + org.apache.thrift.protocol.TList _list1604 = iprot.readListBegin(); + struct.success = new ArrayList(_list1604.size); + Role _elem1605; + for (int _i1606 = 0; _i1606 < _list1604.size; ++_i1606) { - _elem1597 = new Role(); - _elem1597.read(iprot); - struct.success.add(_elem1597); + _elem1605 = new Role(); + _elem1605.read(iprot); + struct.success.add(_elem1605); } iprot.readListEnd(); } @@ -174722,9 +176990,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, list_roles_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Role _iter1599 : struct.success) + for (Role _iter1607 : struct.success) { - _iter1599.write(oprot); + _iter1607.write(oprot); } oprot.writeListEnd(); } @@ -174763,9 +177031,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, list_roles_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Role _iter1600 : struct.success) + for (Role _iter1608 : struct.success) { - _iter1600.write(oprot); + _iter1608.write(oprot); } } } @@ -174780,14 +177048,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, list_roles_result st BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1601 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1601.size); - Role _elem1602; - for (int _i1603 = 0; _i1603 < _list1601.size; ++_i1603) + org.apache.thrift.protocol.TList _list1609 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1609.size); + Role _elem1610; + for (int _i1611 = 0; _i1611 < _list1609.size; ++_i1611) { - _elem1602 = new Role(); - _elem1602.read(iprot); - struct.success.add(_elem1602); + _elem1610 = new Role(); + _elem1610.read(iprot); + struct.success.add(_elem1610); } } struct.setSuccessIsSet(true); @@ -177792,13 +180060,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_privilege_set_a case 3: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1604 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1604.size); - String _elem1605; - for (int _i1606 = 0; _i1606 < _list1604.size; ++_i1606) + org.apache.thrift.protocol.TList _list1612 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1612.size); + String _elem1613; + for (int _i1614 = 0; _i1614 < _list1612.size; ++_i1614) { - _elem1605 = iprot.readString(); - struct.group_names.add(_elem1605); + _elem1613 = iprot.readString(); + struct.group_names.add(_elem1613); } iprot.readListEnd(); } @@ -177834,9 +180102,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_privilege_set_ oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1607 : struct.group_names) + for (String _iter1615 : 
struct.group_names) { - oprot.writeString(_iter1607); + oprot.writeString(_iter1615); } oprot.writeListEnd(); } @@ -177879,9 +180147,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_privilege_set_a if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1608 : struct.group_names) + for (String _iter1616 : struct.group_names) { - oprot.writeString(_iter1608); + oprot.writeString(_iter1616); } } } @@ -177902,13 +180170,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_privilege_set_ar } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1609 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1609.size); - String _elem1610; - for (int _i1611 = 0; _i1611 < _list1609.size; ++_i1611) + org.apache.thrift.protocol.TList _list1617 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1617.size); + String _elem1618; + for (int _i1619 = 0; _i1619 < _list1617.size; ++_i1619) { - _elem1610 = iprot.readString(); - struct.group_names.add(_elem1610); + _elem1618 = iprot.readString(); + struct.group_names.add(_elem1618); } } struct.setGroup_namesIsSet(true); @@ -179366,14 +181634,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, list_privileges_res case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1612 = iprot.readListBegin(); - struct.success = new ArrayList(_list1612.size); - HiveObjectPrivilege _elem1613; - for (int _i1614 = 0; _i1614 < _list1612.size; ++_i1614) + org.apache.thrift.protocol.TList _list1620 = iprot.readListBegin(); + struct.success = new ArrayList(_list1620.size); + HiveObjectPrivilege _elem1621; + for (int _i1622 = 0; _i1622 < _list1620.size; ++_i1622) { - _elem1613 = new HiveObjectPrivilege(); - _elem1613.read(iprot); - struct.success.add(_elem1613); + _elem1621 = new HiveObjectPrivilege(); + _elem1621.read(iprot); + struct.success.add(_elem1621); } iprot.readListEnd(); } @@ -179408,9 +181676,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, list_privileges_re oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (HiveObjectPrivilege _iter1615 : struct.success) + for (HiveObjectPrivilege _iter1623 : struct.success) { - _iter1615.write(oprot); + _iter1623.write(oprot); } oprot.writeListEnd(); } @@ -179449,9 +181717,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, list_privileges_res if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (HiveObjectPrivilege _iter1616 : struct.success) + for (HiveObjectPrivilege _iter1624 : struct.success) { - _iter1616.write(oprot); + _iter1624.write(oprot); } } } @@ -179466,14 +181734,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, list_privileges_resu BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1617 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1617.size); - HiveObjectPrivilege _elem1618; - for (int _i1619 = 0; _i1619 < _list1617.size; ++_i1619) + org.apache.thrift.protocol.TList _list1625 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1625.size); + HiveObjectPrivilege _elem1626; + for (int _i1627 = 0; _i1627 < _list1625.size; ++_i1627) { - _elem1618 = new HiveObjectPrivilege(); - _elem1618.read(iprot); - struct.success.add(_elem1618); + _elem1626 = new HiveObjectPrivilege(); + _elem1626.read(iprot); + struct.success.add(_elem1626); } } struct.setSuccessIsSet(true); @@ -183420,13 +185688,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, set_ugi_args struct case 2: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1620 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1620.size); - String _elem1621; - for (int _i1622 = 0; _i1622 < _list1620.size; ++_i1622) + org.apache.thrift.protocol.TList _list1628 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1628.size); + String _elem1629; + for (int _i1630 = 0; _i1630 < _list1628.size; ++_i1630) { - _elem1621 = iprot.readString(); - struct.group_names.add(_elem1621); + _elem1629 = iprot.readString(); + struct.group_names.add(_elem1629); } iprot.readListEnd(); } @@ -183457,9 +185725,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, set_ugi_args struc oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1623 : struct.group_names) + for (String _iter1631 : struct.group_names) { - oprot.writeString(_iter1623); + oprot.writeString(_iter1631); } oprot.writeListEnd(); } @@ -183496,9 +185764,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, set_ugi_args struct if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1624 : struct.group_names) + for (String _iter1632 : struct.group_names) { - oprot.writeString(_iter1624); + oprot.writeString(_iter1632); } } } @@ -183514,13 +185782,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, set_ugi_args struct) } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list1625 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1625.size); - String _elem1626; - for (int _i1627 = 0; _i1627 < _list1625.size; ++_i1627) + org.apache.thrift.protocol.TList _list1633 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1633.size); + String _elem1634; + for (int _i1635 = 0; _i1635 < _list1633.size; ++_i1635) { - _elem1626 = iprot.readString(); - struct.group_names.add(_elem1626); + _elem1634 = iprot.readString(); + struct.group_names.add(_elem1634); } } struct.setGroup_namesIsSet(true); @@ -183923,13 +186191,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, set_ugi_result stru case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1628 = iprot.readListBegin(); - struct.success = new ArrayList(_list1628.size); - String _elem1629; - for (int _i1630 = 0; _i1630 < _list1628.size; ++_i1630) + org.apache.thrift.protocol.TList _list1636 = iprot.readListBegin(); + struct.success = new ArrayList(_list1636.size); + String _elem1637; + for (int _i1638 = 0; _i1638 < _list1636.size; ++_i1638) { - _elem1629 = iprot.readString(); - 
struct.success.add(_elem1629); + _elem1637 = iprot.readString(); + struct.success.add(_elem1637); } iprot.readListEnd(); } @@ -183964,9 +186232,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, set_ugi_result str oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1631 : struct.success) + for (String _iter1639 : struct.success) { - oprot.writeString(_iter1631); + oprot.writeString(_iter1639); } oprot.writeListEnd(); } @@ -184005,9 +186273,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, set_ugi_result stru if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1632 : struct.success) + for (String _iter1640 : struct.success) { - oprot.writeString(_iter1632); + oprot.writeString(_iter1640); } } } @@ -184022,13 +186290,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, set_ugi_result struc BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1633 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1633.size); - String _elem1634; - for (int _i1635 = 0; _i1635 < _list1633.size; ++_i1635) + org.apache.thrift.protocol.TList _list1641 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1641.size); + String _elem1642; + for (int _i1643 = 0; _i1643 < _list1641.size; ++_i1643) { - _elem1634 = iprot.readString(); - struct.success.add(_elem1634); + _elem1642 = iprot.readString(); + struct.success.add(_elem1642); } } struct.setSuccessIsSet(true); @@ -189319,13 +191587,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_token_ident case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1636 = iprot.readListBegin(); - struct.success = new ArrayList(_list1636.size); - String _elem1637; - for (int _i1638 = 0; _i1638 < _list1636.size; ++_i1638) + org.apache.thrift.protocol.TList _list1644 = iprot.readListBegin(); + struct.success = new ArrayList(_list1644.size); + String _elem1645; + for (int _i1646 = 0; _i1646 < _list1644.size; ++_i1646) { - _elem1637 = iprot.readString(); - struct.success.add(_elem1637); + _elem1645 = iprot.readString(); + struct.success.add(_elem1645); } iprot.readListEnd(); } @@ -189351,9 +191619,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_token_iden oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1639 : struct.success) + for (String _iter1647 : struct.success) { - oprot.writeString(_iter1639); + oprot.writeString(_iter1647); } oprot.writeListEnd(); } @@ -189384,9 +191652,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_token_ident if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1640 : struct.success) + for (String _iter1648 : struct.success) { - oprot.writeString(_iter1640); + oprot.writeString(_iter1648); } } } @@ -189398,13 +191666,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_token_identi BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1641 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1641.size); - String _elem1642; - for (int _i1643 = 0; _i1643 < _list1641.size; ++_i1643) + org.apache.thrift.protocol.TList _list1649 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1649.size); + String _elem1650; + for (int _i1651 = 0; _i1651 < _list1649.size; ++_i1651) { - _elem1642 = iprot.readString(); - struct.success.add(_elem1642); + _elem1650 = iprot.readString(); + struct.success.add(_elem1650); } } struct.setSuccessIsSet(true); @@ -192434,13 +194702,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_master_keys_res case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1644 = iprot.readListBegin(); - struct.success = new ArrayList(_list1644.size); - String _elem1645; - for (int _i1646 = 0; _i1646 < _list1644.size; ++_i1646) + org.apache.thrift.protocol.TList _list1652 = iprot.readListBegin(); + struct.success = new ArrayList(_list1652.size); + String _elem1653; + for (int _i1654 = 0; _i1654 < _list1652.size; ++_i1654) { - _elem1645 = iprot.readString(); - struct.success.add(_elem1645); + _elem1653 = iprot.readString(); + struct.success.add(_elem1653); } iprot.readListEnd(); } @@ -192466,9 +194734,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_master_keys_re oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1647 : struct.success) + for (String _iter1655 : struct.success) { - oprot.writeString(_iter1647); + oprot.writeString(_iter1655); } oprot.writeListEnd(); } @@ -192499,9 +194767,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_master_keys_res if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1648 : struct.success) + for (String _iter1656 : struct.success) { - oprot.writeString(_iter1648); + oprot.writeString(_iter1656); } } } @@ -192513,13 +194781,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_master_keys_resu BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1649 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1649.size); - String _elem1650; - for (int _i1651 = 0; _i1651 < _list1649.size; ++_i1651) + org.apache.thrift.protocol.TList _list1657 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1657.size); + String _elem1658; + for (int _i1659 = 0; _i1659 < _list1657.size; ++_i1659) { - _elem1650 = iprot.readString(); - struct.success.add(_elem1650); + _elem1658 = iprot.readString(); + struct.success.add(_elem1658); } } struct.setSuccessIsSet(true); @@ -209640,13 +211908,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, find_columns_with_s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1652 = iprot.readListBegin(); - struct.success = new ArrayList(_list1652.size); - String _elem1653; - for (int _i1654 = 0; _i1654 < _list1652.size; ++_i1654) + org.apache.thrift.protocol.TList _list1660 = iprot.readListBegin(); + 
struct.success = new ArrayList(_list1660.size); + String _elem1661; + for (int _i1662 = 0; _i1662 < _list1660.size; ++_i1662) { - _elem1653 = iprot.readString(); - struct.success.add(_elem1653); + _elem1661 = iprot.readString(); + struct.success.add(_elem1661); } iprot.readListEnd(); } @@ -209672,9 +211940,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, find_columns_with_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1655 : struct.success) + for (String _iter1663 : struct.success) { - oprot.writeString(_iter1655); + oprot.writeString(_iter1663); } oprot.writeListEnd(); } @@ -209705,9 +211973,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, find_columns_with_s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1656 : struct.success) + for (String _iter1664 : struct.success) { - oprot.writeString(_iter1656); + oprot.writeString(_iter1664); } } } @@ -209719,13 +211987,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, find_columns_with_st BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1657 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1657.size); - String _elem1658; - for (int _i1659 = 0; _i1659 < _list1657.size; ++_i1659) + org.apache.thrift.protocol.TList _list1665 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1665.size); + String _elem1666; + for (int _i1667 = 0; _i1667 < _list1665.size; ++_i1667) { - _elem1658 = iprot.readString(); - struct.success.add(_elem1658); + _elem1666 = iprot.readString(); + struct.success.add(_elem1666); } } struct.setSuccessIsSet(true); @@ -246611,14 +248879,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_schema_all_vers case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1660 = iprot.readListBegin(); - struct.success = new ArrayList(_list1660.size); - SchemaVersion _elem1661; - for (int _i1662 = 0; _i1662 < _list1660.size; ++_i1662) + org.apache.thrift.protocol.TList _list1668 = iprot.readListBegin(); + struct.success = new ArrayList(_list1668.size); + SchemaVersion _elem1669; + for (int _i1670 = 0; _i1670 < _list1668.size; ++_i1670) { - _elem1661 = new SchemaVersion(); - _elem1661.read(iprot); - struct.success.add(_elem1661); + _elem1669 = new SchemaVersion(); + _elem1669.read(iprot); + struct.success.add(_elem1669); } iprot.readListEnd(); } @@ -246662,9 +248930,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_schema_all_ver oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (SchemaVersion _iter1663 : struct.success) + for (SchemaVersion _iter1671 : struct.success) { - _iter1663.write(oprot); + _iter1671.write(oprot); } oprot.writeListEnd(); } @@ -246711,9 +248979,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_schema_all_vers if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (SchemaVersion _iter1664 : struct.success) + for (SchemaVersion _iter1672 : struct.success) { - _iter1664.write(oprot); + 
_iter1672.write(oprot); } } } @@ -246731,14 +248999,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_all_versi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1665 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1665.size); - SchemaVersion _elem1666; - for (int _i1667 = 0; _i1667 < _list1665.size; ++_i1667) + org.apache.thrift.protocol.TList _list1673 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1673.size); + SchemaVersion _elem1674; + for (int _i1675 = 0; _i1675 < _list1673.size; ++_i1675) { - _elem1666 = new SchemaVersion(); - _elem1666.read(iprot); - struct.success.add(_elem1666); + _elem1674 = new SchemaVersion(); + _elem1674.read(iprot); + struct.success.add(_elem1674); } } struct.setSuccessIsSet(true); @@ -255281,14 +257549,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_runtime_stats_r case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1668 = iprot.readListBegin(); - struct.success = new ArrayList(_list1668.size); - RuntimeStat _elem1669; - for (int _i1670 = 0; _i1670 < _list1668.size; ++_i1670) + org.apache.thrift.protocol.TList _list1676 = iprot.readListBegin(); + struct.success = new ArrayList(_list1676.size); + RuntimeStat _elem1677; + for (int _i1678 = 0; _i1678 < _list1676.size; ++_i1678) { - _elem1669 = new RuntimeStat(); - _elem1669.read(iprot); - struct.success.add(_elem1669); + _elem1677 = new RuntimeStat(); + _elem1677.read(iprot); + struct.success.add(_elem1677); } iprot.readListEnd(); } @@ -255323,9 +257591,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_runtime_stats_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (RuntimeStat _iter1671 : struct.success) + for (RuntimeStat _iter1679 : struct.success) { - _iter1671.write(oprot); + _iter1679.write(oprot); } oprot.writeListEnd(); } @@ -255364,9 +257632,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_runtime_stats_r if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (RuntimeStat _iter1672 : struct.success) + for (RuntimeStat _iter1680 : struct.success) { - _iter1672.write(oprot); + _iter1680.write(oprot); } } } @@ -255381,14 +257649,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_runtime_stats_re BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1673 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1673.size); - RuntimeStat _elem1674; - for (int _i1675 = 0; _i1675 < _list1673.size; ++_i1675) + org.apache.thrift.protocol.TList _list1681 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1681.size); + RuntimeStat _elem1682; + for (int _i1683 = 0; _i1683 < _list1681.size; ++_i1683) { - _elem1674 = new RuntimeStat(); - _elem1674.read(iprot); - struct.success.add(_elem1674); + _elem1682 = new RuntimeStat(); + _elem1682.read(iprot); + struct.success.add(_elem1682); } } struct.setSuccessIsSet(true); diff --git 
a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMFullResourcePlan.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMFullResourcePlan.java index 74828dfa8c..53f517b539 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMFullResourcePlan.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMFullResourcePlan.java @@ -755,14 +755,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMFullResourcePlan case 2: // POOLS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list944 = iprot.readListBegin(); - struct.pools = new ArrayList(_list944.size); - WMPool _elem945; - for (int _i946 = 0; _i946 < _list944.size; ++_i946) + org.apache.thrift.protocol.TList _list952 = iprot.readListBegin(); + struct.pools = new ArrayList(_list952.size); + WMPool _elem953; + for (int _i954 = 0; _i954 < _list952.size; ++_i954) { - _elem945 = new WMPool(); - _elem945.read(iprot); - struct.pools.add(_elem945); + _elem953 = new WMPool(); + _elem953.read(iprot); + struct.pools.add(_elem953); } iprot.readListEnd(); } @@ -774,14 +774,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMFullResourcePlan case 3: // MAPPINGS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list947 = iprot.readListBegin(); - struct.mappings = new ArrayList(_list947.size); - WMMapping _elem948; - for (int _i949 = 0; _i949 < _list947.size; ++_i949) + org.apache.thrift.protocol.TList _list955 = iprot.readListBegin(); + struct.mappings = new ArrayList(_list955.size); + WMMapping _elem956; + for (int _i957 = 0; _i957 < _list955.size; ++_i957) { - _elem948 = new WMMapping(); - _elem948.read(iprot); - struct.mappings.add(_elem948); + _elem956 = new WMMapping(); + _elem956.read(iprot); + struct.mappings.add(_elem956); } iprot.readListEnd(); } @@ -793,14 +793,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMFullResourcePlan case 4: // TRIGGERS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list950 = iprot.readListBegin(); - struct.triggers = new ArrayList(_list950.size); - WMTrigger _elem951; - for (int _i952 = 0; _i952 < _list950.size; ++_i952) + org.apache.thrift.protocol.TList _list958 = iprot.readListBegin(); + struct.triggers = new ArrayList(_list958.size); + WMTrigger _elem959; + for (int _i960 = 0; _i960 < _list958.size; ++_i960) { - _elem951 = new WMTrigger(); - _elem951.read(iprot); - struct.triggers.add(_elem951); + _elem959 = new WMTrigger(); + _elem959.read(iprot); + struct.triggers.add(_elem959); } iprot.readListEnd(); } @@ -812,14 +812,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMFullResourcePlan case 5: // POOL_TRIGGERS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list953 = iprot.readListBegin(); - struct.poolTriggers = new ArrayList(_list953.size); - WMPoolTrigger _elem954; - for (int _i955 = 0; _i955 < _list953.size; ++_i955) + org.apache.thrift.protocol.TList _list961 = iprot.readListBegin(); + struct.poolTriggers = new ArrayList(_list961.size); + WMPoolTrigger _elem962; + for (int _i963 = 0; _i963 < _list961.size; ++_i963) { - _elem954 = new WMPoolTrigger(); - _elem954.read(iprot); - 
struct.poolTriggers.add(_elem954); + _elem962 = new WMPoolTrigger(); + _elem962.read(iprot); + struct.poolTriggers.add(_elem962); } iprot.readListEnd(); } @@ -850,9 +850,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMFullResourcePlan oprot.writeFieldBegin(POOLS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.pools.size())); - for (WMPool _iter956 : struct.pools) + for (WMPool _iter964 : struct.pools) { - _iter956.write(oprot); + _iter964.write(oprot); } oprot.writeListEnd(); } @@ -863,9 +863,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMFullResourcePlan oprot.writeFieldBegin(MAPPINGS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.mappings.size())); - for (WMMapping _iter957 : struct.mappings) + for (WMMapping _iter965 : struct.mappings) { - _iter957.write(oprot); + _iter965.write(oprot); } oprot.writeListEnd(); } @@ -877,9 +877,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMFullResourcePlan oprot.writeFieldBegin(TRIGGERS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.triggers.size())); - for (WMTrigger _iter958 : struct.triggers) + for (WMTrigger _iter966 : struct.triggers) { - _iter958.write(oprot); + _iter966.write(oprot); } oprot.writeListEnd(); } @@ -891,9 +891,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMFullResourcePlan oprot.writeFieldBegin(POOL_TRIGGERS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.poolTriggers.size())); - for (WMPoolTrigger _iter959 : struct.poolTriggers) + for (WMPoolTrigger _iter967 : struct.poolTriggers) { - _iter959.write(oprot); + _iter967.write(oprot); } oprot.writeListEnd(); } @@ -920,9 +920,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, WMFullResourcePlan struct.plan.write(oprot); { oprot.writeI32(struct.pools.size()); - for (WMPool _iter960 : struct.pools) + for (WMPool _iter968 : struct.pools) { - _iter960.write(oprot); + _iter968.write(oprot); } } BitSet optionals = new BitSet(); @@ -939,27 +939,27 @@ public void write(org.apache.thrift.protocol.TProtocol prot, WMFullResourcePlan if (struct.isSetMappings()) { { oprot.writeI32(struct.mappings.size()); - for (WMMapping _iter961 : struct.mappings) + for (WMMapping _iter969 : struct.mappings) { - _iter961.write(oprot); + _iter969.write(oprot); } } } if (struct.isSetTriggers()) { { oprot.writeI32(struct.triggers.size()); - for (WMTrigger _iter962 : struct.triggers) + for (WMTrigger _iter970 : struct.triggers) { - _iter962.write(oprot); + _iter970.write(oprot); } } } if (struct.isSetPoolTriggers()) { { oprot.writeI32(struct.poolTriggers.size()); - for (WMPoolTrigger _iter963 : struct.poolTriggers) + for (WMPoolTrigger _iter971 : struct.poolTriggers) { - _iter963.write(oprot); + _iter971.write(oprot); } } } @@ -972,56 +972,56 @@ public void read(org.apache.thrift.protocol.TProtocol prot, WMFullResourcePlan s struct.plan.read(iprot); struct.setPlanIsSet(true); { - org.apache.thrift.protocol.TList _list964 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.pools = new ArrayList(_list964.size); - WMPool _elem965; - for (int _i966 = 0; _i966 < _list964.size; ++_i966) + org.apache.thrift.protocol.TList _list972 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.pools = new ArrayList(_list972.size); + WMPool _elem973; + for (int _i974 = 0; _i974 < _list972.size; ++_i974) { - _elem965 = new WMPool(); - _elem965.read(iprot); - struct.pools.add(_elem965); + _elem973 = new WMPool(); + _elem973.read(iprot); + struct.pools.add(_elem973); } } struct.setPoolsIsSet(true); BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list967 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.mappings = new ArrayList(_list967.size); - WMMapping _elem968; - for (int _i969 = 0; _i969 < _list967.size; ++_i969) + org.apache.thrift.protocol.TList _list975 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.mappings = new ArrayList(_list975.size); + WMMapping _elem976; + for (int _i977 = 0; _i977 < _list975.size; ++_i977) { - _elem968 = new WMMapping(); - _elem968.read(iprot); - struct.mappings.add(_elem968); + _elem976 = new WMMapping(); + _elem976.read(iprot); + struct.mappings.add(_elem976); } } struct.setMappingsIsSet(true); } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list970 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.triggers = new ArrayList(_list970.size); - WMTrigger _elem971; - for (int _i972 = 0; _i972 < _list970.size; ++_i972) + org.apache.thrift.protocol.TList _list978 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.triggers = new ArrayList(_list978.size); + WMTrigger _elem979; + for (int _i980 = 0; _i980 < _list978.size; ++_i980) { - _elem971 = new WMTrigger(); - _elem971.read(iprot); - struct.triggers.add(_elem971); + _elem979 = new WMTrigger(); + _elem979.read(iprot); + struct.triggers.add(_elem979); } } struct.setTriggersIsSet(true); } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list973 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.poolTriggers = new ArrayList(_list973.size); - WMPoolTrigger _elem974; - for (int _i975 = 0; _i975 < _list973.size; ++_i975) + org.apache.thrift.protocol.TList _list981 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.poolTriggers = new ArrayList(_list981.size); + WMPoolTrigger _elem982; + for (int _i983 = 0; _i983 < _list981.size; ++_i983) { - _elem974 = new WMPoolTrigger(); - _elem974.read(iprot); - struct.poolTriggers.add(_elem974); + _elem982 = new WMPoolTrigger(); + _elem982.read(iprot); + struct.poolTriggers.add(_elem982); } } struct.setPoolTriggersIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetAllResourcePlanResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetAllResourcePlanResponse.java index fe8aacb553..fcbba4fd1e 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetAllResourcePlanResponse.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetAllResourcePlanResponse.java @@ -346,14 +346,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMGetAllResourcePla case 1: // RESOURCE_PLANS if 
(schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list976 = iprot.readListBegin(); - struct.resourcePlans = new ArrayList(_list976.size); - WMResourcePlan _elem977; - for (int _i978 = 0; _i978 < _list976.size; ++_i978) + org.apache.thrift.protocol.TList _list984 = iprot.readListBegin(); + struct.resourcePlans = new ArrayList(_list984.size); + WMResourcePlan _elem985; + for (int _i986 = 0; _i986 < _list984.size; ++_i986) { - _elem977 = new WMResourcePlan(); - _elem977.read(iprot); - struct.resourcePlans.add(_elem977); + _elem985 = new WMResourcePlan(); + _elem985.read(iprot); + struct.resourcePlans.add(_elem985); } iprot.readListEnd(); } @@ -380,9 +380,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMGetAllResourcePl oprot.writeFieldBegin(RESOURCE_PLANS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.resourcePlans.size())); - for (WMResourcePlan _iter979 : struct.resourcePlans) + for (WMResourcePlan _iter987 : struct.resourcePlans) { - _iter979.write(oprot); + _iter987.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, WMGetAllResourcePla if (struct.isSetResourcePlans()) { { oprot.writeI32(struct.resourcePlans.size()); - for (WMResourcePlan _iter980 : struct.resourcePlans) + for (WMResourcePlan _iter988 : struct.resourcePlans) { - _iter980.write(oprot); + _iter988.write(oprot); } } } @@ -428,14 +428,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, WMGetAllResourcePlan BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list981 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.resourcePlans = new ArrayList(_list981.size); - WMResourcePlan _elem982; - for (int _i983 = 0; _i983 < _list981.size; ++_i983) + org.apache.thrift.protocol.TList _list989 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.resourcePlans = new ArrayList(_list989.size); + WMResourcePlan _elem990; + for (int _i991 = 0; _i991 < _list989.size; ++_i991) { - _elem982 = new WMResourcePlan(); - _elem982.read(iprot); - struct.resourcePlans.add(_elem982); + _elem990 = new WMResourcePlan(); + _elem990.read(iprot); + struct.resourcePlans.add(_elem990); } } struct.setResourcePlansIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetTriggersForResourePlanResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetTriggersForResourePlanResponse.java index 3b57fea105..bd1e1334ea 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetTriggersForResourePlanResponse.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetTriggersForResourePlanResponse.java @@ -346,14 +346,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMGetTriggersForRes case 1: // TRIGGERS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1000 = iprot.readListBegin(); - struct.triggers = new ArrayList(_list1000.size); - WMTrigger _elem1001; - for (int _i1002 = 0; _i1002 < _list1000.size; ++_i1002) + org.apache.thrift.protocol.TList _list1008 = 
iprot.readListBegin(); + struct.triggers = new ArrayList(_list1008.size); + WMTrigger _elem1009; + for (int _i1010 = 0; _i1010 < _list1008.size; ++_i1010) { - _elem1001 = new WMTrigger(); - _elem1001.read(iprot); - struct.triggers.add(_elem1001); + _elem1009 = new WMTrigger(); + _elem1009.read(iprot); + struct.triggers.add(_elem1009); } iprot.readListEnd(); } @@ -380,9 +380,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMGetTriggersForRe oprot.writeFieldBegin(TRIGGERS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.triggers.size())); - for (WMTrigger _iter1003 : struct.triggers) + for (WMTrigger _iter1011 : struct.triggers) { - _iter1003.write(oprot); + _iter1011.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, WMGetTriggersForRes if (struct.isSetTriggers()) { { oprot.writeI32(struct.triggers.size()); - for (WMTrigger _iter1004 : struct.triggers) + for (WMTrigger _iter1012 : struct.triggers) { - _iter1004.write(oprot); + _iter1012.write(oprot); } } } @@ -428,14 +428,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, WMGetTriggersForReso BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1005 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.triggers = new ArrayList(_list1005.size); - WMTrigger _elem1006; - for (int _i1007 = 0; _i1007 < _list1005.size; ++_i1007) + org.apache.thrift.protocol.TList _list1013 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.triggers = new ArrayList(_list1013.size); + WMTrigger _elem1014; + for (int _i1015 = 0; _i1015 < _list1013.size; ++_i1015) { - _elem1006 = new WMTrigger(); - _elem1006.read(iprot); - struct.triggers.add(_elem1006); + _elem1014 = new WMTrigger(); + _elem1014.read(iprot); + struct.triggers.add(_elem1014); } } struct.setTriggersIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMValidateResourcePlanResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMValidateResourcePlanResponse.java index 791174491c..b56dd720fb 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMValidateResourcePlanResponse.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMValidateResourcePlanResponse.java @@ -441,13 +441,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMValidateResourceP case 1: // ERRORS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list984 = iprot.readListBegin(); - struct.errors = new ArrayList(_list984.size); - String _elem985; - for (int _i986 = 0; _i986 < _list984.size; ++_i986) + org.apache.thrift.protocol.TList _list992 = iprot.readListBegin(); + struct.errors = new ArrayList(_list992.size); + String _elem993; + for (int _i994 = 0; _i994 < _list992.size; ++_i994) { - _elem985 = iprot.readString(); - struct.errors.add(_elem985); + _elem993 = iprot.readString(); + struct.errors.add(_elem993); } iprot.readListEnd(); } @@ -459,13 +459,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMValidateResourceP case 2: // WARNINGS if (schemeField.type == 
org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list987 = iprot.readListBegin(); - struct.warnings = new ArrayList(_list987.size); - String _elem988; - for (int _i989 = 0; _i989 < _list987.size; ++_i989) + org.apache.thrift.protocol.TList _list995 = iprot.readListBegin(); + struct.warnings = new ArrayList(_list995.size); + String _elem996; + for (int _i997 = 0; _i997 < _list995.size; ++_i997) { - _elem988 = iprot.readString(); - struct.warnings.add(_elem988); + _elem996 = iprot.readString(); + struct.warnings.add(_elem996); } iprot.readListEnd(); } @@ -492,9 +492,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMValidateResource oprot.writeFieldBegin(ERRORS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.errors.size())); - for (String _iter990 : struct.errors) + for (String _iter998 : struct.errors) { - oprot.writeString(_iter990); + oprot.writeString(_iter998); } oprot.writeListEnd(); } @@ -506,9 +506,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMValidateResource oprot.writeFieldBegin(WARNINGS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.warnings.size())); - for (String _iter991 : struct.warnings) + for (String _iter999 : struct.warnings) { - oprot.writeString(_iter991); + oprot.writeString(_iter999); } oprot.writeListEnd(); } @@ -543,18 +543,18 @@ public void write(org.apache.thrift.protocol.TProtocol prot, WMValidateResourceP if (struct.isSetErrors()) { { oprot.writeI32(struct.errors.size()); - for (String _iter992 : struct.errors) + for (String _iter1000 : struct.errors) { - oprot.writeString(_iter992); + oprot.writeString(_iter1000); } } } if (struct.isSetWarnings()) { { oprot.writeI32(struct.warnings.size()); - for (String _iter993 : struct.warnings) + for (String _iter1001 : struct.warnings) { - oprot.writeString(_iter993); + oprot.writeString(_iter1001); } } } @@ -566,26 +566,26 @@ public void read(org.apache.thrift.protocol.TProtocol prot, WMValidateResourcePl BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list994 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.errors = new ArrayList(_list994.size); - String _elem995; - for (int _i996 = 0; _i996 < _list994.size; ++_i996) + org.apache.thrift.protocol.TList _list1002 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.errors = new ArrayList(_list1002.size); + String _elem1003; + for (int _i1004 = 0; _i1004 < _list1002.size; ++_i1004) { - _elem995 = iprot.readString(); - struct.errors.add(_elem995); + _elem1003 = iprot.readString(); + struct.errors.add(_elem1003); } } struct.setErrorsIsSet(true); } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list997 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.warnings = new ArrayList(_list997.size); - String _elem998; - for (int _i999 = 0; _i999 < _list997.size; ++_i999) + org.apache.thrift.protocol.TList _list1005 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.warnings = new ArrayList(_list1005.size); + String _elem1006; + for (int _i1007 = 0; _i1007 < _list1005.size; ++_i1007) { - _elem998 = iprot.readString(); - struct.warnings.add(_elem998); + _elem1006 = iprot.readString(); + 
struct.warnings.add(_elem1006); } } struct.setWarningsIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WriteNotificationLogRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WriteNotificationLogRequest.java index 46b92b87b9..14e0de7c03 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WriteNotificationLogRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WriteNotificationLogRequest.java @@ -813,13 +813,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WriteNotificationLo case 6: // PARTITION_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list812 = iprot.readListBegin(); - struct.partitionVals = new ArrayList(_list812.size); - String _elem813; - for (int _i814 = 0; _i814 < _list812.size; ++_i814) + org.apache.thrift.protocol.TList _list820 = iprot.readListBegin(); + struct.partitionVals = new ArrayList(_list820.size); + String _elem821; + for (int _i822 = 0; _i822 < _list820.size; ++_i822) { - _elem813 = iprot.readString(); - struct.partitionVals.add(_elem813); + _elem821 = iprot.readString(); + struct.partitionVals.add(_elem821); } iprot.readListEnd(); } @@ -867,9 +867,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WriteNotificationL oprot.writeFieldBegin(PARTITION_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partitionVals.size())); - for (String _iter815 : struct.partitionVals) + for (String _iter823 : struct.partitionVals) { - oprot.writeString(_iter815); + oprot.writeString(_iter823); } oprot.writeListEnd(); } @@ -906,9 +906,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, WriteNotificationLo if (struct.isSetPartitionVals()) { { oprot.writeI32(struct.partitionVals.size()); - for (String _iter816 : struct.partitionVals) + for (String _iter824 : struct.partitionVals) { - oprot.writeString(_iter816); + oprot.writeString(_iter824); } } } @@ -931,13 +931,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, WriteNotificationLog BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list817 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionVals = new ArrayList(_list817.size); - String _elem818; - for (int _i819 = 0; _i819 < _list817.size; ++_i819) + org.apache.thrift.protocol.TList _list825 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionVals = new ArrayList(_list825.size); + String _elem826; + for (int _i827 = 0; _i827 < _list825.size; ++_i827) { - _elem818 = iprot.readString(); - struct.partitionVals.add(_elem818); + _elem826 = iprot.readString(); + struct.partitionVals.add(_elem826); } } struct.setPartitionValsIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php index e826dedc95..1560658aa2 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php +++ 
b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php @@ -137,41 +137,45 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf { /** * @param string $db_name * @param string $table_name + * @param string $validWriteIdList * @return \metastore\FieldSchema[] * @throws \metastore\MetaException * @throws \metastore\UnknownTableException * @throws \metastore\UnknownDBException */ - public function get_fields($db_name, $table_name); + public function get_fields($db_name, $table_name, $validWriteIdList); /** * @param string $db_name * @param string $table_name * @param \metastore\EnvironmentContext $environment_context + * @param string $validWriteIdList * @return \metastore\FieldSchema[] * @throws \metastore\MetaException * @throws \metastore\UnknownTableException * @throws \metastore\UnknownDBException */ - public function get_fields_with_environment_context($db_name, $table_name, \metastore\EnvironmentContext $environment_context); + public function get_fields_with_environment_context($db_name, $table_name, \metastore\EnvironmentContext $environment_context, $validWriteIdList); /** * @param string $db_name * @param string $table_name + * @param string $validWriteIdList * @return \metastore\FieldSchema[] * @throws \metastore\MetaException * @throws \metastore\UnknownTableException * @throws \metastore\UnknownDBException */ - public function get_schema($db_name, $table_name); + public function get_schema($db_name, $table_name, $validWriteIdList); /** * @param string $db_name * @param string $table_name * @param \metastore\EnvironmentContext $environment_context + * @param string $validWriteIdList * @return \metastore\FieldSchema[] * @throws \metastore\MetaException * @throws \metastore\UnknownTableException * @throws \metastore\UnknownDBException */ - public function get_schema_with_environment_context($db_name, $table_name, \metastore\EnvironmentContext $environment_context); + public function get_schema_with_environment_context($db_name, $table_name, \metastore\EnvironmentContext $environment_context, $validWriteIdList); /** * @param \metastore\Table $tbl * @throws \metastore\AlreadyExistsException @@ -318,11 +322,12 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf { /** * @param string $dbname * @param string $tbl_name + * @param string $validWriteIdList * @return \metastore\Table * @throws \metastore\MetaException * @throws \metastore\NoSuchObjectException */ - public function get_table($dbname, $tbl_name); + public function get_table($dbname, $tbl_name, $validWriteIdList); /** * @param string $dbname * @param string[] $tbl_names @@ -548,11 +553,12 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf { * @param string $db_name * @param string $tbl_name * @param string[] $part_vals + * @param string $validTxnList * @return \metastore\Partition * @throws \metastore\MetaException * @throws \metastore\NoSuchObjectException */ - public function get_partition($db_name, $tbl_name, array $part_vals); + public function get_partition($db_name, $tbl_name, array $part_vals, $validTxnList); /** * @param array $partitionSpecs * @param string $source_db @@ -585,58 +591,64 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf { * @param string[] $part_vals * @param string $user_name * @param string[] $group_names + * @param string $validTxnList * @return \metastore\Partition * @throws \metastore\MetaException * @throws \metastore\NoSuchObjectException */ - public function get_partition_with_auth($db_name, $tbl_name, array 
$part_vals, $user_name, array $group_names); + public function get_partition_with_auth($db_name, $tbl_name, array $part_vals, $user_name, array $group_names, $validTxnList); /** * @param string $db_name * @param string $tbl_name * @param string $part_name + * @param string $validTxnList * @return \metastore\Partition * @throws \metastore\MetaException * @throws \metastore\NoSuchObjectException */ - public function get_partition_by_name($db_name, $tbl_name, $part_name); + public function get_partition_by_name($db_name, $tbl_name, $part_name, $validTxnList); /** * @param string $db_name * @param string $tbl_name * @param int $max_parts + * @param string $validTxnList * @return \metastore\Partition[] * @throws \metastore\NoSuchObjectException * @throws \metastore\MetaException */ - public function get_partitions($db_name, $tbl_name, $max_parts); + public function get_partitions($db_name, $tbl_name, $max_parts, $validTxnList); /** * @param string $db_name * @param string $tbl_name * @param int $max_parts * @param string $user_name * @param string[] $group_names + * @param string $validTxnList * @return \metastore\Partition[] * @throws \metastore\NoSuchObjectException * @throws \metastore\MetaException */ - public function get_partitions_with_auth($db_name, $tbl_name, $max_parts, $user_name, array $group_names); + public function get_partitions_with_auth($db_name, $tbl_name, $max_parts, $user_name, array $group_names, $validTxnList); /** * @param string $db_name * @param string $tbl_name * @param int $max_parts + * @param string $validTxnList * @return \metastore\PartitionSpec[] * @throws \metastore\NoSuchObjectException * @throws \metastore\MetaException */ - public function get_partitions_pspec($db_name, $tbl_name, $max_parts); + public function get_partitions_pspec($db_name, $tbl_name, $max_parts, $validTxnList); /** * @param string $db_name * @param string $tbl_name * @param int $max_parts + * @param string $validTxnList * @return string[] * @throws \metastore\NoSuchObjectException * @throws \metastore\MetaException */ - public function get_partition_names($db_name, $tbl_name, $max_parts); + public function get_partition_names($db_name, $tbl_name, $max_parts, $validTxnList); /** * @param \metastore\PartitionValuesRequest $request * @return \metastore\PartitionValuesResponse @@ -649,11 +661,12 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf { * @param string $tbl_name * @param string[] $part_vals * @param int $max_parts + * @param string $validTxnList * @return \metastore\Partition[] * @throws \metastore\MetaException * @throws \metastore\NoSuchObjectException */ - public function get_partitions_ps($db_name, $tbl_name, array $part_vals, $max_parts); + public function get_partitions_ps($db_name, $tbl_name, array $part_vals, $max_parts, $validTxnList); /** * @param string $db_name * @param string $tbl_name @@ -661,41 +674,45 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf { * @param int $max_parts * @param string $user_name * @param string[] $group_names + * @param string $validTxnList * @return \metastore\Partition[] * @throws \metastore\NoSuchObjectException * @throws \metastore\MetaException */ - public function get_partitions_ps_with_auth($db_name, $tbl_name, array $part_vals, $max_parts, $user_name, array $group_names); + public function get_partitions_ps_with_auth($db_name, $tbl_name, array $part_vals, $max_parts, $user_name, array $group_names, $validTxnList); /** * @param string $db_name * @param string $tbl_name * @param string[] $part_vals * @param int 
$max_parts + * @param string $validTxnList * @return string[] * @throws \metastore\MetaException * @throws \metastore\NoSuchObjectException */ - public function get_partition_names_ps($db_name, $tbl_name, array $part_vals, $max_parts); + public function get_partition_names_ps($db_name, $tbl_name, array $part_vals, $max_parts, $validTxnList); /** * @param string $db_name * @param string $tbl_name * @param string $filter * @param int $max_parts + * @param string $validTxnList * @return \metastore\Partition[] * @throws \metastore\MetaException * @throws \metastore\NoSuchObjectException */ - public function get_partitions_by_filter($db_name, $tbl_name, $filter, $max_parts); + public function get_partitions_by_filter($db_name, $tbl_name, $filter, $max_parts, $validTxnList); /** * @param string $db_name * @param string $tbl_name * @param string $filter * @param int $max_parts + * @param string $validTxnList * @return \metastore\PartitionSpec[] * @throws \metastore\MetaException * @throws \metastore\NoSuchObjectException */ - public function get_part_specs_by_filter($db_name, $tbl_name, $filter, $max_parts); + public function get_part_specs_by_filter($db_name, $tbl_name, $filter, $max_parts, $validTxnList); /** * @param \metastore\PartitionsByExprRequest $req * @return \metastore\PartitionsByExprResult @@ -707,20 +724,22 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf { * @param string $db_name * @param string $tbl_name * @param string $filter + * @param string $validTxnList * @return int * @throws \metastore\MetaException * @throws \metastore\NoSuchObjectException */ - public function get_num_partitions_by_filter($db_name, $tbl_name, $filter); + public function get_num_partitions_by_filter($db_name, $tbl_name, $filter, $validTxnList); /** * @param string $db_name * @param string $tbl_name * @param string[] $names + * @param string $validTxnList * @return \metastore\Partition[] * @throws \metastore\MetaException * @throws \metastore\NoSuchObjectException */ - public function get_partitions_by_names($db_name, $tbl_name, array $names); + public function get_partitions_by_names($db_name, $tbl_name, array $names, $validTxnList); /** * @param \metastore\GetPartitionsByNamesRequest $req * @return \metastore\GetPartitionsByNamesResult @@ -920,25 +939,27 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf { * @param string $db_name * @param string $tbl_name * @param string $col_name + * @param string $validWriteIdList * @return \metastore\ColumnStatistics * @throws \metastore\NoSuchObjectException * @throws \metastore\MetaException * @throws \metastore\InvalidInputException * @throws \metastore\InvalidObjectException */ - public function get_table_column_statistics($db_name, $tbl_name, $col_name); + public function get_table_column_statistics($db_name, $tbl_name, $col_name, $validWriteIdList); /** * @param string $db_name * @param string $tbl_name * @param string $part_name * @param string $col_name + * @param string $validWriteIdList * @return \metastore\ColumnStatistics * @throws \metastore\NoSuchObjectException * @throws \metastore\MetaException * @throws \metastore\InvalidInputException * @throws \metastore\InvalidObjectException */ - public function get_partition_column_statistics($db_name, $tbl_name, $part_name, $col_name); + public function get_partition_column_statistics($db_name, $tbl_name, $part_name, $col_name, $validWriteIdList); /** * @param \metastore\TableStatsRequest $request * @return \metastore\TableStatsResult @@ -2625,17 +2646,18 @@ class 
ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("get_type_all failed: unknown result"); } - public function get_fields($db_name, $table_name) + public function get_fields($db_name, $table_name, $validWriteIdList) { - $this->send_get_fields($db_name, $table_name); + $this->send_get_fields($db_name, $table_name, $validWriteIdList); return $this->recv_get_fields(); } - public function send_get_fields($db_name, $table_name) + public function send_get_fields($db_name, $table_name, $validWriteIdList) { $args = new \metastore\ThriftHiveMetastore_get_fields_args(); $args->db_name = $db_name; $args->table_name = $table_name; + $args->validWriteIdList = $validWriteIdList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -2686,18 +2708,19 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("get_fields failed: unknown result"); } - public function get_fields_with_environment_context($db_name, $table_name, \metastore\EnvironmentContext $environment_context) + public function get_fields_with_environment_context($db_name, $table_name, \metastore\EnvironmentContext $environment_context, $validWriteIdList) { - $this->send_get_fields_with_environment_context($db_name, $table_name, $environment_context); + $this->send_get_fields_with_environment_context($db_name, $table_name, $environment_context, $validWriteIdList); return $this->recv_get_fields_with_environment_context(); } - public function send_get_fields_with_environment_context($db_name, $table_name, \metastore\EnvironmentContext $environment_context) + public function send_get_fields_with_environment_context($db_name, $table_name, \metastore\EnvironmentContext $environment_context, $validWriteIdList) { $args = new \metastore\ThriftHiveMetastore_get_fields_with_environment_context_args(); $args->db_name = $db_name; $args->table_name = $table_name; $args->environment_context = $environment_context; + $args->validWriteIdList = $validWriteIdList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -2748,17 +2771,18 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("get_fields_with_environment_context failed: unknown result"); } - public function get_schema($db_name, $table_name) + public function get_schema($db_name, $table_name, $validWriteIdList) { - $this->send_get_schema($db_name, $table_name); + $this->send_get_schema($db_name, $table_name, $validWriteIdList); return $this->recv_get_schema(); } - public function send_get_schema($db_name, $table_name) + public function send_get_schema($db_name, $table_name, $validWriteIdList) { $args = new \metastore\ThriftHiveMetastore_get_schema_args(); $args->db_name = $db_name; $args->table_name = $table_name; + $args->validWriteIdList = $validWriteIdList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -2809,18 +2833,19 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("get_schema failed: unknown result"); } - public function get_schema_with_environment_context($db_name, $table_name, \metastore\EnvironmentContext $environment_context) + public function get_schema_with_environment_context($db_name, $table_name, 
\metastore\EnvironmentContext $environment_context, $validWriteIdList) { - $this->send_get_schema_with_environment_context($db_name, $table_name, $environment_context); + $this->send_get_schema_with_environment_context($db_name, $table_name, $environment_context, $validWriteIdList); return $this->recv_get_schema_with_environment_context(); } - public function send_get_schema_with_environment_context($db_name, $table_name, \metastore\EnvironmentContext $environment_context) + public function send_get_schema_with_environment_context($db_name, $table_name, \metastore\EnvironmentContext $environment_context, $validWriteIdList) { $args = new \metastore\ThriftHiveMetastore_get_schema_with_environment_context_args(); $args->db_name = $db_name; $args->table_name = $table_name; $args->environment_context = $environment_context; + $args->validWriteIdList = $validWriteIdList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -3984,17 +4009,18 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("get_all_tables failed: unknown result"); } - public function get_table($dbname, $tbl_name) + public function get_table($dbname, $tbl_name, $validWriteIdList) { - $this->send_get_table($dbname, $tbl_name); + $this->send_get_table($dbname, $tbl_name, $validWriteIdList); return $this->recv_get_table(); } - public function send_get_table($dbname, $tbl_name) + public function send_get_table($dbname, $tbl_name, $validWriteIdList) { $args = new \metastore\ThriftHiveMetastore_get_table_args(); $args->dbname = $dbname; $args->tbl_name = $tbl_name; + $args->validWriteIdList = $validWriteIdList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -5525,18 +5551,19 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("drop_partitions_req failed: unknown result"); } - public function get_partition($db_name, $tbl_name, array $part_vals) + public function get_partition($db_name, $tbl_name, array $part_vals, $validTxnList) { - $this->send_get_partition($db_name, $tbl_name, $part_vals); + $this->send_get_partition($db_name, $tbl_name, $part_vals, $validTxnList); return $this->recv_get_partition(); } - public function send_get_partition($db_name, $tbl_name, array $part_vals) + public function send_get_partition($db_name, $tbl_name, array $part_vals, $validTxnList) { $args = new \metastore\ThriftHiveMetastore_get_partition_args(); $args->db_name = $db_name; $args->tbl_name = $tbl_name; $args->part_vals = $part_vals; + $args->validTxnList = $validTxnList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -5718,13 +5745,13 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("exchange_partitions failed: unknown result"); } - public function get_partition_with_auth($db_name, $tbl_name, array $part_vals, $user_name, array $group_names) + public function get_partition_with_auth($db_name, $tbl_name, array $part_vals, $user_name, array $group_names, $validTxnList) { - $this->send_get_partition_with_auth($db_name, $tbl_name, $part_vals, $user_name, $group_names); + $this->send_get_partition_with_auth($db_name, $tbl_name, $part_vals, $user_name, $group_names, $validTxnList); return 
$this->recv_get_partition_with_auth(); } - public function send_get_partition_with_auth($db_name, $tbl_name, array $part_vals, $user_name, array $group_names) + public function send_get_partition_with_auth($db_name, $tbl_name, array $part_vals, $user_name, array $group_names, $validTxnList) { $args = new \metastore\ThriftHiveMetastore_get_partition_with_auth_args(); $args->db_name = $db_name; @@ -5732,6 +5759,7 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas $args->part_vals = $part_vals; $args->user_name = $user_name; $args->group_names = $group_names; + $args->validTxnList = $validTxnList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -5779,18 +5807,19 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("get_partition_with_auth failed: unknown result"); } - public function get_partition_by_name($db_name, $tbl_name, $part_name) + public function get_partition_by_name($db_name, $tbl_name, $part_name, $validTxnList) { - $this->send_get_partition_by_name($db_name, $tbl_name, $part_name); + $this->send_get_partition_by_name($db_name, $tbl_name, $part_name, $validTxnList); return $this->recv_get_partition_by_name(); } - public function send_get_partition_by_name($db_name, $tbl_name, $part_name) + public function send_get_partition_by_name($db_name, $tbl_name, $part_name, $validTxnList) { $args = new \metastore\ThriftHiveMetastore_get_partition_by_name_args(); $args->db_name = $db_name; $args->tbl_name = $tbl_name; $args->part_name = $part_name; + $args->validTxnList = $validTxnList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -5838,18 +5867,19 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("get_partition_by_name failed: unknown result"); } - public function get_partitions($db_name, $tbl_name, $max_parts) + public function get_partitions($db_name, $tbl_name, $max_parts, $validTxnList) { - $this->send_get_partitions($db_name, $tbl_name, $max_parts); + $this->send_get_partitions($db_name, $tbl_name, $max_parts, $validTxnList); return $this->recv_get_partitions(); } - public function send_get_partitions($db_name, $tbl_name, $max_parts) + public function send_get_partitions($db_name, $tbl_name, $max_parts, $validTxnList) { $args = new \metastore\ThriftHiveMetastore_get_partitions_args(); $args->db_name = $db_name; $args->tbl_name = $tbl_name; $args->max_parts = $max_parts; + $args->validTxnList = $validTxnList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -5897,13 +5927,13 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("get_partitions failed: unknown result"); } - public function get_partitions_with_auth($db_name, $tbl_name, $max_parts, $user_name, array $group_names) + public function get_partitions_with_auth($db_name, $tbl_name, $max_parts, $user_name, array $group_names, $validTxnList) { - $this->send_get_partitions_with_auth($db_name, $tbl_name, $max_parts, $user_name, $group_names); + $this->send_get_partitions_with_auth($db_name, $tbl_name, $max_parts, $user_name, $group_names, $validTxnList); return $this->recv_get_partitions_with_auth(); } - public function send_get_partitions_with_auth($db_name, $tbl_name, 
$max_parts, $user_name, array $group_names) + public function send_get_partitions_with_auth($db_name, $tbl_name, $max_parts, $user_name, array $group_names, $validTxnList) { $args = new \metastore\ThriftHiveMetastore_get_partitions_with_auth_args(); $args->db_name = $db_name; @@ -5911,6 +5941,7 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas $args->max_parts = $max_parts; $args->user_name = $user_name; $args->group_names = $group_names; + $args->validTxnList = $validTxnList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -5958,18 +5989,19 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("get_partitions_with_auth failed: unknown result"); } - public function get_partitions_pspec($db_name, $tbl_name, $max_parts) + public function get_partitions_pspec($db_name, $tbl_name, $max_parts, $validTxnList) { - $this->send_get_partitions_pspec($db_name, $tbl_name, $max_parts); + $this->send_get_partitions_pspec($db_name, $tbl_name, $max_parts, $validTxnList); return $this->recv_get_partitions_pspec(); } - public function send_get_partitions_pspec($db_name, $tbl_name, $max_parts) + public function send_get_partitions_pspec($db_name, $tbl_name, $max_parts, $validTxnList) { $args = new \metastore\ThriftHiveMetastore_get_partitions_pspec_args(); $args->db_name = $db_name; $args->tbl_name = $tbl_name; $args->max_parts = $max_parts; + $args->validTxnList = $validTxnList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -6017,18 +6049,19 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("get_partitions_pspec failed: unknown result"); } - public function get_partition_names($db_name, $tbl_name, $max_parts) + public function get_partition_names($db_name, $tbl_name, $max_parts, $validTxnList) { - $this->send_get_partition_names($db_name, $tbl_name, $max_parts); + $this->send_get_partition_names($db_name, $tbl_name, $max_parts, $validTxnList); return $this->recv_get_partition_names(); } - public function send_get_partition_names($db_name, $tbl_name, $max_parts) + public function send_get_partition_names($db_name, $tbl_name, $max_parts, $validTxnList) { $args = new \metastore\ThriftHiveMetastore_get_partition_names_args(); $args->db_name = $db_name; $args->tbl_name = $tbl_name; $args->max_parts = $max_parts; + $args->validTxnList = $validTxnList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -6133,19 +6166,20 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("get_partition_values failed: unknown result"); } - public function get_partitions_ps($db_name, $tbl_name, array $part_vals, $max_parts) + public function get_partitions_ps($db_name, $tbl_name, array $part_vals, $max_parts, $validTxnList) { - $this->send_get_partitions_ps($db_name, $tbl_name, $part_vals, $max_parts); + $this->send_get_partitions_ps($db_name, $tbl_name, $part_vals, $max_parts, $validTxnList); return $this->recv_get_partitions_ps(); } - public function send_get_partitions_ps($db_name, $tbl_name, array $part_vals, $max_parts) + public function send_get_partitions_ps($db_name, $tbl_name, array $part_vals, $max_parts, $validTxnList) { $args = new 
\metastore\ThriftHiveMetastore_get_partitions_ps_args(); $args->db_name = $db_name; $args->tbl_name = $tbl_name; $args->part_vals = $part_vals; $args->max_parts = $max_parts; + $args->validTxnList = $validTxnList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -6193,13 +6227,13 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("get_partitions_ps failed: unknown result"); } - public function get_partitions_ps_with_auth($db_name, $tbl_name, array $part_vals, $max_parts, $user_name, array $group_names) + public function get_partitions_ps_with_auth($db_name, $tbl_name, array $part_vals, $max_parts, $user_name, array $group_names, $validTxnList) { - $this->send_get_partitions_ps_with_auth($db_name, $tbl_name, $part_vals, $max_parts, $user_name, $group_names); + $this->send_get_partitions_ps_with_auth($db_name, $tbl_name, $part_vals, $max_parts, $user_name, $group_names, $validTxnList); return $this->recv_get_partitions_ps_with_auth(); } - public function send_get_partitions_ps_with_auth($db_name, $tbl_name, array $part_vals, $max_parts, $user_name, array $group_names) + public function send_get_partitions_ps_with_auth($db_name, $tbl_name, array $part_vals, $max_parts, $user_name, array $group_names, $validTxnList) { $args = new \metastore\ThriftHiveMetastore_get_partitions_ps_with_auth_args(); $args->db_name = $db_name; @@ -6208,6 +6242,7 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas $args->max_parts = $max_parts; $args->user_name = $user_name; $args->group_names = $group_names; + $args->validTxnList = $validTxnList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -6255,19 +6290,20 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("get_partitions_ps_with_auth failed: unknown result"); } - public function get_partition_names_ps($db_name, $tbl_name, array $part_vals, $max_parts) + public function get_partition_names_ps($db_name, $tbl_name, array $part_vals, $max_parts, $validTxnList) { - $this->send_get_partition_names_ps($db_name, $tbl_name, $part_vals, $max_parts); + $this->send_get_partition_names_ps($db_name, $tbl_name, $part_vals, $max_parts, $validTxnList); return $this->recv_get_partition_names_ps(); } - public function send_get_partition_names_ps($db_name, $tbl_name, array $part_vals, $max_parts) + public function send_get_partition_names_ps($db_name, $tbl_name, array $part_vals, $max_parts, $validTxnList) { $args = new \metastore\ThriftHiveMetastore_get_partition_names_ps_args(); $args->db_name = $db_name; $args->tbl_name = $tbl_name; $args->part_vals = $part_vals; $args->max_parts = $max_parts; + $args->validTxnList = $validTxnList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -6315,19 +6351,20 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("get_partition_names_ps failed: unknown result"); } - public function get_partitions_by_filter($db_name, $tbl_name, $filter, $max_parts) + public function get_partitions_by_filter($db_name, $tbl_name, $filter, $max_parts, $validTxnList) { - $this->send_get_partitions_by_filter($db_name, $tbl_name, $filter, $max_parts); + $this->send_get_partitions_by_filter($db_name, 
$tbl_name, $filter, $max_parts, $validTxnList); return $this->recv_get_partitions_by_filter(); } - public function send_get_partitions_by_filter($db_name, $tbl_name, $filter, $max_parts) + public function send_get_partitions_by_filter($db_name, $tbl_name, $filter, $max_parts, $validTxnList) { $args = new \metastore\ThriftHiveMetastore_get_partitions_by_filter_args(); $args->db_name = $db_name; $args->tbl_name = $tbl_name; $args->filter = $filter; $args->max_parts = $max_parts; + $args->validTxnList = $validTxnList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -6375,19 +6412,20 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("get_partitions_by_filter failed: unknown result"); } - public function get_part_specs_by_filter($db_name, $tbl_name, $filter, $max_parts) + public function get_part_specs_by_filter($db_name, $tbl_name, $filter, $max_parts, $validTxnList) { - $this->send_get_part_specs_by_filter($db_name, $tbl_name, $filter, $max_parts); + $this->send_get_part_specs_by_filter($db_name, $tbl_name, $filter, $max_parts, $validTxnList); return $this->recv_get_part_specs_by_filter(); } - public function send_get_part_specs_by_filter($db_name, $tbl_name, $filter, $max_parts) + public function send_get_part_specs_by_filter($db_name, $tbl_name, $filter, $max_parts, $validTxnList) { $args = new \metastore\ThriftHiveMetastore_get_part_specs_by_filter_args(); $args->db_name = $db_name; $args->tbl_name = $tbl_name; $args->filter = $filter; $args->max_parts = $max_parts; + $args->validTxnList = $validTxnList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -6492,18 +6530,19 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("get_partitions_by_expr failed: unknown result"); } - public function get_num_partitions_by_filter($db_name, $tbl_name, $filter) + public function get_num_partitions_by_filter($db_name, $tbl_name, $filter, $validTxnList) { - $this->send_get_num_partitions_by_filter($db_name, $tbl_name, $filter); + $this->send_get_num_partitions_by_filter($db_name, $tbl_name, $filter, $validTxnList); return $this->recv_get_num_partitions_by_filter(); } - public function send_get_num_partitions_by_filter($db_name, $tbl_name, $filter) + public function send_get_num_partitions_by_filter($db_name, $tbl_name, $filter, $validTxnList) { $args = new \metastore\ThriftHiveMetastore_get_num_partitions_by_filter_args(); $args->db_name = $db_name; $args->tbl_name = $tbl_name; $args->filter = $filter; + $args->validTxnList = $validTxnList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -6551,18 +6590,19 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("get_num_partitions_by_filter failed: unknown result"); } - public function get_partitions_by_names($db_name, $tbl_name, array $names) + public function get_partitions_by_names($db_name, $tbl_name, array $names, $validTxnList) { - $this->send_get_partitions_by_names($db_name, $tbl_name, $names); + $this->send_get_partitions_by_names($db_name, $tbl_name, $names, $validTxnList); return $this->recv_get_partitions_by_names(); } - public function send_get_partitions_by_names($db_name, $tbl_name, array $names) + public function 
send_get_partitions_by_names($db_name, $tbl_name, array $names, $validTxnList) { $args = new \metastore\ThriftHiveMetastore_get_partitions_by_names_args(); $args->db_name = $db_name; $args->tbl_name = $tbl_name; $args->names = $names; + $args->validTxnList = $validTxnList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -8017,18 +8057,19 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("update_partition_column_statistics_req failed: unknown result"); } - public function get_table_column_statistics($db_name, $tbl_name, $col_name) + public function get_table_column_statistics($db_name, $tbl_name, $col_name, $validWriteIdList) { - $this->send_get_table_column_statistics($db_name, $tbl_name, $col_name); + $this->send_get_table_column_statistics($db_name, $tbl_name, $col_name, $validWriteIdList); return $this->recv_get_table_column_statistics(); } - public function send_get_table_column_statistics($db_name, $tbl_name, $col_name) + public function send_get_table_column_statistics($db_name, $tbl_name, $col_name, $validWriteIdList) { $args = new \metastore\ThriftHiveMetastore_get_table_column_statistics_args(); $args->db_name = $db_name; $args->tbl_name = $tbl_name; $args->col_name = $col_name; + $args->validWriteIdList = $validWriteIdList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -8082,19 +8123,20 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("get_table_column_statistics failed: unknown result"); } - public function get_partition_column_statistics($db_name, $tbl_name, $part_name, $col_name) + public function get_partition_column_statistics($db_name, $tbl_name, $part_name, $col_name, $validWriteIdList) { - $this->send_get_partition_column_statistics($db_name, $tbl_name, $part_name, $col_name); + $this->send_get_partition_column_statistics($db_name, $tbl_name, $part_name, $col_name, $validWriteIdList); return $this->recv_get_partition_column_statistics(); } - public function send_get_partition_column_statistics($db_name, $tbl_name, $part_name, $col_name) + public function send_get_partition_column_statistics($db_name, $tbl_name, $part_name, $col_name, $validWriteIdList) { $args = new \metastore\ThriftHiveMetastore_get_partition_column_statistics_args(); $args->db_name = $db_name; $args->tbl_name = $tbl_name; $args->part_name = $part_name; $args->col_name = $col_name; + $args->validWriteIdList = $validWriteIdList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -16526,14 +16568,14 @@ class ThriftHiveMetastore_get_databases_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size952 = 0; - $_etype955 = 0; - $xfer += $input->readListBegin($_etype955, $_size952); - for ($_i956 = 0; $_i956 < $_size952; ++$_i956) + $_size959 = 0; + $_etype962 = 0; + $xfer += $input->readListBegin($_etype962, $_size959); + for ($_i963 = 0; $_i963 < $_size959; ++$_i963) { - $elem957 = null; - $xfer += $input->readString($elem957); - $this->success []= $elem957; + $elem964 = null; + $xfer += $input->readString($elem964); + $this->success []= $elem964; } $xfer += $input->readListEnd(); } else { @@ -16569,9 +16611,9 @@ class ThriftHiveMetastore_get_databases_result { { $output->writeListBegin(TType::STRING, 
count($this->success)); { - foreach ($this->success as $iter958) + foreach ($this->success as $iter965) { - $xfer += $output->writeString($iter958); + $xfer += $output->writeString($iter965); } } $output->writeListEnd(); @@ -16702,14 +16744,14 @@ class ThriftHiveMetastore_get_all_databases_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size959 = 0; - $_etype962 = 0; - $xfer += $input->readListBegin($_etype962, $_size959); - for ($_i963 = 0; $_i963 < $_size959; ++$_i963) + $_size966 = 0; + $_etype969 = 0; + $xfer += $input->readListBegin($_etype969, $_size966); + for ($_i970 = 0; $_i970 < $_size966; ++$_i970) { - $elem964 = null; - $xfer += $input->readString($elem964); - $this->success []= $elem964; + $elem971 = null; + $xfer += $input->readString($elem971); + $this->success []= $elem971; } $xfer += $input->readListEnd(); } else { @@ -16745,9 +16787,9 @@ class ThriftHiveMetastore_get_all_databases_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter965) + foreach ($this->success as $iter972) { - $xfer += $output->writeString($iter965); + $xfer += $output->writeString($iter972); } } $output->writeListEnd(); @@ -17748,18 +17790,18 @@ class ThriftHiveMetastore_get_type_all_result { case 0: if ($ftype == TType::MAP) { $this->success = array(); - $_size966 = 0; - $_ktype967 = 0; - $_vtype968 = 0; - $xfer += $input->readMapBegin($_ktype967, $_vtype968, $_size966); - for ($_i970 = 0; $_i970 < $_size966; ++$_i970) + $_size973 = 0; + $_ktype974 = 0; + $_vtype975 = 0; + $xfer += $input->readMapBegin($_ktype974, $_vtype975, $_size973); + for ($_i977 = 0; $_i977 < $_size973; ++$_i977) { - $key971 = ''; - $val972 = new \metastore\Type(); - $xfer += $input->readString($key971); - $val972 = new \metastore\Type(); - $xfer += $val972->read($input); - $this->success[$key971] = $val972; + $key978 = ''; + $val979 = new \metastore\Type(); + $xfer += $input->readString($key978); + $val979 = new \metastore\Type(); + $xfer += $val979->read($input); + $this->success[$key978] = $val979; } $xfer += $input->readMapEnd(); } else { @@ -17795,10 +17837,10 @@ class ThriftHiveMetastore_get_type_all_result { { $output->writeMapBegin(TType::STRING, TType::STRUCT, count($this->success)); { - foreach ($this->success as $kiter973 => $viter974) + foreach ($this->success as $kiter980 => $viter981) { - $xfer += $output->writeString($kiter973); - $xfer += $viter974->write($output); + $xfer += $output->writeString($kiter980); + $xfer += $viter981->write($output); } } $output->writeMapEnd(); @@ -17828,6 +17870,10 @@ class ThriftHiveMetastore_get_fields_args { * @var string */ public $table_name = null; + /** + * @var string + */ + public $validWriteIdList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -17840,6 +17886,10 @@ class ThriftHiveMetastore_get_fields_args { 'var' => 'table_name', 'type' => TType::STRING, ), + 3 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -17849,6 +17899,9 @@ class ThriftHiveMetastore_get_fields_args { if (isset($vals['table_name'])) { $this->table_name = $vals['table_name']; } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } } } @@ -17885,6 +17938,13 @@ class ThriftHiveMetastore_get_fields_args { $xfer += $input->skip($ftype); } break; + case 3: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + 
} + break; default: $xfer += $input->skip($ftype); break; @@ -17908,6 +17968,11 @@ class ThriftHiveMetastore_get_fields_args { $xfer += $output->writeString($this->table_name); $xfer += $output->writeFieldEnd(); } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 3); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -18002,15 +18067,15 @@ class ThriftHiveMetastore_get_fields_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size975 = 0; - $_etype978 = 0; - $xfer += $input->readListBegin($_etype978, $_size975); - for ($_i979 = 0; $_i979 < $_size975; ++$_i979) + $_size982 = 0; + $_etype985 = 0; + $xfer += $input->readListBegin($_etype985, $_size982); + for ($_i986 = 0; $_i986 < $_size982; ++$_i986) { - $elem980 = null; - $elem980 = new \metastore\FieldSchema(); - $xfer += $elem980->read($input); - $this->success []= $elem980; + $elem987 = null; + $elem987 = new \metastore\FieldSchema(); + $xfer += $elem987->read($input); + $this->success []= $elem987; } $xfer += $input->readListEnd(); } else { @@ -18062,9 +18127,9 @@ class ThriftHiveMetastore_get_fields_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter981) + foreach ($this->success as $iter988) { - $xfer += $iter981->write($output); + $xfer += $iter988->write($output); } } $output->writeListEnd(); @@ -18108,6 +18173,10 @@ class ThriftHiveMetastore_get_fields_with_environment_context_args { * @var \metastore\EnvironmentContext */ public $environment_context = null; + /** + * @var string + */ + public $validWriteIdList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -18125,6 +18194,10 @@ class ThriftHiveMetastore_get_fields_with_environment_context_args { 'type' => TType::STRUCT, 'class' => '\metastore\EnvironmentContext', ), + 4 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -18137,6 +18210,9 @@ class ThriftHiveMetastore_get_fields_with_environment_context_args { if (isset($vals['environment_context'])) { $this->environment_context = $vals['environment_context']; } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } } } @@ -18181,6 +18257,13 @@ class ThriftHiveMetastore_get_fields_with_environment_context_args { $xfer += $input->skip($ftype); } break; + case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -18212,6 +18295,11 @@ class ThriftHiveMetastore_get_fields_with_environment_context_args { $xfer += $this->environment_context->write($output); $xfer += $output->writeFieldEnd(); } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 4); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -18306,15 +18394,15 @@ class ThriftHiveMetastore_get_fields_with_environment_context_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size982 = 0; - $_etype985 = 0; - $xfer += $input->readListBegin($_etype985, $_size982); - for ($_i986 = 0; $_i986 < $_size982; ++$_i986) + 
$_size989 = 0; + $_etype992 = 0; + $xfer += $input->readListBegin($_etype992, $_size989); + for ($_i993 = 0; $_i993 < $_size989; ++$_i993) { - $elem987 = null; - $elem987 = new \metastore\FieldSchema(); - $xfer += $elem987->read($input); - $this->success []= $elem987; + $elem994 = null; + $elem994 = new \metastore\FieldSchema(); + $xfer += $elem994->read($input); + $this->success []= $elem994; } $xfer += $input->readListEnd(); } else { @@ -18366,9 +18454,9 @@ class ThriftHiveMetastore_get_fields_with_environment_context_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter988) + foreach ($this->success as $iter995) { - $xfer += $iter988->write($output); + $xfer += $iter995->write($output); } } $output->writeListEnd(); @@ -18408,6 +18496,10 @@ class ThriftHiveMetastore_get_schema_args { * @var string */ public $table_name = null; + /** + * @var string + */ + public $validWriteIdList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -18420,6 +18512,10 @@ class ThriftHiveMetastore_get_schema_args { 'var' => 'table_name', 'type' => TType::STRING, ), + 3 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -18429,6 +18525,9 @@ class ThriftHiveMetastore_get_schema_args { if (isset($vals['table_name'])) { $this->table_name = $vals['table_name']; } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } } } @@ -18465,6 +18564,13 @@ class ThriftHiveMetastore_get_schema_args { $xfer += $input->skip($ftype); } break; + case 3: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -18488,6 +18594,11 @@ class ThriftHiveMetastore_get_schema_args { $xfer += $output->writeString($this->table_name); $xfer += $output->writeFieldEnd(); } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 3); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -18582,15 +18693,15 @@ class ThriftHiveMetastore_get_schema_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size989 = 0; - $_etype992 = 0; - $xfer += $input->readListBegin($_etype992, $_size989); - for ($_i993 = 0; $_i993 < $_size989; ++$_i993) + $_size996 = 0; + $_etype999 = 0; + $xfer += $input->readListBegin($_etype999, $_size996); + for ($_i1000 = 0; $_i1000 < $_size996; ++$_i1000) { - $elem994 = null; - $elem994 = new \metastore\FieldSchema(); - $xfer += $elem994->read($input); - $this->success []= $elem994; + $elem1001 = null; + $elem1001 = new \metastore\FieldSchema(); + $xfer += $elem1001->read($input); + $this->success []= $elem1001; } $xfer += $input->readListEnd(); } else { @@ -18642,9 +18753,9 @@ class ThriftHiveMetastore_get_schema_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter995) + foreach ($this->success as $iter1002) { - $xfer += $iter995->write($output); + $xfer += $iter1002->write($output); } } $output->writeListEnd(); @@ -18688,6 +18799,10 @@ class ThriftHiveMetastore_get_schema_with_environment_context_args { * @var \metastore\EnvironmentContext */ public $environment_context = null; + /** + * @var string + */ + public $validWriteIdList = null; 
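The pattern repeated across these regenerated stubs: table- and schema-level reads (get_table, get_fields, get_schema, get_table_column_statistics, get_partition_column_statistics) gain a new trailing validWriteIdList string argument, while partition-level reads (get_partition, get_partitions, get_partition_names and their _ps/_with_auth/_by_filter/_by_names variants) gain a trailing validTxnList string. Both are appended as optional STRING fields after the existing args, and the generated write() skips them when null, so a client passing null stays wire-compatible with older servers. A minimal caller-side sketch of the new shape — $protocol and the null snapshot strings are illustrative assumptions, not part of this patch:

<?php
// Assumes an already-opened Thrift transport/protocol; none of this setup
// is in the patch. Only the method signatures used below come from it.
$client = new \metastore\ThriftHiveMetastoreClient($protocol);

// Real callers would pass serialized snapshot strings obtained from the
// transaction layer; null simply omits the new field on the wire.
$validWriteIdList = null;  // serialized ValidWriteIdList (assumption)
$validTxnList     = null;  // serialized ValidTxnList (assumption)

// Table/schema reads: new trailing ValidWriteIdList argument.
$tbl    = $client->get_table('default', 'acid_tbl', $validWriteIdList);
$fields = $client->get_fields('default', 'acid_tbl', $validWriteIdList);

// Partition reads: new trailing ValidTxnList argument.
$parts = $client->get_partitions('default', 'acid_tbl', 100, $validTxnList);
$part  = $client->get_partition('default', 'acid_tbl', array('2020-01-01'), $validTxnList);

Because the new fields sit behind an `if (... !== null)` guard in the generated write path, existing callers that are regenerated but pass null produce byte-identical requests to the old bindings.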
public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -18705,6 +18820,10 @@ class ThriftHiveMetastore_get_schema_with_environment_context_args { 'type' => TType::STRUCT, 'class' => '\metastore\EnvironmentContext', ), + 4 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -18717,6 +18836,9 @@ class ThriftHiveMetastore_get_schema_with_environment_context_args { if (isset($vals['environment_context'])) { $this->environment_context = $vals['environment_context']; } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } } } @@ -18761,6 +18883,13 @@ class ThriftHiveMetastore_get_schema_with_environment_context_args { $xfer += $input->skip($ftype); } break; + case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -18792,6 +18921,11 @@ class ThriftHiveMetastore_get_schema_with_environment_context_args { $xfer += $this->environment_context->write($output); $xfer += $output->writeFieldEnd(); } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 4); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -18886,15 +19020,15 @@ class ThriftHiveMetastore_get_schema_with_environment_context_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size996 = 0; - $_etype999 = 0; - $xfer += $input->readListBegin($_etype999, $_size996); - for ($_i1000 = 0; $_i1000 < $_size996; ++$_i1000) + $_size1003 = 0; + $_etype1006 = 0; + $xfer += $input->readListBegin($_etype1006, $_size1003); + for ($_i1007 = 0; $_i1007 < $_size1003; ++$_i1007) { - $elem1001 = null; - $elem1001 = new \metastore\FieldSchema(); - $xfer += $elem1001->read($input); - $this->success []= $elem1001; + $elem1008 = null; + $elem1008 = new \metastore\FieldSchema(); + $xfer += $elem1008->read($input); + $this->success []= $elem1008; } $xfer += $input->readListEnd(); } else { @@ -18946,9 +19080,9 @@ class ThriftHiveMetastore_get_schema_with_environment_context_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1002) + foreach ($this->success as $iter1009) { - $xfer += $iter1002->write($output); + $xfer += $iter1009->write($output); } } $output->writeListEnd(); @@ -19620,15 +19754,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 2: if ($ftype == TType::LST) { $this->primaryKeys = array(); - $_size1003 = 0; - $_etype1006 = 0; - $xfer += $input->readListBegin($_etype1006, $_size1003); - for ($_i1007 = 0; $_i1007 < $_size1003; ++$_i1007) + $_size1010 = 0; + $_etype1013 = 0; + $xfer += $input->readListBegin($_etype1013, $_size1010); + for ($_i1014 = 0; $_i1014 < $_size1010; ++$_i1014) { - $elem1008 = null; - $elem1008 = new \metastore\SQLPrimaryKey(); - $xfer += $elem1008->read($input); - $this->primaryKeys []= $elem1008; + $elem1015 = null; + $elem1015 = new \metastore\SQLPrimaryKey(); + $xfer += $elem1015->read($input); + $this->primaryKeys []= $elem1015; } $xfer += $input->readListEnd(); } else { @@ -19638,15 +19772,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 3: if ($ftype == TType::LST) { $this->foreignKeys = array(); - $_size1009 = 0; - $_etype1012 = 0; - $xfer += 
$input->readListBegin($_etype1012, $_size1009); - for ($_i1013 = 0; $_i1013 < $_size1009; ++$_i1013) + $_size1016 = 0; + $_etype1019 = 0; + $xfer += $input->readListBegin($_etype1019, $_size1016); + for ($_i1020 = 0; $_i1020 < $_size1016; ++$_i1020) { - $elem1014 = null; - $elem1014 = new \metastore\SQLForeignKey(); - $xfer += $elem1014->read($input); - $this->foreignKeys []= $elem1014; + $elem1021 = null; + $elem1021 = new \metastore\SQLForeignKey(); + $xfer += $elem1021->read($input); + $this->foreignKeys []= $elem1021; } $xfer += $input->readListEnd(); } else { @@ -19656,15 +19790,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 4: if ($ftype == TType::LST) { $this->uniqueConstraints = array(); - $_size1015 = 0; - $_etype1018 = 0; - $xfer += $input->readListBegin($_etype1018, $_size1015); - for ($_i1019 = 0; $_i1019 < $_size1015; ++$_i1019) + $_size1022 = 0; + $_etype1025 = 0; + $xfer += $input->readListBegin($_etype1025, $_size1022); + for ($_i1026 = 0; $_i1026 < $_size1022; ++$_i1026) { - $elem1020 = null; - $elem1020 = new \metastore\SQLUniqueConstraint(); - $xfer += $elem1020->read($input); - $this->uniqueConstraints []= $elem1020; + $elem1027 = null; + $elem1027 = new \metastore\SQLUniqueConstraint(); + $xfer += $elem1027->read($input); + $this->uniqueConstraints []= $elem1027; } $xfer += $input->readListEnd(); } else { @@ -19674,15 +19808,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 5: if ($ftype == TType::LST) { $this->notNullConstraints = array(); - $_size1021 = 0; - $_etype1024 = 0; - $xfer += $input->readListBegin($_etype1024, $_size1021); - for ($_i1025 = 0; $_i1025 < $_size1021; ++$_i1025) + $_size1028 = 0; + $_etype1031 = 0; + $xfer += $input->readListBegin($_etype1031, $_size1028); + for ($_i1032 = 0; $_i1032 < $_size1028; ++$_i1032) { - $elem1026 = null; - $elem1026 = new \metastore\SQLNotNullConstraint(); - $xfer += $elem1026->read($input); - $this->notNullConstraints []= $elem1026; + $elem1033 = null; + $elem1033 = new \metastore\SQLNotNullConstraint(); + $xfer += $elem1033->read($input); + $this->notNullConstraints []= $elem1033; } $xfer += $input->readListEnd(); } else { @@ -19692,15 +19826,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 6: if ($ftype == TType::LST) { $this->defaultConstraints = array(); - $_size1027 = 0; - $_etype1030 = 0; - $xfer += $input->readListBegin($_etype1030, $_size1027); - for ($_i1031 = 0; $_i1031 < $_size1027; ++$_i1031) + $_size1034 = 0; + $_etype1037 = 0; + $xfer += $input->readListBegin($_etype1037, $_size1034); + for ($_i1038 = 0; $_i1038 < $_size1034; ++$_i1038) { - $elem1032 = null; - $elem1032 = new \metastore\SQLDefaultConstraint(); - $xfer += $elem1032->read($input); - $this->defaultConstraints []= $elem1032; + $elem1039 = null; + $elem1039 = new \metastore\SQLDefaultConstraint(); + $xfer += $elem1039->read($input); + $this->defaultConstraints []= $elem1039; } $xfer += $input->readListEnd(); } else { @@ -19710,15 +19844,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 7: if ($ftype == TType::LST) { $this->checkConstraints = array(); - $_size1033 = 0; - $_etype1036 = 0; - $xfer += $input->readListBegin($_etype1036, $_size1033); - for ($_i1037 = 0; $_i1037 < $_size1033; ++$_i1037) + $_size1040 = 0; + $_etype1043 = 0; + $xfer += $input->readListBegin($_etype1043, $_size1040); + for ($_i1044 = 0; $_i1044 < $_size1040; ++$_i1044) { - $elem1038 = null; - $elem1038 = new \metastore\SQLCheckConstraint(); - $xfer += 
$elem1038->read($input); - $this->checkConstraints []= $elem1038; + $elem1045 = null; + $elem1045 = new \metastore\SQLCheckConstraint(); + $xfer += $elem1045->read($input); + $this->checkConstraints []= $elem1045; } $xfer += $input->readListEnd(); } else { @@ -19754,9 +19888,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->primaryKeys)); { - foreach ($this->primaryKeys as $iter1039) + foreach ($this->primaryKeys as $iter1046) { - $xfer += $iter1039->write($output); + $xfer += $iter1046->write($output); } } $output->writeListEnd(); @@ -19771,9 +19905,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->foreignKeys)); { - foreach ($this->foreignKeys as $iter1040) + foreach ($this->foreignKeys as $iter1047) { - $xfer += $iter1040->write($output); + $xfer += $iter1047->write($output); } } $output->writeListEnd(); @@ -19788,9 +19922,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->uniqueConstraints)); { - foreach ($this->uniqueConstraints as $iter1041) + foreach ($this->uniqueConstraints as $iter1048) { - $xfer += $iter1041->write($output); + $xfer += $iter1048->write($output); } } $output->writeListEnd(); @@ -19805,9 +19939,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->notNullConstraints)); { - foreach ($this->notNullConstraints as $iter1042) + foreach ($this->notNullConstraints as $iter1049) { - $xfer += $iter1042->write($output); + $xfer += $iter1049->write($output); } } $output->writeListEnd(); @@ -19822,9 +19956,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->defaultConstraints)); { - foreach ($this->defaultConstraints as $iter1043) + foreach ($this->defaultConstraints as $iter1050) { - $xfer += $iter1043->write($output); + $xfer += $iter1050->write($output); } } $output->writeListEnd(); @@ -19839,9 +19973,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->checkConstraints)); { - foreach ($this->checkConstraints as $iter1044) + foreach ($this->checkConstraints as $iter1051) { - $xfer += $iter1044->write($output); + $xfer += $iter1051->write($output); } } $output->writeListEnd(); @@ -21841,14 +21975,14 @@ class ThriftHiveMetastore_truncate_table_args { case 3: if ($ftype == TType::LST) { $this->partNames = array(); - $_size1045 = 0; - $_etype1048 = 0; - $xfer += $input->readListBegin($_etype1048, $_size1045); - for ($_i1049 = 0; $_i1049 < $_size1045; ++$_i1049) + $_size1052 = 0; + $_etype1055 = 0; + $xfer += $input->readListBegin($_etype1055, $_size1052); + for ($_i1056 = 0; $_i1056 < $_size1052; ++$_i1056) { - $elem1050 = null; - $xfer += $input->readString($elem1050); - $this->partNames []= $elem1050; + $elem1057 = null; + $xfer += $input->readString($elem1057); + $this->partNames []= $elem1057; } $xfer += $input->readListEnd(); } else { @@ -21886,9 +22020,9 @@ class ThriftHiveMetastore_truncate_table_args { { $output->writeListBegin(TType::STRING, count($this->partNames)); { - foreach ($this->partNames as $iter1051) + foreach ($this->partNames as $iter1058) { - $xfer += $output->writeString($iter1051); + $xfer += $output->writeString($iter1058); } } $output->writeListEnd(); @@ -22324,14 +22458,14 @@ class ThriftHiveMetastore_get_tables_result { case 0: if 
($ftype == TType::LST) { $this->success = array(); - $_size1052 = 0; - $_etype1055 = 0; - $xfer += $input->readListBegin($_etype1055, $_size1052); - for ($_i1056 = 0; $_i1056 < $_size1052; ++$_i1056) + $_size1059 = 0; + $_etype1062 = 0; + $xfer += $input->readListBegin($_etype1062, $_size1059); + for ($_i1063 = 0; $_i1063 < $_size1059; ++$_i1063) { - $elem1057 = null; - $xfer += $input->readString($elem1057); - $this->success []= $elem1057; + $elem1064 = null; + $xfer += $input->readString($elem1064); + $this->success []= $elem1064; } $xfer += $input->readListEnd(); } else { @@ -22367,9 +22501,9 @@ class ThriftHiveMetastore_get_tables_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1058) + foreach ($this->success as $iter1065) { - $xfer += $output->writeString($iter1058); + $xfer += $output->writeString($iter1065); } } $output->writeListEnd(); @@ -22571,14 +22705,14 @@ class ThriftHiveMetastore_get_tables_by_type_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1059 = 0; - $_etype1062 = 0; - $xfer += $input->readListBegin($_etype1062, $_size1059); - for ($_i1063 = 0; $_i1063 < $_size1059; ++$_i1063) + $_size1066 = 0; + $_etype1069 = 0; + $xfer += $input->readListBegin($_etype1069, $_size1066); + for ($_i1070 = 0; $_i1070 < $_size1066; ++$_i1070) { - $elem1064 = null; - $xfer += $input->readString($elem1064); - $this->success []= $elem1064; + $elem1071 = null; + $xfer += $input->readString($elem1071); + $this->success []= $elem1071; } $xfer += $input->readListEnd(); } else { @@ -22614,9 +22748,9 @@ class ThriftHiveMetastore_get_tables_by_type_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1065) + foreach ($this->success as $iter1072) { - $xfer += $output->writeString($iter1065); + $xfer += $output->writeString($iter1072); } } $output->writeListEnd(); @@ -22748,15 +22882,15 @@ class ThriftHiveMetastore_get_all_materialized_view_objects_for_rewriting_result case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1066 = 0; - $_etype1069 = 0; - $xfer += $input->readListBegin($_etype1069, $_size1066); - for ($_i1070 = 0; $_i1070 < $_size1066; ++$_i1070) + $_size1073 = 0; + $_etype1076 = 0; + $xfer += $input->readListBegin($_etype1076, $_size1073); + for ($_i1077 = 0; $_i1077 < $_size1073; ++$_i1077) { - $elem1071 = null; - $elem1071 = new \metastore\Table(); - $xfer += $elem1071->read($input); - $this->success []= $elem1071; + $elem1078 = null; + $elem1078 = new \metastore\Table(); + $xfer += $elem1078->read($input); + $this->success []= $elem1078; } $xfer += $input->readListEnd(); } else { @@ -22792,9 +22926,9 @@ class ThriftHiveMetastore_get_all_materialized_view_objects_for_rewriting_result { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1072) + foreach ($this->success as $iter1079) { - $xfer += $iter1072->write($output); + $xfer += $iter1079->write($output); } } $output->writeListEnd(); @@ -22950,14 +23084,14 @@ class ThriftHiveMetastore_get_materialized_views_for_rewriting_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1073 = 0; - $_etype1076 = 0; - $xfer += $input->readListBegin($_etype1076, $_size1073); - for ($_i1077 = 0; $_i1077 < $_size1073; ++$_i1077) + $_size1080 = 0; + $_etype1083 = 0; + $xfer += $input->readListBegin($_etype1083, $_size1080); + for ($_i1084 = 0; $_i1084 < $_size1080; ++$_i1084) { - $elem1078 = null; - $xfer += 
$input->readString($elem1078); - $this->success []= $elem1078; + $elem1085 = null; + $xfer += $input->readString($elem1085); + $this->success []= $elem1085; } $xfer += $input->readListEnd(); } else { @@ -22993,9 +23127,9 @@ class ThriftHiveMetastore_get_materialized_views_for_rewriting_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1079) + foreach ($this->success as $iter1086) { - $xfer += $output->writeString($iter1079); + $xfer += $output->writeString($iter1086); } } $output->writeListEnd(); @@ -23100,14 +23234,14 @@ class ThriftHiveMetastore_get_table_meta_args { case 3: if ($ftype == TType::LST) { $this->tbl_types = array(); - $_size1080 = 0; - $_etype1083 = 0; - $xfer += $input->readListBegin($_etype1083, $_size1080); - for ($_i1084 = 0; $_i1084 < $_size1080; ++$_i1084) + $_size1087 = 0; + $_etype1090 = 0; + $xfer += $input->readListBegin($_etype1090, $_size1087); + for ($_i1091 = 0; $_i1091 < $_size1087; ++$_i1091) { - $elem1085 = null; - $xfer += $input->readString($elem1085); - $this->tbl_types []= $elem1085; + $elem1092 = null; + $xfer += $input->readString($elem1092); + $this->tbl_types []= $elem1092; } $xfer += $input->readListEnd(); } else { @@ -23145,9 +23279,9 @@ class ThriftHiveMetastore_get_table_meta_args { { $output->writeListBegin(TType::STRING, count($this->tbl_types)); { - foreach ($this->tbl_types as $iter1086) + foreach ($this->tbl_types as $iter1093) { - $xfer += $output->writeString($iter1086); + $xfer += $output->writeString($iter1093); } } $output->writeListEnd(); @@ -23224,15 +23358,15 @@ class ThriftHiveMetastore_get_table_meta_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1087 = 0; - $_etype1090 = 0; - $xfer += $input->readListBegin($_etype1090, $_size1087); - for ($_i1091 = 0; $_i1091 < $_size1087; ++$_i1091) + $_size1094 = 0; + $_etype1097 = 0; + $xfer += $input->readListBegin($_etype1097, $_size1094); + for ($_i1098 = 0; $_i1098 < $_size1094; ++$_i1098) { - $elem1092 = null; - $elem1092 = new \metastore\TableMeta(); - $xfer += $elem1092->read($input); - $this->success []= $elem1092; + $elem1099 = null; + $elem1099 = new \metastore\TableMeta(); + $xfer += $elem1099->read($input); + $this->success []= $elem1099; } $xfer += $input->readListEnd(); } else { @@ -23268,9 +23402,9 @@ class ThriftHiveMetastore_get_table_meta_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1093) + foreach ($this->success as $iter1100) { - $xfer += $iter1093->write($output); + $xfer += $iter1100->write($output); } } $output->writeListEnd(); @@ -23426,14 +23560,14 @@ class ThriftHiveMetastore_get_all_tables_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1094 = 0; - $_etype1097 = 0; - $xfer += $input->readListBegin($_etype1097, $_size1094); - for ($_i1098 = 0; $_i1098 < $_size1094; ++$_i1098) + $_size1101 = 0; + $_etype1104 = 0; + $xfer += $input->readListBegin($_etype1104, $_size1101); + for ($_i1105 = 0; $_i1105 < $_size1101; ++$_i1105) { - $elem1099 = null; - $xfer += $input->readString($elem1099); - $this->success []= $elem1099; + $elem1106 = null; + $xfer += $input->readString($elem1106); + $this->success []= $elem1106; } $xfer += $input->readListEnd(); } else { @@ -23469,9 +23603,9 @@ class ThriftHiveMetastore_get_all_tables_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1100) + foreach ($this->success as $iter1107) { - $xfer += 
$output->writeString($iter1100); + $xfer += $output->writeString($iter1107); } } $output->writeListEnd(); @@ -23501,6 +23635,10 @@ class ThriftHiveMetastore_get_table_args { * @var string */ public $tbl_name = null; + /** + * @var string + */ + public $validWriteIdList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -23513,6 +23651,10 @@ class ThriftHiveMetastore_get_table_args { 'var' => 'tbl_name', 'type' => TType::STRING, ), + 3 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -23522,6 +23664,9 @@ class ThriftHiveMetastore_get_table_args { if (isset($vals['tbl_name'])) { $this->tbl_name = $vals['tbl_name']; } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } } } @@ -23558,6 +23703,13 @@ class ThriftHiveMetastore_get_table_args { $xfer += $input->skip($ftype); } break; + case 3: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -23581,6 +23733,11 @@ class ThriftHiveMetastore_get_table_args { $xfer += $output->writeString($this->tbl_name); $xfer += $output->writeFieldEnd(); } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 3); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -23786,14 +23943,14 @@ class ThriftHiveMetastore_get_table_objects_by_name_args { case 2: if ($ftype == TType::LST) { $this->tbl_names = array(); - $_size1101 = 0; - $_etype1104 = 0; - $xfer += $input->readListBegin($_etype1104, $_size1101); - for ($_i1105 = 0; $_i1105 < $_size1101; ++$_i1105) + $_size1108 = 0; + $_etype1111 = 0; + $xfer += $input->readListBegin($_etype1111, $_size1108); + for ($_i1112 = 0; $_i1112 < $_size1108; ++$_i1112) { - $elem1106 = null; - $xfer += $input->readString($elem1106); - $this->tbl_names []= $elem1106; + $elem1113 = null; + $xfer += $input->readString($elem1113); + $this->tbl_names []= $elem1113; } $xfer += $input->readListEnd(); } else { @@ -23826,9 +23983,9 @@ class ThriftHiveMetastore_get_table_objects_by_name_args { { $output->writeListBegin(TType::STRING, count($this->tbl_names)); { - foreach ($this->tbl_names as $iter1107) + foreach ($this->tbl_names as $iter1114) { - $xfer += $output->writeString($iter1107); + $xfer += $output->writeString($iter1114); } } $output->writeListEnd(); @@ -23893,15 +24050,15 @@ class ThriftHiveMetastore_get_table_objects_by_name_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1108 = 0; - $_etype1111 = 0; - $xfer += $input->readListBegin($_etype1111, $_size1108); - for ($_i1112 = 0; $_i1112 < $_size1108; ++$_i1112) + $_size1115 = 0; + $_etype1118 = 0; + $xfer += $input->readListBegin($_etype1118, $_size1115); + for ($_i1119 = 0; $_i1119 < $_size1115; ++$_i1119) { - $elem1113 = null; - $elem1113 = new \metastore\Table(); - $xfer += $elem1113->read($input); - $this->success []= $elem1113; + $elem1120 = null; + $elem1120 = new \metastore\Table(); + $xfer += $elem1120->read($input); + $this->success []= $elem1120; } $xfer += $input->readListEnd(); } else { @@ -23929,9 +24086,9 @@ class ThriftHiveMetastore_get_table_objects_by_name_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1114) 
+ foreach ($this->success as $iter1121) { - $xfer += $iter1114->write($output); + $xfer += $iter1121->write($output); } } $output->writeListEnd(); @@ -24088,15 +24245,15 @@ class ThriftHiveMetastore_get_tables_ext_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1115 = 0; - $_etype1118 = 0; - $xfer += $input->readListBegin($_etype1118, $_size1115); - for ($_i1119 = 0; $_i1119 < $_size1115; ++$_i1119) + $_size1122 = 0; + $_etype1125 = 0; + $xfer += $input->readListBegin($_etype1125, $_size1122); + for ($_i1126 = 0; $_i1126 < $_size1122; ++$_i1126) { - $elem1120 = null; - $elem1120 = new \metastore\ExtendedTableInfo(); - $xfer += $elem1120->read($input); - $this->success []= $elem1120; + $elem1127 = null; + $elem1127 = new \metastore\ExtendedTableInfo(); + $xfer += $elem1127->read($input); + $this->success []= $elem1127; } $xfer += $input->readListEnd(); } else { @@ -24132,9 +24289,9 @@ class ThriftHiveMetastore_get_tables_ext_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1121) + foreach ($this->success as $iter1128) { - $xfer += $iter1121->write($output); + $xfer += $iter1128->write($output); } } $output->writeListEnd(); @@ -25339,14 +25496,14 @@ class ThriftHiveMetastore_get_table_names_by_filter_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1122 = 0; - $_etype1125 = 0; - $xfer += $input->readListBegin($_etype1125, $_size1122); - for ($_i1126 = 0; $_i1126 < $_size1122; ++$_i1126) + $_size1129 = 0; + $_etype1132 = 0; + $xfer += $input->readListBegin($_etype1132, $_size1129); + for ($_i1133 = 0; $_i1133 < $_size1129; ++$_i1133) { - $elem1127 = null; - $xfer += $input->readString($elem1127); - $this->success []= $elem1127; + $elem1134 = null; + $xfer += $input->readString($elem1134); + $this->success []= $elem1134; } $xfer += $input->readListEnd(); } else { @@ -25398,9 +25555,9 @@ class ThriftHiveMetastore_get_table_names_by_filter_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1128) + foreach ($this->success as $iter1135) { - $xfer += $output->writeString($iter1128); + $xfer += $output->writeString($iter1135); } } $output->writeListEnd(); @@ -26923,15 +27080,15 @@ class ThriftHiveMetastore_add_partitions_args { case 1: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size1129 = 0; - $_etype1132 = 0; - $xfer += $input->readListBegin($_etype1132, $_size1129); - for ($_i1133 = 0; $_i1133 < $_size1129; ++$_i1133) + $_size1136 = 0; + $_etype1139 = 0; + $xfer += $input->readListBegin($_etype1139, $_size1136); + for ($_i1140 = 0; $_i1140 < $_size1136; ++$_i1140) { - $elem1134 = null; - $elem1134 = new \metastore\Partition(); - $xfer += $elem1134->read($input); - $this->new_parts []= $elem1134; + $elem1141 = null; + $elem1141 = new \metastore\Partition(); + $xfer += $elem1141->read($input); + $this->new_parts []= $elem1141; } $xfer += $input->readListEnd(); } else { @@ -26959,9 +27116,9 @@ class ThriftHiveMetastore_add_partitions_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter1135) + foreach ($this->new_parts as $iter1142) { - $xfer += $iter1135->write($output); + $xfer += $iter1142->write($output); } } $output->writeListEnd(); @@ -27176,15 +27333,15 @@ class ThriftHiveMetastore_add_partitions_pspec_args { case 1: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size1136 = 0; - $_etype1139 = 0; - $xfer += 
$input->readListBegin($_etype1139, $_size1136); - for ($_i1140 = 0; $_i1140 < $_size1136; ++$_i1140) + $_size1143 = 0; + $_etype1146 = 0; + $xfer += $input->readListBegin($_etype1146, $_size1143); + for ($_i1147 = 0; $_i1147 < $_size1143; ++$_i1147) { - $elem1141 = null; - $elem1141 = new \metastore\PartitionSpec(); - $xfer += $elem1141->read($input); - $this->new_parts []= $elem1141; + $elem1148 = null; + $elem1148 = new \metastore\PartitionSpec(); + $xfer += $elem1148->read($input); + $this->new_parts []= $elem1148; } $xfer += $input->readListEnd(); } else { @@ -27212,9 +27369,9 @@ class ThriftHiveMetastore_add_partitions_pspec_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter1142) + foreach ($this->new_parts as $iter1149) { - $xfer += $iter1142->write($output); + $xfer += $iter1149->write($output); } } $output->writeListEnd(); @@ -27464,14 +27621,14 @@ class ThriftHiveMetastore_append_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1143 = 0; - $_etype1146 = 0; - $xfer += $input->readListBegin($_etype1146, $_size1143); - for ($_i1147 = 0; $_i1147 < $_size1143; ++$_i1147) + $_size1150 = 0; + $_etype1153 = 0; + $xfer += $input->readListBegin($_etype1153, $_size1150); + for ($_i1154 = 0; $_i1154 < $_size1150; ++$_i1154) { - $elem1148 = null; - $xfer += $input->readString($elem1148); - $this->part_vals []= $elem1148; + $elem1155 = null; + $xfer += $input->readString($elem1155); + $this->part_vals []= $elem1155; } $xfer += $input->readListEnd(); } else { @@ -27509,9 +27666,9 @@ class ThriftHiveMetastore_append_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1149) + foreach ($this->part_vals as $iter1156) { - $xfer += $output->writeString($iter1149); + $xfer += $output->writeString($iter1156); } } $output->writeListEnd(); @@ -28013,14 +28170,14 @@ class ThriftHiveMetastore_append_partition_with_environment_context_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1150 = 0; - $_etype1153 = 0; - $xfer += $input->readListBegin($_etype1153, $_size1150); - for ($_i1154 = 0; $_i1154 < $_size1150; ++$_i1154) + $_size1157 = 0; + $_etype1160 = 0; + $xfer += $input->readListBegin($_etype1160, $_size1157); + for ($_i1161 = 0; $_i1161 < $_size1157; ++$_i1161) { - $elem1155 = null; - $xfer += $input->readString($elem1155); - $this->part_vals []= $elem1155; + $elem1162 = null; + $xfer += $input->readString($elem1162); + $this->part_vals []= $elem1162; } $xfer += $input->readListEnd(); } else { @@ -28066,9 +28223,9 @@ class ThriftHiveMetastore_append_partition_with_environment_context_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1156) + foreach ($this->part_vals as $iter1163) { - $xfer += $output->writeString($iter1156); + $xfer += $output->writeString($iter1163); } } $output->writeListEnd(); @@ -28922,14 +29079,14 @@ class ThriftHiveMetastore_drop_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1157 = 0; - $_etype1160 = 0; - $xfer += $input->readListBegin($_etype1160, $_size1157); - for ($_i1161 = 0; $_i1161 < $_size1157; ++$_i1161) + $_size1164 = 0; + $_etype1167 = 0; + $xfer += $input->readListBegin($_etype1167, $_size1164); + for ($_i1168 = 0; $_i1168 < $_size1164; ++$_i1168) { - $elem1162 = null; - $xfer += $input->readString($elem1162); - $this->part_vals []= $elem1162; + $elem1169 = null; + 
$xfer += $input->readString($elem1169); + $this->part_vals []= $elem1169; } $xfer += $input->readListEnd(); } else { @@ -28974,9 +29131,9 @@ class ThriftHiveMetastore_drop_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1163) + foreach ($this->part_vals as $iter1170) { - $xfer += $output->writeString($iter1163); + $xfer += $output->writeString($iter1170); } } $output->writeListEnd(); @@ -29229,14 +29386,14 @@ class ThriftHiveMetastore_drop_partition_with_environment_context_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1164 = 0; - $_etype1167 = 0; - $xfer += $input->readListBegin($_etype1167, $_size1164); - for ($_i1168 = 0; $_i1168 < $_size1164; ++$_i1168) + $_size1171 = 0; + $_etype1174 = 0; + $xfer += $input->readListBegin($_etype1174, $_size1171); + for ($_i1175 = 0; $_i1175 < $_size1171; ++$_i1175) { - $elem1169 = null; - $xfer += $input->readString($elem1169); - $this->part_vals []= $elem1169; + $elem1176 = null; + $xfer += $input->readString($elem1176); + $this->part_vals []= $elem1176; } $xfer += $input->readListEnd(); } else { @@ -29289,9 +29446,9 @@ class ThriftHiveMetastore_drop_partition_with_environment_context_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1170) + foreach ($this->part_vals as $iter1177) { - $xfer += $output->writeString($iter1170); + $xfer += $output->writeString($iter1177); } } $output->writeListEnd(); @@ -30234,6 +30391,10 @@ class ThriftHiveMetastore_get_partition_args { * @var string[] */ public $part_vals = null; + /** + * @var string + */ + public $validTxnList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -30254,6 +30415,10 @@ class ThriftHiveMetastore_get_partition_args { 'type' => TType::STRING, ), ), + 4 => array( + 'var' => 'validTxnList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -30266,6 +30431,9 @@ class ThriftHiveMetastore_get_partition_args { if (isset($vals['part_vals'])) { $this->part_vals = $vals['part_vals']; } + if (isset($vals['validTxnList'])) { + $this->validTxnList = $vals['validTxnList']; + } } } @@ -30305,20 +30473,27 @@ class ThriftHiveMetastore_get_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1171 = 0; - $_etype1174 = 0; - $xfer += $input->readListBegin($_etype1174, $_size1171); - for ($_i1175 = 0; $_i1175 < $_size1171; ++$_i1175) + $_size1178 = 0; + $_etype1181 = 0; + $xfer += $input->readListBegin($_etype1181, $_size1178); + for ($_i1182 = 0; $_i1182 < $_size1178; ++$_i1182) { - $elem1176 = null; - $xfer += $input->readString($elem1176); - $this->part_vals []= $elem1176; + $elem1183 = null; + $xfer += $input->readString($elem1183); + $this->part_vals []= $elem1183; } $xfer += $input->readListEnd(); } else { $xfer += $input->skip($ftype); } break; + case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validTxnList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -30350,15 +30525,20 @@ class ThriftHiveMetastore_get_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1177) + foreach ($this->part_vals as $iter1184) { - $xfer += $output->writeString($iter1177); + $xfer += $output->writeString($iter1184); } } $output->writeListEnd(); } $xfer += $output->writeFieldEnd(); } + if ($this->validTxnList !== null) { + 
$xfer += $output->writeFieldBegin('validTxnList', TType::STRING, 4); + $xfer += $output->writeString($this->validTxnList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -30594,17 +30774,17 @@ class ThriftHiveMetastore_exchange_partition_args { case 1: if ($ftype == TType::MAP) { $this->partitionSpecs = array(); - $_size1178 = 0; - $_ktype1179 = 0; - $_vtype1180 = 0; - $xfer += $input->readMapBegin($_ktype1179, $_vtype1180, $_size1178); - for ($_i1182 = 0; $_i1182 < $_size1178; ++$_i1182) + $_size1185 = 0; + $_ktype1186 = 0; + $_vtype1187 = 0; + $xfer += $input->readMapBegin($_ktype1186, $_vtype1187, $_size1185); + for ($_i1189 = 0; $_i1189 < $_size1185; ++$_i1189) { - $key1183 = ''; - $val1184 = ''; - $xfer += $input->readString($key1183); - $xfer += $input->readString($val1184); - $this->partitionSpecs[$key1183] = $val1184; + $key1190 = ''; + $val1191 = ''; + $xfer += $input->readString($key1190); + $xfer += $input->readString($val1191); + $this->partitionSpecs[$key1190] = $val1191; } $xfer += $input->readMapEnd(); } else { @@ -30660,10 +30840,10 @@ class ThriftHiveMetastore_exchange_partition_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs)); { - foreach ($this->partitionSpecs as $kiter1185 => $viter1186) + foreach ($this->partitionSpecs as $kiter1192 => $viter1193) { - $xfer += $output->writeString($kiter1185); - $xfer += $output->writeString($viter1186); + $xfer += $output->writeString($kiter1192); + $xfer += $output->writeString($viter1193); } } $output->writeMapEnd(); @@ -30975,17 +31155,17 @@ class ThriftHiveMetastore_exchange_partitions_args { case 1: if ($ftype == TType::MAP) { $this->partitionSpecs = array(); - $_size1187 = 0; - $_ktype1188 = 0; - $_vtype1189 = 0; - $xfer += $input->readMapBegin($_ktype1188, $_vtype1189, $_size1187); - for ($_i1191 = 0; $_i1191 < $_size1187; ++$_i1191) + $_size1194 = 0; + $_ktype1195 = 0; + $_vtype1196 = 0; + $xfer += $input->readMapBegin($_ktype1195, $_vtype1196, $_size1194); + for ($_i1198 = 0; $_i1198 < $_size1194; ++$_i1198) { - $key1192 = ''; - $val1193 = ''; - $xfer += $input->readString($key1192); - $xfer += $input->readString($val1193); - $this->partitionSpecs[$key1192] = $val1193; + $key1199 = ''; + $val1200 = ''; + $xfer += $input->readString($key1199); + $xfer += $input->readString($val1200); + $this->partitionSpecs[$key1199] = $val1200; } $xfer += $input->readMapEnd(); } else { @@ -31041,10 +31221,10 @@ class ThriftHiveMetastore_exchange_partitions_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs)); { - foreach ($this->partitionSpecs as $kiter1194 => $viter1195) + foreach ($this->partitionSpecs as $kiter1201 => $viter1202) { - $xfer += $output->writeString($kiter1194); - $xfer += $output->writeString($viter1195); + $xfer += $output->writeString($kiter1201); + $xfer += $output->writeString($viter1202); } } $output->writeMapEnd(); @@ -31177,15 +31357,15 @@ class ThriftHiveMetastore_exchange_partitions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1196 = 0; - $_etype1199 = 0; - $xfer += $input->readListBegin($_etype1199, $_size1196); - for ($_i1200 = 0; $_i1200 < $_size1196; ++$_i1200) + $_size1203 = 0; + $_etype1206 = 0; + $xfer += $input->readListBegin($_etype1206, $_size1203); + for ($_i1207 = 0; $_i1207 < $_size1203; ++$_i1207) { - $elem1201 = null; - $elem1201 = new \metastore\Partition(); - $xfer += $elem1201->read($input); - 
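[Note on the hunks above: ThriftHiveMetastore_get_partition_args gains an optional validTxnList string as Thrift field 4, wired through the $_TSPEC array, the constructor, read() (case 4), and write(). A minimal sketch of a caller populating it follows; the db_name/tbl_name fields and all example values are assumptions, since only part_vals and validTxnList appear in this excerpt, and the namespace is inferred from the \metastore references above.]

<?php
// Sketch only. The generated __construct($vals=null) shown above accepts an
// associative array; 'db_name'/'tbl_name' (fields 1-2) are assumed from the
// standard metastore IDL and sit outside this excerpt.
$validTxnList = '...'; // a serialized ValidTxnList obtained from the server; format not shown here
$args = new \metastore\ThriftHiveMetastore_get_partition_args(array(
  'db_name'      => 'default',            // assumed field 1
  'tbl_name'     => 'web_logs',           // assumed field 2, hypothetical table
  'part_vals'    => array('2024-01-01'),  // field 3, per the hunk above
  'validTxnList' => $validTxnList,        // new optional field 4
));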
$this->success []= $elem1201; + $elem1208 = null; + $elem1208 = new \metastore\Partition(); + $xfer += $elem1208->read($input); + $this->success []= $elem1208; } $xfer += $input->readListEnd(); } else { @@ -31245,9 +31425,9 @@ class ThriftHiveMetastore_exchange_partitions_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1202) + foreach ($this->success as $iter1209) { - $xfer += $iter1202->write($output); + $xfer += $iter1209->write($output); } } $output->writeListEnd(); @@ -31304,6 +31484,10 @@ class ThriftHiveMetastore_get_partition_with_auth_args { * @var string[] */ public $group_names = null; + /** + * @var string + */ + public $validTxnList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -31336,6 +31520,10 @@ class ThriftHiveMetastore_get_partition_with_auth_args { 'type' => TType::STRING, ), ), + 6 => array( + 'var' => 'validTxnList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -31354,6 +31542,9 @@ class ThriftHiveMetastore_get_partition_with_auth_args { if (isset($vals['group_names'])) { $this->group_names = $vals['group_names']; } + if (isset($vals['validTxnList'])) { + $this->validTxnList = $vals['validTxnList']; + } } } @@ -31393,14 +31584,14 @@ class ThriftHiveMetastore_get_partition_with_auth_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1203 = 0; - $_etype1206 = 0; - $xfer += $input->readListBegin($_etype1206, $_size1203); - for ($_i1207 = 0; $_i1207 < $_size1203; ++$_i1207) + $_size1210 = 0; + $_etype1213 = 0; + $xfer += $input->readListBegin($_etype1213, $_size1210); + for ($_i1214 = 0; $_i1214 < $_size1210; ++$_i1214) { - $elem1208 = null; - $xfer += $input->readString($elem1208); - $this->part_vals []= $elem1208; + $elem1215 = null; + $xfer += $input->readString($elem1215); + $this->part_vals []= $elem1215; } $xfer += $input->readListEnd(); } else { @@ -31417,20 +31608,27 @@ class ThriftHiveMetastore_get_partition_with_auth_args { case 5: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1209 = 0; - $_etype1212 = 0; - $xfer += $input->readListBegin($_etype1212, $_size1209); - for ($_i1213 = 0; $_i1213 < $_size1209; ++$_i1213) + $_size1216 = 0; + $_etype1219 = 0; + $xfer += $input->readListBegin($_etype1219, $_size1216); + for ($_i1220 = 0; $_i1220 < $_size1216; ++$_i1220) { - $elem1214 = null; - $xfer += $input->readString($elem1214); - $this->group_names []= $elem1214; + $elem1221 = null; + $xfer += $input->readString($elem1221); + $this->group_names []= $elem1221; } $xfer += $input->readListEnd(); } else { $xfer += $input->skip($ftype); } break; + case 6: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validTxnList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -31462,9 +31660,9 @@ class ThriftHiveMetastore_get_partition_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1215) + foreach ($this->part_vals as $iter1222) { - $xfer += $output->writeString($iter1215); + $xfer += $output->writeString($iter1222); } } $output->writeListEnd(); @@ -31484,15 +31682,20 @@ class ThriftHiveMetastore_get_partition_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1216) + foreach ($this->group_names as $iter1223) { - $xfer += $output->writeString($iter1216); + $xfer += 
$output->writeString($iter1223); } } $output->writeListEnd(); } $xfer += $output->writeFieldEnd(); } + if ($this->validTxnList !== null) { + $xfer += $output->writeFieldBegin('validTxnList', TType::STRING, 6); + $xfer += $output->writeString($this->validTxnList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -31645,6 +31848,10 @@ class ThriftHiveMetastore_get_partition_by_name_args { * @var string */ public $part_name = null; + /** + * @var string + */ + public $validTxnList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -31661,6 +31868,10 @@ class ThriftHiveMetastore_get_partition_by_name_args { 'var' => 'part_name', 'type' => TType::STRING, ), + 4 => array( + 'var' => 'validTxnList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -31673,6 +31884,9 @@ class ThriftHiveMetastore_get_partition_by_name_args { if (isset($vals['part_name'])) { $this->part_name = $vals['part_name']; } + if (isset($vals['validTxnList'])) { + $this->validTxnList = $vals['validTxnList']; + } } } @@ -31716,6 +31930,13 @@ class ThriftHiveMetastore_get_partition_by_name_args { $xfer += $input->skip($ftype); } break; + case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validTxnList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -31744,6 +31965,11 @@ class ThriftHiveMetastore_get_partition_by_name_args { $xfer += $output->writeString($this->part_name); $xfer += $output->writeFieldEnd(); } + if ($this->validTxnList !== null) { + $xfer += $output->writeFieldBegin('validTxnList', TType::STRING, 4); + $xfer += $output->writeString($this->validTxnList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -31896,6 +32122,10 @@ class ThriftHiveMetastore_get_partitions_args { * @var int */ public $max_parts = -1; + /** + * @var string + */ + public $validTxnList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -31912,6 +32142,10 @@ class ThriftHiveMetastore_get_partitions_args { 'var' => 'max_parts', 'type' => TType::I16, ), + 4 => array( + 'var' => 'validTxnList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -31924,6 +32158,9 @@ class ThriftHiveMetastore_get_partitions_args { if (isset($vals['max_parts'])) { $this->max_parts = $vals['max_parts']; } + if (isset($vals['validTxnList'])) { + $this->validTxnList = $vals['validTxnList']; + } } } @@ -31967,6 +32204,13 @@ class ThriftHiveMetastore_get_partitions_args { $xfer += $input->skip($ftype); } break; + case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validTxnList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -31995,6 +32239,11 @@ class ThriftHiveMetastore_get_partitions_args { $xfer += $output->writeI16($this->max_parts); $xfer += $output->writeFieldEnd(); } + if ($this->validTxnList !== null) { + $xfer += $output->writeFieldBegin('validTxnList', TType::STRING, 4); + $xfer += $output->writeString($this->validTxnList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -32077,15 +32326,15 @@ class ThriftHiveMetastore_get_partitions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1217 = 0; - $_etype1220 = 0; - $xfer += 
$input->readListBegin($_etype1220, $_size1217); - for ($_i1221 = 0; $_i1221 < $_size1217; ++$_i1221) + $_size1224 = 0; + $_etype1227 = 0; + $xfer += $input->readListBegin($_etype1227, $_size1224); + for ($_i1228 = 0; $_i1228 < $_size1224; ++$_i1228) { - $elem1222 = null; - $elem1222 = new \metastore\Partition(); - $xfer += $elem1222->read($input); - $this->success []= $elem1222; + $elem1229 = null; + $elem1229 = new \metastore\Partition(); + $xfer += $elem1229->read($input); + $this->success []= $elem1229; } $xfer += $input->readListEnd(); } else { @@ -32129,9 +32378,9 @@ class ThriftHiveMetastore_get_partitions_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1223) + foreach ($this->success as $iter1230) { - $xfer += $iter1223->write($output); + $xfer += $iter1230->write($output); } } $output->writeListEnd(); @@ -32178,6 +32427,10 @@ class ThriftHiveMetastore_get_partitions_with_auth_args { * @var string[] */ public $group_names = null; + /** + * @var string + */ + public $validTxnList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -32206,6 +32459,10 @@ class ThriftHiveMetastore_get_partitions_with_auth_args { 'type' => TType::STRING, ), ), + 6 => array( + 'var' => 'validTxnList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -32224,6 +32481,9 @@ class ThriftHiveMetastore_get_partitions_with_auth_args { if (isset($vals['group_names'])) { $this->group_names = $vals['group_names']; } + if (isset($vals['validTxnList'])) { + $this->validTxnList = $vals['validTxnList']; + } } } @@ -32277,20 +32537,27 @@ class ThriftHiveMetastore_get_partitions_with_auth_args { case 5: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1224 = 0; - $_etype1227 = 0; - $xfer += $input->readListBegin($_etype1227, $_size1224); - for ($_i1228 = 0; $_i1228 < $_size1224; ++$_i1228) + $_size1231 = 0; + $_etype1234 = 0; + $xfer += $input->readListBegin($_etype1234, $_size1231); + for ($_i1235 = 0; $_i1235 < $_size1231; ++$_i1235) { - $elem1229 = null; - $xfer += $input->readString($elem1229); - $this->group_names []= $elem1229; + $elem1236 = null; + $xfer += $input->readString($elem1236); + $this->group_names []= $elem1236; } $xfer += $input->readListEnd(); } else { $xfer += $input->skip($ftype); } break; + case 6: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validTxnList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -32332,15 +32599,20 @@ class ThriftHiveMetastore_get_partitions_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1230) + foreach ($this->group_names as $iter1237) { - $xfer += $output->writeString($iter1230); + $xfer += $output->writeString($iter1237); } } $output->writeListEnd(); } $xfer += $output->writeFieldEnd(); } + if ($this->validTxnList !== null) { + $xfer += $output->writeFieldBegin('validTxnList', TType::STRING, 6); + $xfer += $output->writeString($this->validTxnList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -32423,15 +32695,15 @@ class ThriftHiveMetastore_get_partitions_with_auth_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1231 = 0; - $_etype1234 = 0; - $xfer += $input->readListBegin($_etype1234, $_size1231); - for ($_i1235 = 0; $_i1235 < $_size1231; ++$_i1235) + $_size1238 = 0; + 
$_etype1241 = 0; + $xfer += $input->readListBegin($_etype1241, $_size1238); + for ($_i1242 = 0; $_i1242 < $_size1238; ++$_i1242) { - $elem1236 = null; - $elem1236 = new \metastore\Partition(); - $xfer += $elem1236->read($input); - $this->success []= $elem1236; + $elem1243 = null; + $elem1243 = new \metastore\Partition(); + $xfer += $elem1243->read($input); + $this->success []= $elem1243; } $xfer += $input->readListEnd(); } else { @@ -32475,9 +32747,9 @@ class ThriftHiveMetastore_get_partitions_with_auth_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1237) + foreach ($this->success as $iter1244) { - $xfer += $iter1237->write($output); + $xfer += $iter1244->write($output); } } $output->writeListEnd(); @@ -32516,6 +32788,10 @@ class ThriftHiveMetastore_get_partitions_pspec_args { * @var int */ public $max_parts = -1; + /** + * @var string + */ + public $validTxnList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -32532,6 +32808,10 @@ class ThriftHiveMetastore_get_partitions_pspec_args { 'var' => 'max_parts', 'type' => TType::I32, ), + 4 => array( + 'var' => 'validTxnList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -32544,6 +32824,9 @@ class ThriftHiveMetastore_get_partitions_pspec_args { if (isset($vals['max_parts'])) { $this->max_parts = $vals['max_parts']; } + if (isset($vals['validTxnList'])) { + $this->validTxnList = $vals['validTxnList']; + } } } @@ -32587,6 +32870,13 @@ class ThriftHiveMetastore_get_partitions_pspec_args { $xfer += $input->skip($ftype); } break; + case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validTxnList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -32615,6 +32905,11 @@ class ThriftHiveMetastore_get_partitions_pspec_args { $xfer += $output->writeI32($this->max_parts); $xfer += $output->writeFieldEnd(); } + if ($this->validTxnList !== null) { + $xfer += $output->writeFieldBegin('validTxnList', TType::STRING, 4); + $xfer += $output->writeString($this->validTxnList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -32697,15 +32992,15 @@ class ThriftHiveMetastore_get_partitions_pspec_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1238 = 0; - $_etype1241 = 0; - $xfer += $input->readListBegin($_etype1241, $_size1238); - for ($_i1242 = 0; $_i1242 < $_size1238; ++$_i1242) + $_size1245 = 0; + $_etype1248 = 0; + $xfer += $input->readListBegin($_etype1248, $_size1245); + for ($_i1249 = 0; $_i1249 < $_size1245; ++$_i1249) { - $elem1243 = null; - $elem1243 = new \metastore\PartitionSpec(); - $xfer += $elem1243->read($input); - $this->success []= $elem1243; + $elem1250 = null; + $elem1250 = new \metastore\PartitionSpec(); + $xfer += $elem1250->read($input); + $this->success []= $elem1250; } $xfer += $input->readListEnd(); } else { @@ -32749,9 +33044,9 @@ class ThriftHiveMetastore_get_partitions_pspec_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1244) + foreach ($this->success as $iter1251) { - $xfer += $iter1244->write($output); + $xfer += $iter1251->write($output); } } $output->writeListEnd(); @@ -32790,6 +33085,10 @@ class ThriftHiveMetastore_get_partition_names_args { * @var int */ public $max_parts = -1; + /** + * @var string + */ + public $validTxnList = null; public function 
__construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -32806,6 +33105,10 @@ class ThriftHiveMetastore_get_partition_names_args { 'var' => 'max_parts', 'type' => TType::I16, ), + 4 => array( + 'var' => 'validTxnList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -32818,6 +33121,9 @@ class ThriftHiveMetastore_get_partition_names_args { if (isset($vals['max_parts'])) { $this->max_parts = $vals['max_parts']; } + if (isset($vals['validTxnList'])) { + $this->validTxnList = $vals['validTxnList']; + } } } @@ -32861,6 +33167,13 @@ class ThriftHiveMetastore_get_partition_names_args { $xfer += $input->skip($ftype); } break; + case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validTxnList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -32889,6 +33202,11 @@ class ThriftHiveMetastore_get_partition_names_args { $xfer += $output->writeI16($this->max_parts); $xfer += $output->writeFieldEnd(); } + if ($this->validTxnList !== null) { + $xfer += $output->writeFieldBegin('validTxnList', TType::STRING, 4); + $xfer += $output->writeString($this->validTxnList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -32970,14 +33288,14 @@ class ThriftHiveMetastore_get_partition_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1245 = 0; - $_etype1248 = 0; - $xfer += $input->readListBegin($_etype1248, $_size1245); - for ($_i1249 = 0; $_i1249 < $_size1245; ++$_i1249) + $_size1252 = 0; + $_etype1255 = 0; + $xfer += $input->readListBegin($_etype1255, $_size1252); + for ($_i1256 = 0; $_i1256 < $_size1252; ++$_i1256) { - $elem1250 = null; - $xfer += $input->readString($elem1250); - $this->success []= $elem1250; + $elem1257 = null; + $xfer += $input->readString($elem1257); + $this->success []= $elem1257; } $xfer += $input->readListEnd(); } else { @@ -33021,9 +33339,9 @@ class ThriftHiveMetastore_get_partition_names_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1251) + foreach ($this->success as $iter1258) { - $xfer += $output->writeString($iter1251); + $xfer += $output->writeString($iter1258); } } $output->writeListEnd(); @@ -33276,6 +33594,10 @@ class ThriftHiveMetastore_get_partitions_ps_args { * @var int */ public $max_parts = -1; + /** + * @var string + */ + public $validTxnList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -33300,6 +33622,10 @@ class ThriftHiveMetastore_get_partitions_ps_args { 'var' => 'max_parts', 'type' => TType::I16, ), + 5 => array( + 'var' => 'validTxnList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -33315,6 +33641,9 @@ class ThriftHiveMetastore_get_partitions_ps_args { if (isset($vals['max_parts'])) { $this->max_parts = $vals['max_parts']; } + if (isset($vals['validTxnList'])) { + $this->validTxnList = $vals['validTxnList']; + } } } @@ -33354,14 +33683,14 @@ class ThriftHiveMetastore_get_partitions_ps_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1252 = 0; - $_etype1255 = 0; - $xfer += $input->readListBegin($_etype1255, $_size1252); - for ($_i1256 = 0; $_i1256 < $_size1252; ++$_i1256) + $_size1259 = 0; + $_etype1262 = 0; + $xfer += $input->readListBegin($_etype1262, $_size1259); + for ($_i1263 = 0; $_i1263 < $_size1259; ++$_i1263) { - $elem1257 = null; - $xfer += $input->readString($elem1257); - $this->part_vals []= 
$elem1257; + $elem1264 = null; + $xfer += $input->readString($elem1264); + $this->part_vals []= $elem1264; } $xfer += $input->readListEnd(); } else { @@ -33375,6 +33704,13 @@ class ThriftHiveMetastore_get_partitions_ps_args { $xfer += $input->skip($ftype); } break; + case 5: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validTxnList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -33406,9 +33742,9 @@ class ThriftHiveMetastore_get_partitions_ps_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1258) + foreach ($this->part_vals as $iter1265) { - $xfer += $output->writeString($iter1258); + $xfer += $output->writeString($iter1265); } } $output->writeListEnd(); @@ -33420,6 +33756,11 @@ class ThriftHiveMetastore_get_partitions_ps_args { $xfer += $output->writeI16($this->max_parts); $xfer += $output->writeFieldEnd(); } + if ($this->validTxnList !== null) { + $xfer += $output->writeFieldBegin('validTxnList', TType::STRING, 5); + $xfer += $output->writeString($this->validTxnList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -33502,15 +33843,15 @@ class ThriftHiveMetastore_get_partitions_ps_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1259 = 0; - $_etype1262 = 0; - $xfer += $input->readListBegin($_etype1262, $_size1259); - for ($_i1263 = 0; $_i1263 < $_size1259; ++$_i1263) + $_size1266 = 0; + $_etype1269 = 0; + $xfer += $input->readListBegin($_etype1269, $_size1266); + for ($_i1270 = 0; $_i1270 < $_size1266; ++$_i1270) { - $elem1264 = null; - $elem1264 = new \metastore\Partition(); - $xfer += $elem1264->read($input); - $this->success []= $elem1264; + $elem1271 = null; + $elem1271 = new \metastore\Partition(); + $xfer += $elem1271->read($input); + $this->success []= $elem1271; } $xfer += $input->readListEnd(); } else { @@ -33554,9 +33895,9 @@ class ThriftHiveMetastore_get_partitions_ps_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1265) + foreach ($this->success as $iter1272) { - $xfer += $iter1265->write($output); + $xfer += $iter1272->write($output); } } $output->writeListEnd(); @@ -33607,6 +33948,10 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { * @var string[] */ public $group_names = null; + /** + * @var string + */ + public $validTxnList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -33643,6 +33988,10 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { 'type' => TType::STRING, ), ), + 7 => array( + 'var' => 'validTxnList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -33664,6 +34013,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { if (isset($vals['group_names'])) { $this->group_names = $vals['group_names']; } + if (isset($vals['validTxnList'])) { + $this->validTxnList = $vals['validTxnList']; + } } } @@ -33703,14 +34055,14 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1266 = 0; - $_etype1269 = 0; - $xfer += $input->readListBegin($_etype1269, $_size1266); - for ($_i1270 = 0; $_i1270 < $_size1266; ++$_i1270) + $_size1273 = 0; + $_etype1276 = 0; + $xfer += $input->readListBegin($_etype1276, $_size1273); + for ($_i1277 = 0; $_i1277 < $_size1273; ++$_i1277) { - $elem1271 = null; - $xfer 
+= $input->readString($elem1271); - $this->part_vals []= $elem1271; + $elem1278 = null; + $xfer += $input->readString($elem1278); + $this->part_vals []= $elem1278; } $xfer += $input->readListEnd(); } else { @@ -33734,20 +34086,27 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { case 6: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1272 = 0; - $_etype1275 = 0; - $xfer += $input->readListBegin($_etype1275, $_size1272); - for ($_i1276 = 0; $_i1276 < $_size1272; ++$_i1276) + $_size1279 = 0; + $_etype1282 = 0; + $xfer += $input->readListBegin($_etype1282, $_size1279); + for ($_i1283 = 0; $_i1283 < $_size1279; ++$_i1283) { - $elem1277 = null; - $xfer += $input->readString($elem1277); - $this->group_names []= $elem1277; + $elem1284 = null; + $xfer += $input->readString($elem1284); + $this->group_names []= $elem1284; } $xfer += $input->readListEnd(); } else { $xfer += $input->skip($ftype); } break; + case 7: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validTxnList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -33779,9 +34138,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1278) + foreach ($this->part_vals as $iter1285) { - $xfer += $output->writeString($iter1278); + $xfer += $output->writeString($iter1285); } } $output->writeListEnd(); @@ -33806,15 +34165,20 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1279) + foreach ($this->group_names as $iter1286) { - $xfer += $output->writeString($iter1279); + $xfer += $output->writeString($iter1286); } } $output->writeListEnd(); } $xfer += $output->writeFieldEnd(); } + if ($this->validTxnList !== null) { + $xfer += $output->writeFieldBegin('validTxnList', TType::STRING, 7); + $xfer += $output->writeString($this->validTxnList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -33897,15 +34261,15 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1280 = 0; - $_etype1283 = 0; - $xfer += $input->readListBegin($_etype1283, $_size1280); - for ($_i1284 = 0; $_i1284 < $_size1280; ++$_i1284) + $_size1287 = 0; + $_etype1290 = 0; + $xfer += $input->readListBegin($_etype1290, $_size1287); + for ($_i1291 = 0; $_i1291 < $_size1287; ++$_i1291) { - $elem1285 = null; - $elem1285 = new \metastore\Partition(); - $xfer += $elem1285->read($input); - $this->success []= $elem1285; + $elem1292 = null; + $elem1292 = new \metastore\Partition(); + $xfer += $elem1292->read($input); + $this->success []= $elem1292; } $xfer += $input->readListEnd(); } else { @@ -33949,9 +34313,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1286) + foreach ($this->success as $iter1293) { - $xfer += $iter1286->write($output); + $xfer += $iter1293->write($output); } } $output->writeListEnd(); @@ -33994,6 +34358,10 @@ class ThriftHiveMetastore_get_partition_names_ps_args { * @var int */ public $max_parts = -1; + /** + * @var string + */ + public $validTxnList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -34018,6 +34386,10 
@@ class ThriftHiveMetastore_get_partition_names_ps_args { 'var' => 'max_parts', 'type' => TType::I16, ), + 5 => array( + 'var' => 'validTxnList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -34033,6 +34405,9 @@ class ThriftHiveMetastore_get_partition_names_ps_args { if (isset($vals['max_parts'])) { $this->max_parts = $vals['max_parts']; } + if (isset($vals['validTxnList'])) { + $this->validTxnList = $vals['validTxnList']; + } } } @@ -34072,14 +34447,14 @@ class ThriftHiveMetastore_get_partition_names_ps_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1287 = 0; - $_etype1290 = 0; - $xfer += $input->readListBegin($_etype1290, $_size1287); - for ($_i1291 = 0; $_i1291 < $_size1287; ++$_i1291) + $_size1294 = 0; + $_etype1297 = 0; + $xfer += $input->readListBegin($_etype1297, $_size1294); + for ($_i1298 = 0; $_i1298 < $_size1294; ++$_i1298) { - $elem1292 = null; - $xfer += $input->readString($elem1292); - $this->part_vals []= $elem1292; + $elem1299 = null; + $xfer += $input->readString($elem1299); + $this->part_vals []= $elem1299; } $xfer += $input->readListEnd(); } else { @@ -34093,6 +34468,13 @@ class ThriftHiveMetastore_get_partition_names_ps_args { $xfer += $input->skip($ftype); } break; + case 5: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validTxnList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -34124,9 +34506,9 @@ class ThriftHiveMetastore_get_partition_names_ps_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1293) + foreach ($this->part_vals as $iter1300) { - $xfer += $output->writeString($iter1293); + $xfer += $output->writeString($iter1300); } } $output->writeListEnd(); @@ -34138,6 +34520,11 @@ class ThriftHiveMetastore_get_partition_names_ps_args { $xfer += $output->writeI16($this->max_parts); $xfer += $output->writeFieldEnd(); } + if ($this->validTxnList !== null) { + $xfer += $output->writeFieldBegin('validTxnList', TType::STRING, 5); + $xfer += $output->writeString($this->validTxnList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -34219,14 +34606,14 @@ class ThriftHiveMetastore_get_partition_names_ps_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1294 = 0; - $_etype1297 = 0; - $xfer += $input->readListBegin($_etype1297, $_size1294); - for ($_i1298 = 0; $_i1298 < $_size1294; ++$_i1298) + $_size1301 = 0; + $_etype1304 = 0; + $xfer += $input->readListBegin($_etype1304, $_size1301); + for ($_i1305 = 0; $_i1305 < $_size1301; ++$_i1305) { - $elem1299 = null; - $xfer += $input->readString($elem1299); - $this->success []= $elem1299; + $elem1306 = null; + $xfer += $input->readString($elem1306); + $this->success []= $elem1306; } $xfer += $input->readListEnd(); } else { @@ -34270,9 +34657,9 @@ class ThriftHiveMetastore_get_partition_names_ps_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1300) + foreach ($this->success as $iter1307) { - $xfer += $output->writeString($iter1300); + $xfer += $output->writeString($iter1307); } } $output->writeListEnd(); @@ -34315,6 +34702,10 @@ class ThriftHiveMetastore_get_partitions_by_filter_args { * @var int */ public $max_parts = -1; + /** + * @var string + */ + public $validTxnList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -34335,6 
+34726,10 @@ class ThriftHiveMetastore_get_partitions_by_filter_args { 'var' => 'max_parts', 'type' => TType::I16, ), + 5 => array( + 'var' => 'validTxnList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -34350,6 +34745,9 @@ class ThriftHiveMetastore_get_partitions_by_filter_args { if (isset($vals['max_parts'])) { $this->max_parts = $vals['max_parts']; } + if (isset($vals['validTxnList'])) { + $this->validTxnList = $vals['validTxnList']; + } } } @@ -34400,6 +34798,13 @@ class ThriftHiveMetastore_get_partitions_by_filter_args { $xfer += $input->skip($ftype); } break; + case 5: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validTxnList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -34433,6 +34838,11 @@ class ThriftHiveMetastore_get_partitions_by_filter_args { $xfer += $output->writeI16($this->max_parts); $xfer += $output->writeFieldEnd(); } + if ($this->validTxnList !== null) { + $xfer += $output->writeFieldBegin('validTxnList', TType::STRING, 5); + $xfer += $output->writeString($this->validTxnList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -34515,15 +34925,15 @@ class ThriftHiveMetastore_get_partitions_by_filter_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1301 = 0; - $_etype1304 = 0; - $xfer += $input->readListBegin($_etype1304, $_size1301); - for ($_i1305 = 0; $_i1305 < $_size1301; ++$_i1305) + $_size1308 = 0; + $_etype1311 = 0; + $xfer += $input->readListBegin($_etype1311, $_size1308); + for ($_i1312 = 0; $_i1312 < $_size1308; ++$_i1312) { - $elem1306 = null; - $elem1306 = new \metastore\Partition(); - $xfer += $elem1306->read($input); - $this->success []= $elem1306; + $elem1313 = null; + $elem1313 = new \metastore\Partition(); + $xfer += $elem1313->read($input); + $this->success []= $elem1313; } $xfer += $input->readListEnd(); } else { @@ -34567,9 +34977,9 @@ class ThriftHiveMetastore_get_partitions_by_filter_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1307) + foreach ($this->success as $iter1314) { - $xfer += $iter1307->write($output); + $xfer += $iter1314->write($output); } } $output->writeListEnd(); @@ -34612,6 +35022,10 @@ class ThriftHiveMetastore_get_part_specs_by_filter_args { * @var int */ public $max_parts = -1; + /** + * @var string + */ + public $validTxnList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -34632,6 +35046,10 @@ class ThriftHiveMetastore_get_part_specs_by_filter_args { 'var' => 'max_parts', 'type' => TType::I32, ), + 5 => array( + 'var' => 'validTxnList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -34647,6 +35065,9 @@ class ThriftHiveMetastore_get_part_specs_by_filter_args { if (isset($vals['max_parts'])) { $this->max_parts = $vals['max_parts']; } + if (isset($vals['validTxnList'])) { + $this->validTxnList = $vals['validTxnList']; + } } } @@ -34697,6 +35118,13 @@ class ThriftHiveMetastore_get_part_specs_by_filter_args { $xfer += $input->skip($ftype); } break; + case 5: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validTxnList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -34730,6 +35158,11 @@ class ThriftHiveMetastore_get_part_specs_by_filter_args { $xfer += $output->writeI32($this->max_parts); $xfer += $output->writeFieldEnd(); } + 
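[The same backward-compatible pattern repeats for every struct touched in this file, including the validWriteIdList field added to the column-statistics getters further down: the new string field is written only when non-null, and peers that predate the field fall into the default branch of read() and skip it via $input->skip($ftype). A condensed, self-contained sketch of the write-side guard, using a hypothetical helper name and any Thrift TProtocol instance:]

<?php
use Thrift\Type\TType; // standalone import; the generated file resolves TType itself

// Hypothetical helper mirroring the guard emitted in each write() above:
// emit the optional field only when set, so older readers see nothing new.
function writeOptionalValidTxnList($output, $validTxnList, $fieldId) {
  $xfer = 0;
  if ($validTxnList !== null) {
    $xfer += $output->writeFieldBegin('validTxnList', TType::STRING, $fieldId);
    $xfer += $output->writeString($validTxnList);
    $xfer += $output->writeFieldEnd();
  }
  return $xfer; // byte count, matching the generated code's convention
}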
if ($this->validTxnList !== null) { + $xfer += $output->writeFieldBegin('validTxnList', TType::STRING, 5); + $xfer += $output->writeString($this->validTxnList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -34812,15 +35245,15 @@ class ThriftHiveMetastore_get_part_specs_by_filter_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1308 = 0; - $_etype1311 = 0; - $xfer += $input->readListBegin($_etype1311, $_size1308); - for ($_i1312 = 0; $_i1312 < $_size1308; ++$_i1312) + $_size1315 = 0; + $_etype1318 = 0; + $xfer += $input->readListBegin($_etype1318, $_size1315); + for ($_i1319 = 0; $_i1319 < $_size1315; ++$_i1319) { - $elem1313 = null; - $elem1313 = new \metastore\PartitionSpec(); - $xfer += $elem1313->read($input); - $this->success []= $elem1313; + $elem1320 = null; + $elem1320 = new \metastore\PartitionSpec(); + $xfer += $elem1320->read($input); + $this->success []= $elem1320; } $xfer += $input->readListEnd(); } else { @@ -34864,9 +35297,9 @@ class ThriftHiveMetastore_get_part_specs_by_filter_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1314) + foreach ($this->success as $iter1321) { - $xfer += $iter1314->write($output); + $xfer += $iter1321->write($output); } } $output->writeListEnd(); @@ -35115,6 +35548,10 @@ class ThriftHiveMetastore_get_num_partitions_by_filter_args { * @var string */ public $filter = null; + /** + * @var string + */ + public $validTxnList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -35131,6 +35568,10 @@ class ThriftHiveMetastore_get_num_partitions_by_filter_args { 'var' => 'filter', 'type' => TType::STRING, ), + 4 => array( + 'var' => 'validTxnList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -35143,6 +35584,9 @@ class ThriftHiveMetastore_get_num_partitions_by_filter_args { if (isset($vals['filter'])) { $this->filter = $vals['filter']; } + if (isset($vals['validTxnList'])) { + $this->validTxnList = $vals['validTxnList']; + } } } @@ -35186,6 +35630,13 @@ class ThriftHiveMetastore_get_num_partitions_by_filter_args { $xfer += $input->skip($ftype); } break; + case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validTxnList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -35214,6 +35665,11 @@ class ThriftHiveMetastore_get_num_partitions_by_filter_args { $xfer += $output->writeString($this->filter); $xfer += $output->writeFieldEnd(); } + if ($this->validTxnList !== null) { + $xfer += $output->writeFieldBegin('validTxnList', TType::STRING, 4); + $xfer += $output->writeString($this->validTxnList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -35361,6 +35817,10 @@ class ThriftHiveMetastore_get_partitions_by_names_args { * @var string[] */ public $names = null; + /** + * @var string + */ + public $validTxnList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -35381,6 +35841,10 @@ class ThriftHiveMetastore_get_partitions_by_names_args { 'type' => TType::STRING, ), ), + 4 => array( + 'var' => 'validTxnList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -35393,6 +35857,9 @@ class ThriftHiveMetastore_get_partitions_by_names_args { if (isset($vals['names'])) { $this->names = $vals['names']; } + if (isset($vals['validTxnList'])) { + 
$this->validTxnList = $vals['validTxnList']; + } } } @@ -35432,20 +35899,27 @@ class ThriftHiveMetastore_get_partitions_by_names_args { case 3: if ($ftype == TType::LST) { $this->names = array(); - $_size1315 = 0; - $_etype1318 = 0; - $xfer += $input->readListBegin($_etype1318, $_size1315); - for ($_i1319 = 0; $_i1319 < $_size1315; ++$_i1319) + $_size1322 = 0; + $_etype1325 = 0; + $xfer += $input->readListBegin($_etype1325, $_size1322); + for ($_i1326 = 0; $_i1326 < $_size1322; ++$_i1326) { - $elem1320 = null; - $xfer += $input->readString($elem1320); - $this->names []= $elem1320; + $elem1327 = null; + $xfer += $input->readString($elem1327); + $this->names []= $elem1327; } $xfer += $input->readListEnd(); } else { $xfer += $input->skip($ftype); } break; + case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validTxnList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -35477,15 +35951,20 @@ class ThriftHiveMetastore_get_partitions_by_names_args { { $output->writeListBegin(TType::STRING, count($this->names)); { - foreach ($this->names as $iter1321) + foreach ($this->names as $iter1328) { - $xfer += $output->writeString($iter1321); + $xfer += $output->writeString($iter1328); } } $output->writeListEnd(); } $xfer += $output->writeFieldEnd(); } + if ($this->validTxnList !== null) { + $xfer += $output->writeFieldBegin('validTxnList', TType::STRING, 4); + $xfer += $output->writeString($this->validTxnList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -35568,15 +36047,15 @@ class ThriftHiveMetastore_get_partitions_by_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1322 = 0; - $_etype1325 = 0; - $xfer += $input->readListBegin($_etype1325, $_size1322); - for ($_i1326 = 0; $_i1326 < $_size1322; ++$_i1326) + $_size1329 = 0; + $_etype1332 = 0; + $xfer += $input->readListBegin($_etype1332, $_size1329); + for ($_i1333 = 0; $_i1333 < $_size1329; ++$_i1333) { - $elem1327 = null; - $elem1327 = new \metastore\Partition(); - $xfer += $elem1327->read($input); - $this->success []= $elem1327; + $elem1334 = null; + $elem1334 = new \metastore\Partition(); + $xfer += $elem1334->read($input); + $this->success []= $elem1334; } $xfer += $input->readListEnd(); } else { @@ -35620,9 +36099,9 @@ class ThriftHiveMetastore_get_partitions_by_names_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1328) + foreach ($this->success as $iter1335) { - $xfer += $iter1328->write($output); + $xfer += $iter1335->write($output); } } $output->writeListEnd(); @@ -36171,15 +36650,15 @@ class ThriftHiveMetastore_alter_partitions_args { case 3: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size1329 = 0; - $_etype1332 = 0; - $xfer += $input->readListBegin($_etype1332, $_size1329); - for ($_i1333 = 0; $_i1333 < $_size1329; ++$_i1333) + $_size1336 = 0; + $_etype1339 = 0; + $xfer += $input->readListBegin($_etype1339, $_size1336); + for ($_i1340 = 0; $_i1340 < $_size1336; ++$_i1340) { - $elem1334 = null; - $elem1334 = new \metastore\Partition(); - $xfer += $elem1334->read($input); - $this->new_parts []= $elem1334; + $elem1341 = null; + $elem1341 = new \metastore\Partition(); + $xfer += $elem1341->read($input); + $this->new_parts []= $elem1341; } $xfer += $input->readListEnd(); } else { @@ -36217,9 +36696,9 @@ class ThriftHiveMetastore_alter_partitions_args { { 
$output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter1335) + foreach ($this->new_parts as $iter1342) { - $xfer += $iter1335->write($output); + $xfer += $iter1342->write($output); } } $output->writeListEnd(); @@ -36434,15 +36913,15 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_args { case 3: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size1336 = 0; - $_etype1339 = 0; - $xfer += $input->readListBegin($_etype1339, $_size1336); - for ($_i1340 = 0; $_i1340 < $_size1336; ++$_i1340) + $_size1343 = 0; + $_etype1346 = 0; + $xfer += $input->readListBegin($_etype1346, $_size1343); + for ($_i1347 = 0; $_i1347 < $_size1343; ++$_i1347) { - $elem1341 = null; - $elem1341 = new \metastore\Partition(); - $xfer += $elem1341->read($input); - $this->new_parts []= $elem1341; + $elem1348 = null; + $elem1348 = new \metastore\Partition(); + $xfer += $elem1348->read($input); + $this->new_parts []= $elem1348; } $xfer += $input->readListEnd(); } else { @@ -36488,9 +36967,9 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter1342) + foreach ($this->new_parts as $iter1349) { - $xfer += $iter1342->write($output); + $xfer += $iter1349->write($output); } } $output->writeListEnd(); @@ -37178,14 +37657,14 @@ class ThriftHiveMetastore_rename_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1343 = 0; - $_etype1346 = 0; - $xfer += $input->readListBegin($_etype1346, $_size1343); - for ($_i1347 = 0; $_i1347 < $_size1343; ++$_i1347) + $_size1350 = 0; + $_etype1353 = 0; + $xfer += $input->readListBegin($_etype1353, $_size1350); + for ($_i1354 = 0; $_i1354 < $_size1350; ++$_i1354) { - $elem1348 = null; - $xfer += $input->readString($elem1348); - $this->part_vals []= $elem1348; + $elem1355 = null; + $xfer += $input->readString($elem1355); + $this->part_vals []= $elem1355; } $xfer += $input->readListEnd(); } else { @@ -37231,9 +37710,9 @@ class ThriftHiveMetastore_rename_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1349) + foreach ($this->part_vals as $iter1356) { - $xfer += $output->writeString($iter1349); + $xfer += $output->writeString($iter1356); } } $output->writeListEnd(); @@ -37628,14 +38107,14 @@ class ThriftHiveMetastore_partition_name_has_valid_characters_args { case 1: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1350 = 0; - $_etype1353 = 0; - $xfer += $input->readListBegin($_etype1353, $_size1350); - for ($_i1354 = 0; $_i1354 < $_size1350; ++$_i1354) + $_size1357 = 0; + $_etype1360 = 0; + $xfer += $input->readListBegin($_etype1360, $_size1357); + for ($_i1361 = 0; $_i1361 < $_size1357; ++$_i1361) { - $elem1355 = null; - $xfer += $input->readString($elem1355); - $this->part_vals []= $elem1355; + $elem1362 = null; + $xfer += $input->readString($elem1362); + $this->part_vals []= $elem1362; } $xfer += $input->readListEnd(); } else { @@ -37670,9 +38149,9 @@ class ThriftHiveMetastore_partition_name_has_valid_characters_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1356) + foreach ($this->part_vals as $iter1363) { - $xfer += $output->writeString($iter1356); + $xfer += $output->writeString($iter1363); } } $output->writeListEnd(); @@ -38126,14 +38605,14 @@ class ThriftHiveMetastore_partition_name_to_vals_result { 
case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1357 = 0; - $_etype1360 = 0; - $xfer += $input->readListBegin($_etype1360, $_size1357); - for ($_i1361 = 0; $_i1361 < $_size1357; ++$_i1361) + $_size1364 = 0; + $_etype1367 = 0; + $xfer += $input->readListBegin($_etype1367, $_size1364); + for ($_i1368 = 0; $_i1368 < $_size1364; ++$_i1368) { - $elem1362 = null; - $xfer += $input->readString($elem1362); - $this->success []= $elem1362; + $elem1369 = null; + $xfer += $input->readString($elem1369); + $this->success []= $elem1369; } $xfer += $input->readListEnd(); } else { @@ -38169,9 +38648,9 @@ class ThriftHiveMetastore_partition_name_to_vals_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1363) + foreach ($this->success as $iter1370) { - $xfer += $output->writeString($iter1363); + $xfer += $output->writeString($iter1370); } } $output->writeListEnd(); @@ -38331,17 +38810,17 @@ class ThriftHiveMetastore_partition_name_to_spec_result { case 0: if ($ftype == TType::MAP) { $this->success = array(); - $_size1364 = 0; - $_ktype1365 = 0; - $_vtype1366 = 0; - $xfer += $input->readMapBegin($_ktype1365, $_vtype1366, $_size1364); - for ($_i1368 = 0; $_i1368 < $_size1364; ++$_i1368) + $_size1371 = 0; + $_ktype1372 = 0; + $_vtype1373 = 0; + $xfer += $input->readMapBegin($_ktype1372, $_vtype1373, $_size1371); + for ($_i1375 = 0; $_i1375 < $_size1371; ++$_i1375) { - $key1369 = ''; - $val1370 = ''; - $xfer += $input->readString($key1369); - $xfer += $input->readString($val1370); - $this->success[$key1369] = $val1370; + $key1376 = ''; + $val1377 = ''; + $xfer += $input->readString($key1376); + $xfer += $input->readString($val1377); + $this->success[$key1376] = $val1377; } $xfer += $input->readMapEnd(); } else { @@ -38377,10 +38856,10 @@ class ThriftHiveMetastore_partition_name_to_spec_result { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->success)); { - foreach ($this->success as $kiter1371 => $viter1372) + foreach ($this->success as $kiter1378 => $viter1379) { - $xfer += $output->writeString($kiter1371); - $xfer += $output->writeString($viter1372); + $xfer += $output->writeString($kiter1378); + $xfer += $output->writeString($viter1379); } } $output->writeMapEnd(); @@ -38500,17 +38979,17 @@ class ThriftHiveMetastore_markPartitionForEvent_args { case 3: if ($ftype == TType::MAP) { $this->part_vals = array(); - $_size1373 = 0; - $_ktype1374 = 0; - $_vtype1375 = 0; - $xfer += $input->readMapBegin($_ktype1374, $_vtype1375, $_size1373); - for ($_i1377 = 0; $_i1377 < $_size1373; ++$_i1377) + $_size1380 = 0; + $_ktype1381 = 0; + $_vtype1382 = 0; + $xfer += $input->readMapBegin($_ktype1381, $_vtype1382, $_size1380); + for ($_i1384 = 0; $_i1384 < $_size1380; ++$_i1384) { - $key1378 = ''; - $val1379 = ''; - $xfer += $input->readString($key1378); - $xfer += $input->readString($val1379); - $this->part_vals[$key1378] = $val1379; + $key1385 = ''; + $val1386 = ''; + $xfer += $input->readString($key1385); + $xfer += $input->readString($val1386); + $this->part_vals[$key1385] = $val1386; } $xfer += $input->readMapEnd(); } else { @@ -38555,10 +39034,10 @@ class ThriftHiveMetastore_markPartitionForEvent_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $kiter1380 => $viter1381) + foreach ($this->part_vals as $kiter1387 => $viter1388) { - $xfer += $output->writeString($kiter1380); - $xfer += $output->writeString($viter1381); + $xfer += 
$output->writeString($kiter1387); + $xfer += $output->writeString($viter1388); } } $output->writeMapEnd(); @@ -38880,17 +39359,17 @@ class ThriftHiveMetastore_isPartitionMarkedForEvent_args { case 3: if ($ftype == TType::MAP) { $this->part_vals = array(); - $_size1382 = 0; - $_ktype1383 = 0; - $_vtype1384 = 0; - $xfer += $input->readMapBegin($_ktype1383, $_vtype1384, $_size1382); - for ($_i1386 = 0; $_i1386 < $_size1382; ++$_i1386) + $_size1389 = 0; + $_ktype1390 = 0; + $_vtype1391 = 0; + $xfer += $input->readMapBegin($_ktype1390, $_vtype1391, $_size1389); + for ($_i1393 = 0; $_i1393 < $_size1389; ++$_i1393) { - $key1387 = ''; - $val1388 = ''; - $xfer += $input->readString($key1387); - $xfer += $input->readString($val1388); - $this->part_vals[$key1387] = $val1388; + $key1394 = ''; + $val1395 = ''; + $xfer += $input->readString($key1394); + $xfer += $input->readString($val1395); + $this->part_vals[$key1394] = $val1395; } $xfer += $input->readMapEnd(); } else { @@ -38935,10 +39414,10 @@ class ThriftHiveMetastore_isPartitionMarkedForEvent_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $kiter1389 => $viter1390) + foreach ($this->part_vals as $kiter1396 => $viter1397) { - $xfer += $output->writeString($kiter1389); - $xfer += $output->writeString($viter1390); + $xfer += $output->writeString($kiter1396); + $xfer += $output->writeString($viter1397); } } $output->writeMapEnd(); @@ -41487,6 +41966,10 @@ class ThriftHiveMetastore_get_table_column_statistics_args { * @var string */ public $col_name = null; + /** + * @var string + */ + public $validWriteIdList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -41503,6 +41986,10 @@ class ThriftHiveMetastore_get_table_column_statistics_args { 'var' => 'col_name', 'type' => TType::STRING, ), + 4 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -41515,6 +42002,9 @@ class ThriftHiveMetastore_get_table_column_statistics_args { if (isset($vals['col_name'])) { $this->col_name = $vals['col_name']; } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } } } @@ -41558,6 +42048,13 @@ class ThriftHiveMetastore_get_table_column_statistics_args { $xfer += $input->skip($ftype); } break; + case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -41586,6 +42083,11 @@ class ThriftHiveMetastore_get_table_column_statistics_args { $xfer += $output->writeString($this->col_name); $xfer += $output->writeFieldEnd(); } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 4); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -41792,6 +42294,10 @@ class ThriftHiveMetastore_get_partition_column_statistics_args { * @var string */ public $col_name = null; + /** + * @var string + */ + public $validWriteIdList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -41812,6 +42318,10 @@ class ThriftHiveMetastore_get_partition_column_statistics_args { 'var' => 'col_name', 'type' => TType::STRING, ), + 5 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -41827,6 
+42337,9 @@ class ThriftHiveMetastore_get_partition_column_statistics_args { if (isset($vals['col_name'])) { $this->col_name = $vals['col_name']; } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } } } @@ -41877,6 +42390,13 @@ class ThriftHiveMetastore_get_partition_column_statistics_args { $xfer += $input->skip($ftype); } break; + case 5: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -41910,6 +42430,11 @@ class ThriftHiveMetastore_get_partition_column_statistics_args { $xfer += $output->writeString($this->col_name); $xfer += $output->writeFieldEnd(); } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 5); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -44417,14 +44942,14 @@ class ThriftHiveMetastore_get_functions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1391 = 0; - $_etype1394 = 0; - $xfer += $input->readListBegin($_etype1394, $_size1391); - for ($_i1395 = 0; $_i1395 < $_size1391; ++$_i1395) + $_size1398 = 0; + $_etype1401 = 0; + $xfer += $input->readListBegin($_etype1401, $_size1398); + for ($_i1402 = 0; $_i1402 < $_size1398; ++$_i1402) { - $elem1396 = null; - $xfer += $input->readString($elem1396); - $this->success []= $elem1396; + $elem1403 = null; + $xfer += $input->readString($elem1403); + $this->success []= $elem1403; } $xfer += $input->readListEnd(); } else { @@ -44460,9 +44985,9 @@ class ThriftHiveMetastore_get_functions_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1397) + foreach ($this->success as $iter1404) { - $xfer += $output->writeString($iter1397); + $xfer += $output->writeString($iter1404); } } $output->writeListEnd(); @@ -45331,14 +45856,14 @@ class ThriftHiveMetastore_get_role_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1398 = 0; - $_etype1401 = 0; - $xfer += $input->readListBegin($_etype1401, $_size1398); - for ($_i1402 = 0; $_i1402 < $_size1398; ++$_i1402) + $_size1405 = 0; + $_etype1408 = 0; + $xfer += $input->readListBegin($_etype1408, $_size1405); + for ($_i1409 = 0; $_i1409 < $_size1405; ++$_i1409) { - $elem1403 = null; - $xfer += $input->readString($elem1403); - $this->success []= $elem1403; + $elem1410 = null; + $xfer += $input->readString($elem1410); + $this->success []= $elem1410; } $xfer += $input->readListEnd(); } else { @@ -45374,9 +45899,9 @@ class ThriftHiveMetastore_get_role_names_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1404) + foreach ($this->success as $iter1411) { - $xfer += $output->writeString($iter1404); + $xfer += $output->writeString($iter1411); } } $output->writeListEnd(); @@ -46067,15 +46592,15 @@ class ThriftHiveMetastore_list_roles_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1405 = 0; - $_etype1408 = 0; - $xfer += $input->readListBegin($_etype1408, $_size1405); - for ($_i1409 = 0; $_i1409 < $_size1405; ++$_i1409) + $_size1412 = 0; + $_etype1415 = 0; + $xfer += $input->readListBegin($_etype1415, $_size1412); + for ($_i1416 = 0; $_i1416 < $_size1412; ++$_i1416) { - $elem1410 = null; - $elem1410 = new 
\metastore\Role(); - $xfer += $elem1410->read($input); - $this->success []= $elem1410; + $elem1417 = null; + $elem1417 = new \metastore\Role(); + $xfer += $elem1417->read($input); + $this->success []= $elem1417; } $xfer += $input->readListEnd(); } else { @@ -46111,9 +46636,9 @@ class ThriftHiveMetastore_list_roles_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1411) + foreach ($this->success as $iter1418) { - $xfer += $iter1411->write($output); + $xfer += $iter1418->write($output); } } $output->writeListEnd(); @@ -46775,14 +47300,14 @@ class ThriftHiveMetastore_get_privilege_set_args { case 3: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1412 = 0; - $_etype1415 = 0; - $xfer += $input->readListBegin($_etype1415, $_size1412); - for ($_i1416 = 0; $_i1416 < $_size1412; ++$_i1416) + $_size1419 = 0; + $_etype1422 = 0; + $xfer += $input->readListBegin($_etype1422, $_size1419); + for ($_i1423 = 0; $_i1423 < $_size1419; ++$_i1423) { - $elem1417 = null; - $xfer += $input->readString($elem1417); - $this->group_names []= $elem1417; + $elem1424 = null; + $xfer += $input->readString($elem1424); + $this->group_names []= $elem1424; } $xfer += $input->readListEnd(); } else { @@ -46823,9 +47348,9 @@ class ThriftHiveMetastore_get_privilege_set_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1418) + foreach ($this->group_names as $iter1425) { - $xfer += $output->writeString($iter1418); + $xfer += $output->writeString($iter1425); } } $output->writeListEnd(); @@ -47133,15 +47658,15 @@ class ThriftHiveMetastore_list_privileges_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1419 = 0; - $_etype1422 = 0; - $xfer += $input->readListBegin($_etype1422, $_size1419); - for ($_i1423 = 0; $_i1423 < $_size1419; ++$_i1423) + $_size1426 = 0; + $_etype1429 = 0; + $xfer += $input->readListBegin($_etype1429, $_size1426); + for ($_i1430 = 0; $_i1430 < $_size1426; ++$_i1430) { - $elem1424 = null; - $elem1424 = new \metastore\HiveObjectPrivilege(); - $xfer += $elem1424->read($input); - $this->success []= $elem1424; + $elem1431 = null; + $elem1431 = new \metastore\HiveObjectPrivilege(); + $xfer += $elem1431->read($input); + $this->success []= $elem1431; } $xfer += $input->readListEnd(); } else { @@ -47177,9 +47702,9 @@ class ThriftHiveMetastore_list_privileges_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1425) + foreach ($this->success as $iter1432) { - $xfer += $iter1425->write($output); + $xfer += $iter1432->write($output); } } $output->writeListEnd(); @@ -48047,14 +48572,14 @@ class ThriftHiveMetastore_set_ugi_args { case 2: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1426 = 0; - $_etype1429 = 0; - $xfer += $input->readListBegin($_etype1429, $_size1426); - for ($_i1430 = 0; $_i1430 < $_size1426; ++$_i1430) + $_size1433 = 0; + $_etype1436 = 0; + $xfer += $input->readListBegin($_etype1436, $_size1433); + for ($_i1437 = 0; $_i1437 < $_size1433; ++$_i1437) { - $elem1431 = null; - $xfer += $input->readString($elem1431); - $this->group_names []= $elem1431; + $elem1438 = null; + $xfer += $input->readString($elem1438); + $this->group_names []= $elem1438; } $xfer += $input->readListEnd(); } else { @@ -48087,9 +48612,9 @@ class ThriftHiveMetastore_set_ugi_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as 
$iter1432) + foreach ($this->group_names as $iter1439) { - $xfer += $output->writeString($iter1432); + $xfer += $output->writeString($iter1439); } } $output->writeListEnd(); @@ -48165,14 +48690,14 @@ class ThriftHiveMetastore_set_ugi_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1433 = 0; - $_etype1436 = 0; - $xfer += $input->readListBegin($_etype1436, $_size1433); - for ($_i1437 = 0; $_i1437 < $_size1433; ++$_i1437) + $_size1440 = 0; + $_etype1443 = 0; + $xfer += $input->readListBegin($_etype1443, $_size1440); + for ($_i1444 = 0; $_i1444 < $_size1440; ++$_i1444) { - $elem1438 = null; - $xfer += $input->readString($elem1438); - $this->success []= $elem1438; + $elem1445 = null; + $xfer += $input->readString($elem1445); + $this->success []= $elem1445; } $xfer += $input->readListEnd(); } else { @@ -48208,9 +48733,9 @@ class ThriftHiveMetastore_set_ugi_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1439) + foreach ($this->success as $iter1446) { - $xfer += $output->writeString($iter1439); + $xfer += $output->writeString($iter1446); } } $output->writeListEnd(); @@ -49327,14 +49852,14 @@ class ThriftHiveMetastore_get_all_token_identifiers_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1440 = 0; - $_etype1443 = 0; - $xfer += $input->readListBegin($_etype1443, $_size1440); - for ($_i1444 = 0; $_i1444 < $_size1440; ++$_i1444) + $_size1447 = 0; + $_etype1450 = 0; + $xfer += $input->readListBegin($_etype1450, $_size1447); + for ($_i1451 = 0; $_i1451 < $_size1447; ++$_i1451) { - $elem1445 = null; - $xfer += $input->readString($elem1445); - $this->success []= $elem1445; + $elem1452 = null; + $xfer += $input->readString($elem1452); + $this->success []= $elem1452; } $xfer += $input->readListEnd(); } else { @@ -49362,9 +49887,9 @@ class ThriftHiveMetastore_get_all_token_identifiers_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1446) + foreach ($this->success as $iter1453) { - $xfer += $output->writeString($iter1446); + $xfer += $output->writeString($iter1453); } } $output->writeListEnd(); @@ -50003,14 +50528,14 @@ class ThriftHiveMetastore_get_master_keys_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1447 = 0; - $_etype1450 = 0; - $xfer += $input->readListBegin($_etype1450, $_size1447); - for ($_i1451 = 0; $_i1451 < $_size1447; ++$_i1451) + $_size1454 = 0; + $_etype1457 = 0; + $xfer += $input->readListBegin($_etype1457, $_size1454); + for ($_i1458 = 0; $_i1458 < $_size1454; ++$_i1458) { - $elem1452 = null; - $xfer += $input->readString($elem1452); - $this->success []= $elem1452; + $elem1459 = null; + $xfer += $input->readString($elem1459); + $this->success []= $elem1459; } $xfer += $input->readListEnd(); } else { @@ -50038,9 +50563,9 @@ class ThriftHiveMetastore_get_master_keys_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1453) + foreach ($this->success as $iter1460) { - $xfer += $output->writeString($iter1453); + $xfer += $output->writeString($iter1460); } } $output->writeListEnd(); @@ -53794,14 +54319,14 @@ class ThriftHiveMetastore_find_columns_with_stats_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1454 = 0; - $_etype1457 = 0; - $xfer += $input->readListBegin($_etype1457, $_size1454); - for ($_i1458 = 0; $_i1458 < $_size1454; ++$_i1458) + $_size1461 = 0; + $_etype1464 = 0; + $xfer += 
$input->readListBegin($_etype1464, $_size1461); + for ($_i1465 = 0; $_i1465 < $_size1461; ++$_i1465) { - $elem1459 = null; - $xfer += $input->readString($elem1459); - $this->success []= $elem1459; + $elem1466 = null; + $xfer += $input->readString($elem1466); + $this->success []= $elem1466; } $xfer += $input->readListEnd(); } else { @@ -53829,9 +54354,9 @@ class ThriftHiveMetastore_find_columns_with_stats_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1460) + foreach ($this->success as $iter1467) { - $xfer += $output->writeString($iter1460); + $xfer += $output->writeString($iter1467); } } $output->writeListEnd(); @@ -62002,15 +62527,15 @@ class ThriftHiveMetastore_get_schema_all_versions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1461 = 0; - $_etype1464 = 0; - $xfer += $input->readListBegin($_etype1464, $_size1461); - for ($_i1465 = 0; $_i1465 < $_size1461; ++$_i1465) + $_size1468 = 0; + $_etype1471 = 0; + $xfer += $input->readListBegin($_etype1471, $_size1468); + for ($_i1472 = 0; $_i1472 < $_size1468; ++$_i1472) { - $elem1466 = null; - $elem1466 = new \metastore\SchemaVersion(); - $xfer += $elem1466->read($input); - $this->success []= $elem1466; + $elem1473 = null; + $elem1473 = new \metastore\SchemaVersion(); + $xfer += $elem1473->read($input); + $this->success []= $elem1473; } $xfer += $input->readListEnd(); } else { @@ -62054,9 +62579,9 @@ class ThriftHiveMetastore_get_schema_all_versions_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1467) + foreach ($this->success as $iter1474) { - $xfer += $iter1467->write($output); + $xfer += $iter1474->write($output); } } $output->writeListEnd(); @@ -63925,15 +64450,15 @@ class ThriftHiveMetastore_get_runtime_stats_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1468 = 0; - $_etype1471 = 0; - $xfer += $input->readListBegin($_etype1471, $_size1468); - for ($_i1472 = 0; $_i1472 < $_size1468; ++$_i1472) + $_size1475 = 0; + $_etype1478 = 0; + $xfer += $input->readListBegin($_etype1478, $_size1475); + for ($_i1479 = 0; $_i1479 < $_size1475; ++$_i1479) { - $elem1473 = null; - $elem1473 = new \metastore\RuntimeStat(); - $xfer += $elem1473->read($input); - $this->success []= $elem1473; + $elem1480 = null; + $elem1480 = new \metastore\RuntimeStat(); + $xfer += $elem1480->read($input); + $this->success []= $elem1480; } $xfer += $input->readListEnd(); } else { @@ -63969,9 +64494,9 @@ class ThriftHiveMetastore_get_runtime_stats_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1474) + foreach ($this->success as $iter1481) { - $xfer += $iter1474->write($output); + $xfer += $iter1481->write($output); } } $output->writeListEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php index a09c1d540c..20ba931084 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php @@ -13795,6 +13795,10 @@ class PartitionsByExprRequest { * @var string */ public $catName = null; + /** + * @var string + */ + public $validWriteIdList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -13823,6 +13827,10 @@ class PartitionsByExprRequest { 'var' => 'catName', 
'type' => TType::STRING, ), + 7 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -13844,6 +13852,9 @@ class PartitionsByExprRequest { if (isset($vals['catName'])) { $this->catName = $vals['catName']; } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } } } @@ -13908,6 +13919,13 @@ class PartitionsByExprRequest { $xfer += $input->skip($ftype); } break; + case 7: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -13951,6 +13969,11 @@ class PartitionsByExprRequest { $xfer += $output->writeString($this->catName); $xfer += $output->writeFieldEnd(); } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 7); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -15704,6 +15727,10 @@ class PartitionValuesRequest { * @var string */ public $catName = null; + /** + * @var string + */ + public $validWriteIdList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -15754,6 +15781,10 @@ class PartitionValuesRequest { 'var' => 'catName', 'type' => TType::STRING, ), + 10 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -15784,6 +15815,9 @@ class PartitionValuesRequest { if (isset($vals['catName'])) { $this->catName = $vals['catName']; } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } } } @@ -15891,6 +15925,13 @@ class PartitionValuesRequest { $xfer += $input->skip($ftype); } break; + case 10: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -15973,6 +16014,11 @@ class PartitionValuesRequest { $xfer += $output->writeString($this->catName); $xfer += $output->writeFieldEnd(); } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 10); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -16211,6 +16257,10 @@ class GetPartitionsByNamesRequest { * @var string */ public $processorIdentifier = null; + /** + * @var string + */ + public $validWriteIdList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -16247,6 +16297,10 @@ class GetPartitionsByNamesRequest { 'var' => 'processorIdentifier', 'type' => TType::STRING, ), + 7 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -16268,6 +16322,9 @@ class GetPartitionsByNamesRequest { if (isset($vals['processorIdentifier'])) { $this->processorIdentifier = $vals['processorIdentifier']; } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } } } @@ -16352,6 +16409,13 @@ class GetPartitionsByNamesRequest { $xfer += $input->skip($ftype); } break; + case 7: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); 
break; @@ -16419,6 +16483,11 @@ class GetPartitionsByNamesRequest { $xfer += $output->writeString($this->processorIdentifier); $xfer += $output->writeFieldEnd(); } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 7); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -19296,6 +19365,207 @@ class TableValidWriteIds { } +class TableWriteId { + static $_TSPEC; + + /** + * @var string + */ + public $fullTableName = null; + /** + * @var int + */ + public $writeId = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'fullTableName', + 'type' => TType::STRING, + ), + 2 => array( + 'var' => 'writeId', + 'type' => TType::I64, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['fullTableName'])) { + $this->fullTableName = $vals['fullTableName']; + } + if (isset($vals['writeId'])) { + $this->writeId = $vals['writeId']; + } + } + } + + public function getName() { + return 'TableWriteId'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->fullTableName); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->writeId); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('TableWriteId'); + if ($this->fullTableName !== null) { + $xfer += $output->writeFieldBegin('fullTableName', TType::STRING, 1); + $xfer += $output->writeString($this->fullTableName); + $xfer += $output->writeFieldEnd(); + } + if ($this->writeId !== null) { + $xfer += $output->writeFieldBegin('writeId', TType::I64, 2); + $xfer += $output->writeI64($this->writeId); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class GetTxnTableWriteIdsResponse { + static $_TSPEC; + + /** + * @var \metastore\TableWriteId[] + */ + public $tableWriteIds = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'tableWriteIds', + 'type' => TType::LST, + 'etype' => TType::STRUCT, + 'elem' => array( + 'type' => TType::STRUCT, + 'class' => '\metastore\TableWriteId', + ), + ), + ); + } + if (is_array($vals)) { + if (isset($vals['tableWriteIds'])) { + $this->tableWriteIds = $vals['tableWriteIds']; + } + } + } + + public function getName() { + return 'GetTxnTableWriteIdsResponse'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::LST) { + $this->tableWriteIds = array(); + $_size594 = 0; + $_etype597 = 0; + $xfer += 
$input->readListBegin($_etype597, $_size594); + for ($_i598 = 0; $_i598 < $_size594; ++$_i598) + { + $elem599 = null; + $elem599 = new \metastore\TableWriteId(); + $xfer += $elem599->read($input); + $this->tableWriteIds []= $elem599; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('GetTxnTableWriteIdsResponse'); + if ($this->tableWriteIds !== null) { + if (!is_array($this->tableWriteIds)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('tableWriteIds', TType::LST, 1); + { + $output->writeListBegin(TType::STRUCT, count($this->tableWriteIds)); + { + foreach ($this->tableWriteIds as $iter600) + { + $xfer += $iter600->write($output); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + class GetValidWriteIdsResponse { static $_TSPEC; @@ -19347,15 +19617,15 @@ class GetValidWriteIdsResponse { case 1: if ($ftype == TType::LST) { $this->tblValidWriteIds = array(); - $_size594 = 0; - $_etype597 = 0; - $xfer += $input->readListBegin($_etype597, $_size594); - for ($_i598 = 0; $_i598 < $_size594; ++$_i598) + $_size601 = 0; + $_etype604 = 0; + $xfer += $input->readListBegin($_etype604, $_size601); + for ($_i605 = 0; $_i605 < $_size601; ++$_i605) { - $elem599 = null; - $elem599 = new \metastore\TableValidWriteIds(); - $xfer += $elem599->read($input); - $this->tblValidWriteIds []= $elem599; + $elem606 = null; + $elem606 = new \metastore\TableValidWriteIds(); + $xfer += $elem606->read($input); + $this->tblValidWriteIds []= $elem606; } $xfer += $input->readListEnd(); } else { @@ -19383,9 +19653,9 @@ class GetValidWriteIdsResponse { { $output->writeListBegin(TType::STRUCT, count($this->tblValidWriteIds)); { - foreach ($this->tblValidWriteIds as $iter600) + foreach ($this->tblValidWriteIds as $iter607) { - $xfer += $iter600->write($output); + $xfer += $iter607->write($output); } } $output->writeListEnd(); @@ -19610,14 +19880,14 @@ class AllocateTableWriteIdsRequest { case 3: if ($ftype == TType::LST) { $this->txnIds = array(); - $_size601 = 0; - $_etype604 = 0; - $xfer += $input->readListBegin($_etype604, $_size601); - for ($_i605 = 0; $_i605 < $_size601; ++$_i605) + $_size608 = 0; + $_etype611 = 0; + $xfer += $input->readListBegin($_etype611, $_size608); + for ($_i612 = 0; $_i612 < $_size608; ++$_i612) { - $elem606 = null; - $xfer += $input->readI64($elem606); - $this->txnIds []= $elem606; + $elem613 = null; + $xfer += $input->readI64($elem613); + $this->txnIds []= $elem613; } $xfer += $input->readListEnd(); } else { @@ -19634,15 +19904,15 @@ class AllocateTableWriteIdsRequest { case 5: if ($ftype == TType::LST) { $this->srcTxnToWriteIdList = array(); - $_size607 = 0; - $_etype610 = 0; - $xfer += $input->readListBegin($_etype610, $_size607); - for ($_i611 = 0; $_i611 < $_size607; ++$_i611) + $_size614 = 0; + $_etype617 = 0; + $xfer += $input->readListBegin($_etype617, $_size614); + for ($_i618 = 0; $_i618 < $_size614; ++$_i618) { - $elem612 = null; - $elem612 = new \metastore\TxnToWriteId(); - $xfer += $elem612->read($input); - $this->srcTxnToWriteIdList []= $elem612; + $elem619 = null; + $elem619 = new 
\metastore\TxnToWriteId(); + $xfer += $elem619->read($input); + $this->srcTxnToWriteIdList []= $elem619; } $xfer += $input->readListEnd(); } else { @@ -19680,9 +19950,9 @@ class AllocateTableWriteIdsRequest { { $output->writeListBegin(TType::I64, count($this->txnIds)); { - foreach ($this->txnIds as $iter613) + foreach ($this->txnIds as $iter620) { - $xfer += $output->writeI64($iter613); + $xfer += $output->writeI64($iter620); } } $output->writeListEnd(); @@ -19702,9 +19972,9 @@ class AllocateTableWriteIdsRequest { { $output->writeListBegin(TType::STRUCT, count($this->srcTxnToWriteIdList)); { - foreach ($this->srcTxnToWriteIdList as $iter614) + foreach ($this->srcTxnToWriteIdList as $iter621) { - $xfer += $iter614->write($output); + $xfer += $iter621->write($output); } } $output->writeListEnd(); @@ -19769,15 +20039,15 @@ class AllocateTableWriteIdsResponse { case 1: if ($ftype == TType::LST) { $this->txnToWriteIds = array(); - $_size615 = 0; - $_etype618 = 0; - $xfer += $input->readListBegin($_etype618, $_size615); - for ($_i619 = 0; $_i619 < $_size615; ++$_i619) + $_size622 = 0; + $_etype625 = 0; + $xfer += $input->readListBegin($_etype625, $_size622); + for ($_i626 = 0; $_i626 < $_size622; ++$_i626) { - $elem620 = null; - $elem620 = new \metastore\TxnToWriteId(); - $xfer += $elem620->read($input); - $this->txnToWriteIds []= $elem620; + $elem627 = null; + $elem627 = new \metastore\TxnToWriteId(); + $xfer += $elem627->read($input); + $this->txnToWriteIds []= $elem627; } $xfer += $input->readListEnd(); } else { @@ -19805,9 +20075,9 @@ class AllocateTableWriteIdsResponse { { $output->writeListBegin(TType::STRUCT, count($this->txnToWriteIds)); { - foreach ($this->txnToWriteIds as $iter621) + foreach ($this->txnToWriteIds as $iter628) { - $xfer += $iter621->write($output); + $xfer += $iter628->write($output); } } $output->writeListEnd(); @@ -20152,15 +20422,15 @@ class LockRequest { case 1: if ($ftype == TType::LST) { $this->component = array(); - $_size622 = 0; - $_etype625 = 0; - $xfer += $input->readListBegin($_etype625, $_size622); - for ($_i626 = 0; $_i626 < $_size622; ++$_i626) + $_size629 = 0; + $_etype632 = 0; + $xfer += $input->readListBegin($_etype632, $_size629); + for ($_i633 = 0; $_i633 < $_size629; ++$_i633) { - $elem627 = null; - $elem627 = new \metastore\LockComponent(); - $xfer += $elem627->read($input); - $this->component []= $elem627; + $elem634 = null; + $elem634 = new \metastore\LockComponent(); + $xfer += $elem634->read($input); + $this->component []= $elem634; } $xfer += $input->readListEnd(); } else { @@ -20216,9 +20486,9 @@ class LockRequest { { $output->writeListBegin(TType::STRUCT, count($this->component)); { - foreach ($this->component as $iter628) + foreach ($this->component as $iter635) { - $xfer += $iter628->write($output); + $xfer += $iter635->write($output); } } $output->writeListEnd(); @@ -21161,15 +21431,15 @@ class ShowLocksResponse { case 1: if ($ftype == TType::LST) { $this->locks = array(); - $_size629 = 0; - $_etype632 = 0; - $xfer += $input->readListBegin($_etype632, $_size629); - for ($_i633 = 0; $_i633 < $_size629; ++$_i633) + $_size636 = 0; + $_etype639 = 0; + $xfer += $input->readListBegin($_etype639, $_size636); + for ($_i640 = 0; $_i640 < $_size636; ++$_i640) { - $elem634 = null; - $elem634 = new \metastore\ShowLocksResponseElement(); - $xfer += $elem634->read($input); - $this->locks []= $elem634; + $elem641 = null; + $elem641 = new \metastore\ShowLocksResponseElement(); + $xfer += $elem641->read($input); + $this->locks []= $elem641; } $xfer += 
$input->readListEnd(); } else { @@ -21197,9 +21467,9 @@ class ShowLocksResponse { { $output->writeListBegin(TType::STRUCT, count($this->locks)); { - foreach ($this->locks as $iter635) + foreach ($this->locks as $iter642) { - $xfer += $iter635->write($output); + $xfer += $iter642->write($output); } } $output->writeListEnd(); @@ -21474,17 +21744,17 @@ class HeartbeatTxnRangeResponse { case 1: if ($ftype == TType::SET) { $this->aborted = array(); - $_size636 = 0; - $_etype639 = 0; - $xfer += $input->readSetBegin($_etype639, $_size636); - for ($_i640 = 0; $_i640 < $_size636; ++$_i640) + $_size643 = 0; + $_etype646 = 0; + $xfer += $input->readSetBegin($_etype646, $_size643); + for ($_i647 = 0; $_i647 < $_size643; ++$_i647) { - $elem641 = null; - $xfer += $input->readI64($elem641); - if (is_scalar($elem641)) { - $this->aborted[$elem641] = true; + $elem648 = null; + $xfer += $input->readI64($elem648); + if (is_scalar($elem648)) { + $this->aborted[$elem648] = true; } else { - $this->aborted []= $elem641; + $this->aborted []= $elem648; } } $xfer += $input->readSetEnd(); @@ -21495,17 +21765,17 @@ class HeartbeatTxnRangeResponse { case 2: if ($ftype == TType::SET) { $this->nosuch = array(); - $_size642 = 0; - $_etype645 = 0; - $xfer += $input->readSetBegin($_etype645, $_size642); - for ($_i646 = 0; $_i646 < $_size642; ++$_i646) + $_size649 = 0; + $_etype652 = 0; + $xfer += $input->readSetBegin($_etype652, $_size649); + for ($_i653 = 0; $_i653 < $_size649; ++$_i653) { - $elem647 = null; - $xfer += $input->readI64($elem647); - if (is_scalar($elem647)) { - $this->nosuch[$elem647] = true; + $elem654 = null; + $xfer += $input->readI64($elem654); + if (is_scalar($elem654)) { + $this->nosuch[$elem654] = true; } else { - $this->nosuch []= $elem647; + $this->nosuch []= $elem654; } } $xfer += $input->readSetEnd(); @@ -21534,12 +21804,12 @@ class HeartbeatTxnRangeResponse { { $output->writeSetBegin(TType::I64, count($this->aborted)); { - foreach ($this->aborted as $iter648 => $iter649) + foreach ($this->aborted as $iter655 => $iter656) { - if (is_scalar($iter649)) { - $xfer += $output->writeI64($iter648); + if (is_scalar($iter656)) { + $xfer += $output->writeI64($iter655); } else { - $xfer += $output->writeI64($iter649); + $xfer += $output->writeI64($iter656); } } } @@ -21555,12 +21825,12 @@ class HeartbeatTxnRangeResponse { { $output->writeSetBegin(TType::I64, count($this->nosuch)); { - foreach ($this->nosuch as $iter650 => $iter651) + foreach ($this->nosuch as $iter657 => $iter658) { - if (is_scalar($iter651)) { - $xfer += $output->writeI64($iter650); + if (is_scalar($iter658)) { + $xfer += $output->writeI64($iter657); } else { - $xfer += $output->writeI64($iter651); + $xfer += $output->writeI64($iter658); } } } @@ -21719,17 +21989,17 @@ class CompactionRequest { case 6: if ($ftype == TType::MAP) { $this->properties = array(); - $_size652 = 0; - $_ktype653 = 0; - $_vtype654 = 0; - $xfer += $input->readMapBegin($_ktype653, $_vtype654, $_size652); - for ($_i656 = 0; $_i656 < $_size652; ++$_i656) + $_size659 = 0; + $_ktype660 = 0; + $_vtype661 = 0; + $xfer += $input->readMapBegin($_ktype660, $_vtype661, $_size659); + for ($_i663 = 0; $_i663 < $_size659; ++$_i663) { - $key657 = ''; - $val658 = ''; - $xfer += $input->readString($key657); - $xfer += $input->readString($val658); - $this->properties[$key657] = $val658; + $key664 = ''; + $val665 = ''; + $xfer += $input->readString($key664); + $xfer += $input->readString($val665); + $this->properties[$key664] = $val665; } $xfer += $input->readMapEnd(); } else { @@ 
-21782,10 +22052,10 @@ class CompactionRequest { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->properties)); { - foreach ($this->properties as $kiter659 => $viter660) + foreach ($this->properties as $kiter666 => $viter667) { - $xfer += $output->writeString($kiter659); - $xfer += $output->writeString($viter660); + $xfer += $output->writeString($kiter666); + $xfer += $output->writeString($viter667); } } $output->writeMapEnd(); @@ -22780,15 +23050,15 @@ class ShowCompactResponse { case 1: if ($ftype == TType::LST) { $this->compacts = array(); - $_size661 = 0; - $_etype664 = 0; - $xfer += $input->readListBegin($_etype664, $_size661); - for ($_i665 = 0; $_i665 < $_size661; ++$_i665) + $_size668 = 0; + $_etype671 = 0; + $xfer += $input->readListBegin($_etype671, $_size668); + for ($_i672 = 0; $_i672 < $_size668; ++$_i672) { - $elem666 = null; - $elem666 = new \metastore\ShowCompactResponseElement(); - $xfer += $elem666->read($input); - $this->compacts []= $elem666; + $elem673 = null; + $elem673 = new \metastore\ShowCompactResponseElement(); + $xfer += $elem673->read($input); + $this->compacts []= $elem673; } $xfer += $input->readListEnd(); } else { @@ -22816,9 +23086,9 @@ class ShowCompactResponse { { $output->writeListBegin(TType::STRUCT, count($this->compacts)); { - foreach ($this->compacts as $iter667) + foreach ($this->compacts as $iter674) { - $xfer += $iter667->write($output); + $xfer += $iter674->write($output); } } $output->writeListEnd(); @@ -22965,14 +23235,14 @@ class AddDynamicPartitions { case 5: if ($ftype == TType::LST) { $this->partitionnames = array(); - $_size668 = 0; - $_etype671 = 0; - $xfer += $input->readListBegin($_etype671, $_size668); - for ($_i672 = 0; $_i672 < $_size668; ++$_i672) + $_size675 = 0; + $_etype678 = 0; + $xfer += $input->readListBegin($_etype678, $_size675); + for ($_i679 = 0; $_i679 < $_size675; ++$_i679) { - $elem673 = null; - $xfer += $input->readString($elem673); - $this->partitionnames []= $elem673; + $elem680 = null; + $xfer += $input->readString($elem680); + $this->partitionnames []= $elem680; } $xfer += $input->readListEnd(); } else { @@ -23027,9 +23297,9 @@ class AddDynamicPartitions { { $output->writeListBegin(TType::STRING, count($this->partitionnames)); { - foreach ($this->partitionnames as $iter674) + foreach ($this->partitionnames as $iter681) { - $xfer += $output->writeString($iter674); + $xfer += $output->writeString($iter681); } } $output->writeListEnd(); @@ -23324,14 +23594,14 @@ class NotificationEventRequest { case 3: if ($ftype == TType::LST) { $this->eventTypeSkipList = array(); - $_size675 = 0; - $_etype678 = 0; - $xfer += $input->readListBegin($_etype678, $_size675); - for ($_i679 = 0; $_i679 < $_size675; ++$_i679) + $_size682 = 0; + $_etype685 = 0; + $xfer += $input->readListBegin($_etype685, $_size682); + for ($_i686 = 0; $_i686 < $_size682; ++$_i686) { - $elem680 = null; - $xfer += $input->readString($elem680); - $this->eventTypeSkipList []= $elem680; + $elem687 = null; + $xfer += $input->readString($elem687); + $this->eventTypeSkipList []= $elem687; } $xfer += $input->readListEnd(); } else { @@ -23369,9 +23639,9 @@ class NotificationEventRequest { { $output->writeListBegin(TType::STRING, count($this->eventTypeSkipList)); { - foreach ($this->eventTypeSkipList as $iter681) + foreach ($this->eventTypeSkipList as $iter688) { - $xfer += $output->writeString($iter681); + $xfer += $output->writeString($iter688); } } $output->writeListEnd(); @@ -23672,15 +23942,15 @@ class NotificationEventResponse { case 1: if 
($ftype == TType::LST) { $this->events = array(); - $_size682 = 0; - $_etype685 = 0; - $xfer += $input->readListBegin($_etype685, $_size682); - for ($_i686 = 0; $_i686 < $_size682; ++$_i686) + $_size689 = 0; + $_etype692 = 0; + $xfer += $input->readListBegin($_etype692, $_size689); + for ($_i693 = 0; $_i693 < $_size689; ++$_i693) { - $elem687 = null; - $elem687 = new \metastore\NotificationEvent(); - $xfer += $elem687->read($input); - $this->events []= $elem687; + $elem694 = null; + $elem694 = new \metastore\NotificationEvent(); + $xfer += $elem694->read($input); + $this->events []= $elem694; } $xfer += $input->readListEnd(); } else { @@ -23708,9 +23978,9 @@ class NotificationEventResponse { { $output->writeListBegin(TType::STRUCT, count($this->events)); { - foreach ($this->events as $iter688) + foreach ($this->events as $iter695) { - $xfer += $iter688->write($output); + $xfer += $iter695->write($output); } } $output->writeListEnd(); @@ -24139,14 +24409,14 @@ class InsertEventRequestData { case 2: if ($ftype == TType::LST) { $this->filesAdded = array(); - $_size689 = 0; - $_etype692 = 0; - $xfer += $input->readListBegin($_etype692, $_size689); - for ($_i693 = 0; $_i693 < $_size689; ++$_i693) + $_size696 = 0; + $_etype699 = 0; + $xfer += $input->readListBegin($_etype699, $_size696); + for ($_i700 = 0; $_i700 < $_size696; ++$_i700) { - $elem694 = null; - $xfer += $input->readString($elem694); - $this->filesAdded []= $elem694; + $elem701 = null; + $xfer += $input->readString($elem701); + $this->filesAdded []= $elem701; } $xfer += $input->readListEnd(); } else { @@ -24156,14 +24426,14 @@ class InsertEventRequestData { case 3: if ($ftype == TType::LST) { $this->filesAddedChecksum = array(); - $_size695 = 0; - $_etype698 = 0; - $xfer += $input->readListBegin($_etype698, $_size695); - for ($_i699 = 0; $_i699 < $_size695; ++$_i699) + $_size702 = 0; + $_etype705 = 0; + $xfer += $input->readListBegin($_etype705, $_size702); + for ($_i706 = 0; $_i706 < $_size702; ++$_i706) { - $elem700 = null; - $xfer += $input->readString($elem700); - $this->filesAddedChecksum []= $elem700; + $elem707 = null; + $xfer += $input->readString($elem707); + $this->filesAddedChecksum []= $elem707; } $xfer += $input->readListEnd(); } else { @@ -24173,14 +24443,14 @@ class InsertEventRequestData { case 4: if ($ftype == TType::LST) { $this->subDirectoryList = array(); - $_size701 = 0; - $_etype704 = 0; - $xfer += $input->readListBegin($_etype704, $_size701); - for ($_i705 = 0; $_i705 < $_size701; ++$_i705) + $_size708 = 0; + $_etype711 = 0; + $xfer += $input->readListBegin($_etype711, $_size708); + for ($_i712 = 0; $_i712 < $_size708; ++$_i712) { - $elem706 = null; - $xfer += $input->readString($elem706); - $this->subDirectoryList []= $elem706; + $elem713 = null; + $xfer += $input->readString($elem713); + $this->subDirectoryList []= $elem713; } $xfer += $input->readListEnd(); } else { @@ -24213,9 +24483,9 @@ class InsertEventRequestData { { $output->writeListBegin(TType::STRING, count($this->filesAdded)); { - foreach ($this->filesAdded as $iter707) + foreach ($this->filesAdded as $iter714) { - $xfer += $output->writeString($iter707); + $xfer += $output->writeString($iter714); } } $output->writeListEnd(); @@ -24230,9 +24500,9 @@ class InsertEventRequestData { { $output->writeListBegin(TType::STRING, count($this->filesAddedChecksum)); { - foreach ($this->filesAddedChecksum as $iter708) + foreach ($this->filesAddedChecksum as $iter715) { - $xfer += $output->writeString($iter708); + $xfer += $output->writeString($iter715); } } 
$output->writeListEnd(); @@ -24247,9 +24517,9 @@ class InsertEventRequestData { { $output->writeListBegin(TType::STRING, count($this->subDirectoryList)); { - foreach ($this->subDirectoryList as $iter709) + foreach ($this->subDirectoryList as $iter716) { - $xfer += $output->writeString($iter709); + $xfer += $output->writeString($iter716); } } $output->writeListEnd(); @@ -24478,14 +24748,14 @@ class FireEventRequest { case 5: if ($ftype == TType::LST) { $this->partitionVals = array(); - $_size710 = 0; - $_etype713 = 0; - $xfer += $input->readListBegin($_etype713, $_size710); - for ($_i714 = 0; $_i714 < $_size710; ++$_i714) + $_size717 = 0; + $_etype720 = 0; + $xfer += $input->readListBegin($_etype720, $_size717); + for ($_i721 = 0; $_i721 < $_size717; ++$_i721) { - $elem715 = null; - $xfer += $input->readString($elem715); - $this->partitionVals []= $elem715; + $elem722 = null; + $xfer += $input->readString($elem722); + $this->partitionVals []= $elem722; } $xfer += $input->readListEnd(); } else { @@ -24543,9 +24813,9 @@ class FireEventRequest { { $output->writeListBegin(TType::STRING, count($this->partitionVals)); { - foreach ($this->partitionVals as $iter716) + foreach ($this->partitionVals as $iter723) { - $xfer += $output->writeString($iter716); + $xfer += $output->writeString($iter723); } } $output->writeListEnd(); @@ -24756,14 +25026,14 @@ class WriteNotificationLogRequest { case 6: if ($ftype == TType::LST) { $this->partitionVals = array(); - $_size717 = 0; - $_etype720 = 0; - $xfer += $input->readListBegin($_etype720, $_size717); - for ($_i721 = 0; $_i721 < $_size717; ++$_i721) + $_size724 = 0; + $_etype727 = 0; + $xfer += $input->readListBegin($_etype727, $_size724); + for ($_i728 = 0; $_i728 < $_size724; ++$_i728) { - $elem722 = null; - $xfer += $input->readString($elem722); - $this->partitionVals []= $elem722; + $elem729 = null; + $xfer += $input->readString($elem729); + $this->partitionVals []= $elem729; } $xfer += $input->readListEnd(); } else { @@ -24819,9 +25089,9 @@ class WriteNotificationLogRequest { { $output->writeListBegin(TType::STRING, count($this->partitionVals)); { - foreach ($this->partitionVals as $iter723) + foreach ($this->partitionVals as $iter730) { - $xfer += $output->writeString($iter723); + $xfer += $output->writeString($iter730); } } $output->writeListEnd(); @@ -25049,18 +25319,18 @@ class GetFileMetadataByExprResult { case 1: if ($ftype == TType::MAP) { $this->metadata = array(); - $_size724 = 0; - $_ktype725 = 0; - $_vtype726 = 0; - $xfer += $input->readMapBegin($_ktype725, $_vtype726, $_size724); - for ($_i728 = 0; $_i728 < $_size724; ++$_i728) + $_size731 = 0; + $_ktype732 = 0; + $_vtype733 = 0; + $xfer += $input->readMapBegin($_ktype732, $_vtype733, $_size731); + for ($_i735 = 0; $_i735 < $_size731; ++$_i735) { - $key729 = 0; - $val730 = new \metastore\MetadataPpdResult(); - $xfer += $input->readI64($key729); - $val730 = new \metastore\MetadataPpdResult(); - $xfer += $val730->read($input); - $this->metadata[$key729] = $val730; + $key736 = 0; + $val737 = new \metastore\MetadataPpdResult(); + $xfer += $input->readI64($key736); + $val737 = new \metastore\MetadataPpdResult(); + $xfer += $val737->read($input); + $this->metadata[$key736] = $val737; } $xfer += $input->readMapEnd(); } else { @@ -25095,10 +25365,10 @@ class GetFileMetadataByExprResult { { $output->writeMapBegin(TType::I64, TType::STRUCT, count($this->metadata)); { - foreach ($this->metadata as $kiter731 => $viter732) + foreach ($this->metadata as $kiter738 => $viter739) { - $xfer += 
$output->writeI64($kiter731); - $xfer += $viter732->write($output); + $xfer += $output->writeI64($kiter738); + $xfer += $viter739->write($output); } } $output->writeMapEnd(); @@ -25200,14 +25470,14 @@ class GetFileMetadataByExprRequest { case 1: if ($ftype == TType::LST) { $this->fileIds = array(); - $_size733 = 0; - $_etype736 = 0; - $xfer += $input->readListBegin($_etype736, $_size733); - for ($_i737 = 0; $_i737 < $_size733; ++$_i737) + $_size740 = 0; + $_etype743 = 0; + $xfer += $input->readListBegin($_etype743, $_size740); + for ($_i744 = 0; $_i744 < $_size740; ++$_i744) { - $elem738 = null; - $xfer += $input->readI64($elem738); - $this->fileIds []= $elem738; + $elem745 = null; + $xfer += $input->readI64($elem745); + $this->fileIds []= $elem745; } $xfer += $input->readListEnd(); } else { @@ -25256,9 +25526,9 @@ class GetFileMetadataByExprRequest { { $output->writeListBegin(TType::I64, count($this->fileIds)); { - foreach ($this->fileIds as $iter739) + foreach ($this->fileIds as $iter746) { - $xfer += $output->writeI64($iter739); + $xfer += $output->writeI64($iter746); } } $output->writeListEnd(); @@ -25352,17 +25622,17 @@ class GetFileMetadataResult { case 1: if ($ftype == TType::MAP) { $this->metadata = array(); - $_size740 = 0; - $_ktype741 = 0; - $_vtype742 = 0; - $xfer += $input->readMapBegin($_ktype741, $_vtype742, $_size740); - for ($_i744 = 0; $_i744 < $_size740; ++$_i744) + $_size747 = 0; + $_ktype748 = 0; + $_vtype749 = 0; + $xfer += $input->readMapBegin($_ktype748, $_vtype749, $_size747); + for ($_i751 = 0; $_i751 < $_size747; ++$_i751) { - $key745 = 0; - $val746 = ''; - $xfer += $input->readI64($key745); - $xfer += $input->readString($val746); - $this->metadata[$key745] = $val746; + $key752 = 0; + $val753 = ''; + $xfer += $input->readI64($key752); + $xfer += $input->readString($val753); + $this->metadata[$key752] = $val753; } $xfer += $input->readMapEnd(); } else { @@ -25397,10 +25667,10 @@ class GetFileMetadataResult { { $output->writeMapBegin(TType::I64, TType::STRING, count($this->metadata)); { - foreach ($this->metadata as $kiter747 => $viter748) + foreach ($this->metadata as $kiter754 => $viter755) { - $xfer += $output->writeI64($kiter747); - $xfer += $output->writeString($viter748); + $xfer += $output->writeI64($kiter754); + $xfer += $output->writeString($viter755); } } $output->writeMapEnd(); @@ -25469,14 +25739,14 @@ class GetFileMetadataRequest { case 1: if ($ftype == TType::LST) { $this->fileIds = array(); - $_size749 = 0; - $_etype752 = 0; - $xfer += $input->readListBegin($_etype752, $_size749); - for ($_i753 = 0; $_i753 < $_size749; ++$_i753) + $_size756 = 0; + $_etype759 = 0; + $xfer += $input->readListBegin($_etype759, $_size756); + for ($_i760 = 0; $_i760 < $_size756; ++$_i760) { - $elem754 = null; - $xfer += $input->readI64($elem754); - $this->fileIds []= $elem754; + $elem761 = null; + $xfer += $input->readI64($elem761); + $this->fileIds []= $elem761; } $xfer += $input->readListEnd(); } else { @@ -25504,9 +25774,9 @@ class GetFileMetadataRequest { { $output->writeListBegin(TType::I64, count($this->fileIds)); { - foreach ($this->fileIds as $iter755) + foreach ($this->fileIds as $iter762) { - $xfer += $output->writeI64($iter755); + $xfer += $output->writeI64($iter762); } } $output->writeListEnd(); @@ -25646,14 +25916,14 @@ class PutFileMetadataRequest { case 1: if ($ftype == TType::LST) { $this->fileIds = array(); - $_size756 = 0; - $_etype759 = 0; - $xfer += $input->readListBegin($_etype759, $_size756); - for ($_i760 = 0; $_i760 < $_size756; ++$_i760) + 
$_size763 = 0; + $_etype766 = 0; + $xfer += $input->readListBegin($_etype766, $_size763); + for ($_i767 = 0; $_i767 < $_size763; ++$_i767) { - $elem761 = null; - $xfer += $input->readI64($elem761); - $this->fileIds []= $elem761; + $elem768 = null; + $xfer += $input->readI64($elem768); + $this->fileIds []= $elem768; } $xfer += $input->readListEnd(); } else { @@ -25663,14 +25933,14 @@ class PutFileMetadataRequest { case 2: if ($ftype == TType::LST) { $this->metadata = array(); - $_size762 = 0; - $_etype765 = 0; - $xfer += $input->readListBegin($_etype765, $_size762); - for ($_i766 = 0; $_i766 < $_size762; ++$_i766) + $_size769 = 0; + $_etype772 = 0; + $xfer += $input->readListBegin($_etype772, $_size769); + for ($_i773 = 0; $_i773 < $_size769; ++$_i773) { - $elem767 = null; - $xfer += $input->readString($elem767); - $this->metadata []= $elem767; + $elem774 = null; + $xfer += $input->readString($elem774); + $this->metadata []= $elem774; } $xfer += $input->readListEnd(); } else { @@ -25705,9 +25975,9 @@ class PutFileMetadataRequest { { $output->writeListBegin(TType::I64, count($this->fileIds)); { - foreach ($this->fileIds as $iter768) + foreach ($this->fileIds as $iter775) { - $xfer += $output->writeI64($iter768); + $xfer += $output->writeI64($iter775); } } $output->writeListEnd(); @@ -25722,9 +25992,9 @@ class PutFileMetadataRequest { { $output->writeListBegin(TType::STRING, count($this->metadata)); { - foreach ($this->metadata as $iter769) + foreach ($this->metadata as $iter776) { - $xfer += $output->writeString($iter769); + $xfer += $output->writeString($iter776); } } $output->writeListEnd(); @@ -25843,14 +26113,14 @@ class ClearFileMetadataRequest { case 1: if ($ftype == TType::LST) { $this->fileIds = array(); - $_size770 = 0; - $_etype773 = 0; - $xfer += $input->readListBegin($_etype773, $_size770); - for ($_i774 = 0; $_i774 < $_size770; ++$_i774) + $_size777 = 0; + $_etype780 = 0; + $xfer += $input->readListBegin($_etype780, $_size777); + for ($_i781 = 0; $_i781 < $_size777; ++$_i781) { - $elem775 = null; - $xfer += $input->readI64($elem775); - $this->fileIds []= $elem775; + $elem782 = null; + $xfer += $input->readI64($elem782); + $this->fileIds []= $elem782; } $xfer += $input->readListEnd(); } else { @@ -25878,9 +26148,9 @@ class ClearFileMetadataRequest { { $output->writeListBegin(TType::I64, count($this->fileIds)); { - foreach ($this->fileIds as $iter776) + foreach ($this->fileIds as $iter783) { - $xfer += $output->writeI64($iter776); + $xfer += $output->writeI64($iter783); } } $output->writeListEnd(); @@ -26164,15 +26434,15 @@ class GetAllFunctionsResponse { case 1: if ($ftype == TType::LST) { $this->functions = array(); - $_size777 = 0; - $_etype780 = 0; - $xfer += $input->readListBegin($_etype780, $_size777); - for ($_i781 = 0; $_i781 < $_size777; ++$_i781) + $_size784 = 0; + $_etype787 = 0; + $xfer += $input->readListBegin($_etype787, $_size784); + for ($_i788 = 0; $_i788 < $_size784; ++$_i788) { - $elem782 = null; - $elem782 = new \metastore\Function(); - $xfer += $elem782->read($input); - $this->functions []= $elem782; + $elem789 = null; + $elem789 = new \metastore\Function(); + $xfer += $elem789->read($input); + $this->functions []= $elem789; } $xfer += $input->readListEnd(); } else { @@ -26200,9 +26470,9 @@ class GetAllFunctionsResponse { { $output->writeListBegin(TType::STRUCT, count($this->functions)); { - foreach ($this->functions as $iter783) + foreach ($this->functions as $iter790) { - $xfer += $iter783->write($output); + $xfer += $iter790->write($output); } } 
$output->writeListEnd(); @@ -26266,14 +26536,14 @@ class ClientCapabilities { case 1: if ($ftype == TType::LST) { $this->values = array(); - $_size784 = 0; - $_etype787 = 0; - $xfer += $input->readListBegin($_etype787, $_size784); - for ($_i788 = 0; $_i788 < $_size784; ++$_i788) + $_size791 = 0; + $_etype794 = 0; + $xfer += $input->readListBegin($_etype794, $_size791); + for ($_i795 = 0; $_i795 < $_size791; ++$_i795) { - $elem789 = null; - $xfer += $input->readI32($elem789); - $this->values []= $elem789; + $elem796 = null; + $xfer += $input->readI32($elem796); + $this->values []= $elem796; } $xfer += $input->readListEnd(); } else { @@ -26301,9 +26571,9 @@ class ClientCapabilities { { $output->writeListBegin(TType::I32, count($this->values)); { - foreach ($this->values as $iter790) + foreach ($this->values as $iter797) { - $xfer += $output->writeI32($iter790); + $xfer += $output->writeI32($iter797); } } $output->writeListEnd(); @@ -26488,14 +26758,14 @@ class GetTableRequest { case 8: if ($ftype == TType::LST) { $this->processorCapabilities = array(); - $_size791 = 0; - $_etype794 = 0; - $xfer += $input->readListBegin($_etype794, $_size791); - for ($_i795 = 0; $_i795 < $_size791; ++$_i795) + $_size798 = 0; + $_etype801 = 0; + $xfer += $input->readListBegin($_etype801, $_size798); + for ($_i802 = 0; $_i802 < $_size798; ++$_i802) { - $elem796 = null; - $xfer += $input->readString($elem796); - $this->processorCapabilities []= $elem796; + $elem803 = null; + $xfer += $input->readString($elem803); + $this->processorCapabilities []= $elem803; } $xfer += $input->readListEnd(); } else { @@ -26563,9 +26833,9 @@ class GetTableRequest { { $output->writeListBegin(TType::STRING, count($this->processorCapabilities)); { - foreach ($this->processorCapabilities as $iter797) + foreach ($this->processorCapabilities as $iter804) { - $xfer += $output->writeString($iter797); + $xfer += $output->writeString($iter804); } } $output->writeListEnd(); @@ -26804,14 +27074,14 @@ class GetTablesRequest { case 2: if ($ftype == TType::LST) { $this->tblNames = array(); - $_size798 = 0; - $_etype801 = 0; - $xfer += $input->readListBegin($_etype801, $_size798); - for ($_i802 = 0; $_i802 < $_size798; ++$_i802) + $_size805 = 0; + $_etype808 = 0; + $xfer += $input->readListBegin($_etype808, $_size805); + for ($_i809 = 0; $_i809 < $_size805; ++$_i809) { - $elem803 = null; - $xfer += $input->readString($elem803); - $this->tblNames []= $elem803; + $elem810 = null; + $xfer += $input->readString($elem810); + $this->tblNames []= $elem810; } $xfer += $input->readListEnd(); } else { @@ -26836,14 +27106,14 @@ class GetTablesRequest { case 5: if ($ftype == TType::LST) { $this->processorCapabilities = array(); - $_size804 = 0; - $_etype807 = 0; - $xfer += $input->readListBegin($_etype807, $_size804); - for ($_i808 = 0; $_i808 < $_size804; ++$_i808) + $_size811 = 0; + $_etype814 = 0; + $xfer += $input->readListBegin($_etype814, $_size811); + for ($_i815 = 0; $_i815 < $_size811; ++$_i815) { - $elem809 = null; - $xfer += $input->readString($elem809); - $this->processorCapabilities []= $elem809; + $elem816 = null; + $xfer += $input->readString($elem816); + $this->processorCapabilities []= $elem816; } $xfer += $input->readListEnd(); } else { @@ -26883,9 +27153,9 @@ class GetTablesRequest { { $output->writeListBegin(TType::STRING, count($this->tblNames)); { - foreach ($this->tblNames as $iter810) + foreach ($this->tblNames as $iter817) { - $xfer += $output->writeString($iter810); + $xfer += $output->writeString($iter817); } } 
$output->writeListEnd(); @@ -26913,9 +27183,9 @@ class GetTablesRequest { { $output->writeListBegin(TType::STRING, count($this->processorCapabilities)); { - foreach ($this->processorCapabilities as $iter811) + foreach ($this->processorCapabilities as $iter818) { - $xfer += $output->writeString($iter811); + $xfer += $output->writeString($iter818); } } $output->writeListEnd(); @@ -26985,15 +27255,15 @@ class GetTablesResult { case 1: if ($ftype == TType::LST) { $this->tables = array(); - $_size812 = 0; - $_etype815 = 0; - $xfer += $input->readListBegin($_etype815, $_size812); - for ($_i816 = 0; $_i816 < $_size812; ++$_i816) + $_size819 = 0; + $_etype822 = 0; + $xfer += $input->readListBegin($_etype822, $_size819); + for ($_i823 = 0; $_i823 < $_size819; ++$_i823) { - $elem817 = null; - $elem817 = new \metastore\Table(); - $xfer += $elem817->read($input); - $this->tables []= $elem817; + $elem824 = null; + $elem824 = new \metastore\Table(); + $xfer += $elem824->read($input); + $this->tables []= $elem824; } $xfer += $input->readListEnd(); } else { @@ -27021,9 +27291,9 @@ class GetTablesResult { { $output->writeListBegin(TType::STRUCT, count($this->tables)); { - foreach ($this->tables as $iter818) + foreach ($this->tables as $iter825) { - $xfer += $iter818->write($output); + $xfer += $iter825->write($output); } } $output->writeListEnd(); @@ -27188,14 +27458,14 @@ class GetTablesExtRequest { case 6: if ($ftype == TType::LST) { $this->processorCapabilities = array(); - $_size819 = 0; - $_etype822 = 0; - $xfer += $input->readListBegin($_etype822, $_size819); - for ($_i823 = 0; $_i823 < $_size819; ++$_i823) + $_size826 = 0; + $_etype829 = 0; + $xfer += $input->readListBegin($_etype829, $_size826); + for ($_i830 = 0; $_i830 < $_size826; ++$_i830) { - $elem824 = null; - $xfer += $input->readString($elem824); - $this->processorCapabilities []= $elem824; + $elem831 = null; + $xfer += $input->readString($elem831); + $this->processorCapabilities []= $elem831; } $xfer += $input->readListEnd(); } else { @@ -27255,9 +27525,9 @@ class GetTablesExtRequest { { $output->writeListBegin(TType::STRING, count($this->processorCapabilities)); { - foreach ($this->processorCapabilities as $iter825) + foreach ($this->processorCapabilities as $iter832) { - $xfer += $output->writeString($iter825); + $xfer += $output->writeString($iter832); } } $output->writeListEnd(); @@ -27362,14 +27632,14 @@ class ExtendedTableInfo { case 3: if ($ftype == TType::LST) { $this->processorCapabilities = array(); - $_size826 = 0; - $_etype829 = 0; - $xfer += $input->readListBegin($_etype829, $_size826); - for ($_i830 = 0; $_i830 < $_size826; ++$_i830) + $_size833 = 0; + $_etype836 = 0; + $xfer += $input->readListBegin($_etype836, $_size833); + for ($_i837 = 0; $_i837 < $_size833; ++$_i837) { - $elem831 = null; - $xfer += $input->readString($elem831); - $this->processorCapabilities []= $elem831; + $elem838 = null; + $xfer += $input->readString($elem838); + $this->processorCapabilities []= $elem838; } $xfer += $input->readListEnd(); } else { @@ -27407,9 +27677,9 @@ class ExtendedTableInfo { { $output->writeListBegin(TType::STRING, count($this->processorCapabilities)); { - foreach ($this->processorCapabilities as $iter832) + foreach ($this->processorCapabilities as $iter839) { - $xfer += $output->writeString($iter832); + $xfer += $output->writeString($iter839); } } $output->writeListEnd(); @@ -29216,15 +29486,15 @@ class WMFullResourcePlan { case 2: if ($ftype == TType::LST) { $this->pools = array(); - $_size833 = 0; - $_etype836 = 0; - $xfer += 
$input->readListBegin($_etype836, $_size833); - for ($_i837 = 0; $_i837 < $_size833; ++$_i837) + $_size840 = 0; + $_etype843 = 0; + $xfer += $input->readListBegin($_etype843, $_size840); + for ($_i844 = 0; $_i844 < $_size840; ++$_i844) { - $elem838 = null; - $elem838 = new \metastore\WMPool(); - $xfer += $elem838->read($input); - $this->pools []= $elem838; + $elem845 = null; + $elem845 = new \metastore\WMPool(); + $xfer += $elem845->read($input); + $this->pools []= $elem845; } $xfer += $input->readListEnd(); } else { @@ -29234,15 +29504,15 @@ class WMFullResourcePlan { case 3: if ($ftype == TType::LST) { $this->mappings = array(); - $_size839 = 0; - $_etype842 = 0; - $xfer += $input->readListBegin($_etype842, $_size839); - for ($_i843 = 0; $_i843 < $_size839; ++$_i843) + $_size846 = 0; + $_etype849 = 0; + $xfer += $input->readListBegin($_etype849, $_size846); + for ($_i850 = 0; $_i850 < $_size846; ++$_i850) { - $elem844 = null; - $elem844 = new \metastore\WMMapping(); - $xfer += $elem844->read($input); - $this->mappings []= $elem844; + $elem851 = null; + $elem851 = new \metastore\WMMapping(); + $xfer += $elem851->read($input); + $this->mappings []= $elem851; } $xfer += $input->readListEnd(); } else { @@ -29252,15 +29522,15 @@ class WMFullResourcePlan { case 4: if ($ftype == TType::LST) { $this->triggers = array(); - $_size845 = 0; - $_etype848 = 0; - $xfer += $input->readListBegin($_etype848, $_size845); - for ($_i849 = 0; $_i849 < $_size845; ++$_i849) + $_size852 = 0; + $_etype855 = 0; + $xfer += $input->readListBegin($_etype855, $_size852); + for ($_i856 = 0; $_i856 < $_size852; ++$_i856) { - $elem850 = null; - $elem850 = new \metastore\WMTrigger(); - $xfer += $elem850->read($input); - $this->triggers []= $elem850; + $elem857 = null; + $elem857 = new \metastore\WMTrigger(); + $xfer += $elem857->read($input); + $this->triggers []= $elem857; } $xfer += $input->readListEnd(); } else { @@ -29270,15 +29540,15 @@ class WMFullResourcePlan { case 5: if ($ftype == TType::LST) { $this->poolTriggers = array(); - $_size851 = 0; - $_etype854 = 0; - $xfer += $input->readListBegin($_etype854, $_size851); - for ($_i855 = 0; $_i855 < $_size851; ++$_i855) + $_size858 = 0; + $_etype861 = 0; + $xfer += $input->readListBegin($_etype861, $_size858); + for ($_i862 = 0; $_i862 < $_size858; ++$_i862) { - $elem856 = null; - $elem856 = new \metastore\WMPoolTrigger(); - $xfer += $elem856->read($input); - $this->poolTriggers []= $elem856; + $elem863 = null; + $elem863 = new \metastore\WMPoolTrigger(); + $xfer += $elem863->read($input); + $this->poolTriggers []= $elem863; } $xfer += $input->readListEnd(); } else { @@ -29314,9 +29584,9 @@ class WMFullResourcePlan { { $output->writeListBegin(TType::STRUCT, count($this->pools)); { - foreach ($this->pools as $iter857) + foreach ($this->pools as $iter864) { - $xfer += $iter857->write($output); + $xfer += $iter864->write($output); } } $output->writeListEnd(); @@ -29331,9 +29601,9 @@ class WMFullResourcePlan { { $output->writeListBegin(TType::STRUCT, count($this->mappings)); { - foreach ($this->mappings as $iter858) + foreach ($this->mappings as $iter865) { - $xfer += $iter858->write($output); + $xfer += $iter865->write($output); } } $output->writeListEnd(); @@ -29348,9 +29618,9 @@ class WMFullResourcePlan { { $output->writeListBegin(TType::STRUCT, count($this->triggers)); { - foreach ($this->triggers as $iter859) + foreach ($this->triggers as $iter866) { - $xfer += $iter859->write($output); + $xfer += $iter866->write($output); } } $output->writeListEnd(); @@ -29365,9 
+29635,9 @@ class WMFullResourcePlan { { $output->writeListBegin(TType::STRUCT, count($this->poolTriggers)); { - foreach ($this->poolTriggers as $iter860) + foreach ($this->poolTriggers as $iter867) { - $xfer += $iter860->write($output); + $xfer += $iter867->write($output); } } $output->writeListEnd(); @@ -29993,15 +30263,15 @@ class WMGetAllResourcePlanResponse { case 1: if ($ftype == TType::LST) { $this->resourcePlans = array(); - $_size861 = 0; - $_etype864 = 0; - $xfer += $input->readListBegin($_etype864, $_size861); - for ($_i865 = 0; $_i865 < $_size861; ++$_i865) + $_size868 = 0; + $_etype871 = 0; + $xfer += $input->readListBegin($_etype871, $_size868); + for ($_i872 = 0; $_i872 < $_size868; ++$_i872) { - $elem866 = null; - $elem866 = new \metastore\WMResourcePlan(); - $xfer += $elem866->read($input); - $this->resourcePlans []= $elem866; + $elem873 = null; + $elem873 = new \metastore\WMResourcePlan(); + $xfer += $elem873->read($input); + $this->resourcePlans []= $elem873; } $xfer += $input->readListEnd(); } else { @@ -30029,9 +30299,9 @@ class WMGetAllResourcePlanResponse { { $output->writeListBegin(TType::STRUCT, count($this->resourcePlans)); { - foreach ($this->resourcePlans as $iter867) + foreach ($this->resourcePlans as $iter874) { - $xfer += $iter867->write($output); + $xfer += $iter874->write($output); } } $output->writeListEnd(); @@ -30483,14 +30753,14 @@ class WMValidateResourcePlanResponse { case 1: if ($ftype == TType::LST) { $this->errors = array(); - $_size868 = 0; - $_etype871 = 0; - $xfer += $input->readListBegin($_etype871, $_size868); - for ($_i872 = 0; $_i872 < $_size868; ++$_i872) + $_size875 = 0; + $_etype878 = 0; + $xfer += $input->readListBegin($_etype878, $_size875); + for ($_i879 = 0; $_i879 < $_size875; ++$_i879) { - $elem873 = null; - $xfer += $input->readString($elem873); - $this->errors []= $elem873; + $elem880 = null; + $xfer += $input->readString($elem880); + $this->errors []= $elem880; } $xfer += $input->readListEnd(); } else { @@ -30500,14 +30770,14 @@ class WMValidateResourcePlanResponse { case 2: if ($ftype == TType::LST) { $this->warnings = array(); - $_size874 = 0; - $_etype877 = 0; - $xfer += $input->readListBegin($_etype877, $_size874); - for ($_i878 = 0; $_i878 < $_size874; ++$_i878) + $_size881 = 0; + $_etype884 = 0; + $xfer += $input->readListBegin($_etype884, $_size881); + for ($_i885 = 0; $_i885 < $_size881; ++$_i885) { - $elem879 = null; - $xfer += $input->readString($elem879); - $this->warnings []= $elem879; + $elem886 = null; + $xfer += $input->readString($elem886); + $this->warnings []= $elem886; } $xfer += $input->readListEnd(); } else { @@ -30535,9 +30805,9 @@ class WMValidateResourcePlanResponse { { $output->writeListBegin(TType::STRING, count($this->errors)); { - foreach ($this->errors as $iter880) + foreach ($this->errors as $iter887) { - $xfer += $output->writeString($iter880); + $xfer += $output->writeString($iter887); } } $output->writeListEnd(); @@ -30552,9 +30822,9 @@ class WMValidateResourcePlanResponse { { $output->writeListBegin(TType::STRING, count($this->warnings)); { - foreach ($this->warnings as $iter881) + foreach ($this->warnings as $iter888) { - $xfer += $output->writeString($iter881); + $xfer += $output->writeString($iter888); } } $output->writeListEnd(); @@ -31296,15 +31566,15 @@ class WMGetTriggersForResourePlanResponse { case 1: if ($ftype == TType::LST) { $this->triggers = array(); - $_size882 = 0; - $_etype885 = 0; - $xfer += $input->readListBegin($_etype885, $_size882); - for ($_i886 = 0; $_i886 < $_size882; 
++$_i886) + $_size889 = 0; + $_etype892 = 0; + $xfer += $input->readListBegin($_etype892, $_size889); + for ($_i893 = 0; $_i893 < $_size889; ++$_i893) { - $elem887 = null; - $elem887 = new \metastore\WMTrigger(); - $xfer += $elem887->read($input); - $this->triggers []= $elem887; + $elem894 = null; + $elem894 = new \metastore\WMTrigger(); + $xfer += $elem894->read($input); + $this->triggers []= $elem894; } $xfer += $input->readListEnd(); } else { @@ -31332,9 +31602,9 @@ class WMGetTriggersForResourePlanResponse { { $output->writeListBegin(TType::STRUCT, count($this->triggers)); { - foreach ($this->triggers as $iter888) + foreach ($this->triggers as $iter895) { - $xfer += $iter888->write($output); + $xfer += $iter895->write($output); } } $output->writeListEnd(); @@ -32964,15 +33234,15 @@ class SchemaVersion { case 4: if ($ftype == TType::LST) { $this->cols = array(); - $_size889 = 0; - $_etype892 = 0; - $xfer += $input->readListBegin($_etype892, $_size889); - for ($_i893 = 0; $_i893 < $_size889; ++$_i893) + $_size896 = 0; + $_etype899 = 0; + $xfer += $input->readListBegin($_etype899, $_size896); + for ($_i900 = 0; $_i900 < $_size896; ++$_i900) { - $elem894 = null; - $elem894 = new \metastore\FieldSchema(); - $xfer += $elem894->read($input); - $this->cols []= $elem894; + $elem901 = null; + $elem901 = new \metastore\FieldSchema(); + $xfer += $elem901->read($input); + $this->cols []= $elem901; } $xfer += $input->readListEnd(); } else { @@ -33061,9 +33331,9 @@ class SchemaVersion { { $output->writeListBegin(TType::STRUCT, count($this->cols)); { - foreach ($this->cols as $iter895) + foreach ($this->cols as $iter902) { - $xfer += $iter895->write($output); + $xfer += $iter902->write($output); } } $output->writeListEnd(); @@ -33385,15 +33655,15 @@ class FindSchemasByColsResp { case 1: if ($ftype == TType::LST) { $this->schemaVersions = array(); - $_size896 = 0; - $_etype899 = 0; - $xfer += $input->readListBegin($_etype899, $_size896); - for ($_i900 = 0; $_i900 < $_size896; ++$_i900) + $_size903 = 0; + $_etype906 = 0; + $xfer += $input->readListBegin($_etype906, $_size903); + for ($_i907 = 0; $_i907 < $_size903; ++$_i907) { - $elem901 = null; - $elem901 = new \metastore\SchemaVersionDescriptor(); - $xfer += $elem901->read($input); - $this->schemaVersions []= $elem901; + $elem908 = null; + $elem908 = new \metastore\SchemaVersionDescriptor(); + $xfer += $elem908->read($input); + $this->schemaVersions []= $elem908; } $xfer += $input->readListEnd(); } else { @@ -33421,9 +33691,9 @@ class FindSchemasByColsResp { { $output->writeListBegin(TType::STRUCT, count($this->schemaVersions)); { - foreach ($this->schemaVersions as $iter902) + foreach ($this->schemaVersions as $iter909) { - $xfer += $iter902->write($output); + $xfer += $iter909->write($output); } } $output->writeListEnd(); @@ -34076,15 +34346,15 @@ class AlterPartitionsRequest { case 4: if ($ftype == TType::LST) { $this->partitions = array(); - $_size903 = 0; - $_etype906 = 0; - $xfer += $input->readListBegin($_etype906, $_size903); - for ($_i907 = 0; $_i907 < $_size903; ++$_i907) + $_size910 = 0; + $_etype913 = 0; + $xfer += $input->readListBegin($_etype913, $_size910); + for ($_i914 = 0; $_i914 < $_size910; ++$_i914) { - $elem908 = null; - $elem908 = new \metastore\Partition(); - $xfer += $elem908->read($input); - $this->partitions []= $elem908; + $elem915 = null; + $elem915 = new \metastore\Partition(); + $xfer += $elem915->read($input); + $this->partitions []= $elem915; } $xfer += $input->readListEnd(); } else { @@ -34149,9 +34419,9 @@ class 
AlterPartitionsRequest { { $output->writeListBegin(TType::STRUCT, count($this->partitions)); { - foreach ($this->partitions as $iter909) + foreach ($this->partitions as $iter916) { - $xfer += $iter909->write($output); + $xfer += $iter916->write($output); } } $output->writeListEnd(); @@ -34360,14 +34630,14 @@ class RenamePartitionRequest { case 4: if ($ftype == TType::LST) { $this->partVals = array(); - $_size910 = 0; - $_etype913 = 0; - $xfer += $input->readListBegin($_etype913, $_size910); - for ($_i914 = 0; $_i914 < $_size910; ++$_i914) + $_size917 = 0; + $_etype920 = 0; + $xfer += $input->readListBegin($_etype920, $_size917); + for ($_i921 = 0; $_i921 < $_size917; ++$_i921) { - $elem915 = null; - $xfer += $input->readString($elem915); - $this->partVals []= $elem915; + $elem922 = null; + $xfer += $input->readString($elem922); + $this->partVals []= $elem922; } $xfer += $input->readListEnd(); } else { @@ -34425,9 +34695,9 @@ class RenamePartitionRequest { { $output->writeListBegin(TType::STRING, count($this->partVals)); { - foreach ($this->partVals as $iter916) + foreach ($this->partVals as $iter923) { - $xfer += $output->writeString($iter916); + $xfer += $output->writeString($iter923); } } $output->writeListEnd(); @@ -34849,14 +35119,14 @@ class GetPartitionsProjectionSpec { case 1: if ($ftype == TType::LST) { $this->fieldList = array(); - $_size917 = 0; - $_etype920 = 0; - $xfer += $input->readListBegin($_etype920, $_size917); - for ($_i921 = 0; $_i921 < $_size917; ++$_i921) + $_size924 = 0; + $_etype927 = 0; + $xfer += $input->readListBegin($_etype927, $_size924); + for ($_i928 = 0; $_i928 < $_size924; ++$_i928) { - $elem922 = null; - $xfer += $input->readString($elem922); - $this->fieldList []= $elem922; + $elem929 = null; + $xfer += $input->readString($elem929); + $this->fieldList []= $elem929; } $xfer += $input->readListEnd(); } else { @@ -34898,9 +35168,9 @@ class GetPartitionsProjectionSpec { { $output->writeListBegin(TType::STRING, count($this->fieldList)); { - foreach ($this->fieldList as $iter923) + foreach ($this->fieldList as $iter930) { - $xfer += $output->writeString($iter923); + $xfer += $output->writeString($iter930); } } $output->writeListEnd(); @@ -34992,14 +35262,14 @@ class GetPartitionsFilterSpec { case 8: if ($ftype == TType::LST) { $this->filters = array(); - $_size924 = 0; - $_etype927 = 0; - $xfer += $input->readListBegin($_etype927, $_size924); - for ($_i928 = 0; $_i928 < $_size924; ++$_i928) + $_size931 = 0; + $_etype934 = 0; + $xfer += $input->readListBegin($_etype934, $_size931); + for ($_i935 = 0; $_i935 < $_size931; ++$_i935) { - $elem929 = null; - $xfer += $input->readString($elem929); - $this->filters []= $elem929; + $elem936 = null; + $xfer += $input->readString($elem936); + $this->filters []= $elem936; } $xfer += $input->readListEnd(); } else { @@ -35032,9 +35302,9 @@ class GetPartitionsFilterSpec { { $output->writeListBegin(TType::STRING, count($this->filters)); { - foreach ($this->filters as $iter930) + foreach ($this->filters as $iter937) { - $xfer += $output->writeString($iter930); + $xfer += $output->writeString($iter937); } } $output->writeListEnd(); @@ -35099,15 +35369,15 @@ class GetPartitionsResponse { case 1: if ($ftype == TType::LST) { $this->partitionSpec = array(); - $_size931 = 0; - $_etype934 = 0; - $xfer += $input->readListBegin($_etype934, $_size931); - for ($_i935 = 0; $_i935 < $_size931; ++$_i935) + $_size938 = 0; + $_etype941 = 0; + $xfer += $input->readListBegin($_etype941, $_size938); + for ($_i942 = 0; $_i942 < $_size938; ++$_i942) 
{ - $elem936 = null; - $elem936 = new \metastore\PartitionSpec(); - $xfer += $elem936->read($input); - $this->partitionSpec []= $elem936; + $elem943 = null; + $elem943 = new \metastore\PartitionSpec(); + $xfer += $elem943->read($input); + $this->partitionSpec []= $elem943; } $xfer += $input->readListEnd(); } else { @@ -35135,9 +35405,9 @@ class GetPartitionsResponse { { $output->writeListBegin(TType::STRUCT, count($this->partitionSpec)); { - foreach ($this->partitionSpec as $iter937) + foreach ($this->partitionSpec as $iter944) { - $xfer += $iter937->write($output); + $xfer += $iter944->write($output); } } $output->writeListEnd(); @@ -35194,6 +35464,10 @@ class GetPartitionsRequest { * @var string */ public $processorIdentifier = null; + /** + * @var string + */ + public $validWriteIdList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -35248,6 +35522,10 @@ class GetPartitionsRequest { 'var' => 'processorIdentifier', 'type' => TType::STRING, ), + 11 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -35281,6 +35559,9 @@ class GetPartitionsRequest { if (isset($vals['processorIdentifier'])) { $this->processorIdentifier = $vals['processorIdentifier']; } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } } } @@ -35341,14 +35622,14 @@ class GetPartitionsRequest { case 6: if ($ftype == TType::LST) { $this->groupNames = array(); - $_size938 = 0; - $_etype941 = 0; - $xfer += $input->readListBegin($_etype941, $_size938); - for ($_i942 = 0; $_i942 < $_size938; ++$_i942) + $_size945 = 0; + $_etype948 = 0; + $xfer += $input->readListBegin($_etype948, $_size945); + for ($_i949 = 0; $_i949 < $_size945; ++$_i949) { - $elem943 = null; - $xfer += $input->readString($elem943); - $this->groupNames []= $elem943; + $elem950 = null; + $xfer += $input->readString($elem950); + $this->groupNames []= $elem950; } $xfer += $input->readListEnd(); } else { @@ -35374,14 +35655,14 @@ class GetPartitionsRequest { case 9: if ($ftype == TType::LST) { $this->processorCapabilities = array(); - $_size944 = 0; - $_etype947 = 0; - $xfer += $input->readListBegin($_etype947, $_size944); - for ($_i948 = 0; $_i948 < $_size944; ++$_i948) + $_size951 = 0; + $_etype954 = 0; + $xfer += $input->readListBegin($_etype954, $_size951); + for ($_i955 = 0; $_i955 < $_size951; ++$_i955) { - $elem949 = null; - $xfer += $input->readString($elem949); - $this->processorCapabilities []= $elem949; + $elem956 = null; + $xfer += $input->readString($elem956); + $this->processorCapabilities []= $elem956; } $xfer += $input->readListEnd(); } else { @@ -35395,6 +35676,13 @@ class GetPartitionsRequest { $xfer += $input->skip($ftype); } break; + case 11: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -35441,9 +35729,9 @@ class GetPartitionsRequest { { $output->writeListBegin(TType::STRING, count($this->groupNames)); { - foreach ($this->groupNames as $iter950) + foreach ($this->groupNames as $iter957) { - $xfer += $output->writeString($iter950); + $xfer += $output->writeString($iter957); } } $output->writeListEnd(); @@ -35474,9 +35762,9 @@ class GetPartitionsRequest { { $output->writeListBegin(TType::STRING, count($this->processorCapabilities)); { - foreach ($this->processorCapabilities as $iter951) + foreach ($this->processorCapabilities as $iter958) { - $xfer += 
$output->writeString($iter951); + $xfer += $output->writeString($iter958); } } $output->writeListEnd(); @@ -35488,6 +35776,11 @@ class GetPartitionsRequest { $xfer += $output->writeString($this->processorIdentifier); $xfer += $output->writeFieldEnd(); } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 11); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote index 16add782ce..5074ee17c5 100755 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote @@ -41,10 +41,10 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help': print(' bool create_type(Type type)') print(' bool drop_type(string type)') print(' get_type_all(string name)') - print(' get_fields(string db_name, string table_name)') - print(' get_fields_with_environment_context(string db_name, string table_name, EnvironmentContext environment_context)') - print(' get_schema(string db_name, string table_name)') - print(' get_schema_with_environment_context(string db_name, string table_name, EnvironmentContext environment_context)') + print(' get_fields(string db_name, string table_name, string validWriteIdList)') + print(' get_fields_with_environment_context(string db_name, string table_name, EnvironmentContext environment_context, string validWriteIdList)') + print(' get_schema(string db_name, string table_name, string validWriteIdList)') + print(' get_schema_with_environment_context(string db_name, string table_name, EnvironmentContext environment_context, string validWriteIdList)') print(' void create_table(Table tbl)') print(' void create_table_with_environment_context(Table tbl, EnvironmentContext environment_context)') print(' void create_table_with_constraints(Table tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints)') @@ -65,7 +65,7 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help': print(' get_materialized_views_for_rewriting(string db_name)') print(' get_table_meta(string db_patterns, string tbl_patterns, tbl_types)') print(' get_all_tables(string db_name)') - print(' Table get_table(string dbname, string tbl_name)') + print(' Table get_table(string dbname, string tbl_name, string validWriteIdList)') print(' get_table_objects_by_name(string dbname, tbl_names)') print(' get_tables_ext(GetTablesExtRequest req)') print(' GetTableResult get_table_req(GetTableRequest req)') @@ -91,24 +91,24 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help': print(' bool drop_partition_by_name(string db_name, string tbl_name, string part_name, bool deleteData)') print(' bool drop_partition_by_name_with_environment_context(string db_name, string tbl_name, string part_name, bool deleteData, EnvironmentContext environment_context)') print(' DropPartitionsResult drop_partitions_req(DropPartitionsRequest req)') - print(' Partition get_partition(string db_name, string tbl_name, part_vals)') + print(' Partition get_partition(string db_name, string tbl_name, part_vals, string validTxnList)') print(' Partition exchange_partition( 
partitionSpecs, string source_db, string source_table_name, string dest_db, string dest_table_name)') print(' exchange_partitions( partitionSpecs, string source_db, string source_table_name, string dest_db, string dest_table_name)') - print(' Partition get_partition_with_auth(string db_name, string tbl_name, part_vals, string user_name, group_names)') - print(' Partition get_partition_by_name(string db_name, string tbl_name, string part_name)') - print(' get_partitions(string db_name, string tbl_name, i16 max_parts)') - print(' get_partitions_with_auth(string db_name, string tbl_name, i16 max_parts, string user_name, group_names)') - print(' get_partitions_pspec(string db_name, string tbl_name, i32 max_parts)') - print(' get_partition_names(string db_name, string tbl_name, i16 max_parts)') + print(' Partition get_partition_with_auth(string db_name, string tbl_name, part_vals, string user_name, group_names, string validTxnList)') + print(' Partition get_partition_by_name(string db_name, string tbl_name, string part_name, string validTxnList)') + print(' get_partitions(string db_name, string tbl_name, i16 max_parts, string validTxnList)') + print(' get_partitions_with_auth(string db_name, string tbl_name, i16 max_parts, string user_name, group_names, string validTxnList)') + print(' get_partitions_pspec(string db_name, string tbl_name, i32 max_parts, string validTxnList)') + print(' get_partition_names(string db_name, string tbl_name, i16 max_parts, string validTxnList)') print(' PartitionValuesResponse get_partition_values(PartitionValuesRequest request)') - print(' get_partitions_ps(string db_name, string tbl_name, part_vals, i16 max_parts)') - print(' get_partitions_ps_with_auth(string db_name, string tbl_name, part_vals, i16 max_parts, string user_name, group_names)') - print(' get_partition_names_ps(string db_name, string tbl_name, part_vals, i16 max_parts)') - print(' get_partitions_by_filter(string db_name, string tbl_name, string filter, i16 max_parts)') - print(' get_part_specs_by_filter(string db_name, string tbl_name, string filter, i32 max_parts)') + print(' get_partitions_ps(string db_name, string tbl_name, part_vals, i16 max_parts, string validTxnList)') + print(' get_partitions_ps_with_auth(string db_name, string tbl_name, part_vals, i16 max_parts, string user_name, group_names, string validTxnList)') + print(' get_partition_names_ps(string db_name, string tbl_name, part_vals, i16 max_parts, string validTxnList)') + print(' get_partitions_by_filter(string db_name, string tbl_name, string filter, i16 max_parts, string validTxnList)') + print(' get_part_specs_by_filter(string db_name, string tbl_name, string filter, i32 max_parts, string validTxnList)') print(' PartitionsByExprResult get_partitions_by_expr(PartitionsByExprRequest req)') - print(' i32 get_num_partitions_by_filter(string db_name, string tbl_name, string filter)') - print(' get_partitions_by_names(string db_name, string tbl_name, names)') + print(' i32 get_num_partitions_by_filter(string db_name, string tbl_name, string filter, string validTxnList)') + print(' get_partitions_by_names(string db_name, string tbl_name, names, string validTxnList)') print(' GetPartitionsByNamesResult get_partitions_by_names_req(GetPartitionsByNamesRequest req)') print(' void alter_partition(string db_name, string tbl_name, Partition new_part)') print(' void alter_partitions(string db_name, string tbl_name, new_parts)') @@ -133,8 +133,8 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help': print(' bool 
update_partition_column_statistics(ColumnStatistics stats_obj)') print(' SetPartitionsStatsResponse update_table_column_statistics_req(SetPartitionsStatsRequest req)') print(' SetPartitionsStatsResponse update_partition_column_statistics_req(SetPartitionsStatsRequest req)') - print(' ColumnStatistics get_table_column_statistics(string db_name, string tbl_name, string col_name)') - print(' ColumnStatistics get_partition_column_statistics(string db_name, string tbl_name, string part_name, string col_name)') + print(' ColumnStatistics get_table_column_statistics(string db_name, string tbl_name, string col_name, string validWriteIdList)') + print(' ColumnStatistics get_partition_column_statistics(string db_name, string tbl_name, string part_name, string col_name, string validWriteIdList)') print(' TableStatsResult get_table_statistics_req(TableStatsRequest request)') print(' PartitionsStatsResult get_partitions_statistics_req(PartitionsStatsRequest request)') print(' AggrStats get_aggr_stats_for(PartitionsStatsRequest request)') @@ -421,28 +421,28 @@ elif cmd == 'get_type_all': pp.pprint(client.get_type_all(args[0],)) elif cmd == 'get_fields': - if len(args) != 2: - print('get_fields requires 2 args') + if len(args) != 3: + print('get_fields requires 3 args') sys.exit(1) - pp.pprint(client.get_fields(args[0],args[1],)) + pp.pprint(client.get_fields(args[0],args[1],args[2],)) elif cmd == 'get_fields_with_environment_context': - if len(args) != 3: - print('get_fields_with_environment_context requires 3 args') + if len(args) != 4: + print('get_fields_with_environment_context requires 4 args') sys.exit(1) - pp.pprint(client.get_fields_with_environment_context(args[0],args[1],eval(args[2]),)) + pp.pprint(client.get_fields_with_environment_context(args[0],args[1],eval(args[2]),args[3],)) elif cmd == 'get_schema': - if len(args) != 2: - print('get_schema requires 2 args') + if len(args) != 3: + print('get_schema requires 3 args') sys.exit(1) - pp.pprint(client.get_schema(args[0],args[1],)) + pp.pprint(client.get_schema(args[0],args[1],args[2],)) elif cmd == 'get_schema_with_environment_context': - if len(args) != 3: - print('get_schema_with_environment_context requires 3 args') + if len(args) != 4: + print('get_schema_with_environment_context requires 4 args') sys.exit(1) - pp.pprint(client.get_schema_with_environment_context(args[0],args[1],eval(args[2]),)) + pp.pprint(client.get_schema_with_environment_context(args[0],args[1],eval(args[2]),args[3],)) elif cmd == 'create_table': if len(args) != 1: @@ -565,10 +565,10 @@ elif cmd == 'get_all_tables': pp.pprint(client.get_all_tables(args[0],)) elif cmd == 'get_table': - if len(args) != 2: - print('get_table requires 2 args') + if len(args) != 3: + print('get_table requires 3 args') sys.exit(1) - pp.pprint(client.get_table(args[0],args[1],)) + pp.pprint(client.get_table(args[0],args[1],args[2],)) elif cmd == 'get_table_objects_by_name': if len(args) != 2: @@ -721,10 +721,10 @@ elif cmd == 'drop_partitions_req': pp.pprint(client.drop_partitions_req(eval(args[0]),)) elif cmd == 'get_partition': - if len(args) != 3: - print('get_partition requires 3 args') + if len(args) != 4: + print('get_partition requires 4 args') sys.exit(1) - pp.pprint(client.get_partition(args[0],args[1],eval(args[2]),)) + pp.pprint(client.get_partition(args[0],args[1],eval(args[2]),args[3],)) elif cmd == 'exchange_partition': if len(args) != 5: @@ -739,40 +739,40 @@ elif cmd == 'exchange_partitions': 
pp.pprint(client.exchange_partitions(eval(args[0]),args[1],args[2],args[3],args[4],)) elif cmd == 'get_partition_with_auth': - if len(args) != 5: - print('get_partition_with_auth requires 5 args') + if len(args) != 6: + print('get_partition_with_auth requires 6 args') sys.exit(1) - pp.pprint(client.get_partition_with_auth(args[0],args[1],eval(args[2]),args[3],eval(args[4]),)) + pp.pprint(client.get_partition_with_auth(args[0],args[1],eval(args[2]),args[3],eval(args[4]),args[5],)) elif cmd == 'get_partition_by_name': - if len(args) != 3: - print('get_partition_by_name requires 3 args') + if len(args) != 4: + print('get_partition_by_name requires 4 args') sys.exit(1) - pp.pprint(client.get_partition_by_name(args[0],args[1],args[2],)) + pp.pprint(client.get_partition_by_name(args[0],args[1],args[2],args[3],)) elif cmd == 'get_partitions': - if len(args) != 3: - print('get_partitions requires 3 args') + if len(args) != 4: + print('get_partitions requires 4 args') sys.exit(1) - pp.pprint(client.get_partitions(args[0],args[1],eval(args[2]),)) + pp.pprint(client.get_partitions(args[0],args[1],eval(args[2]),args[3],)) elif cmd == 'get_partitions_with_auth': - if len(args) != 5: - print('get_partitions_with_auth requires 5 args') + if len(args) != 6: + print('get_partitions_with_auth requires 6 args') sys.exit(1) - pp.pprint(client.get_partitions_with_auth(args[0],args[1],eval(args[2]),args[3],eval(args[4]),)) + pp.pprint(client.get_partitions_with_auth(args[0],args[1],eval(args[2]),args[3],eval(args[4]),args[5],)) elif cmd == 'get_partitions_pspec': - if len(args) != 3: - print('get_partitions_pspec requires 3 args') + if len(args) != 4: + print('get_partitions_pspec requires 4 args') sys.exit(1) - pp.pprint(client.get_partitions_pspec(args[0],args[1],eval(args[2]),)) + pp.pprint(client.get_partitions_pspec(args[0],args[1],eval(args[2]),args[3],)) elif cmd == 'get_partition_names': - if len(args) != 3: - print('get_partition_names requires 3 args') + if len(args) != 4: + print('get_partition_names requires 4 args') sys.exit(1) - pp.pprint(client.get_partition_names(args[0],args[1],eval(args[2]),)) + pp.pprint(client.get_partition_names(args[0],args[1],eval(args[2]),args[3],)) elif cmd == 'get_partition_values': if len(args) != 1: @@ -781,34 +781,34 @@ elif cmd == 'get_partition_values': pp.pprint(client.get_partition_values(eval(args[0]),)) elif cmd == 'get_partitions_ps': - if len(args) != 4: - print('get_partitions_ps requires 4 args') + if len(args) != 5: + print('get_partitions_ps requires 5 args') sys.exit(1) - pp.pprint(client.get_partitions_ps(args[0],args[1],eval(args[2]),eval(args[3]),)) + pp.pprint(client.get_partitions_ps(args[0],args[1],eval(args[2]),eval(args[3]),args[4],)) elif cmd == 'get_partitions_ps_with_auth': - if len(args) != 6: - print('get_partitions_ps_with_auth requires 6 args') + if len(args) != 7: + print('get_partitions_ps_with_auth requires 7 args') sys.exit(1) - pp.pprint(client.get_partitions_ps_with_auth(args[0],args[1],eval(args[2]),eval(args[3]),args[4],eval(args[5]),)) + pp.pprint(client.get_partitions_ps_with_auth(args[0],args[1],eval(args[2]),eval(args[3]),args[4],eval(args[5]),args[6],)) elif cmd == 'get_partition_names_ps': - if len(args) != 4: - print('get_partition_names_ps requires 4 args') + if len(args) != 5: + print('get_partition_names_ps requires 5 args') sys.exit(1) - pp.pprint(client.get_partition_names_ps(args[0],args[1],eval(args[2]),eval(args[3]),)) + pp.pprint(client.get_partition_names_ps(args[0],args[1],eval(args[2]),eval(args[3]),args[4],)) 
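# A hedged usage sketch (not part of this patch): after these changes each of
# the partition commands above takes one extra trailing snapshot argument, so
# a call against a hypothetical metastore host and table now looks like
#
#   ThriftHiveMetastore-remote -h metastore-host:9083 \
#       get_partition_names default web_logs 100 '42:9223372036854775807::'
#
# where the final argument is an illustrative ValidTxnList-style string; the
# host, table, and snapshot values here are assumptions, not taken from the diff.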
elif cmd == 'get_partitions_by_filter': - if len(args) != 4: - print('get_partitions_by_filter requires 4 args') + if len(args) != 5: + print('get_partitions_by_filter requires 5 args') sys.exit(1) - pp.pprint(client.get_partitions_by_filter(args[0],args[1],args[2],eval(args[3]),)) + pp.pprint(client.get_partitions_by_filter(args[0],args[1],args[2],eval(args[3]),args[4],)) elif cmd == 'get_part_specs_by_filter': - if len(args) != 4: - print('get_part_specs_by_filter requires 4 args') + if len(args) != 5: + print('get_part_specs_by_filter requires 5 args') sys.exit(1) - pp.pprint(client.get_part_specs_by_filter(args[0],args[1],args[2],eval(args[3]),)) + pp.pprint(client.get_part_specs_by_filter(args[0],args[1],args[2],eval(args[3]),args[4],)) elif cmd == 'get_partitions_by_expr': if len(args) != 1: @@ -817,16 +817,16 @@ elif cmd == 'get_partitions_by_expr': pp.pprint(client.get_partitions_by_expr(eval(args[0]),)) elif cmd == 'get_num_partitions_by_filter': - if len(args) != 3: - print('get_num_partitions_by_filter requires 3 args') + if len(args) != 4: + print('get_num_partitions_by_filter requires 4 args') sys.exit(1) - pp.pprint(client.get_num_partitions_by_filter(args[0],args[1],args[2],)) + pp.pprint(client.get_num_partitions_by_filter(args[0],args[1],args[2],args[3],)) elif cmd == 'get_partitions_by_names': - if len(args) != 3: - print('get_partitions_by_names requires 3 args') + if len(args) != 4: + print('get_partitions_by_names requires 4 args') sys.exit(1) - pp.pprint(client.get_partitions_by_names(args[0],args[1],eval(args[2]),)) + pp.pprint(client.get_partitions_by_names(args[0],args[1],eval(args[2]),args[3],)) elif cmd == 'get_partitions_by_names_req': if len(args) != 1: @@ -973,16 +973,16 @@ elif cmd == 'update_partition_column_statistics_req': pp.pprint(client.update_partition_column_statistics_req(eval(args[0]),)) elif cmd == 'get_table_column_statistics': - if len(args) != 3: - print('get_table_column_statistics requires 3 args') + if len(args) != 4: + print('get_table_column_statistics requires 4 args') sys.exit(1) - pp.pprint(client.get_table_column_statistics(args[0],args[1],args[2],)) + pp.pprint(client.get_table_column_statistics(args[0],args[1],args[2],args[3],)) elif cmd == 'get_partition_column_statistics': - if len(args) != 4: - print('get_partition_column_statistics requires 4 args') + if len(args) != 5: + print('get_partition_column_statistics requires 5 args') sys.exit(1) - pp.pprint(client.get_partition_column_statistics(args[0],args[1],args[2],args[3],)) + pp.pprint(client.get_partition_column_statistics(args[0],args[1],args[2],args[3],args[4],)) elif cmd == 'get_table_statistics_req': if len(args) != 1: diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py index 540e89356c..153e7bf741 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py @@ -138,37 +138,41 @@ def get_type_all(self, name): """ pass - def get_fields(self, db_name, table_name): + def get_fields(self, db_name, table_name, validWriteIdList): """ Parameters: - db_name - table_name + - validWriteIdList """ pass - def get_fields_with_environment_context(self, db_name, table_name, environment_context): + def get_fields_with_environment_context(self, db_name, table_name, environment_context, 
validWriteIdList): """ Parameters: - db_name - table_name - environment_context + - validWriteIdList """ pass - def get_schema(self, db_name, table_name): + def get_schema(self, db_name, table_name, validWriteIdList): """ Parameters: - db_name - table_name + - validWriteIdList """ pass - def get_schema_with_environment_context(self, db_name, table_name, environment_context): + def get_schema_with_environment_context(self, db_name, table_name, environment_context, validWriteIdList): """ Parameters: - db_name - table_name - environment_context + - validWriteIdList """ pass @@ -327,11 +331,12 @@ def get_all_tables(self, db_name): """ pass - def get_table(self, dbname, tbl_name): + def get_table(self, dbname, tbl_name, validWriteIdList): """ Parameters: - dbname - tbl_name + - validWriteIdList """ pass @@ -550,12 +555,13 @@ def drop_partitions_req(self, req): """ pass - def get_partition(self, db_name, tbl_name, part_vals): + def get_partition(self, db_name, tbl_name, part_vals, validTxnList): """ Parameters: - db_name - tbl_name - part_vals + - validTxnList """ pass @@ -581,7 +587,7 @@ def exchange_partitions(self, partitionSpecs, source_db, source_table_name, dest """ pass - def get_partition_with_auth(self, db_name, tbl_name, part_vals, user_name, group_names): + def get_partition_with_auth(self, db_name, tbl_name, part_vals, user_name, group_names, validTxnList): """ Parameters: - db_name @@ -589,28 +595,31 @@ def get_partition_with_auth(self, db_name, tbl_name, part_vals, user_name, group - part_vals - user_name - group_names + - validTxnList """ pass - def get_partition_by_name(self, db_name, tbl_name, part_name): + def get_partition_by_name(self, db_name, tbl_name, part_name, validTxnList): """ Parameters: - db_name - tbl_name - part_name + - validTxnList """ pass - def get_partitions(self, db_name, tbl_name, max_parts): + def get_partitions(self, db_name, tbl_name, max_parts, validTxnList): """ Parameters: - db_name - tbl_name - max_parts + - validTxnList """ pass - def get_partitions_with_auth(self, db_name, tbl_name, max_parts, user_name, group_names): + def get_partitions_with_auth(self, db_name, tbl_name, max_parts, user_name, group_names, validTxnList): """ Parameters: - db_name @@ -618,24 +627,27 @@ def get_partitions_with_auth(self, db_name, tbl_name, max_parts, user_name, grou - max_parts - user_name - group_names + - validTxnList """ pass - def get_partitions_pspec(self, db_name, tbl_name, max_parts): + def get_partitions_pspec(self, db_name, tbl_name, max_parts, validTxnList): """ Parameters: - db_name - tbl_name - max_parts + - validTxnList """ pass - def get_partition_names(self, db_name, tbl_name, max_parts): + def get_partition_names(self, db_name, tbl_name, max_parts, validTxnList): """ Parameters: - db_name - tbl_name - max_parts + - validTxnList """ pass @@ -646,17 +658,18 @@ def get_partition_values(self, request): """ pass - def get_partitions_ps(self, db_name, tbl_name, part_vals, max_parts): + def get_partitions_ps(self, db_name, tbl_name, part_vals, max_parts, validTxnList): """ Parameters: - db_name - tbl_name - part_vals - max_parts + - validTxnList """ pass - def get_partitions_ps_with_auth(self, db_name, tbl_name, part_vals, max_parts, user_name, group_names): + def get_partitions_ps_with_auth(self, db_name, tbl_name, part_vals, max_parts, user_name, group_names, validTxnList): """ Parameters: - db_name @@ -665,36 +678,40 @@ def get_partitions_ps_with_auth(self, db_name, tbl_name, part_vals, max_parts, u - max_parts - user_name - group_names + - validTxnList 
""" pass - def get_partition_names_ps(self, db_name, tbl_name, part_vals, max_parts): + def get_partition_names_ps(self, db_name, tbl_name, part_vals, max_parts, validTxnList): """ Parameters: - db_name - tbl_name - part_vals - max_parts + - validTxnList """ pass - def get_partitions_by_filter(self, db_name, tbl_name, filter, max_parts): + def get_partitions_by_filter(self, db_name, tbl_name, filter, max_parts, validTxnList): """ Parameters: - db_name - tbl_name - filter - max_parts + - validTxnList """ pass - def get_part_specs_by_filter(self, db_name, tbl_name, filter, max_parts): + def get_part_specs_by_filter(self, db_name, tbl_name, filter, max_parts, validTxnList): """ Parameters: - db_name - tbl_name - filter - max_parts + - validTxnList """ pass @@ -705,21 +722,23 @@ def get_partitions_by_expr(self, req): """ pass - def get_num_partitions_by_filter(self, db_name, tbl_name, filter): + def get_num_partitions_by_filter(self, db_name, tbl_name, filter, validTxnList): """ Parameters: - db_name - tbl_name - filter + - validTxnList """ pass - def get_partitions_by_names(self, db_name, tbl_name, names): + def get_partitions_by_names(self, db_name, tbl_name, names, validTxnList): """ Parameters: - db_name - tbl_name - names + - validTxnList """ pass @@ -912,22 +931,24 @@ def update_partition_column_statistics_req(self, req): """ pass - def get_table_column_statistics(self, db_name, tbl_name, col_name): + def get_table_column_statistics(self, db_name, tbl_name, col_name, validWriteIdList): """ Parameters: - db_name - tbl_name - col_name + - validWriteIdList """ pass - def get_partition_column_statistics(self, db_name, tbl_name, part_name, col_name): + def get_partition_column_statistics(self, db_name, tbl_name, part_name, col_name, validWriteIdList): """ Parameters: - db_name - tbl_name - part_name - col_name + - validWriteIdList """ pass @@ -2314,20 +2335,22 @@ def recv_get_type_all(self): raise result.o2 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_type_all failed: unknown result") - def get_fields(self, db_name, table_name): + def get_fields(self, db_name, table_name, validWriteIdList): """ Parameters: - db_name - table_name + - validWriteIdList """ - self.send_get_fields(db_name, table_name) + self.send_get_fields(db_name, table_name, validWriteIdList) return self.recv_get_fields() - def send_get_fields(self, db_name, table_name): + def send_get_fields(self, db_name, table_name, validWriteIdList): self._oprot.writeMessageBegin('get_fields', TMessageType.CALL, self._seqid) args = get_fields_args() args.db_name = db_name args.table_name = table_name + args.validWriteIdList = validWriteIdList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -2353,22 +2376,24 @@ def recv_get_fields(self): raise result.o3 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_fields failed: unknown result") - def get_fields_with_environment_context(self, db_name, table_name, environment_context): + def get_fields_with_environment_context(self, db_name, table_name, environment_context, validWriteIdList): """ Parameters: - db_name - table_name - environment_context + - validWriteIdList """ - self.send_get_fields_with_environment_context(db_name, table_name, environment_context) + self.send_get_fields_with_environment_context(db_name, table_name, environment_context, validWriteIdList) return self.recv_get_fields_with_environment_context() - def send_get_fields_with_environment_context(self, db_name, table_name, environment_context): + 
def send_get_fields_with_environment_context(self, db_name, table_name, environment_context, validWriteIdList): self._oprot.writeMessageBegin('get_fields_with_environment_context', TMessageType.CALL, self._seqid) args = get_fields_with_environment_context_args() args.db_name = db_name args.table_name = table_name args.environment_context = environment_context + args.validWriteIdList = validWriteIdList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -2394,20 +2419,22 @@ def recv_get_fields_with_environment_context(self): raise result.o3 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_fields_with_environment_context failed: unknown result") - def get_schema(self, db_name, table_name): + def get_schema(self, db_name, table_name, validWriteIdList): """ Parameters: - db_name - table_name + - validWriteIdList """ - self.send_get_schema(db_name, table_name) + self.send_get_schema(db_name, table_name, validWriteIdList) return self.recv_get_schema() - def send_get_schema(self, db_name, table_name): + def send_get_schema(self, db_name, table_name, validWriteIdList): self._oprot.writeMessageBegin('get_schema', TMessageType.CALL, self._seqid) args = get_schema_args() args.db_name = db_name args.table_name = table_name + args.validWriteIdList = validWriteIdList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -2433,22 +2460,24 @@ def recv_get_schema(self): raise result.o3 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_schema failed: unknown result") - def get_schema_with_environment_context(self, db_name, table_name, environment_context): + def get_schema_with_environment_context(self, db_name, table_name, environment_context, validWriteIdList): """ Parameters: - db_name - table_name - environment_context + - validWriteIdList """ - self.send_get_schema_with_environment_context(db_name, table_name, environment_context) + self.send_get_schema_with_environment_context(db_name, table_name, environment_context, validWriteIdList) return self.recv_get_schema_with_environment_context() - def send_get_schema_with_environment_context(self, db_name, table_name, environment_context): + def send_get_schema_with_environment_context(self, db_name, table_name, environment_context, validWriteIdList): self._oprot.writeMessageBegin('get_schema_with_environment_context', TMessageType.CALL, self._seqid) args = get_schema_with_environment_context_args() args.db_name = db_name args.table_name = table_name args.environment_context = environment_context + args.validWriteIdList = validWriteIdList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -3177,20 +3206,22 @@ def recv_get_all_tables(self): raise result.o1 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_all_tables failed: unknown result") - def get_table(self, dbname, tbl_name): + def get_table(self, dbname, tbl_name, validWriteIdList): """ Parameters: - dbname - tbl_name + - validWriteIdList """ - self.send_get_table(dbname, tbl_name) + self.send_get_table(dbname, tbl_name, validWriteIdList) return self.recv_get_table() - def send_get_table(self, dbname, tbl_name): + def send_get_table(self, dbname, tbl_name, validWriteIdList): self._oprot.writeMessageBegin('get_table', TMessageType.CALL, self._seqid) args = get_table_args() args.dbname = dbname args.tbl_name = tbl_name + args.validWriteIdList = validWriteIdList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -4181,22 
+4212,24 @@ def recv_drop_partitions_req(self): raise result.o2 raise TApplicationException(TApplicationException.MISSING_RESULT, "drop_partitions_req failed: unknown result") - def get_partition(self, db_name, tbl_name, part_vals): + def get_partition(self, db_name, tbl_name, part_vals, validTxnList): """ Parameters: - db_name - tbl_name - part_vals + - validTxnList """ - self.send_get_partition(db_name, tbl_name, part_vals) + self.send_get_partition(db_name, tbl_name, part_vals, validTxnList) return self.recv_get_partition() - def send_get_partition(self, db_name, tbl_name, part_vals): + def send_get_partition(self, db_name, tbl_name, part_vals, validTxnList): self._oprot.writeMessageBegin('get_partition', TMessageType.CALL, self._seqid) args = get_partition_args() args.db_name = db_name args.tbl_name = tbl_name args.part_vals = part_vals + args.validTxnList = validTxnList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -4314,7 +4347,7 @@ def recv_exchange_partitions(self): raise result.o4 raise TApplicationException(TApplicationException.MISSING_RESULT, "exchange_partitions failed: unknown result") - def get_partition_with_auth(self, db_name, tbl_name, part_vals, user_name, group_names): + def get_partition_with_auth(self, db_name, tbl_name, part_vals, user_name, group_names, validTxnList): """ Parameters: - db_name @@ -4322,11 +4355,12 @@ def get_partition_with_auth(self, db_name, tbl_name, part_vals, user_name, group - part_vals - user_name - group_names + - validTxnList """ - self.send_get_partition_with_auth(db_name, tbl_name, part_vals, user_name, group_names) + self.send_get_partition_with_auth(db_name, tbl_name, part_vals, user_name, group_names, validTxnList) return self.recv_get_partition_with_auth() - def send_get_partition_with_auth(self, db_name, tbl_name, part_vals, user_name, group_names): + def send_get_partition_with_auth(self, db_name, tbl_name, part_vals, user_name, group_names, validTxnList): self._oprot.writeMessageBegin('get_partition_with_auth', TMessageType.CALL, self._seqid) args = get_partition_with_auth_args() args.db_name = db_name @@ -4334,6 +4368,7 @@ def send_get_partition_with_auth(self, db_name, tbl_name, part_vals, user_name, args.part_vals = part_vals args.user_name = user_name args.group_names = group_names + args.validTxnList = validTxnList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -4357,22 +4392,24 @@ def recv_get_partition_with_auth(self): raise result.o2 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partition_with_auth failed: unknown result") - def get_partition_by_name(self, db_name, tbl_name, part_name): + def get_partition_by_name(self, db_name, tbl_name, part_name, validTxnList): """ Parameters: - db_name - tbl_name - part_name + - validTxnList """ - self.send_get_partition_by_name(db_name, tbl_name, part_name) + self.send_get_partition_by_name(db_name, tbl_name, part_name, validTxnList) return self.recv_get_partition_by_name() - def send_get_partition_by_name(self, db_name, tbl_name, part_name): + def send_get_partition_by_name(self, db_name, tbl_name, part_name, validTxnList): self._oprot.writeMessageBegin('get_partition_by_name', TMessageType.CALL, self._seqid) args = get_partition_by_name_args() args.db_name = db_name args.tbl_name = tbl_name args.part_name = part_name + args.validTxnList = validTxnList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -4396,22 +4433,24 @@ def 
recv_get_partition_by_name(self): raise result.o2 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partition_by_name failed: unknown result") - def get_partitions(self, db_name, tbl_name, max_parts): + def get_partitions(self, db_name, tbl_name, max_parts, validTxnList): """ Parameters: - db_name - tbl_name - max_parts + - validTxnList """ - self.send_get_partitions(db_name, tbl_name, max_parts) + self.send_get_partitions(db_name, tbl_name, max_parts, validTxnList) return self.recv_get_partitions() - def send_get_partitions(self, db_name, tbl_name, max_parts): + def send_get_partitions(self, db_name, tbl_name, max_parts, validTxnList): self._oprot.writeMessageBegin('get_partitions', TMessageType.CALL, self._seqid) args = get_partitions_args() args.db_name = db_name args.tbl_name = tbl_name args.max_parts = max_parts + args.validTxnList = validTxnList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -4435,7 +4474,7 @@ def recv_get_partitions(self): raise result.o2 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partitions failed: unknown result") - def get_partitions_with_auth(self, db_name, tbl_name, max_parts, user_name, group_names): + def get_partitions_with_auth(self, db_name, tbl_name, max_parts, user_name, group_names, validTxnList): """ Parameters: - db_name @@ -4443,11 +4482,12 @@ def get_partitions_with_auth(self, db_name, tbl_name, max_parts, user_name, grou - max_parts - user_name - group_names + - validTxnList """ - self.send_get_partitions_with_auth(db_name, tbl_name, max_parts, user_name, group_names) + self.send_get_partitions_with_auth(db_name, tbl_name, max_parts, user_name, group_names, validTxnList) return self.recv_get_partitions_with_auth() - def send_get_partitions_with_auth(self, db_name, tbl_name, max_parts, user_name, group_names): + def send_get_partitions_with_auth(self, db_name, tbl_name, max_parts, user_name, group_names, validTxnList): self._oprot.writeMessageBegin('get_partitions_with_auth', TMessageType.CALL, self._seqid) args = get_partitions_with_auth_args() args.db_name = db_name @@ -4455,6 +4495,7 @@ def send_get_partitions_with_auth(self, db_name, tbl_name, max_parts, user_name, args.max_parts = max_parts args.user_name = user_name args.group_names = group_names + args.validTxnList = validTxnList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -4478,22 +4519,24 @@ def recv_get_partitions_with_auth(self): raise result.o2 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partitions_with_auth failed: unknown result") - def get_partitions_pspec(self, db_name, tbl_name, max_parts): + def get_partitions_pspec(self, db_name, tbl_name, max_parts, validTxnList): """ Parameters: - db_name - tbl_name - max_parts + - validTxnList """ - self.send_get_partitions_pspec(db_name, tbl_name, max_parts) + self.send_get_partitions_pspec(db_name, tbl_name, max_parts, validTxnList) return self.recv_get_partitions_pspec() - def send_get_partitions_pspec(self, db_name, tbl_name, max_parts): + def send_get_partitions_pspec(self, db_name, tbl_name, max_parts, validTxnList): self._oprot.writeMessageBegin('get_partitions_pspec', TMessageType.CALL, self._seqid) args = get_partitions_pspec_args() args.db_name = db_name args.tbl_name = tbl_name args.max_parts = max_parts + args.validTxnList = validTxnList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -4517,22 +4560,24 @@ def recv_get_partitions_pspec(self): raise 
result.o2 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partitions_pspec failed: unknown result") - def get_partition_names(self, db_name, tbl_name, max_parts): + def get_partition_names(self, db_name, tbl_name, max_parts, validTxnList): """ Parameters: - db_name - tbl_name - max_parts + - validTxnList """ - self.send_get_partition_names(db_name, tbl_name, max_parts) + self.send_get_partition_names(db_name, tbl_name, max_parts, validTxnList) return self.recv_get_partition_names() - def send_get_partition_names(self, db_name, tbl_name, max_parts): + def send_get_partition_names(self, db_name, tbl_name, max_parts, validTxnList): self._oprot.writeMessageBegin('get_partition_names', TMessageType.CALL, self._seqid) args = get_partition_names_args() args.db_name = db_name args.tbl_name = tbl_name args.max_parts = max_parts + args.validTxnList = validTxnList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -4591,24 +4636,26 @@ def recv_get_partition_values(self): raise result.o2 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partition_values failed: unknown result") - def get_partitions_ps(self, db_name, tbl_name, part_vals, max_parts): + def get_partitions_ps(self, db_name, tbl_name, part_vals, max_parts, validTxnList): """ Parameters: - db_name - tbl_name - part_vals - max_parts + - validTxnList """ - self.send_get_partitions_ps(db_name, tbl_name, part_vals, max_parts) + self.send_get_partitions_ps(db_name, tbl_name, part_vals, max_parts, validTxnList) return self.recv_get_partitions_ps() - def send_get_partitions_ps(self, db_name, tbl_name, part_vals, max_parts): + def send_get_partitions_ps(self, db_name, tbl_name, part_vals, max_parts, validTxnList): self._oprot.writeMessageBegin('get_partitions_ps', TMessageType.CALL, self._seqid) args = get_partitions_ps_args() args.db_name = db_name args.tbl_name = tbl_name args.part_vals = part_vals args.max_parts = max_parts + args.validTxnList = validTxnList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -4632,7 +4679,7 @@ def recv_get_partitions_ps(self): raise result.o2 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partitions_ps failed: unknown result") - def get_partitions_ps_with_auth(self, db_name, tbl_name, part_vals, max_parts, user_name, group_names): + def get_partitions_ps_with_auth(self, db_name, tbl_name, part_vals, max_parts, user_name, group_names, validTxnList): """ Parameters: - db_name @@ -4641,11 +4688,12 @@ def get_partitions_ps_with_auth(self, db_name, tbl_name, part_vals, max_parts, u - max_parts - user_name - group_names + - validTxnList """ - self.send_get_partitions_ps_with_auth(db_name, tbl_name, part_vals, max_parts, user_name, group_names) + self.send_get_partitions_ps_with_auth(db_name, tbl_name, part_vals, max_parts, user_name, group_names, validTxnList) return self.recv_get_partitions_ps_with_auth() - def send_get_partitions_ps_with_auth(self, db_name, tbl_name, part_vals, max_parts, user_name, group_names): + def send_get_partitions_ps_with_auth(self, db_name, tbl_name, part_vals, max_parts, user_name, group_names, validTxnList): self._oprot.writeMessageBegin('get_partitions_ps_with_auth', TMessageType.CALL, self._seqid) args = get_partitions_ps_with_auth_args() args.db_name = db_name @@ -4654,6 +4702,7 @@ def send_get_partitions_ps_with_auth(self, db_name, tbl_name, part_vals, max_par args.max_parts = max_parts args.user_name = user_name args.group_names = group_names + 
args.validTxnList = validTxnList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -4677,24 +4726,26 @@ def recv_get_partitions_ps_with_auth(self): raise result.o2 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partitions_ps_with_auth failed: unknown result") - def get_partition_names_ps(self, db_name, tbl_name, part_vals, max_parts): + def get_partition_names_ps(self, db_name, tbl_name, part_vals, max_parts, validTxnList): """ Parameters: - db_name - tbl_name - part_vals - max_parts + - validTxnList """ - self.send_get_partition_names_ps(db_name, tbl_name, part_vals, max_parts) + self.send_get_partition_names_ps(db_name, tbl_name, part_vals, max_parts, validTxnList) return self.recv_get_partition_names_ps() - def send_get_partition_names_ps(self, db_name, tbl_name, part_vals, max_parts): + def send_get_partition_names_ps(self, db_name, tbl_name, part_vals, max_parts, validTxnList): self._oprot.writeMessageBegin('get_partition_names_ps', TMessageType.CALL, self._seqid) args = get_partition_names_ps_args() args.db_name = db_name args.tbl_name = tbl_name args.part_vals = part_vals args.max_parts = max_parts + args.validTxnList = validTxnList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -4718,24 +4769,26 @@ def recv_get_partition_names_ps(self): raise result.o2 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partition_names_ps failed: unknown result") - def get_partitions_by_filter(self, db_name, tbl_name, filter, max_parts): + def get_partitions_by_filter(self, db_name, tbl_name, filter, max_parts, validTxnList): """ Parameters: - db_name - tbl_name - filter - max_parts + - validTxnList """ - self.send_get_partitions_by_filter(db_name, tbl_name, filter, max_parts) + self.send_get_partitions_by_filter(db_name, tbl_name, filter, max_parts, validTxnList) return self.recv_get_partitions_by_filter() - def send_get_partitions_by_filter(self, db_name, tbl_name, filter, max_parts): + def send_get_partitions_by_filter(self, db_name, tbl_name, filter, max_parts, validTxnList): self._oprot.writeMessageBegin('get_partitions_by_filter', TMessageType.CALL, self._seqid) args = get_partitions_by_filter_args() args.db_name = db_name args.tbl_name = tbl_name args.filter = filter args.max_parts = max_parts + args.validTxnList = validTxnList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -4759,24 +4812,26 @@ def recv_get_partitions_by_filter(self): raise result.o2 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partitions_by_filter failed: unknown result") - def get_part_specs_by_filter(self, db_name, tbl_name, filter, max_parts): + def get_part_specs_by_filter(self, db_name, tbl_name, filter, max_parts, validTxnList): """ Parameters: - db_name - tbl_name - filter - max_parts + - validTxnList """ - self.send_get_part_specs_by_filter(db_name, tbl_name, filter, max_parts) + self.send_get_part_specs_by_filter(db_name, tbl_name, filter, max_parts, validTxnList) return self.recv_get_part_specs_by_filter() - def send_get_part_specs_by_filter(self, db_name, tbl_name, filter, max_parts): + def send_get_part_specs_by_filter(self, db_name, tbl_name, filter, max_parts, validTxnList): self._oprot.writeMessageBegin('get_part_specs_by_filter', TMessageType.CALL, self._seqid) args = get_part_specs_by_filter_args() args.db_name = db_name args.tbl_name = tbl_name args.filter = filter args.max_parts = max_parts + args.validTxnList = validTxnList 
args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -4835,22 +4890,24 @@ def recv_get_partitions_by_expr(self): raise result.o2 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partitions_by_expr failed: unknown result") - def get_num_partitions_by_filter(self, db_name, tbl_name, filter): + def get_num_partitions_by_filter(self, db_name, tbl_name, filter, validTxnList): """ Parameters: - db_name - tbl_name - filter + - validTxnList """ - self.send_get_num_partitions_by_filter(db_name, tbl_name, filter) + self.send_get_num_partitions_by_filter(db_name, tbl_name, filter, validTxnList) return self.recv_get_num_partitions_by_filter() - def send_get_num_partitions_by_filter(self, db_name, tbl_name, filter): + def send_get_num_partitions_by_filter(self, db_name, tbl_name, filter, validTxnList): self._oprot.writeMessageBegin('get_num_partitions_by_filter', TMessageType.CALL, self._seqid) args = get_num_partitions_by_filter_args() args.db_name = db_name args.tbl_name = tbl_name args.filter = filter + args.validTxnList = validTxnList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -4874,22 +4931,24 @@ def recv_get_num_partitions_by_filter(self): raise result.o2 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_num_partitions_by_filter failed: unknown result") - def get_partitions_by_names(self, db_name, tbl_name, names): + def get_partitions_by_names(self, db_name, tbl_name, names, validTxnList): """ Parameters: - db_name - tbl_name - names + - validTxnList """ - self.send_get_partitions_by_names(db_name, tbl_name, names) + self.send_get_partitions_by_names(db_name, tbl_name, names, validTxnList) return self.recv_get_partitions_by_names() - def send_get_partitions_by_names(self, db_name, tbl_name, names): + def send_get_partitions_by_names(self, db_name, tbl_name, names, validTxnList): self._oprot.writeMessageBegin('get_partitions_by_names', TMessageType.CALL, self._seqid) args = get_partitions_by_names_args() args.db_name = db_name args.tbl_name = tbl_name args.names = names + args.validTxnList = validTxnList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -5807,22 +5866,24 @@ def recv_update_partition_column_statistics_req(self): raise result.o4 raise TApplicationException(TApplicationException.MISSING_RESULT, "update_partition_column_statistics_req failed: unknown result") - def get_table_column_statistics(self, db_name, tbl_name, col_name): + def get_table_column_statistics(self, db_name, tbl_name, col_name, validWriteIdList): """ Parameters: - db_name - tbl_name - col_name + - validWriteIdList """ - self.send_get_table_column_statistics(db_name, tbl_name, col_name) + self.send_get_table_column_statistics(db_name, tbl_name, col_name, validWriteIdList) return self.recv_get_table_column_statistics() - def send_get_table_column_statistics(self, db_name, tbl_name, col_name): + def send_get_table_column_statistics(self, db_name, tbl_name, col_name, validWriteIdList): self._oprot.writeMessageBegin('get_table_column_statistics', TMessageType.CALL, self._seqid) args = get_table_column_statistics_args() args.db_name = db_name args.tbl_name = tbl_name args.col_name = col_name + args.validWriteIdList = validWriteIdList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -5850,24 +5911,26 @@ def recv_get_table_column_statistics(self): raise result.o4 raise TApplicationException(TApplicationException.MISSING_RESULT, 
"get_table_column_statistics failed: unknown result") - def get_partition_column_statistics(self, db_name, tbl_name, part_name, col_name): + def get_partition_column_statistics(self, db_name, tbl_name, part_name, col_name, validWriteIdList): """ Parameters: - db_name - tbl_name - part_name - col_name + - validWriteIdList """ - self.send_get_partition_column_statistics(db_name, tbl_name, part_name, col_name) + self.send_get_partition_column_statistics(db_name, tbl_name, part_name, col_name, validWriteIdList) return self.recv_get_partition_column_statistics() - def send_get_partition_column_statistics(self, db_name, tbl_name, part_name, col_name): + def send_get_partition_column_statistics(self, db_name, tbl_name, part_name, col_name, validWriteIdList): self._oprot.writeMessageBegin('get_partition_column_statistics', TMessageType.CALL, self._seqid) args = get_partition_column_statistics_args() args.db_name = db_name args.tbl_name = tbl_name args.part_name = part_name args.col_name = col_name + args.validWriteIdList = validWriteIdList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -10396,7 +10459,7 @@ def process_get_fields(self, seqid, iprot, oprot): iprot.readMessageEnd() result = get_fields_result() try: - result.success = self._handler.get_fields(args.db_name, args.table_name) + result.success = self._handler.get_fields(args.db_name, args.table_name, args.validWriteIdList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -10424,7 +10487,7 @@ def process_get_fields_with_environment_context(self, seqid, iprot, oprot): iprot.readMessageEnd() result = get_fields_with_environment_context_result() try: - result.success = self._handler.get_fields_with_environment_context(args.db_name, args.table_name, args.environment_context) + result.success = self._handler.get_fields_with_environment_context(args.db_name, args.table_name, args.environment_context, args.validWriteIdList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -10452,7 +10515,7 @@ def process_get_schema(self, seqid, iprot, oprot): iprot.readMessageEnd() result = get_schema_result() try: - result.success = self._handler.get_schema(args.db_name, args.table_name) + result.success = self._handler.get_schema(args.db_name, args.table_name, args.validWriteIdList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -10480,7 +10543,7 @@ def process_get_schema_with_environment_context(self, seqid, iprot, oprot): iprot.readMessageEnd() result = get_schema_with_environment_context_result() try: - result.success = self._handler.get_schema_with_environment_context(args.db_name, args.table_name, args.environment_context) + result.success = self._handler.get_schema_with_environment_context(args.db_name, args.table_name, args.environment_context, args.validWriteIdList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -11002,7 +11065,7 @@ def process_get_table(self, seqid, iprot, oprot): iprot.readMessageEnd() result = get_table_result() try: - result.success = self._handler.get_table(args.dbname, args.tbl_name) + result.success = self._handler.get_table(args.dbname, args.tbl_name, args.validWriteIdList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -11682,7 +11745,7 @@ def process_get_partition(self, seqid, 
iprot, oprot): iprot.readMessageEnd() result = get_partition_result() try: - result.success = self._handler.get_partition(args.db_name, args.tbl_name, args.part_vals) + result.success = self._handler.get_partition(args.db_name, args.tbl_name, args.part_vals, args.validTxnList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -11769,7 +11832,7 @@ def process_get_partition_with_auth(self, seqid, iprot, oprot): iprot.readMessageEnd() result = get_partition_with_auth_result() try: - result.success = self._handler.get_partition_with_auth(args.db_name, args.tbl_name, args.part_vals, args.user_name, args.group_names) + result.success = self._handler.get_partition_with_auth(args.db_name, args.tbl_name, args.part_vals, args.user_name, args.group_names, args.validTxnList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -11794,7 +11857,7 @@ def process_get_partition_by_name(self, seqid, iprot, oprot): iprot.readMessageEnd() result = get_partition_by_name_result() try: - result.success = self._handler.get_partition_by_name(args.db_name, args.tbl_name, args.part_name) + result.success = self._handler.get_partition_by_name(args.db_name, args.tbl_name, args.part_name, args.validTxnList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -11819,7 +11882,7 @@ def process_get_partitions(self, seqid, iprot, oprot): iprot.readMessageEnd() result = get_partitions_result() try: - result.success = self._handler.get_partitions(args.db_name, args.tbl_name, args.max_parts) + result.success = self._handler.get_partitions(args.db_name, args.tbl_name, args.max_parts, args.validTxnList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -11844,7 +11907,7 @@ def process_get_partitions_with_auth(self, seqid, iprot, oprot): iprot.readMessageEnd() result = get_partitions_with_auth_result() try: - result.success = self._handler.get_partitions_with_auth(args.db_name, args.tbl_name, args.max_parts, args.user_name, args.group_names) + result.success = self._handler.get_partitions_with_auth(args.db_name, args.tbl_name, args.max_parts, args.user_name, args.group_names, args.validTxnList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -11869,7 +11932,7 @@ def process_get_partitions_pspec(self, seqid, iprot, oprot): iprot.readMessageEnd() result = get_partitions_pspec_result() try: - result.success = self._handler.get_partitions_pspec(args.db_name, args.tbl_name, args.max_parts) + result.success = self._handler.get_partitions_pspec(args.db_name, args.tbl_name, args.max_parts, args.validTxnList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -11894,7 +11957,7 @@ def process_get_partition_names(self, seqid, iprot, oprot): iprot.readMessageEnd() result = get_partition_names_result() try: - result.success = self._handler.get_partition_names(args.db_name, args.tbl_name, args.max_parts) + result.success = self._handler.get_partition_names(args.db_name, args.tbl_name, args.max_parts, args.validTxnList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -11944,7 +12007,7 @@ def process_get_partitions_ps(self, seqid, iprot, oprot): iprot.readMessageEnd() result = get_partitions_ps_result() try: - result.success = 
self._handler.get_partitions_ps(args.db_name, args.tbl_name, args.part_vals, args.max_parts) + result.success = self._handler.get_partitions_ps(args.db_name, args.tbl_name, args.part_vals, args.max_parts, args.validTxnList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -11969,7 +12032,7 @@ def process_get_partitions_ps_with_auth(self, seqid, iprot, oprot): iprot.readMessageEnd() result = get_partitions_ps_with_auth_result() try: - result.success = self._handler.get_partitions_ps_with_auth(args.db_name, args.tbl_name, args.part_vals, args.max_parts, args.user_name, args.group_names) + result.success = self._handler.get_partitions_ps_with_auth(args.db_name, args.tbl_name, args.part_vals, args.max_parts, args.user_name, args.group_names, args.validTxnList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -11994,7 +12057,7 @@ def process_get_partition_names_ps(self, seqid, iprot, oprot): iprot.readMessageEnd() result = get_partition_names_ps_result() try: - result.success = self._handler.get_partition_names_ps(args.db_name, args.tbl_name, args.part_vals, args.max_parts) + result.success = self._handler.get_partition_names_ps(args.db_name, args.tbl_name, args.part_vals, args.max_parts, args.validTxnList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -12019,7 +12082,7 @@ def process_get_partitions_by_filter(self, seqid, iprot, oprot): iprot.readMessageEnd() result = get_partitions_by_filter_result() try: - result.success = self._handler.get_partitions_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts) + result.success = self._handler.get_partitions_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts, args.validTxnList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -12044,7 +12107,7 @@ def process_get_part_specs_by_filter(self, seqid, iprot, oprot): iprot.readMessageEnd() result = get_part_specs_by_filter_result() try: - result.success = self._handler.get_part_specs_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts) + result.success = self._handler.get_part_specs_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts, args.validTxnList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -12094,7 +12157,7 @@ def process_get_num_partitions_by_filter(self, seqid, iprot, oprot): iprot.readMessageEnd() result = get_num_partitions_by_filter_result() try: - result.success = self._handler.get_num_partitions_by_filter(args.db_name, args.tbl_name, args.filter) + result.success = self._handler.get_num_partitions_by_filter(args.db_name, args.tbl_name, args.filter, args.validTxnList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -12119,7 +12182,7 @@ def process_get_partitions_by_names(self, seqid, iprot, oprot): iprot.readMessageEnd() result = get_partitions_by_names_result() try: - result.success = self._handler.get_partitions_by_names(args.db_name, args.tbl_name, args.names) + result.success = self._handler.get_partitions_by_names(args.db_name, args.tbl_name, args.names, args.validTxnList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -12780,7 +12843,7 @@ def process_get_table_column_statistics(self, seqid, 
iprot, oprot): iprot.readMessageEnd() result = get_table_column_statistics_result() try: - result.success = self._handler.get_table_column_statistics(args.db_name, args.tbl_name, args.col_name) + result.success = self._handler.get_table_column_statistics(args.db_name, args.tbl_name, args.col_name, args.validWriteIdList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -12811,7 +12874,7 @@ def process_get_partition_column_statistics(self, seqid, iprot, oprot): iprot.readMessageEnd() result = get_partition_column_statistics_result() try: - result.success = self._handler.get_partition_column_statistics(args.db_name, args.tbl_name, args.part_name, args.col_name) + result.success = self._handler.get_partition_column_statistics(args.db_name, args.tbl_name, args.part_name, args.col_name, args.validWriteIdList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -17138,10 +17201,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype952, _size949) = iprot.readListBegin() - for _i953 in xrange(_size949): - _elem954 = iprot.readString() - self.success.append(_elem954) + (_etype959, _size956) = iprot.readListBegin() + for _i960 in xrange(_size956): + _elem961 = iprot.readString() + self.success.append(_elem961) iprot.readListEnd() else: iprot.skip(ftype) @@ -17164,8 +17227,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter955 in self.success: - oprot.writeString(iter955) + for iter962 in self.success: + oprot.writeString(iter962) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -17270,10 +17333,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype959, _size956) = iprot.readListBegin() - for _i960 in xrange(_size956): - _elem961 = iprot.readString() - self.success.append(_elem961) + (_etype966, _size963) = iprot.readListBegin() + for _i967 in xrange(_size963): + _elem968 = iprot.readString() + self.success.append(_elem968) iprot.readListEnd() else: iprot.skip(ftype) @@ -17296,8 +17359,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter962 in self.success: - oprot.writeString(iter962) + for iter969 in self.success: + oprot.writeString(iter969) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -18067,12 +18130,12 @@ def read(self, iprot): if fid == 0: if ftype == TType.MAP: self.success = {} - (_ktype964, _vtype965, _size963 ) = iprot.readMapBegin() - for _i967 in xrange(_size963): - _key968 = iprot.readString() - _val969 = Type() - _val969.read(iprot) - self.success[_key968] = _val969 + (_ktype971, _vtype972, _size970 ) = iprot.readMapBegin() + for _i974 in xrange(_size970): + _key975 = iprot.readString() + _val976 = Type() + _val976.read(iprot) + self.success[_key975] = _val976 iprot.readMapEnd() else: iprot.skip(ftype) @@ -18095,9 +18158,9 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.MAP, 0) oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.success)) - for kiter970,viter971 in self.success.items(): - oprot.writeString(kiter970) - viter971.write(oprot) + for kiter977,viter978 in self.success.items(): + oprot.writeString(kiter977) + viter978.write(oprot) oprot.writeMapEnd() 
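
Taken together, the regenerated client stubs and processors above mean every affected metastore call now carries one extra optional string argument. A minimal sketch of a caller against the new signature follows; the host/port, the gen-py package path, and the ValidWriteIdList literal are illustrative assumptions, not part of this change:

    # Illustrative only: shows the new trailing argument on the regenerated
    # client API. Host/port and the write-id-list string are made-up values.
    from thrift.transport import TSocket, TTransport
    from thrift.protocol import TBinaryProtocol
    from hive_metastore import ThriftHiveMetastore

    transport = TTransport.TBufferedTransport(TSocket.TSocket('localhost', 9083))
    protocol = TBinaryProtocol.TBinaryProtocol(transport)
    client = ThriftHiveMetastore.Client(protocol)
    transport.open()

    # The last argument is the serialized ValidWriteIdList; passing None keeps
    # the pre-patch behaviour, since the field is optional on the wire.
    stats = client.get_partition_column_statistics(
        'default', 'tbl', 'ds=2019-01-01', 'col',
        'default.tbl:5:9223372036854775807::')
    transport.close()
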
oprot.writeFieldEnd() if self.o2 is not None: @@ -18133,17 +18196,20 @@ class get_fields_args: Attributes: - db_name - table_name + - validWriteIdList """ thrift_spec = ( None, # 0 (1, TType.STRING, 'db_name', None, None, ), # 1 (2, TType.STRING, 'table_name', None, None, ), # 2 + (3, TType.STRING, 'validWriteIdList', None, None, ), # 3 ) - def __init__(self, db_name=None, table_name=None,): + def __init__(self, db_name=None, table_name=None, validWriteIdList=None,): self.db_name = db_name self.table_name = table_name + self.validWriteIdList = validWriteIdList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -18164,6 +18230,11 @@ def read(self, iprot): self.table_name = iprot.readString() else: iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -18182,6 +18253,10 @@ def write(self, oprot): oprot.writeFieldBegin('table_name', TType.STRING, 2) oprot.writeString(self.table_name) oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 3) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -18193,6 +18268,7 @@ def __hash__(self): value = 17 value = (value * 31) ^ hash(self.db_name) value = (value * 31) ^ hash(self.table_name) + value = (value * 31) ^ hash(self.validWriteIdList) return value def __repr__(self): @@ -18240,11 +18316,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype975, _size972) = iprot.readListBegin() - for _i976 in xrange(_size972): - _elem977 = FieldSchema() - _elem977.read(iprot) - self.success.append(_elem977) + (_etype982, _size979) = iprot.readListBegin() + for _i983 in xrange(_size979): + _elem984 = FieldSchema() + _elem984.read(iprot) + self.success.append(_elem984) iprot.readListEnd() else: iprot.skip(ftype) @@ -18279,8 +18355,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter978 in self.success: - iter978.write(oprot) + for iter985 in self.success: + iter985.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -18327,6 +18403,7 @@ class get_fields_with_environment_context_args: - db_name - table_name - environment_context + - validWriteIdList """ thrift_spec = ( @@ -18334,12 +18411,14 @@ class get_fields_with_environment_context_args: (1, TType.STRING, 'db_name', None, None, ), # 1 (2, TType.STRING, 'table_name', None, None, ), # 2 (3, TType.STRUCT, 'environment_context', (EnvironmentContext, EnvironmentContext.thrift_spec), None, ), # 3 + (4, TType.STRING, 'validWriteIdList', None, None, ), # 4 ) - def __init__(self, db_name=None, table_name=None, environment_context=None,): + def __init__(self, db_name=None, table_name=None, environment_context=None, validWriteIdList=None,): self.db_name = db_name self.table_name = table_name self.environment_context = environment_context + self.validWriteIdList = validWriteIdList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -18366,6 +18445,11 @@ def read(self, 
iprot): self.environment_context.read(iprot) else: iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -18388,6 +18472,10 @@ def write(self, oprot): oprot.writeFieldBegin('environment_context', TType.STRUCT, 3) self.environment_context.write(oprot) oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 4) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -18400,6 +18488,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.db_name) value = (value * 31) ^ hash(self.table_name) value = (value * 31) ^ hash(self.environment_context) + value = (value * 31) ^ hash(self.validWriteIdList) return value def __repr__(self): @@ -18447,11 +18536,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype982, _size979) = iprot.readListBegin() - for _i983 in xrange(_size979): - _elem984 = FieldSchema() - _elem984.read(iprot) - self.success.append(_elem984) + (_etype989, _size986) = iprot.readListBegin() + for _i990 in xrange(_size986): + _elem991 = FieldSchema() + _elem991.read(iprot) + self.success.append(_elem991) iprot.readListEnd() else: iprot.skip(ftype) @@ -18486,8 +18575,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter985 in self.success: - iter985.write(oprot) + for iter992 in self.success: + iter992.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -18533,17 +18622,20 @@ class get_schema_args: Attributes: - db_name - table_name + - validWriteIdList """ thrift_spec = ( None, # 0 (1, TType.STRING, 'db_name', None, None, ), # 1 (2, TType.STRING, 'table_name', None, None, ), # 2 + (3, TType.STRING, 'validWriteIdList', None, None, ), # 3 ) - def __init__(self, db_name=None, table_name=None,): + def __init__(self, db_name=None, table_name=None, validWriteIdList=None,): self.db_name = db_name self.table_name = table_name + self.validWriteIdList = validWriteIdList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -18564,6 +18656,11 @@ def read(self, iprot): self.table_name = iprot.readString() else: iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -18582,6 +18679,10 @@ def write(self, oprot): oprot.writeFieldBegin('table_name', TType.STRING, 2) oprot.writeString(self.table_name) oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 3) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -18593,6 +18694,7 @@ def __hash__(self): value = 17 value = (value * 31) ^ hash(self.db_name) value = (value * 31) ^ hash(self.table_name) + value = (value * 31) ^ hash(self.validWriteIdList) return value def __repr__(self): @@ -18640,11 +18742,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype989, _size986) = iprot.readListBegin() - for _i990 in xrange(_size986): - _elem991 = FieldSchema() - _elem991.read(iprot) - 
self.success.append(_elem991) + (_etype996, _size993) = iprot.readListBegin() + for _i997 in xrange(_size993): + _elem998 = FieldSchema() + _elem998.read(iprot) + self.success.append(_elem998) iprot.readListEnd() else: iprot.skip(ftype) @@ -18679,8 +18781,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter992 in self.success: - iter992.write(oprot) + for iter999 in self.success: + iter999.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -18727,6 +18829,7 @@ class get_schema_with_environment_context_args: - db_name - table_name - environment_context + - validWriteIdList """ thrift_spec = ( @@ -18734,12 +18837,14 @@ class get_schema_with_environment_context_args: (1, TType.STRING, 'db_name', None, None, ), # 1 (2, TType.STRING, 'table_name', None, None, ), # 2 (3, TType.STRUCT, 'environment_context', (EnvironmentContext, EnvironmentContext.thrift_spec), None, ), # 3 + (4, TType.STRING, 'validWriteIdList', None, None, ), # 4 ) - def __init__(self, db_name=None, table_name=None, environment_context=None,): + def __init__(self, db_name=None, table_name=None, environment_context=None, validWriteIdList=None,): self.db_name = db_name self.table_name = table_name self.environment_context = environment_context + self.validWriteIdList = validWriteIdList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -18766,6 +18871,11 @@ def read(self, iprot): self.environment_context.read(iprot) else: iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -18788,6 +18898,10 @@ def write(self, oprot): oprot.writeFieldBegin('environment_context', TType.STRUCT, 3) self.environment_context.write(oprot) oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 4) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -18800,6 +18914,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.db_name) value = (value * 31) ^ hash(self.table_name) value = (value * 31) ^ hash(self.environment_context) + value = (value * 31) ^ hash(self.validWriteIdList) return value def __repr__(self): @@ -18847,11 +18962,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype996, _size993) = iprot.readListBegin() - for _i997 in xrange(_size993): - _elem998 = FieldSchema() - _elem998.read(iprot) - self.success.append(_elem998) + (_etype1003, _size1000) = iprot.readListBegin() + for _i1004 in xrange(_size1000): + _elem1005 = FieldSchema() + _elem1005.read(iprot) + self.success.append(_elem1005) iprot.readListEnd() else: iprot.skip(ftype) @@ -18886,8 +19001,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter999 in self.success: - iter999.write(oprot) + for iter1006 in self.success: + iter1006.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -19340,66 +19455,66 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.primaryKeys = [] - (_etype1003, _size1000) = iprot.readListBegin() - for 
_i1004 in xrange(_size1000): - _elem1005 = SQLPrimaryKey() - _elem1005.read(iprot) - self.primaryKeys.append(_elem1005) + (_etype1010, _size1007) = iprot.readListBegin() + for _i1011 in xrange(_size1007): + _elem1012 = SQLPrimaryKey() + _elem1012.read(iprot) + self.primaryKeys.append(_elem1012) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.LIST: self.foreignKeys = [] - (_etype1009, _size1006) = iprot.readListBegin() - for _i1010 in xrange(_size1006): - _elem1011 = SQLForeignKey() - _elem1011.read(iprot) - self.foreignKeys.append(_elem1011) + (_etype1016, _size1013) = iprot.readListBegin() + for _i1017 in xrange(_size1013): + _elem1018 = SQLForeignKey() + _elem1018.read(iprot) + self.foreignKeys.append(_elem1018) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 4: if ftype == TType.LIST: self.uniqueConstraints = [] - (_etype1015, _size1012) = iprot.readListBegin() - for _i1016 in xrange(_size1012): - _elem1017 = SQLUniqueConstraint() - _elem1017.read(iprot) - self.uniqueConstraints.append(_elem1017) + (_etype1022, _size1019) = iprot.readListBegin() + for _i1023 in xrange(_size1019): + _elem1024 = SQLUniqueConstraint() + _elem1024.read(iprot) + self.uniqueConstraints.append(_elem1024) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 5: if ftype == TType.LIST: self.notNullConstraints = [] - (_etype1021, _size1018) = iprot.readListBegin() - for _i1022 in xrange(_size1018): - _elem1023 = SQLNotNullConstraint() - _elem1023.read(iprot) - self.notNullConstraints.append(_elem1023) + (_etype1028, _size1025) = iprot.readListBegin() + for _i1029 in xrange(_size1025): + _elem1030 = SQLNotNullConstraint() + _elem1030.read(iprot) + self.notNullConstraints.append(_elem1030) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 6: if ftype == TType.LIST: self.defaultConstraints = [] - (_etype1027, _size1024) = iprot.readListBegin() - for _i1028 in xrange(_size1024): - _elem1029 = SQLDefaultConstraint() - _elem1029.read(iprot) - self.defaultConstraints.append(_elem1029) + (_etype1034, _size1031) = iprot.readListBegin() + for _i1035 in xrange(_size1031): + _elem1036 = SQLDefaultConstraint() + _elem1036.read(iprot) + self.defaultConstraints.append(_elem1036) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 7: if ftype == TType.LIST: self.checkConstraints = [] - (_etype1033, _size1030) = iprot.readListBegin() - for _i1034 in xrange(_size1030): - _elem1035 = SQLCheckConstraint() - _elem1035.read(iprot) - self.checkConstraints.append(_elem1035) + (_etype1040, _size1037) = iprot.readListBegin() + for _i1041 in xrange(_size1037): + _elem1042 = SQLCheckConstraint() + _elem1042.read(iprot) + self.checkConstraints.append(_elem1042) iprot.readListEnd() else: iprot.skip(ftype) @@ -19420,43 +19535,43 @@ def write(self, oprot): if self.primaryKeys is not None: oprot.writeFieldBegin('primaryKeys', TType.LIST, 2) oprot.writeListBegin(TType.STRUCT, len(self.primaryKeys)) - for iter1036 in self.primaryKeys: - iter1036.write(oprot) + for iter1043 in self.primaryKeys: + iter1043.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.foreignKeys is not None: oprot.writeFieldBegin('foreignKeys', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.foreignKeys)) - for iter1037 in self.foreignKeys: - iter1037.write(oprot) + for iter1044 in self.foreignKeys: + iter1044.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.uniqueConstraints is not None: oprot.writeFieldBegin('uniqueConstraints', TType.LIST, 4) oprot.writeListBegin(TType.STRUCT, 
len(self.uniqueConstraints)) - for iter1038 in self.uniqueConstraints: - iter1038.write(oprot) + for iter1045 in self.uniqueConstraints: + iter1045.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.notNullConstraints is not None: oprot.writeFieldBegin('notNullConstraints', TType.LIST, 5) oprot.writeListBegin(TType.STRUCT, len(self.notNullConstraints)) - for iter1039 in self.notNullConstraints: - iter1039.write(oprot) + for iter1046 in self.notNullConstraints: + iter1046.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.defaultConstraints is not None: oprot.writeFieldBegin('defaultConstraints', TType.LIST, 6) oprot.writeListBegin(TType.STRUCT, len(self.defaultConstraints)) - for iter1040 in self.defaultConstraints: - iter1040.write(oprot) + for iter1047 in self.defaultConstraints: + iter1047.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.checkConstraints is not None: oprot.writeFieldBegin('checkConstraints', TType.LIST, 7) oprot.writeListBegin(TType.STRUCT, len(self.checkConstraints)) - for iter1041 in self.checkConstraints: - iter1041.write(oprot) + for iter1048 in self.checkConstraints: + iter1048.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -21016,10 +21131,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.partNames = [] - (_etype1045, _size1042) = iprot.readListBegin() - for _i1046 in xrange(_size1042): - _elem1047 = iprot.readString() - self.partNames.append(_elem1047) + (_etype1052, _size1049) = iprot.readListBegin() + for _i1053 in xrange(_size1049): + _elem1054 = iprot.readString() + self.partNames.append(_elem1054) iprot.readListEnd() else: iprot.skip(ftype) @@ -21044,8 +21159,8 @@ def write(self, oprot): if self.partNames is not None: oprot.writeFieldBegin('partNames', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.partNames)) - for iter1048 in self.partNames: - oprot.writeString(iter1048) + for iter1055 in self.partNames: + oprot.writeString(iter1055) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -21390,10 +21505,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1052, _size1049) = iprot.readListBegin() - for _i1053 in xrange(_size1049): - _elem1054 = iprot.readString() - self.success.append(_elem1054) + (_etype1059, _size1056) = iprot.readListBegin() + for _i1060 in xrange(_size1056): + _elem1061 = iprot.readString() + self.success.append(_elem1061) iprot.readListEnd() else: iprot.skip(ftype) @@ -21416,8 +21531,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1055 in self.success: - oprot.writeString(iter1055) + for iter1062 in self.success: + oprot.writeString(iter1062) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -21567,10 +21682,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1059, _size1056) = iprot.readListBegin() - for _i1060 in xrange(_size1056): - _elem1061 = iprot.readString() - self.success.append(_elem1061) + (_etype1066, _size1063) = iprot.readListBegin() + for _i1067 in xrange(_size1063): + _elem1068 = iprot.readString() + self.success.append(_elem1068) iprot.readListEnd() else: iprot.skip(ftype) @@ -21593,8 +21708,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1062 in 
self.success: - oprot.writeString(iter1062) + for iter1069 in self.success: + oprot.writeString(iter1069) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -21699,11 +21814,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1066, _size1063) = iprot.readListBegin() - for _i1067 in xrange(_size1063): - _elem1068 = Table() - _elem1068.read(iprot) - self.success.append(_elem1068) + (_etype1073, _size1070) = iprot.readListBegin() + for _i1074 in xrange(_size1070): + _elem1075 = Table() + _elem1075.read(iprot) + self.success.append(_elem1075) iprot.readListEnd() else: iprot.skip(ftype) @@ -21726,8 +21841,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1069 in self.success: - iter1069.write(oprot) + for iter1076 in self.success: + iter1076.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -21851,10 +21966,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1073, _size1070) = iprot.readListBegin() - for _i1074 in xrange(_size1070): - _elem1075 = iprot.readString() - self.success.append(_elem1075) + (_etype1080, _size1077) = iprot.readListBegin() + for _i1081 in xrange(_size1077): + _elem1082 = iprot.readString() + self.success.append(_elem1082) iprot.readListEnd() else: iprot.skip(ftype) @@ -21877,8 +21992,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1076 in self.success: - oprot.writeString(iter1076) + for iter1083 in self.success: + oprot.writeString(iter1083) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -21951,10 +22066,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.tbl_types = [] - (_etype1080, _size1077) = iprot.readListBegin() - for _i1081 in xrange(_size1077): - _elem1082 = iprot.readString() - self.tbl_types.append(_elem1082) + (_etype1087, _size1084) = iprot.readListBegin() + for _i1088 in xrange(_size1084): + _elem1089 = iprot.readString() + self.tbl_types.append(_elem1089) iprot.readListEnd() else: iprot.skip(ftype) @@ -21979,8 +22094,8 @@ def write(self, oprot): if self.tbl_types is not None: oprot.writeFieldBegin('tbl_types', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.tbl_types)) - for iter1083 in self.tbl_types: - oprot.writeString(iter1083) + for iter1090 in self.tbl_types: + oprot.writeString(iter1090) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -22036,11 +22151,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1087, _size1084) = iprot.readListBegin() - for _i1088 in xrange(_size1084): - _elem1089 = TableMeta() - _elem1089.read(iprot) - self.success.append(_elem1089) + (_etype1094, _size1091) = iprot.readListBegin() + for _i1095 in xrange(_size1091): + _elem1096 = TableMeta() + _elem1096.read(iprot) + self.success.append(_elem1096) iprot.readListEnd() else: iprot.skip(ftype) @@ -22063,8 +22178,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1090 in self.success: - iter1090.write(oprot) + for iter1097 in self.success: + iter1097.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -22188,10 +22303,10 @@ def read(self, iprot): 
if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1094, _size1091) = iprot.readListBegin() - for _i1095 in xrange(_size1091): - _elem1096 = iprot.readString() - self.success.append(_elem1096) + (_etype1101, _size1098) = iprot.readListBegin() + for _i1102 in xrange(_size1098): + _elem1103 = iprot.readString() + self.success.append(_elem1103) iprot.readListEnd() else: iprot.skip(ftype) @@ -22214,8 +22329,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1097 in self.success: - oprot.writeString(iter1097) + for iter1104 in self.success: + oprot.writeString(iter1104) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -22251,17 +22366,20 @@ class get_table_args: Attributes: - dbname - tbl_name + - validWriteIdList """ thrift_spec = ( None, # 0 (1, TType.STRING, 'dbname', None, None, ), # 1 (2, TType.STRING, 'tbl_name', None, None, ), # 2 + (3, TType.STRING, 'validWriteIdList', None, None, ), # 3 ) - def __init__(self, dbname=None, tbl_name=None,): + def __init__(self, dbname=None, tbl_name=None, validWriteIdList=None,): self.dbname = dbname self.tbl_name = tbl_name + self.validWriteIdList = validWriteIdList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -22282,6 +22400,11 @@ def read(self, iprot): self.tbl_name = iprot.readString() else: iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -22300,6 +22423,10 @@ def write(self, oprot): oprot.writeFieldBegin('tbl_name', TType.STRING, 2) oprot.writeString(self.tbl_name) oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 3) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -22311,6 +22438,7 @@ def __hash__(self): value = 17 value = (value * 31) ^ hash(self.dbname) value = (value * 31) ^ hash(self.tbl_name) + value = (value * 31) ^ hash(self.validWriteIdList) return value def __repr__(self): @@ -22451,10 +22579,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.tbl_names = [] - (_etype1101, _size1098) = iprot.readListBegin() - for _i1102 in xrange(_size1098): - _elem1103 = iprot.readString() - self.tbl_names.append(_elem1103) + (_etype1108, _size1105) = iprot.readListBegin() + for _i1109 in xrange(_size1105): + _elem1110 = iprot.readString() + self.tbl_names.append(_elem1110) iprot.readListEnd() else: iprot.skip(ftype) @@ -22475,8 +22603,8 @@ def write(self, oprot): if self.tbl_names is not None: oprot.writeFieldBegin('tbl_names', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.tbl_names)) - for iter1104 in self.tbl_names: - oprot.writeString(iter1104) + for iter1111 in self.tbl_names: + oprot.writeString(iter1111) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -22528,11 +22656,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1108, _size1105) = iprot.readListBegin() - for _i1109 in xrange(_size1105): - _elem1110 = Table() - _elem1110.read(iprot) - self.success.append(_elem1110) + (_etype1115, _size1112) = iprot.readListBegin() + for _i1116 in xrange(_size1112): + _elem1117 = 
Table() + _elem1117.read(iprot) + self.success.append(_elem1117) iprot.readListEnd() else: iprot.skip(ftype) @@ -22549,8 +22677,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1111 in self.success: - iter1111.write(oprot) + for iter1118 in self.success: + iter1118.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -22670,11 +22798,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1115, _size1112) = iprot.readListBegin() - for _i1116 in xrange(_size1112): - _elem1117 = ExtendedTableInfo() - _elem1117.read(iprot) - self.success.append(_elem1117) + (_etype1122, _size1119) = iprot.readListBegin() + for _i1123 in xrange(_size1119): + _elem1124 = ExtendedTableInfo() + _elem1124.read(iprot) + self.success.append(_elem1124) iprot.readListEnd() else: iprot.skip(ftype) @@ -22697,8 +22825,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1118 in self.success: - iter1118.write(oprot) + for iter1125 in self.success: + iter1125.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -23571,10 +23699,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1122, _size1119) = iprot.readListBegin() - for _i1123 in xrange(_size1119): - _elem1124 = iprot.readString() - self.success.append(_elem1124) + (_etype1129, _size1126) = iprot.readListBegin() + for _i1130 in xrange(_size1126): + _elem1131 = iprot.readString() + self.success.append(_elem1131) iprot.readListEnd() else: iprot.skip(ftype) @@ -23609,8 +23737,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1125 in self.success: - oprot.writeString(iter1125) + for iter1132 in self.success: + oprot.writeString(iter1132) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -24739,11 +24867,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.new_parts = [] - (_etype1129, _size1126) = iprot.readListBegin() - for _i1130 in xrange(_size1126): - _elem1131 = Partition() - _elem1131.read(iprot) - self.new_parts.append(_elem1131) + (_etype1136, _size1133) = iprot.readListBegin() + for _i1137 in xrange(_size1133): + _elem1138 = Partition() + _elem1138.read(iprot) + self.new_parts.append(_elem1138) iprot.readListEnd() else: iprot.skip(ftype) @@ -24760,8 +24888,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter1132 in self.new_parts: - iter1132.write(oprot) + for iter1139 in self.new_parts: + iter1139.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -24919,11 +25047,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.new_parts = [] - (_etype1136, _size1133) = iprot.readListBegin() - for _i1137 in xrange(_size1133): - _elem1138 = PartitionSpec() - _elem1138.read(iprot) - self.new_parts.append(_elem1138) + (_etype1143, _size1140) = iprot.readListBegin() + for _i1144 in xrange(_size1140): + _elem1145 = PartitionSpec() + _elem1145.read(iprot) + self.new_parts.append(_elem1145) iprot.readListEnd() else: iprot.skip(ftype) @@ -24940,8 +25068,8 @@ def write(self, oprot): if 
self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter1139 in self.new_parts: - iter1139.write(oprot) + for iter1146 in self.new_parts: + iter1146.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -25115,10 +25243,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1143, _size1140) = iprot.readListBegin() - for _i1144 in xrange(_size1140): - _elem1145 = iprot.readString() - self.part_vals.append(_elem1145) + (_etype1150, _size1147) = iprot.readListBegin() + for _i1151 in xrange(_size1147): + _elem1152 = iprot.readString() + self.part_vals.append(_elem1152) iprot.readListEnd() else: iprot.skip(ftype) @@ -25143,8 +25271,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1146 in self.part_vals: - oprot.writeString(iter1146) + for iter1153 in self.part_vals: + oprot.writeString(iter1153) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -25497,10 +25625,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1150, _size1147) = iprot.readListBegin() - for _i1151 in xrange(_size1147): - _elem1152 = iprot.readString() - self.part_vals.append(_elem1152) + (_etype1157, _size1154) = iprot.readListBegin() + for _i1158 in xrange(_size1154): + _elem1159 = iprot.readString() + self.part_vals.append(_elem1159) iprot.readListEnd() else: iprot.skip(ftype) @@ -25531,8 +25659,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1153 in self.part_vals: - oprot.writeString(iter1153) + for iter1160 in self.part_vals: + oprot.writeString(iter1160) oprot.writeListEnd() oprot.writeFieldEnd() if self.environment_context is not None: @@ -26127,10 +26255,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1157, _size1154) = iprot.readListBegin() - for _i1158 in xrange(_size1154): - _elem1159 = iprot.readString() - self.part_vals.append(_elem1159) + (_etype1164, _size1161) = iprot.readListBegin() + for _i1165 in xrange(_size1161): + _elem1166 = iprot.readString() + self.part_vals.append(_elem1166) iprot.readListEnd() else: iprot.skip(ftype) @@ -26160,8 +26288,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1160 in self.part_vals: - oprot.writeString(iter1160) + for iter1167 in self.part_vals: + oprot.writeString(iter1167) oprot.writeListEnd() oprot.writeFieldEnd() if self.deleteData is not None: @@ -26334,10 +26462,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1164, _size1161) = iprot.readListBegin() - for _i1165 in xrange(_size1161): - _elem1166 = iprot.readString() - self.part_vals.append(_elem1166) + (_etype1171, _size1168) = iprot.readListBegin() + for _i1172 in xrange(_size1168): + _elem1173 = iprot.readString() + self.part_vals.append(_elem1173) iprot.readListEnd() else: iprot.skip(ftype) @@ -26373,8 +26501,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1167 in self.part_vals: - 
oprot.writeString(iter1167) + for iter1174 in self.part_vals: + oprot.writeString(iter1174) oprot.writeListEnd() oprot.writeFieldEnd() if self.deleteData is not None: @@ -27075,6 +27203,7 @@ class get_partition_args: - db_name - tbl_name - part_vals + - validTxnList """ thrift_spec = ( @@ -27082,12 +27211,14 @@ class get_partition_args: (1, TType.STRING, 'db_name', None, None, ), # 1 (2, TType.STRING, 'tbl_name', None, None, ), # 2 (3, TType.LIST, 'part_vals', (TType.STRING,None), None, ), # 3 + (4, TType.STRING, 'validTxnList', None, None, ), # 4 ) - def __init__(self, db_name=None, tbl_name=None, part_vals=None,): + def __init__(self, db_name=None, tbl_name=None, part_vals=None, validTxnList=None,): self.db_name = db_name self.tbl_name = tbl_name self.part_vals = part_vals + self.validTxnList = validTxnList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -27111,13 +27242,18 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1171, _size1168) = iprot.readListBegin() - for _i1172 in xrange(_size1168): - _elem1173 = iprot.readString() - self.part_vals.append(_elem1173) + (_etype1178, _size1175) = iprot.readListBegin() + for _i1179 in xrange(_size1175): + _elem1180 = iprot.readString() + self.part_vals.append(_elem1180) iprot.readListEnd() else: iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.validTxnList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -27139,10 +27275,14 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1174 in self.part_vals: - oprot.writeString(iter1174) + for iter1181 in self.part_vals: + oprot.writeString(iter1181) oprot.writeListEnd() oprot.writeFieldEnd() + if self.validTxnList is not None: + oprot.writeFieldBegin('validTxnList', TType.STRING, 4) + oprot.writeString(self.validTxnList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -27155,6 +27295,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.db_name) value = (value * 31) ^ hash(self.tbl_name) value = (value * 31) ^ hash(self.part_vals) + value = (value * 31) ^ hash(self.validTxnList) return value def __repr__(self): @@ -27299,11 +27440,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.partitionSpecs = {} - (_ktype1176, _vtype1177, _size1175 ) = iprot.readMapBegin() - for _i1179 in xrange(_size1175): - _key1180 = iprot.readString() - _val1181 = iprot.readString() - self.partitionSpecs[_key1180] = _val1181 + (_ktype1183, _vtype1184, _size1182 ) = iprot.readMapBegin() + for _i1186 in xrange(_size1182): + _key1187 = iprot.readString() + _val1188 = iprot.readString() + self.partitionSpecs[_key1187] = _val1188 iprot.readMapEnd() else: iprot.skip(ftype) @@ -27340,9 +27481,9 @@ def write(self, oprot): if self.partitionSpecs is not None: oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs)) - for kiter1182,viter1183 in self.partitionSpecs.items(): - oprot.writeString(kiter1182) - oprot.writeString(viter1183) + for kiter1189,viter1190 in self.partitionSpecs.items(): + oprot.writeString(kiter1189) + oprot.writeString(viter1190) oprot.writeMapEnd() oprot.writeFieldEnd() if self.source_db is not None: @@ 
-27547,11 +27688,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.partitionSpecs = {} - (_ktype1185, _vtype1186, _size1184 ) = iprot.readMapBegin() - for _i1188 in xrange(_size1184): - _key1189 = iprot.readString() - _val1190 = iprot.readString() - self.partitionSpecs[_key1189] = _val1190 + (_ktype1192, _vtype1193, _size1191 ) = iprot.readMapBegin() + for _i1195 in xrange(_size1191): + _key1196 = iprot.readString() + _val1197 = iprot.readString() + self.partitionSpecs[_key1196] = _val1197 iprot.readMapEnd() else: iprot.skip(ftype) @@ -27588,9 +27729,9 @@ def write(self, oprot): if self.partitionSpecs is not None: oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs)) - for kiter1191,viter1192 in self.partitionSpecs.items(): - oprot.writeString(kiter1191) - oprot.writeString(viter1192) + for kiter1198,viter1199 in self.partitionSpecs.items(): + oprot.writeString(kiter1198) + oprot.writeString(viter1199) oprot.writeMapEnd() oprot.writeFieldEnd() if self.source_db is not None: @@ -27673,11 +27814,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1196, _size1193) = iprot.readListBegin() - for _i1197 in xrange(_size1193): - _elem1198 = Partition() - _elem1198.read(iprot) - self.success.append(_elem1198) + (_etype1203, _size1200) = iprot.readListBegin() + for _i1204 in xrange(_size1200): + _elem1205 = Partition() + _elem1205.read(iprot) + self.success.append(_elem1205) iprot.readListEnd() else: iprot.skip(ftype) @@ -27718,8 +27859,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1199 in self.success: - iter1199.write(oprot) + for iter1206 in self.success: + iter1206.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -27773,6 +27914,7 @@ class get_partition_with_auth_args: - part_vals - user_name - group_names + - validTxnList """ thrift_spec = ( @@ -27782,14 +27924,16 @@ class get_partition_with_auth_args: (3, TType.LIST, 'part_vals', (TType.STRING,None), None, ), # 3 (4, TType.STRING, 'user_name', None, None, ), # 4 (5, TType.LIST, 'group_names', (TType.STRING,None), None, ), # 5 + (6, TType.STRING, 'validTxnList', None, None, ), # 6 ) - def __init__(self, db_name=None, tbl_name=None, part_vals=None, user_name=None, group_names=None,): + def __init__(self, db_name=None, tbl_name=None, part_vals=None, user_name=None, group_names=None, validTxnList=None,): self.db_name = db_name self.tbl_name = tbl_name self.part_vals = part_vals self.user_name = user_name self.group_names = group_names + self.validTxnList = validTxnList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -27813,10 +27957,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1203, _size1200) = iprot.readListBegin() - for _i1204 in xrange(_size1200): - _elem1205 = iprot.readString() - self.part_vals.append(_elem1205) + (_etype1210, _size1207) = iprot.readListBegin() + for _i1211 in xrange(_size1207): + _elem1212 = iprot.readString() + self.part_vals.append(_elem1212) iprot.readListEnd() else: iprot.skip(ftype) @@ -27828,13 +27972,18 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.group_names = [] - (_etype1209, _size1206) = 
iprot.readListBegin() - for _i1210 in xrange(_size1206): - _elem1211 = iprot.readString() - self.group_names.append(_elem1211) + (_etype1216, _size1213) = iprot.readListBegin() + for _i1217 in xrange(_size1213): + _elem1218 = iprot.readString() + self.group_names.append(_elem1218) iprot.readListEnd() else: iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.validTxnList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -27856,8 +28005,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1212 in self.part_vals: - oprot.writeString(iter1212) + for iter1219 in self.part_vals: + oprot.writeString(iter1219) oprot.writeListEnd() oprot.writeFieldEnd() if self.user_name is not None: @@ -27867,10 +28016,14 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1213 in self.group_names: - oprot.writeString(iter1213) + for iter1220 in self.group_names: + oprot.writeString(iter1220) oprot.writeListEnd() oprot.writeFieldEnd() + if self.validTxnList is not None: + oprot.writeFieldBegin('validTxnList', TType.STRING, 6) + oprot.writeString(self.validTxnList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -27885,6 +28038,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.part_vals) value = (value * 31) ^ hash(self.user_name) value = (value * 31) ^ hash(self.group_names) + value = (value * 31) ^ hash(self.validTxnList) return value def __repr__(self): @@ -27997,6 +28151,7 @@ class get_partition_by_name_args: - db_name - tbl_name - part_name + - validTxnList """ thrift_spec = ( @@ -28004,12 +28159,14 @@ class get_partition_by_name_args: (1, TType.STRING, 'db_name', None, None, ), # 1 (2, TType.STRING, 'tbl_name', None, None, ), # 2 (3, TType.STRING, 'part_name', None, None, ), # 3 + (4, TType.STRING, 'validTxnList', None, None, ), # 4 ) - def __init__(self, db_name=None, tbl_name=None, part_name=None,): + def __init__(self, db_name=None, tbl_name=None, part_name=None, validTxnList=None,): self.db_name = db_name self.tbl_name = tbl_name self.part_name = part_name + self.validTxnList = validTxnList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -28035,6 +28192,11 @@ def read(self, iprot): self.part_name = iprot.readString() else: iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.validTxnList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -28057,6 +28219,10 @@ def write(self, oprot): oprot.writeFieldBegin('part_name', TType.STRING, 3) oprot.writeString(self.part_name) oprot.writeFieldEnd() + if self.validTxnList is not None: + oprot.writeFieldBegin('validTxnList', TType.STRING, 4) + oprot.writeString(self.validTxnList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -28069,6 +28235,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.db_name) value = (value * 31) ^ hash(self.tbl_name) value = (value * 31) ^ hash(self.part_name) + value = (value * 31) ^ hash(self.validTxnList) return value def __repr__(self): @@ -28181,6 +28348,7 @@ class get_partitions_args: - db_name - tbl_name - max_parts + - validTxnList """ 
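
Each of these args structs follows the same append-only pattern: the new field takes the next unused field id, is optional on the wire, is skipped by readers that do not know it, and is only written when non-None. That is what keeps mixed-version clients and servers interoperable, which a quick in-memory round trip demonstrates (assuming the standard hive_metastore gen-py package path):

    # Round-trip sketch: a writer that leaves the new field unset (as any
    # pre-patch client effectively does) still deserializes cleanly with the
    # regenerated reader; the optional field just comes back as None.
    from thrift.transport import TTransport
    from thrift.protocol import TBinaryProtocol
    from hive_metastore.ThriftHiveMetastore import get_partitions_args

    buf = TTransport.TMemoryBuffer()
    get_partitions_args(db_name='default', tbl_name='t', max_parts=-1).write(
        TBinaryProtocol.TBinaryProtocol(buf))

    decoded = get_partitions_args()
    decoded.read(TBinaryProtocol.TBinaryProtocol(
        TTransport.TMemoryBuffer(buf.getvalue())))
    assert decoded.db_name == 'default' and decoded.validTxnList is None
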
thrift_spec = ( @@ -28188,12 +28356,14 @@ class get_partitions_args: (1, TType.STRING, 'db_name', None, None, ), # 1 (2, TType.STRING, 'tbl_name', None, None, ), # 2 (3, TType.I16, 'max_parts', None, -1, ), # 3 + (4, TType.STRING, 'validTxnList', None, None, ), # 4 ) - def __init__(self, db_name=None, tbl_name=None, max_parts=thrift_spec[3][4],): + def __init__(self, db_name=None, tbl_name=None, max_parts=thrift_spec[3][4], validTxnList=None,): self.db_name = db_name self.tbl_name = tbl_name self.max_parts = max_parts + self.validTxnList = validTxnList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -28219,6 +28389,11 @@ def read(self, iprot): self.max_parts = iprot.readI16() else: iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.validTxnList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -28241,6 +28416,10 @@ def write(self, oprot): oprot.writeFieldBegin('max_parts', TType.I16, 3) oprot.writeI16(self.max_parts) oprot.writeFieldEnd() + if self.validTxnList is not None: + oprot.writeFieldBegin('validTxnList', TType.STRING, 4) + oprot.writeString(self.validTxnList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -28253,6 +28432,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.db_name) value = (value * 31) ^ hash(self.tbl_name) value = (value * 31) ^ hash(self.max_parts) + value = (value * 31) ^ hash(self.validTxnList) return value def __repr__(self): @@ -28297,11 +28477,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1217, _size1214) = iprot.readListBegin() - for _i1218 in xrange(_size1214): - _elem1219 = Partition() - _elem1219.read(iprot) - self.success.append(_elem1219) + (_etype1224, _size1221) = iprot.readListBegin() + for _i1225 in xrange(_size1221): + _elem1226 = Partition() + _elem1226.read(iprot) + self.success.append(_elem1226) iprot.readListEnd() else: iprot.skip(ftype) @@ -28330,8 +28510,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1220 in self.success: - iter1220.write(oprot) + for iter1227 in self.success: + iter1227.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -28375,6 +28555,7 @@ class get_partitions_with_auth_args: - max_parts - user_name - group_names + - validTxnList """ thrift_spec = ( @@ -28384,14 +28565,16 @@ class get_partitions_with_auth_args: (3, TType.I16, 'max_parts', None, -1, ), # 3 (4, TType.STRING, 'user_name', None, None, ), # 4 (5, TType.LIST, 'group_names', (TType.STRING,None), None, ), # 5 + (6, TType.STRING, 'validTxnList', None, None, ), # 6 ) - def __init__(self, db_name=None, tbl_name=None, max_parts=thrift_spec[3][4], user_name=None, group_names=None,): + def __init__(self, db_name=None, tbl_name=None, max_parts=thrift_spec[3][4], user_name=None, group_names=None, validTxnList=None,): self.db_name = db_name self.tbl_name = tbl_name self.max_parts = max_parts self.user_name = user_name self.group_names = group_names + self.validTxnList = validTxnList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -28425,13 +28608,18 @@ def 
read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.group_names = [] - (_etype1224, _size1221) = iprot.readListBegin() - for _i1225 in xrange(_size1221): - _elem1226 = iprot.readString() - self.group_names.append(_elem1226) + (_etype1231, _size1228) = iprot.readListBegin() + for _i1232 in xrange(_size1228): + _elem1233 = iprot.readString() + self.group_names.append(_elem1233) iprot.readListEnd() else: iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.validTxnList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -28461,10 +28649,14 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1227 in self.group_names: - oprot.writeString(iter1227) + for iter1234 in self.group_names: + oprot.writeString(iter1234) oprot.writeListEnd() oprot.writeFieldEnd() + if self.validTxnList is not None: + oprot.writeFieldBegin('validTxnList', TType.STRING, 6) + oprot.writeString(self.validTxnList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -28479,6 +28671,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.max_parts) value = (value * 31) ^ hash(self.user_name) value = (value * 31) ^ hash(self.group_names) + value = (value * 31) ^ hash(self.validTxnList) return value def __repr__(self): @@ -28523,11 +28716,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1231, _size1228) = iprot.readListBegin() - for _i1232 in xrange(_size1228): - _elem1233 = Partition() - _elem1233.read(iprot) - self.success.append(_elem1233) + (_etype1238, _size1235) = iprot.readListBegin() + for _i1239 in xrange(_size1235): + _elem1240 = Partition() + _elem1240.read(iprot) + self.success.append(_elem1240) iprot.readListEnd() else: iprot.skip(ftype) @@ -28556,8 +28749,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1234 in self.success: - iter1234.write(oprot) + for iter1241 in self.success: + iter1241.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -28599,6 +28792,7 @@ class get_partitions_pspec_args: - db_name - tbl_name - max_parts + - validTxnList """ thrift_spec = ( @@ -28606,12 +28800,14 @@ class get_partitions_pspec_args: (1, TType.STRING, 'db_name', None, None, ), # 1 (2, TType.STRING, 'tbl_name', None, None, ), # 2 (3, TType.I32, 'max_parts', None, -1, ), # 3 + (4, TType.STRING, 'validTxnList', None, None, ), # 4 ) - def __init__(self, db_name=None, tbl_name=None, max_parts=thrift_spec[3][4],): + def __init__(self, db_name=None, tbl_name=None, max_parts=thrift_spec[3][4], validTxnList=None,): self.db_name = db_name self.tbl_name = tbl_name self.max_parts = max_parts + self.validTxnList = validTxnList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -28637,6 +28833,11 @@ def read(self, iprot): self.max_parts = iprot.readI32() else: iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.validTxnList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -28659,6 +28860,10 @@ def write(self, oprot): oprot.writeFieldBegin('max_parts', TType.I32, 3) oprot.writeI32(self.max_parts) 
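
The server side has to move in lockstep: because the processors now forward args.validTxnList as a trailing positional argument, any Iface implementation needs the extra parameter. A hypothetical handler stub is sketched below; MetastoreHandler and both helper methods are invented for illustration, and defaulting the parameter to None keeps older in-process callers working:

    from hive_metastore import ThriftHiveMetastore

    class MetastoreHandler(ThriftHiveMetastore.Iface):
        def get_partitions(self, db_name, tbl_name, max_parts, validTxnList=None):
            # validTxnList is the serialized ValidTxnList string, or None when
            # an older client omitted the optional field entirely.
            snapshot = self._parse_txn_list(validTxnList)   # hypothetical helper
            return self._list_partitions(db_name, tbl_name, max_parts, snapshot)
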
oprot.writeFieldEnd() + if self.validTxnList is not None: + oprot.writeFieldBegin('validTxnList', TType.STRING, 4) + oprot.writeString(self.validTxnList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -28671,6 +28876,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.db_name) value = (value * 31) ^ hash(self.tbl_name) value = (value * 31) ^ hash(self.max_parts) + value = (value * 31) ^ hash(self.validTxnList) return value def __repr__(self): @@ -28715,11 +28921,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1238, _size1235) = iprot.readListBegin() - for _i1239 in xrange(_size1235): - _elem1240 = PartitionSpec() - _elem1240.read(iprot) - self.success.append(_elem1240) + (_etype1245, _size1242) = iprot.readListBegin() + for _i1246 in xrange(_size1242): + _elem1247 = PartitionSpec() + _elem1247.read(iprot) + self.success.append(_elem1247) iprot.readListEnd() else: iprot.skip(ftype) @@ -28748,8 +28954,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1241 in self.success: - iter1241.write(oprot) + for iter1248 in self.success: + iter1248.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -28791,6 +28997,7 @@ class get_partition_names_args: - db_name - tbl_name - max_parts + - validTxnList """ thrift_spec = ( @@ -28798,12 +29005,14 @@ class get_partition_names_args: (1, TType.STRING, 'db_name', None, None, ), # 1 (2, TType.STRING, 'tbl_name', None, None, ), # 2 (3, TType.I16, 'max_parts', None, -1, ), # 3 + (4, TType.STRING, 'validTxnList', None, None, ), # 4 ) - def __init__(self, db_name=None, tbl_name=None, max_parts=thrift_spec[3][4],): + def __init__(self, db_name=None, tbl_name=None, max_parts=thrift_spec[3][4], validTxnList=None,): self.db_name = db_name self.tbl_name = tbl_name self.max_parts = max_parts + self.validTxnList = validTxnList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -28829,6 +29038,11 @@ def read(self, iprot): self.max_parts = iprot.readI16() else: iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.validTxnList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -28851,6 +29065,10 @@ def write(self, oprot): oprot.writeFieldBegin('max_parts', TType.I16, 3) oprot.writeI16(self.max_parts) oprot.writeFieldEnd() + if self.validTxnList is not None: + oprot.writeFieldBegin('validTxnList', TType.STRING, 4) + oprot.writeString(self.validTxnList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -28863,6 +29081,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.db_name) value = (value * 31) ^ hash(self.tbl_name) value = (value * 31) ^ hash(self.max_parts) + value = (value * 31) ^ hash(self.validTxnList) return value def __repr__(self): @@ -28907,10 +29126,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1245, _size1242) = iprot.readListBegin() - for _i1246 in xrange(_size1242): - _elem1247 = iprot.readString() - self.success.append(_elem1247) + (_etype1252, _size1249) = iprot.readListBegin() + for _i1253 in xrange(_size1249): + _elem1254 = iprot.readString() + self.success.append(_elem1254) iprot.readListEnd() else: iprot.skip(ftype) @@ -28939,8 +29158,8 
@@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1248 in self.success: - oprot.writeString(iter1248) + for iter1255 in self.success: + oprot.writeString(iter1255) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -29142,6 +29361,7 @@ class get_partitions_ps_args: - tbl_name - part_vals - max_parts + - validTxnList """ thrift_spec = ( @@ -29150,13 +29370,15 @@ class get_partitions_ps_args: (2, TType.STRING, 'tbl_name', None, None, ), # 2 (3, TType.LIST, 'part_vals', (TType.STRING,None), None, ), # 3 (4, TType.I16, 'max_parts', None, -1, ), # 4 + (5, TType.STRING, 'validTxnList', None, None, ), # 5 ) - def __init__(self, db_name=None, tbl_name=None, part_vals=None, max_parts=thrift_spec[4][4],): + def __init__(self, db_name=None, tbl_name=None, part_vals=None, max_parts=thrift_spec[4][4], validTxnList=None,): self.db_name = db_name self.tbl_name = tbl_name self.part_vals = part_vals self.max_parts = max_parts + self.validTxnList = validTxnList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -29180,10 +29402,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1252, _size1249) = iprot.readListBegin() - for _i1253 in xrange(_size1249): - _elem1254 = iprot.readString() - self.part_vals.append(_elem1254) + (_etype1259, _size1256) = iprot.readListBegin() + for _i1260 in xrange(_size1256): + _elem1261 = iprot.readString() + self.part_vals.append(_elem1261) iprot.readListEnd() else: iprot.skip(ftype) @@ -29192,6 +29414,11 @@ def read(self, iprot): self.max_parts = iprot.readI16() else: iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.validTxnList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -29213,14 +29440,18 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1255 in self.part_vals: - oprot.writeString(iter1255) + for iter1262 in self.part_vals: + oprot.writeString(iter1262) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: oprot.writeFieldBegin('max_parts', TType.I16, 4) oprot.writeI16(self.max_parts) oprot.writeFieldEnd() + if self.validTxnList is not None: + oprot.writeFieldBegin('validTxnList', TType.STRING, 5) + oprot.writeString(self.validTxnList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -29234,6 +29465,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.tbl_name) value = (value * 31) ^ hash(self.part_vals) value = (value * 31) ^ hash(self.max_parts) + value = (value * 31) ^ hash(self.validTxnList) return value def __repr__(self): @@ -29278,11 +29510,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1259, _size1256) = iprot.readListBegin() - for _i1260 in xrange(_size1256): - _elem1261 = Partition() - _elem1261.read(iprot) - self.success.append(_elem1261) + (_etype1266, _size1263) = iprot.readListBegin() + for _i1267 in xrange(_size1263): + _elem1268 = Partition() + _elem1268.read(iprot) + self.success.append(_elem1268) iprot.readListEnd() else: iprot.skip(ftype) @@ -29311,8 +29543,8 @@ def write(self, oprot): if self.success is not None: 
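# ---------------------------------------------------------------------------
# Editor's sketch (not part of the generated diff). The hunks above extend the
# partition-listing argument structs with an optional validTxnList string
# (field 4 on get_partition_names_args, field 5 on get_partitions_ps_args).
# A minimal illustration of how a caller on the regenerated bindings might
# populate it; the database/table names and the serialized ValidTxnList value
# are assumptions for illustration only.
from hive_metastore.ThriftHiveMetastore import get_partitions_ps_args

args = get_partitions_ps_args(
    db_name='default',                         # assumed database
    tbl_name='web_logs',                       # assumed table
    part_vals=['2020-01-01'],
    max_parts=-1,
    validTxnList='42:9223372036854775807::',   # assumed ValidTxnList encoding
)
# Readers that predate this field hit the generated `else: iprot.skip(ftype)`
# branch for the unknown field id, so the request stays wire-compatible.
# ---------------------------------------------------------------------------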
oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1262 in self.success: - iter1262.write(oprot) + for iter1269 in self.success: + iter1269.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -29357,6 +29589,7 @@ class get_partitions_ps_with_auth_args: - max_parts - user_name - group_names + - validTxnList """ thrift_spec = ( @@ -29367,15 +29600,17 @@ class get_partitions_ps_with_auth_args: (4, TType.I16, 'max_parts', None, -1, ), # 4 (5, TType.STRING, 'user_name', None, None, ), # 5 (6, TType.LIST, 'group_names', (TType.STRING,None), None, ), # 6 + (7, TType.STRING, 'validTxnList', None, None, ), # 7 ) - def __init__(self, db_name=None, tbl_name=None, part_vals=None, max_parts=thrift_spec[4][4], user_name=None, group_names=None,): + def __init__(self, db_name=None, tbl_name=None, part_vals=None, max_parts=thrift_spec[4][4], user_name=None, group_names=None, validTxnList=None,): self.db_name = db_name self.tbl_name = tbl_name self.part_vals = part_vals self.max_parts = max_parts self.user_name = user_name self.group_names = group_names + self.validTxnList = validTxnList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -29399,10 +29634,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1266, _size1263) = iprot.readListBegin() - for _i1267 in xrange(_size1263): - _elem1268 = iprot.readString() - self.part_vals.append(_elem1268) + (_etype1273, _size1270) = iprot.readListBegin() + for _i1274 in xrange(_size1270): + _elem1275 = iprot.readString() + self.part_vals.append(_elem1275) iprot.readListEnd() else: iprot.skip(ftype) @@ -29419,13 +29654,18 @@ def read(self, iprot): elif fid == 6: if ftype == TType.LIST: self.group_names = [] - (_etype1272, _size1269) = iprot.readListBegin() - for _i1273 in xrange(_size1269): - _elem1274 = iprot.readString() - self.group_names.append(_elem1274) + (_etype1279, _size1276) = iprot.readListBegin() + for _i1280 in xrange(_size1276): + _elem1281 = iprot.readString() + self.group_names.append(_elem1281) iprot.readListEnd() else: iprot.skip(ftype) + elif fid == 7: + if ftype == TType.STRING: + self.validTxnList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -29447,8 +29687,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1275 in self.part_vals: - oprot.writeString(iter1275) + for iter1282 in self.part_vals: + oprot.writeString(iter1282) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -29462,10 +29702,14 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 6) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1276 in self.group_names: - oprot.writeString(iter1276) + for iter1283 in self.group_names: + oprot.writeString(iter1283) oprot.writeListEnd() oprot.writeFieldEnd() + if self.validTxnList is not None: + oprot.writeFieldBegin('validTxnList', TType.STRING, 7) + oprot.writeString(self.validTxnList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -29481,6 +29725,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.max_parts) value = (value * 31) ^ 
hash(self.user_name) value = (value * 31) ^ hash(self.group_names) + value = (value * 31) ^ hash(self.validTxnList) return value def __repr__(self): @@ -29525,11 +29770,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1280, _size1277) = iprot.readListBegin() - for _i1281 in xrange(_size1277): - _elem1282 = Partition() - _elem1282.read(iprot) - self.success.append(_elem1282) + (_etype1287, _size1284) = iprot.readListBegin() + for _i1288 in xrange(_size1284): + _elem1289 = Partition() + _elem1289.read(iprot) + self.success.append(_elem1289) iprot.readListEnd() else: iprot.skip(ftype) @@ -29558,8 +29803,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1283 in self.success: - iter1283.write(oprot) + for iter1290 in self.success: + iter1290.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -29602,6 +29847,7 @@ class get_partition_names_ps_args: - tbl_name - part_vals - max_parts + - validTxnList """ thrift_spec = ( @@ -29610,13 +29856,15 @@ class get_partition_names_ps_args: (2, TType.STRING, 'tbl_name', None, None, ), # 2 (3, TType.LIST, 'part_vals', (TType.STRING,None), None, ), # 3 (4, TType.I16, 'max_parts', None, -1, ), # 4 + (5, TType.STRING, 'validTxnList', None, None, ), # 5 ) - def __init__(self, db_name=None, tbl_name=None, part_vals=None, max_parts=thrift_spec[4][4],): + def __init__(self, db_name=None, tbl_name=None, part_vals=None, max_parts=thrift_spec[4][4], validTxnList=None,): self.db_name = db_name self.tbl_name = tbl_name self.part_vals = part_vals self.max_parts = max_parts + self.validTxnList = validTxnList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -29640,10 +29888,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1287, _size1284) = iprot.readListBegin() - for _i1288 in xrange(_size1284): - _elem1289 = iprot.readString() - self.part_vals.append(_elem1289) + (_etype1294, _size1291) = iprot.readListBegin() + for _i1295 in xrange(_size1291): + _elem1296 = iprot.readString() + self.part_vals.append(_elem1296) iprot.readListEnd() else: iprot.skip(ftype) @@ -29652,6 +29900,11 @@ def read(self, iprot): self.max_parts = iprot.readI16() else: iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.validTxnList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -29673,14 +29926,18 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1290 in self.part_vals: - oprot.writeString(iter1290) + for iter1297 in self.part_vals: + oprot.writeString(iter1297) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: oprot.writeFieldBegin('max_parts', TType.I16, 4) oprot.writeI16(self.max_parts) oprot.writeFieldEnd() + if self.validTxnList is not None: + oprot.writeFieldBegin('validTxnList', TType.STRING, 5) + oprot.writeString(self.validTxnList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -29694,6 +29951,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.tbl_name) value = (value * 31) ^ hash(self.part_vals) value = (value * 31) ^ hash(self.max_parts) + value = 
(value * 31) ^ hash(self.validTxnList) return value def __repr__(self): @@ -29738,10 +29996,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1294, _size1291) = iprot.readListBegin() - for _i1295 in xrange(_size1291): - _elem1296 = iprot.readString() - self.success.append(_elem1296) + (_etype1301, _size1298) = iprot.readListBegin() + for _i1302 in xrange(_size1298): + _elem1303 = iprot.readString() + self.success.append(_elem1303) iprot.readListEnd() else: iprot.skip(ftype) @@ -29770,8 +30028,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1297 in self.success: - oprot.writeString(iter1297) + for iter1304 in self.success: + oprot.writeString(iter1304) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -29814,6 +30072,7 @@ class get_partitions_by_filter_args: - tbl_name - filter - max_parts + - validTxnList """ thrift_spec = ( @@ -29822,13 +30081,15 @@ class get_partitions_by_filter_args: (2, TType.STRING, 'tbl_name', None, None, ), # 2 (3, TType.STRING, 'filter', None, None, ), # 3 (4, TType.I16, 'max_parts', None, -1, ), # 4 + (5, TType.STRING, 'validTxnList', None, None, ), # 5 ) - def __init__(self, db_name=None, tbl_name=None, filter=None, max_parts=thrift_spec[4][4],): + def __init__(self, db_name=None, tbl_name=None, filter=None, max_parts=thrift_spec[4][4], validTxnList=None,): self.db_name = db_name self.tbl_name = tbl_name self.filter = filter self.max_parts = max_parts + self.validTxnList = validTxnList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -29859,6 +30120,11 @@ def read(self, iprot): self.max_parts = iprot.readI16() else: iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.validTxnList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -29885,6 +30151,10 @@ def write(self, oprot): oprot.writeFieldBegin('max_parts', TType.I16, 4) oprot.writeI16(self.max_parts) oprot.writeFieldEnd() + if self.validTxnList is not None: + oprot.writeFieldBegin('validTxnList', TType.STRING, 5) + oprot.writeString(self.validTxnList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -29898,6 +30168,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.tbl_name) value = (value * 31) ^ hash(self.filter) value = (value * 31) ^ hash(self.max_parts) + value = (value * 31) ^ hash(self.validTxnList) return value def __repr__(self): @@ -29942,11 +30213,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1301, _size1298) = iprot.readListBegin() - for _i1302 in xrange(_size1298): - _elem1303 = Partition() - _elem1303.read(iprot) - self.success.append(_elem1303) + (_etype1308, _size1305) = iprot.readListBegin() + for _i1309 in xrange(_size1305): + _elem1310 = Partition() + _elem1310.read(iprot) + self.success.append(_elem1310) iprot.readListEnd() else: iprot.skip(ftype) @@ -29975,8 +30246,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1304 in self.success: - iter1304.write(oprot) + for iter1311 in self.success: + iter1311.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -30019,6 
+30290,7 @@ class get_part_specs_by_filter_args: - tbl_name - filter - max_parts + - validTxnList """ thrift_spec = ( @@ -30027,13 +30299,15 @@ class get_part_specs_by_filter_args: (2, TType.STRING, 'tbl_name', None, None, ), # 2 (3, TType.STRING, 'filter', None, None, ), # 3 (4, TType.I32, 'max_parts', None, -1, ), # 4 + (5, TType.STRING, 'validTxnList', None, None, ), # 5 ) - def __init__(self, db_name=None, tbl_name=None, filter=None, max_parts=thrift_spec[4][4],): + def __init__(self, db_name=None, tbl_name=None, filter=None, max_parts=thrift_spec[4][4], validTxnList=None,): self.db_name = db_name self.tbl_name = tbl_name self.filter = filter self.max_parts = max_parts + self.validTxnList = validTxnList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -30064,6 +30338,11 @@ def read(self, iprot): self.max_parts = iprot.readI32() else: iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.validTxnList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -30090,6 +30369,10 @@ def write(self, oprot): oprot.writeFieldBegin('max_parts', TType.I32, 4) oprot.writeI32(self.max_parts) oprot.writeFieldEnd() + if self.validTxnList is not None: + oprot.writeFieldBegin('validTxnList', TType.STRING, 5) + oprot.writeString(self.validTxnList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -30103,6 +30386,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.tbl_name) value = (value * 31) ^ hash(self.filter) value = (value * 31) ^ hash(self.max_parts) + value = (value * 31) ^ hash(self.validTxnList) return value def __repr__(self): @@ -30147,11 +30431,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1308, _size1305) = iprot.readListBegin() - for _i1309 in xrange(_size1305): - _elem1310 = PartitionSpec() - _elem1310.read(iprot) - self.success.append(_elem1310) + (_etype1315, _size1312) = iprot.readListBegin() + for _i1316 in xrange(_size1312): + _elem1317 = PartitionSpec() + _elem1317.read(iprot) + self.success.append(_elem1317) iprot.readListEnd() else: iprot.skip(ftype) @@ -30180,8 +30464,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1311 in self.success: - iter1311.write(oprot) + for iter1318 in self.success: + iter1318.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -30382,6 +30666,7 @@ class get_num_partitions_by_filter_args: - db_name - tbl_name - filter + - validTxnList """ thrift_spec = ( @@ -30389,12 +30674,14 @@ class get_num_partitions_by_filter_args: (1, TType.STRING, 'db_name', None, None, ), # 1 (2, TType.STRING, 'tbl_name', None, None, ), # 2 (3, TType.STRING, 'filter', None, None, ), # 3 + (4, TType.STRING, 'validTxnList', None, None, ), # 4 ) - def __init__(self, db_name=None, tbl_name=None, filter=None,): + def __init__(self, db_name=None, tbl_name=None, filter=None, validTxnList=None,): self.db_name = db_name self.tbl_name = tbl_name self.filter = filter + self.validTxnList = validTxnList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -30420,6 +30707,11 @@ def read(self, iprot): 
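# ---------------------------------------------------------------------------
# Editor's sketch (not part of the generated diff): the write() pattern added
# throughout these hunks only emits the new field when it is set, so an unset
# validTxnList is absent from the wire entirely. A round-trip over an
# in-memory transport demonstrates this; names of the example values are
# assumptions, the Thrift transport/protocol APIs are standard.
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from hive_metastore.ThriftHiveMetastore import get_num_partitions_by_filter_args

buf = TTransport.TMemoryBuffer()
get_num_partitions_by_filter_args(
    db_name='default', tbl_name='web_logs', filter='ds="2020-01-01"',
    validTxnList=None,   # left unset: write() skips field 4 altogether
).write(TBinaryProtocol.TBinaryProtocol(buf))

rt = get_num_partitions_by_filter_args()
rt.read(TBinaryProtocol.TBinaryProtocol(TTransport.TMemoryBuffer(buf.getvalue())))
assert rt.validTxnList is None   # nothing was written, so nothing is read back
# ---------------------------------------------------------------------------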
self.filter = iprot.readString() else: iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.validTxnList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -30442,6 +30734,10 @@ def write(self, oprot): oprot.writeFieldBegin('filter', TType.STRING, 3) oprot.writeString(self.filter) oprot.writeFieldEnd() + if self.validTxnList is not None: + oprot.writeFieldBegin('validTxnList', TType.STRING, 4) + oprot.writeString(self.validTxnList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -30454,6 +30750,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.db_name) value = (value * 31) ^ hash(self.tbl_name) value = (value * 31) ^ hash(self.filter) + value = (value * 31) ^ hash(self.validTxnList) return value def __repr__(self): @@ -30565,6 +30862,7 @@ class get_partitions_by_names_args: - db_name - tbl_name - names + - validTxnList """ thrift_spec = ( @@ -30572,12 +30870,14 @@ class get_partitions_by_names_args: (1, TType.STRING, 'db_name', None, None, ), # 1 (2, TType.STRING, 'tbl_name', None, None, ), # 2 (3, TType.LIST, 'names', (TType.STRING,None), None, ), # 3 + (4, TType.STRING, 'validTxnList', None, None, ), # 4 ) - def __init__(self, db_name=None, tbl_name=None, names=None,): + def __init__(self, db_name=None, tbl_name=None, names=None, validTxnList=None,): self.db_name = db_name self.tbl_name = tbl_name self.names = names + self.validTxnList = validTxnList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -30601,13 +30901,18 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.names = [] - (_etype1315, _size1312) = iprot.readListBegin() - for _i1316 in xrange(_size1312): - _elem1317 = iprot.readString() - self.names.append(_elem1317) + (_etype1322, _size1319) = iprot.readListBegin() + for _i1323 in xrange(_size1319): + _elem1324 = iprot.readString() + self.names.append(_elem1324) iprot.readListEnd() else: iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.validTxnList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -30629,10 +30934,14 @@ def write(self, oprot): if self.names is not None: oprot.writeFieldBegin('names', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.names)) - for iter1318 in self.names: - oprot.writeString(iter1318) + for iter1325 in self.names: + oprot.writeString(iter1325) oprot.writeListEnd() oprot.writeFieldEnd() + if self.validTxnList is not None: + oprot.writeFieldBegin('validTxnList', TType.STRING, 4) + oprot.writeString(self.validTxnList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -30645,6 +30954,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.db_name) value = (value * 31) ^ hash(self.tbl_name) value = (value * 31) ^ hash(self.names) + value = (value * 31) ^ hash(self.validTxnList) return value def __repr__(self): @@ -30689,11 +30999,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1322, _size1319) = iprot.readListBegin() - for _i1323 in xrange(_size1319): - _elem1324 = Partition() - _elem1324.read(iprot) - self.success.append(_elem1324) + (_etype1329, _size1326) = iprot.readListBegin() + for _i1330 in xrange(_size1326): + _elem1331 = Partition() + _elem1331.read(iprot) + self.success.append(_elem1331) iprot.readListEnd() else: 
iprot.skip(ftype) @@ -30722,8 +31032,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1325 in self.success: - iter1325.write(oprot) + for iter1332 in self.success: + iter1332.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -31132,11 +31442,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.new_parts = [] - (_etype1329, _size1326) = iprot.readListBegin() - for _i1330 in xrange(_size1326): - _elem1331 = Partition() - _elem1331.read(iprot) - self.new_parts.append(_elem1331) + (_etype1336, _size1333) = iprot.readListBegin() + for _i1337 in xrange(_size1333): + _elem1338 = Partition() + _elem1338.read(iprot) + self.new_parts.append(_elem1338) iprot.readListEnd() else: iprot.skip(ftype) @@ -31161,8 +31471,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter1332 in self.new_parts: - iter1332.write(oprot) + for iter1339 in self.new_parts: + iter1339.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -31315,11 +31625,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.new_parts = [] - (_etype1336, _size1333) = iprot.readListBegin() - for _i1337 in xrange(_size1333): - _elem1338 = Partition() - _elem1338.read(iprot) - self.new_parts.append(_elem1338) + (_etype1343, _size1340) = iprot.readListBegin() + for _i1344 in xrange(_size1340): + _elem1345 = Partition() + _elem1345.read(iprot) + self.new_parts.append(_elem1345) iprot.readListEnd() else: iprot.skip(ftype) @@ -31350,8 +31660,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter1339 in self.new_parts: - iter1339.write(oprot) + for iter1346 in self.new_parts: + iter1346.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.environment_context is not None: @@ -31854,10 +32164,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1343, _size1340) = iprot.readListBegin() - for _i1344 in xrange(_size1340): - _elem1345 = iprot.readString() - self.part_vals.append(_elem1345) + (_etype1350, _size1347) = iprot.readListBegin() + for _i1351 in xrange(_size1347): + _elem1352 = iprot.readString() + self.part_vals.append(_elem1352) iprot.readListEnd() else: iprot.skip(ftype) @@ -31888,8 +32198,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1346 in self.part_vals: - oprot.writeString(iter1346) + for iter1353 in self.part_vals: + oprot.writeString(iter1353) oprot.writeListEnd() oprot.writeFieldEnd() if self.new_part is not None: @@ -32190,10 +32500,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.part_vals = [] - (_etype1350, _size1347) = iprot.readListBegin() - for _i1351 in xrange(_size1347): - _elem1352 = iprot.readString() - self.part_vals.append(_elem1352) + (_etype1357, _size1354) = iprot.readListBegin() + for _i1358 in xrange(_size1354): + _elem1359 = iprot.readString() + self.part_vals.append(_elem1359) iprot.readListEnd() else: iprot.skip(ftype) @@ -32215,8 +32525,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 1) 
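# ---------------------------------------------------------------------------
# Editor's note: the long +/- runs around here change nothing semantically.
# The Thrift compiler numbers its loop temporaries (_etypeNNNN, _sizeNNNN,
# _elemNNNN, iterNNNN, kiterNNNN, viterNNNN) sequentially across the whole
# generated file, so collection fields added earlier in the service shift
# every later temporary by a fixed offset (here by seven, e.g.
# iter1353 -> iter1360). The renumbering is purely mechanical.
# ---------------------------------------------------------------------------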
oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1353 in self.part_vals: - oprot.writeString(iter1353) + for iter1360 in self.part_vals: + oprot.writeString(iter1360) oprot.writeListEnd() oprot.writeFieldEnd() if self.throw_exception is not None: @@ -32574,10 +32884,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1357, _size1354) = iprot.readListBegin() - for _i1358 in xrange(_size1354): - _elem1359 = iprot.readString() - self.success.append(_elem1359) + (_etype1364, _size1361) = iprot.readListBegin() + for _i1365 in xrange(_size1361): + _elem1366 = iprot.readString() + self.success.append(_elem1366) iprot.readListEnd() else: iprot.skip(ftype) @@ -32600,8 +32910,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1360 in self.success: - oprot.writeString(iter1360) + for iter1367 in self.success: + oprot.writeString(iter1367) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -32725,11 +33035,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.MAP: self.success = {} - (_ktype1362, _vtype1363, _size1361 ) = iprot.readMapBegin() - for _i1365 in xrange(_size1361): - _key1366 = iprot.readString() - _val1367 = iprot.readString() - self.success[_key1366] = _val1367 + (_ktype1369, _vtype1370, _size1368 ) = iprot.readMapBegin() + for _i1372 in xrange(_size1368): + _key1373 = iprot.readString() + _val1374 = iprot.readString() + self.success[_key1373] = _val1374 iprot.readMapEnd() else: iprot.skip(ftype) @@ -32752,9 +33062,9 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.MAP, 0) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.success)) - for kiter1368,viter1369 in self.success.items(): - oprot.writeString(kiter1368) - oprot.writeString(viter1369) + for kiter1375,viter1376 in self.success.items(): + oprot.writeString(kiter1375) + oprot.writeString(viter1376) oprot.writeMapEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -32830,11 +33140,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.MAP: self.part_vals = {} - (_ktype1371, _vtype1372, _size1370 ) = iprot.readMapBegin() - for _i1374 in xrange(_size1370): - _key1375 = iprot.readString() - _val1376 = iprot.readString() - self.part_vals[_key1375] = _val1376 + (_ktype1378, _vtype1379, _size1377 ) = iprot.readMapBegin() + for _i1381 in xrange(_size1377): + _key1382 = iprot.readString() + _val1383 = iprot.readString() + self.part_vals[_key1382] = _val1383 iprot.readMapEnd() else: iprot.skip(ftype) @@ -32864,9 +33174,9 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals)) - for kiter1377,viter1378 in self.part_vals.items(): - oprot.writeString(kiter1377) - oprot.writeString(viter1378) + for kiter1384,viter1385 in self.part_vals.items(): + oprot.writeString(kiter1384) + oprot.writeString(viter1385) oprot.writeMapEnd() oprot.writeFieldEnd() if self.eventType is not None: @@ -33080,11 +33390,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.MAP: self.part_vals = {} - (_ktype1380, _vtype1381, _size1379 ) = iprot.readMapBegin() - for _i1383 in xrange(_size1379): - _key1384 = iprot.readString() - _val1385 = iprot.readString() - self.part_vals[_key1384] = _val1385 + (_ktype1387, _vtype1388, _size1386 ) = iprot.readMapBegin() + for _i1390 in 
xrange(_size1386): + _key1391 = iprot.readString() + _val1392 = iprot.readString() + self.part_vals[_key1391] = _val1392 iprot.readMapEnd() else: iprot.skip(ftype) @@ -33114,9 +33424,9 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals)) - for kiter1386,viter1387 in self.part_vals.items(): - oprot.writeString(kiter1386) - oprot.writeString(viter1387) + for kiter1393,viter1394 in self.part_vals.items(): + oprot.writeString(kiter1393) + oprot.writeString(viter1394) oprot.writeMapEnd() oprot.writeFieldEnd() if self.eventType is not None: @@ -35003,6 +35313,7 @@ class get_table_column_statistics_args: - db_name - tbl_name - col_name + - validWriteIdList """ thrift_spec = ( @@ -35010,12 +35321,14 @@ class get_table_column_statistics_args: (1, TType.STRING, 'db_name', None, None, ), # 1 (2, TType.STRING, 'tbl_name', None, None, ), # 2 (3, TType.STRING, 'col_name', None, None, ), # 3 + (4, TType.STRING, 'validWriteIdList', None, None, ), # 4 ) - def __init__(self, db_name=None, tbl_name=None, col_name=None,): + def __init__(self, db_name=None, tbl_name=None, col_name=None, validWriteIdList=None,): self.db_name = db_name self.tbl_name = tbl_name self.col_name = col_name + self.validWriteIdList = validWriteIdList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -35041,6 +35354,11 @@ def read(self, iprot): self.col_name = iprot.readString() else: iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -35063,6 +35381,10 @@ def write(self, oprot): oprot.writeFieldBegin('col_name', TType.STRING, 3) oprot.writeString(self.col_name) oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 4) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -35075,6 +35397,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.db_name) value = (value * 31) ^ hash(self.tbl_name) value = (value * 31) ^ hash(self.col_name) + value = (value * 31) ^ hash(self.validWriteIdList) return value def __repr__(self): @@ -35216,6 +35539,7 @@ class get_partition_column_statistics_args: - tbl_name - part_name - col_name + - validWriteIdList """ thrift_spec = ( @@ -35224,13 +35548,15 @@ class get_partition_column_statistics_args: (2, TType.STRING, 'tbl_name', None, None, ), # 2 (3, TType.STRING, 'part_name', None, None, ), # 3 (4, TType.STRING, 'col_name', None, None, ), # 4 + (5, TType.STRING, 'validWriteIdList', None, None, ), # 5 ) - def __init__(self, db_name=None, tbl_name=None, part_name=None, col_name=None,): + def __init__(self, db_name=None, tbl_name=None, part_name=None, col_name=None, validWriteIdList=None,): self.db_name = db_name self.tbl_name = tbl_name self.part_name = part_name self.col_name = col_name + self.validWriteIdList = validWriteIdList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -35261,6 +35587,11 @@ def read(self, iprot): self.col_name = iprot.readString() else: iprot.skip(ftype) + elif fid == 5: + if ftype == 
TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -35287,6 +35618,10 @@ def write(self, oprot): oprot.writeFieldBegin('col_name', TType.STRING, 4) oprot.writeString(self.col_name) oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 5) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -35300,6 +35635,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.tbl_name) value = (value * 31) ^ hash(self.part_name) value = (value * 31) ^ hash(self.col_name) + value = (value * 31) ^ hash(self.validWriteIdList) return value def __repr__(self): @@ -37142,10 +37478,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1391, _size1388) = iprot.readListBegin() - for _i1392 in xrange(_size1388): - _elem1393 = iprot.readString() - self.success.append(_elem1393) + (_etype1398, _size1395) = iprot.readListBegin() + for _i1399 in xrange(_size1395): + _elem1400 = iprot.readString() + self.success.append(_elem1400) iprot.readListEnd() else: iprot.skip(ftype) @@ -37168,8 +37504,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1394 in self.success: - oprot.writeString(iter1394) + for iter1401 in self.success: + oprot.writeString(iter1401) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -37857,10 +38193,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1398, _size1395) = iprot.readListBegin() - for _i1399 in xrange(_size1395): - _elem1400 = iprot.readString() - self.success.append(_elem1400) + (_etype1405, _size1402) = iprot.readListBegin() + for _i1406 in xrange(_size1402): + _elem1407 = iprot.readString() + self.success.append(_elem1407) iprot.readListEnd() else: iprot.skip(ftype) @@ -37883,8 +38219,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1401 in self.success: - oprot.writeString(iter1401) + for iter1408 in self.success: + oprot.writeString(iter1408) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -38398,11 +38734,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1405, _size1402) = iprot.readListBegin() - for _i1406 in xrange(_size1402): - _elem1407 = Role() - _elem1407.read(iprot) - self.success.append(_elem1407) + (_etype1412, _size1409) = iprot.readListBegin() + for _i1413 in xrange(_size1409): + _elem1414 = Role() + _elem1414.read(iprot) + self.success.append(_elem1414) iprot.readListEnd() else: iprot.skip(ftype) @@ -38425,8 +38761,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1408 in self.success: - iter1408.write(oprot) + for iter1415 in self.success: + iter1415.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -38935,10 +39271,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.group_names = [] - (_etype1412, _size1409) = iprot.readListBegin() - for _i1413 in xrange(_size1409): - _elem1414 = iprot.readString() - self.group_names.append(_elem1414) + (_etype1419, _size1416) = iprot.readListBegin() + for 
_i1420 in xrange(_size1416): + _elem1421 = iprot.readString() + self.group_names.append(_elem1421) iprot.readListEnd() else: iprot.skip(ftype) @@ -38963,8 +39299,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1415 in self.group_names: - oprot.writeString(iter1415) + for iter1422 in self.group_names: + oprot.writeString(iter1422) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -39191,11 +39527,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1419, _size1416) = iprot.readListBegin() - for _i1420 in xrange(_size1416): - _elem1421 = HiveObjectPrivilege() - _elem1421.read(iprot) - self.success.append(_elem1421) + (_etype1426, _size1423) = iprot.readListBegin() + for _i1427 in xrange(_size1423): + _elem1428 = HiveObjectPrivilege() + _elem1428.read(iprot) + self.success.append(_elem1428) iprot.readListEnd() else: iprot.skip(ftype) @@ -39218,8 +39554,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1422 in self.success: - iter1422.write(oprot) + for iter1429 in self.success: + iter1429.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -39889,10 +40225,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.group_names = [] - (_etype1426, _size1423) = iprot.readListBegin() - for _i1427 in xrange(_size1423): - _elem1428 = iprot.readString() - self.group_names.append(_elem1428) + (_etype1433, _size1430) = iprot.readListBegin() + for _i1434 in xrange(_size1430): + _elem1435 = iprot.readString() + self.group_names.append(_elem1435) iprot.readListEnd() else: iprot.skip(ftype) @@ -39913,8 +40249,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1429 in self.group_names: - oprot.writeString(iter1429) + for iter1436 in self.group_names: + oprot.writeString(iter1436) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -39969,10 +40305,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1433, _size1430) = iprot.readListBegin() - for _i1434 in xrange(_size1430): - _elem1435 = iprot.readString() - self.success.append(_elem1435) + (_etype1440, _size1437) = iprot.readListBegin() + for _i1441 in xrange(_size1437): + _elem1442 = iprot.readString() + self.success.append(_elem1442) iprot.readListEnd() else: iprot.skip(ftype) @@ -39995,8 +40331,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1436 in self.success: - oprot.writeString(iter1436) + for iter1443 in self.success: + oprot.writeString(iter1443) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -40928,10 +41264,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1440, _size1437) = iprot.readListBegin() - for _i1441 in xrange(_size1437): - _elem1442 = iprot.readString() - self.success.append(_elem1442) + (_etype1447, _size1444) = iprot.readListBegin() + for _i1448 in xrange(_size1444): + _elem1449 = iprot.readString() + self.success.append(_elem1449) iprot.readListEnd() else: iprot.skip(ftype) @@ -40948,8 +41284,8 @@ def 
write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1443 in self.success: - oprot.writeString(iter1443) + for iter1450 in self.success: + oprot.writeString(iter1450) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -41476,10 +41812,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1447, _size1444) = iprot.readListBegin() - for _i1448 in xrange(_size1444): - _elem1449 = iprot.readString() - self.success.append(_elem1449) + (_etype1454, _size1451) = iprot.readListBegin() + for _i1455 in xrange(_size1451): + _elem1456 = iprot.readString() + self.success.append(_elem1456) iprot.readListEnd() else: iprot.skip(ftype) @@ -41496,8 +41832,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1450 in self.success: - oprot.writeString(iter1450) + for iter1457 in self.success: + oprot.writeString(iter1457) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -44510,10 +44846,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1454, _size1451) = iprot.readListBegin() - for _i1455 in xrange(_size1451): - _elem1456 = iprot.readString() - self.success.append(_elem1456) + (_etype1461, _size1458) = iprot.readListBegin() + for _i1462 in xrange(_size1458): + _elem1463 = iprot.readString() + self.success.append(_elem1463) iprot.readListEnd() else: iprot.skip(ftype) @@ -44530,8 +44866,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1457 in self.success: - oprot.writeString(iter1457) + for iter1464 in self.success: + oprot.writeString(iter1464) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -50841,11 +51177,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1461, _size1458) = iprot.readListBegin() - for _i1462 in xrange(_size1458): - _elem1463 = SchemaVersion() - _elem1463.read(iprot) - self.success.append(_elem1463) + (_etype1468, _size1465) = iprot.readListBegin() + for _i1469 in xrange(_size1465): + _elem1470 = SchemaVersion() + _elem1470.read(iprot) + self.success.append(_elem1470) iprot.readListEnd() else: iprot.skip(ftype) @@ -50874,8 +51210,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1464 in self.success: - iter1464.write(oprot) + for iter1471 in self.success: + iter1471.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -52350,11 +52686,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1468, _size1465) = iprot.readListBegin() - for _i1469 in xrange(_size1465): - _elem1470 = RuntimeStat() - _elem1470.read(iprot) - self.success.append(_elem1470) + (_etype1475, _size1472) = iprot.readListBegin() + for _i1476 in xrange(_size1472): + _elem1477 = RuntimeStat() + _elem1477.read(iprot) + self.success.append(_elem1477) iprot.readListEnd() else: iprot.skip(ftype) @@ -52377,8 +52713,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1471 in self.success: - iter1471.write(oprot) + 
for iter1478 in self.success: + iter1478.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py index d6a08bbe32..d44b1d5268 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py @@ -9654,6 +9654,7 @@ class PartitionsByExprRequest: - defaultPartitionName - maxParts - catName + - validWriteIdList """ thrift_spec = ( @@ -9664,15 +9665,17 @@ class PartitionsByExprRequest: (4, TType.STRING, 'defaultPartitionName', None, None, ), # 4 (5, TType.I16, 'maxParts', None, -1, ), # 5 (6, TType.STRING, 'catName', None, None, ), # 6 + (7, TType.STRING, 'validWriteIdList', None, None, ), # 7 ) - def __init__(self, dbName=None, tblName=None, expr=None, defaultPartitionName=None, maxParts=thrift_spec[5][4], catName=None,): + def __init__(self, dbName=None, tblName=None, expr=None, defaultPartitionName=None, maxParts=thrift_spec[5][4], catName=None, validWriteIdList=None,): self.dbName = dbName self.tblName = tblName self.expr = expr self.defaultPartitionName = defaultPartitionName self.maxParts = maxParts self.catName = catName + self.validWriteIdList = validWriteIdList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -9713,6 +9716,11 @@ def read(self, iprot): self.catName = iprot.readString() else: iprot.skip(ftype) + elif fid == 7: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -9747,6 +9755,10 @@ def write(self, oprot): oprot.writeFieldBegin('catName', TType.STRING, 6) oprot.writeString(self.catName) oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 7) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -9768,6 +9780,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.defaultPartitionName) value = (value * 31) ^ hash(self.maxParts) value = (value * 31) ^ hash(self.catName) + value = (value * 31) ^ hash(self.validWriteIdList) return value def __repr__(self): @@ -10939,6 +10952,7 @@ class PartitionValuesRequest: - ascending - maxParts - catName + - validWriteIdList """ thrift_spec = ( @@ -10952,9 +10966,10 @@ class PartitionValuesRequest: (7, TType.BOOL, 'ascending', None, True, ), # 7 (8, TType.I64, 'maxParts', None, -1, ), # 8 (9, TType.STRING, 'catName', None, None, ), # 9 + (10, TType.STRING, 'validWriteIdList', None, None, ), # 10 ) - def __init__(self, dbName=None, tblName=None, partitionKeys=None, applyDistinct=thrift_spec[4][4], filter=None, partitionOrder=None, ascending=thrift_spec[7][4], maxParts=thrift_spec[8][4], catName=None,): + def __init__(self, dbName=None, tblName=None, partitionKeys=None, applyDistinct=thrift_spec[4][4], filter=None, partitionOrder=None, ascending=thrift_spec[7][4], maxParts=thrift_spec[8][4], catName=None, validWriteIdList=None,): self.dbName = dbName self.tblName = tblName self.partitionKeys = partitionKeys @@ -10964,6 +10979,7 @@ def __init__(self, dbName=None, tblName=None, partitionKeys=None, applyDistinct= self.ascending = 
ascending self.maxParts = maxParts self.catName = catName + self.validWriteIdList = validWriteIdList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -11031,6 +11047,11 @@ def read(self, iprot): self.catName = iprot.readString() else: iprot.skip(ftype) + elif fid == 10: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -11083,6 +11104,10 @@ def write(self, oprot): oprot.writeFieldBegin('catName', TType.STRING, 9) oprot.writeString(self.catName) oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 10) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -11107,6 +11132,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.ascending) value = (value * 31) ^ hash(self.maxParts) value = (value * 31) ^ hash(self.catName) + value = (value * 31) ^ hash(self.validWriteIdList) return value def __repr__(self): @@ -11280,6 +11306,7 @@ class GetPartitionsByNamesRequest: - get_col_stats - processorCapabilities - processorIdentifier + - validWriteIdList """ thrift_spec = ( @@ -11290,15 +11317,17 @@ class GetPartitionsByNamesRequest: (4, TType.BOOL, 'get_col_stats', None, None, ), # 4 (5, TType.LIST, 'processorCapabilities', (TType.STRING,None), None, ), # 5 (6, TType.STRING, 'processorIdentifier', None, None, ), # 6 + (7, TType.STRING, 'validWriteIdList', None, None, ), # 7 ) - def __init__(self, db_name=None, tbl_name=None, names=None, get_col_stats=None, processorCapabilities=None, processorIdentifier=None,): + def __init__(self, db_name=None, tbl_name=None, names=None, get_col_stats=None, processorCapabilities=None, processorIdentifier=None, validWriteIdList=None,): self.db_name = db_name self.tbl_name = tbl_name self.names = names self.get_col_stats = get_col_stats self.processorCapabilities = processorCapabilities self.processorIdentifier = processorIdentifier + self.validWriteIdList = validWriteIdList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -11349,6 +11378,11 @@ def read(self, iprot): self.processorIdentifier = iprot.readString() else: iprot.skip(ftype) + elif fid == 7: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -11389,6 +11423,10 @@ def write(self, oprot): oprot.writeFieldBegin('processorIdentifier', TType.STRING, 6) oprot.writeString(self.processorIdentifier) oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 7) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -11408,6 +11446,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.get_col_stats) value = (value * 31) ^ hash(self.processorCapabilities) value = (value * 31) ^ hash(self.processorIdentifier) + value = (value * 31) ^ hash(self.validWriteIdList) return value def __repr__(self): @@ -13415,6 +13454,164 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) +class TableWriteId: + """ + Attributes: + - fullTableName + - 
writeId + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'fullTableName', None, None, ), # 1 + (2, TType.I64, 'writeId', None, None, ), # 2 + ) + + def __init__(self, fullTableName=None, writeId=None,): + self.fullTableName = fullTableName + self.writeId = writeId + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.fullTableName = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I64: + self.writeId = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('TableWriteId') + if self.fullTableName is not None: + oprot.writeFieldBegin('fullTableName', TType.STRING, 1) + oprot.writeString(self.fullTableName) + oprot.writeFieldEnd() + if self.writeId is not None: + oprot.writeFieldBegin('writeId', TType.I64, 2) + oprot.writeI64(self.writeId) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.fullTableName is None: + raise TProtocol.TProtocolException(message='Required field fullTableName is unset!') + if self.writeId is None: + raise TProtocol.TProtocolException(message='Required field writeId is unset!') + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.fullTableName) + value = (value * 31) ^ hash(self.writeId) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class GetTxnTableWriteIdsResponse: + """ + Attributes: + - tableWriteIds + """ + + thrift_spec = ( + None, # 0 + (1, TType.LIST, 'tableWriteIds', (TType.STRUCT,(TableWriteId, TableWriteId.thrift_spec)), None, ), # 1 + ) + + def __init__(self, tableWriteIds=None,): + self.tableWriteIds = tableWriteIds + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.tableWriteIds = [] + (_etype596, _size593) = iprot.readListBegin() + for _i597 in xrange(_size593): + _elem598 = TableWriteId() + _elem598.read(iprot) + self.tableWriteIds.append(_elem598) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated 
and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('GetTxnTableWriteIdsResponse') + if self.tableWriteIds is not None: + oprot.writeFieldBegin('tableWriteIds', TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.tableWriteIds)) + for iter599 in self.tableWriteIds: + iter599.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.tableWriteIds is None: + raise TProtocol.TProtocolException(message='Required field tableWriteIds is unset!') + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.tableWriteIds) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + class GetValidWriteIdsResponse: """ Attributes: @@ -13441,11 +13638,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.tblValidWriteIds = [] - (_etype596, _size593) = iprot.readListBegin() - for _i597 in xrange(_size593): - _elem598 = TableValidWriteIds() - _elem598.read(iprot) - self.tblValidWriteIds.append(_elem598) + (_etype603, _size600) = iprot.readListBegin() + for _i604 in xrange(_size600): + _elem605 = TableValidWriteIds() + _elem605.read(iprot) + self.tblValidWriteIds.append(_elem605) iprot.readListEnd() else: iprot.skip(ftype) @@ -13462,8 +13659,8 @@ def write(self, oprot): if self.tblValidWriteIds is not None: oprot.writeFieldBegin('tblValidWriteIds', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.tblValidWriteIds)) - for iter599 in self.tblValidWriteIds: - iter599.write(oprot) + for iter606 in self.tblValidWriteIds: + iter606.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -13621,10 +13818,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.txnIds = [] - (_etype603, _size600) = iprot.readListBegin() - for _i604 in xrange(_size600): - _elem605 = iprot.readI64() - self.txnIds.append(_elem605) + (_etype610, _size607) = iprot.readListBegin() + for _i611 in xrange(_size607): + _elem612 = iprot.readI64() + self.txnIds.append(_elem612) iprot.readListEnd() else: iprot.skip(ftype) @@ -13636,11 +13833,11 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.srcTxnToWriteIdList = [] - (_etype609, _size606) = iprot.readListBegin() - for _i610 in xrange(_size606): - _elem611 = TxnToWriteId() - _elem611.read(iprot) - self.srcTxnToWriteIdList.append(_elem611) + (_etype616, _size613) = iprot.readListBegin() + for _i617 in xrange(_size613): + _elem618 = TxnToWriteId() + _elem618.read(iprot) + self.srcTxnToWriteIdList.append(_elem618) iprot.readListEnd() else: iprot.skip(ftype) @@ -13665,8 +13862,8 @@ def write(self, oprot): if self.txnIds is not None: oprot.writeFieldBegin('txnIds', TType.LIST, 3) oprot.writeListBegin(TType.I64, len(self.txnIds)) - for iter612 in self.txnIds: - oprot.writeI64(iter612) + for iter619 in self.txnIds: + oprot.writeI64(iter619) oprot.writeListEnd() oprot.writeFieldEnd() if self.replPolicy is not None: @@ -13676,8 +13873,8 @@ def write(self, oprot): if self.srcTxnToWriteIdList is not None: oprot.writeFieldBegin('srcTxnToWriteIdList', TType.LIST, 5) 
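# ---------------------------------------------------------------------------
# Editor's sketch, grounded in the two structs added above: TableWriteId and
# GetTxnTableWriteIdsResponse declare all of their fields required, and the
# generated validate() enforces that before serialization. The table name and
# write id below are illustrative values only.
from hive_metastore.ttypes import TableWriteId, GetTxnTableWriteIdsResponse

resp = GetTxnTableWriteIdsResponse(
    tableWriteIds=[TableWriteId(fullTableName='default.web_logs', writeId=17)]
)
resp.validate()   # passes: both required fields are set on every element

try:
    GetTxnTableWriteIdsResponse().validate()
except Exception as e:   # TProtocol.TProtocolException per the generated code
    print(e)             # "Required field tableWriteIds is unset!"
# ---------------------------------------------------------------------------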
oprot.writeListBegin(TType.STRUCT, len(self.srcTxnToWriteIdList)) - for iter613 in self.srcTxnToWriteIdList: - iter613.write(oprot) + for iter620 in self.srcTxnToWriteIdList: + iter620.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -13737,11 +13934,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.txnToWriteIds = [] - (_etype617, _size614) = iprot.readListBegin() - for _i618 in xrange(_size614): - _elem619 = TxnToWriteId() - _elem619.read(iprot) - self.txnToWriteIds.append(_elem619) + (_etype624, _size621) = iprot.readListBegin() + for _i625 in xrange(_size621): + _elem626 = TxnToWriteId() + _elem626.read(iprot) + self.txnToWriteIds.append(_elem626) iprot.readListEnd() else: iprot.skip(ftype) @@ -13758,8 +13955,8 @@ def write(self, oprot): if self.txnToWriteIds is not None: oprot.writeFieldBegin('txnToWriteIds', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.txnToWriteIds)) - for iter620 in self.txnToWriteIds: - iter620.write(oprot) + for iter627 in self.txnToWriteIds: + iter627.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -13987,11 +14184,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.component = [] - (_etype624, _size621) = iprot.readListBegin() - for _i625 in xrange(_size621): - _elem626 = LockComponent() - _elem626.read(iprot) - self.component.append(_elem626) + (_etype631, _size628) = iprot.readListBegin() + for _i632 in xrange(_size628): + _elem633 = LockComponent() + _elem633.read(iprot) + self.component.append(_elem633) iprot.readListEnd() else: iprot.skip(ftype) @@ -14028,8 +14225,8 @@ def write(self, oprot): if self.component is not None: oprot.writeFieldBegin('component', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.component)) - for iter627 in self.component: - iter627.write(oprot) + for iter634 in self.component: + iter634.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.txnid is not None: @@ -14727,11 +14924,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.locks = [] - (_etype631, _size628) = iprot.readListBegin() - for _i632 in xrange(_size628): - _elem633 = ShowLocksResponseElement() - _elem633.read(iprot) - self.locks.append(_elem633) + (_etype638, _size635) = iprot.readListBegin() + for _i639 in xrange(_size635): + _elem640 = ShowLocksResponseElement() + _elem640.read(iprot) + self.locks.append(_elem640) iprot.readListEnd() else: iprot.skip(ftype) @@ -14748,8 +14945,8 @@ def write(self, oprot): if self.locks is not None: oprot.writeFieldBegin('locks', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.locks)) - for iter634 in self.locks: - iter634.write(oprot) + for iter641 in self.locks: + iter641.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -14964,20 +15161,20 @@ def read(self, iprot): if fid == 1: if ftype == TType.SET: self.aborted = set() - (_etype638, _size635) = iprot.readSetBegin() - for _i639 in xrange(_size635): - _elem640 = iprot.readI64() - self.aborted.add(_elem640) + (_etype645, _size642) = iprot.readSetBegin() + for _i646 in xrange(_size642): + _elem647 = iprot.readI64() + self.aborted.add(_elem647) iprot.readSetEnd() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.SET: self.nosuch = set() - (_etype644, _size641) = iprot.readSetBegin() - for _i645 in xrange(_size641): - _elem646 = iprot.readI64() - self.nosuch.add(_elem646) + (_etype651, _size648) = iprot.readSetBegin() + for _i652 in xrange(_size648): + _elem653 = 
iprot.readI64() + self.nosuch.add(_elem653) iprot.readSetEnd() else: iprot.skip(ftype) @@ -14994,15 +15191,15 @@ def write(self, oprot): if self.aborted is not None: oprot.writeFieldBegin('aborted', TType.SET, 1) oprot.writeSetBegin(TType.I64, len(self.aborted)) - for iter647 in self.aborted: - oprot.writeI64(iter647) + for iter654 in self.aborted: + oprot.writeI64(iter654) oprot.writeSetEnd() oprot.writeFieldEnd() if self.nosuch is not None: oprot.writeFieldBegin('nosuch', TType.SET, 2) oprot.writeSetBegin(TType.I64, len(self.nosuch)) - for iter648 in self.nosuch: - oprot.writeI64(iter648) + for iter655 in self.nosuch: + oprot.writeI64(iter655) oprot.writeSetEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -15099,11 +15296,11 @@ def read(self, iprot): elif fid == 6: if ftype == TType.MAP: self.properties = {} - (_ktype650, _vtype651, _size649 ) = iprot.readMapBegin() - for _i653 in xrange(_size649): - _key654 = iprot.readString() - _val655 = iprot.readString() - self.properties[_key654] = _val655 + (_ktype657, _vtype658, _size656 ) = iprot.readMapBegin() + for _i660 in xrange(_size656): + _key661 = iprot.readString() + _val662 = iprot.readString() + self.properties[_key661] = _val662 iprot.readMapEnd() else: iprot.skip(ftype) @@ -15140,9 +15337,9 @@ def write(self, oprot): if self.properties is not None: oprot.writeFieldBegin('properties', TType.MAP, 6) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.properties)) - for kiter656,viter657 in self.properties.items(): - oprot.writeString(kiter656) - oprot.writeString(viter657) + for kiter663,viter664 in self.properties.items(): + oprot.writeString(kiter663) + oprot.writeString(viter664) oprot.writeMapEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -15859,11 +16056,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.compacts = [] - (_etype661, _size658) = iprot.readListBegin() - for _i662 in xrange(_size658): - _elem663 = ShowCompactResponseElement() - _elem663.read(iprot) - self.compacts.append(_elem663) + (_etype668, _size665) = iprot.readListBegin() + for _i669 in xrange(_size665): + _elem670 = ShowCompactResponseElement() + _elem670.read(iprot) + self.compacts.append(_elem670) iprot.readListEnd() else: iprot.skip(ftype) @@ -15880,8 +16077,8 @@ def write(self, oprot): if self.compacts is not None: oprot.writeFieldBegin('compacts', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.compacts)) - for iter664 in self.compacts: - iter664.write(oprot) + for iter671 in self.compacts: + iter671.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -15970,10 +16167,10 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.partitionnames = [] - (_etype668, _size665) = iprot.readListBegin() - for _i669 in xrange(_size665): - _elem670 = iprot.readString() - self.partitionnames.append(_elem670) + (_etype675, _size672) = iprot.readListBegin() + for _i676 in xrange(_size672): + _elem677 = iprot.readString() + self.partitionnames.append(_elem677) iprot.readListEnd() else: iprot.skip(ftype) @@ -16011,8 +16208,8 @@ def write(self, oprot): if self.partitionnames is not None: oprot.writeFieldBegin('partitionnames', TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.partitionnames)) - for iter671 in self.partitionnames: - oprot.writeString(iter671) + for iter678 in self.partitionnames: + oprot.writeString(iter678) oprot.writeListEnd() oprot.writeFieldEnd() if self.operationType is not None: @@ -16231,10 +16428,10 @@ def read(self, iprot): elif fid == 3: if ftype == 
TType.LIST: self.eventTypeSkipList = [] - (_etype675, _size672) = iprot.readListBegin() - for _i676 in xrange(_size672): - _elem677 = iprot.readString() - self.eventTypeSkipList.append(_elem677) + (_etype682, _size679) = iprot.readListBegin() + for _i683 in xrange(_size679): + _elem684 = iprot.readString() + self.eventTypeSkipList.append(_elem684) iprot.readListEnd() else: iprot.skip(ftype) @@ -16259,8 +16456,8 @@ def write(self, oprot): if self.eventTypeSkipList is not None: oprot.writeFieldBegin('eventTypeSkipList', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.eventTypeSkipList)) - for iter678 in self.eventTypeSkipList: - oprot.writeString(iter678) + for iter685 in self.eventTypeSkipList: + oprot.writeString(iter685) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -16480,11 +16677,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.events = [] - (_etype682, _size679) = iprot.readListBegin() - for _i683 in xrange(_size679): - _elem684 = NotificationEvent() - _elem684.read(iprot) - self.events.append(_elem684) + (_etype689, _size686) = iprot.readListBegin() + for _i690 in xrange(_size686): + _elem691 = NotificationEvent() + _elem691.read(iprot) + self.events.append(_elem691) iprot.readListEnd() else: iprot.skip(ftype) @@ -16501,8 +16698,8 @@ def write(self, oprot): if self.events is not None: oprot.writeFieldBegin('events', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.events)) - for iter685 in self.events: - iter685.write(oprot) + for iter692 in self.events: + iter692.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -16825,30 +17022,30 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.filesAdded = [] - (_etype689, _size686) = iprot.readListBegin() - for _i690 in xrange(_size686): - _elem691 = iprot.readString() - self.filesAdded.append(_elem691) + (_etype696, _size693) = iprot.readListBegin() + for _i697 in xrange(_size693): + _elem698 = iprot.readString() + self.filesAdded.append(_elem698) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.LIST: self.filesAddedChecksum = [] - (_etype695, _size692) = iprot.readListBegin() - for _i696 in xrange(_size692): - _elem697 = iprot.readString() - self.filesAddedChecksum.append(_elem697) + (_etype702, _size699) = iprot.readListBegin() + for _i703 in xrange(_size699): + _elem704 = iprot.readString() + self.filesAddedChecksum.append(_elem704) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 4: if ftype == TType.LIST: self.subDirectoryList = [] - (_etype701, _size698) = iprot.readListBegin() - for _i702 in xrange(_size698): - _elem703 = iprot.readString() - self.subDirectoryList.append(_elem703) + (_etype708, _size705) = iprot.readListBegin() + for _i709 in xrange(_size705): + _elem710 = iprot.readString() + self.subDirectoryList.append(_elem710) iprot.readListEnd() else: iprot.skip(ftype) @@ -16869,22 +17066,22 @@ def write(self, oprot): if self.filesAdded is not None: oprot.writeFieldBegin('filesAdded', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.filesAdded)) - for iter704 in self.filesAdded: - oprot.writeString(iter704) + for iter711 in self.filesAdded: + oprot.writeString(iter711) oprot.writeListEnd() oprot.writeFieldEnd() if self.filesAddedChecksum is not None: oprot.writeFieldBegin('filesAddedChecksum', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.filesAddedChecksum)) - for iter705 in self.filesAddedChecksum: - oprot.writeString(iter705) + for iter712 in 
self.filesAddedChecksum: + oprot.writeString(iter712) oprot.writeListEnd() oprot.writeFieldEnd() if self.subDirectoryList is not None: oprot.writeFieldBegin('subDirectoryList', TType.LIST, 4) oprot.writeListBegin(TType.STRING, len(self.subDirectoryList)) - for iter706 in self.subDirectoryList: - oprot.writeString(iter706) + for iter713 in self.subDirectoryList: + oprot.writeString(iter713) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -17043,10 +17240,10 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.partitionVals = [] - (_etype710, _size707) = iprot.readListBegin() - for _i711 in xrange(_size707): - _elem712 = iprot.readString() - self.partitionVals.append(_elem712) + (_etype717, _size714) = iprot.readListBegin() + for _i718 in xrange(_size714): + _elem719 = iprot.readString() + self.partitionVals.append(_elem719) iprot.readListEnd() else: iprot.skip(ftype) @@ -17084,8 +17281,8 @@ def write(self, oprot): if self.partitionVals is not None: oprot.writeFieldBegin('partitionVals', TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.partitionVals)) - for iter713 in self.partitionVals: - oprot.writeString(iter713) + for iter720 in self.partitionVals: + oprot.writeString(iter720) oprot.writeListEnd() oprot.writeFieldEnd() if self.catName is not None: @@ -17237,10 +17434,10 @@ def read(self, iprot): elif fid == 6: if ftype == TType.LIST: self.partitionVals = [] - (_etype717, _size714) = iprot.readListBegin() - for _i718 in xrange(_size714): - _elem719 = iprot.readString() - self.partitionVals.append(_elem719) + (_etype724, _size721) = iprot.readListBegin() + for _i725 in xrange(_size721): + _elem726 = iprot.readString() + self.partitionVals.append(_elem726) iprot.readListEnd() else: iprot.skip(ftype) @@ -17277,8 +17474,8 @@ def write(self, oprot): if self.partitionVals is not None: oprot.writeFieldBegin('partitionVals', TType.LIST, 6) oprot.writeListBegin(TType.STRING, len(self.partitionVals)) - for iter720 in self.partitionVals: - oprot.writeString(iter720) + for iter727 in self.partitionVals: + oprot.writeString(iter727) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -17472,12 +17669,12 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.metadata = {} - (_ktype722, _vtype723, _size721 ) = iprot.readMapBegin() - for _i725 in xrange(_size721): - _key726 = iprot.readI64() - _val727 = MetadataPpdResult() - _val727.read(iprot) - self.metadata[_key726] = _val727 + (_ktype729, _vtype730, _size728 ) = iprot.readMapBegin() + for _i732 in xrange(_size728): + _key733 = iprot.readI64() + _val734 = MetadataPpdResult() + _val734.read(iprot) + self.metadata[_key733] = _val734 iprot.readMapEnd() else: iprot.skip(ftype) @@ -17499,9 +17696,9 @@ def write(self, oprot): if self.metadata is not None: oprot.writeFieldBegin('metadata', TType.MAP, 1) oprot.writeMapBegin(TType.I64, TType.STRUCT, len(self.metadata)) - for kiter728,viter729 in self.metadata.items(): - oprot.writeI64(kiter728) - viter729.write(oprot) + for kiter735,viter736 in self.metadata.items(): + oprot.writeI64(kiter735) + viter736.write(oprot) oprot.writeMapEnd() oprot.writeFieldEnd() if self.isSupported is not None: @@ -17571,10 +17768,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.fileIds = [] - (_etype733, _size730) = iprot.readListBegin() - for _i734 in xrange(_size730): - _elem735 = iprot.readI64() - self.fileIds.append(_elem735) + (_etype740, _size737) = iprot.readListBegin() + for _i741 in xrange(_size737): + _elem742 = 
iprot.readI64() + self.fileIds.append(_elem742) iprot.readListEnd() else: iprot.skip(ftype) @@ -17606,8 +17803,8 @@ def write(self, oprot): if self.fileIds is not None: oprot.writeFieldBegin('fileIds', TType.LIST, 1) oprot.writeListBegin(TType.I64, len(self.fileIds)) - for iter736 in self.fileIds: - oprot.writeI64(iter736) + for iter743 in self.fileIds: + oprot.writeI64(iter743) oprot.writeListEnd() oprot.writeFieldEnd() if self.expr is not None: @@ -17681,11 +17878,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.metadata = {} - (_ktype738, _vtype739, _size737 ) = iprot.readMapBegin() - for _i741 in xrange(_size737): - _key742 = iprot.readI64() - _val743 = iprot.readString() - self.metadata[_key742] = _val743 + (_ktype745, _vtype746, _size744 ) = iprot.readMapBegin() + for _i748 in xrange(_size744): + _key749 = iprot.readI64() + _val750 = iprot.readString() + self.metadata[_key749] = _val750 iprot.readMapEnd() else: iprot.skip(ftype) @@ -17707,9 +17904,9 @@ def write(self, oprot): if self.metadata is not None: oprot.writeFieldBegin('metadata', TType.MAP, 1) oprot.writeMapBegin(TType.I64, TType.STRING, len(self.metadata)) - for kiter744,viter745 in self.metadata.items(): - oprot.writeI64(kiter744) - oprot.writeString(viter745) + for kiter751,viter752 in self.metadata.items(): + oprot.writeI64(kiter751) + oprot.writeString(viter752) oprot.writeMapEnd() oprot.writeFieldEnd() if self.isSupported is not None: @@ -17770,10 +17967,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.fileIds = [] - (_etype749, _size746) = iprot.readListBegin() - for _i750 in xrange(_size746): - _elem751 = iprot.readI64() - self.fileIds.append(_elem751) + (_etype756, _size753) = iprot.readListBegin() + for _i757 in xrange(_size753): + _elem758 = iprot.readI64() + self.fileIds.append(_elem758) iprot.readListEnd() else: iprot.skip(ftype) @@ -17790,8 +17987,8 @@ def write(self, oprot): if self.fileIds is not None: oprot.writeFieldBegin('fileIds', TType.LIST, 1) oprot.writeListBegin(TType.I64, len(self.fileIds)) - for iter752 in self.fileIds: - oprot.writeI64(iter752) + for iter759 in self.fileIds: + oprot.writeI64(iter759) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -17897,20 +18094,20 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.fileIds = [] - (_etype756, _size753) = iprot.readListBegin() - for _i757 in xrange(_size753): - _elem758 = iprot.readI64() - self.fileIds.append(_elem758) + (_etype763, _size760) = iprot.readListBegin() + for _i764 in xrange(_size760): + _elem765 = iprot.readI64() + self.fileIds.append(_elem765) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.LIST: self.metadata = [] - (_etype762, _size759) = iprot.readListBegin() - for _i763 in xrange(_size759): - _elem764 = iprot.readString() - self.metadata.append(_elem764) + (_etype769, _size766) = iprot.readListBegin() + for _i770 in xrange(_size766): + _elem771 = iprot.readString() + self.metadata.append(_elem771) iprot.readListEnd() else: iprot.skip(ftype) @@ -17932,15 +18129,15 @@ def write(self, oprot): if self.fileIds is not None: oprot.writeFieldBegin('fileIds', TType.LIST, 1) oprot.writeListBegin(TType.I64, len(self.fileIds)) - for iter765 in self.fileIds: - oprot.writeI64(iter765) + for iter772 in self.fileIds: + oprot.writeI64(iter772) oprot.writeListEnd() oprot.writeFieldEnd() if self.metadata is not None: oprot.writeFieldBegin('metadata', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.metadata)) - for iter766 in 
self.metadata: - oprot.writeString(iter766) + for iter773 in self.metadata: + oprot.writeString(iter773) oprot.writeListEnd() oprot.writeFieldEnd() if self.type is not None: @@ -18048,10 +18245,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.fileIds = [] - (_etype770, _size767) = iprot.readListBegin() - for _i771 in xrange(_size767): - _elem772 = iprot.readI64() - self.fileIds.append(_elem772) + (_etype777, _size774) = iprot.readListBegin() + for _i778 in xrange(_size774): + _elem779 = iprot.readI64() + self.fileIds.append(_elem779) iprot.readListEnd() else: iprot.skip(ftype) @@ -18068,8 +18265,8 @@ def write(self, oprot): if self.fileIds is not None: oprot.writeFieldBegin('fileIds', TType.LIST, 1) oprot.writeListBegin(TType.I64, len(self.fileIds)) - for iter773 in self.fileIds: - oprot.writeI64(iter773) + for iter780 in self.fileIds: + oprot.writeI64(iter780) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -18298,11 +18495,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.functions = [] - (_etype777, _size774) = iprot.readListBegin() - for _i778 in xrange(_size774): - _elem779 = Function() - _elem779.read(iprot) - self.functions.append(_elem779) + (_etype784, _size781) = iprot.readListBegin() + for _i785 in xrange(_size781): + _elem786 = Function() + _elem786.read(iprot) + self.functions.append(_elem786) iprot.readListEnd() else: iprot.skip(ftype) @@ -18319,8 +18516,8 @@ def write(self, oprot): if self.functions is not None: oprot.writeFieldBegin('functions', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.functions)) - for iter780 in self.functions: - iter780.write(oprot) + for iter787 in self.functions: + iter787.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -18372,10 +18569,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.values = [] - (_etype784, _size781) = iprot.readListBegin() - for _i785 in xrange(_size781): - _elem786 = iprot.readI32() - self.values.append(_elem786) + (_etype791, _size788) = iprot.readListBegin() + for _i792 in xrange(_size788): + _elem793 = iprot.readI32() + self.values.append(_elem793) iprot.readListEnd() else: iprot.skip(ftype) @@ -18392,8 +18589,8 @@ def write(self, oprot): if self.values is not None: oprot.writeFieldBegin('values', TType.LIST, 1) oprot.writeListBegin(TType.I32, len(self.values)) - for iter787 in self.values: - oprot.writeI32(iter787) + for iter794 in self.values: + oprot.writeI32(iter794) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -18500,10 +18697,10 @@ def read(self, iprot): elif fid == 8: if ftype == TType.LIST: self.processorCapabilities = [] - (_etype791, _size788) = iprot.readListBegin() - for _i792 in xrange(_size788): - _elem793 = iprot.readString() - self.processorCapabilities.append(_elem793) + (_etype798, _size795) = iprot.readListBegin() + for _i799 in xrange(_size795): + _elem800 = iprot.readString() + self.processorCapabilities.append(_elem800) iprot.readListEnd() else: iprot.skip(ftype) @@ -18549,8 +18746,8 @@ def write(self, oprot): if self.processorCapabilities is not None: oprot.writeFieldBegin('processorCapabilities', TType.LIST, 8) oprot.writeListBegin(TType.STRING, len(self.processorCapabilities)) - for iter794 in self.processorCapabilities: - oprot.writeString(iter794) + for iter801 in self.processorCapabilities: + oprot.writeString(iter801) oprot.writeListEnd() oprot.writeFieldEnd() if self.processorIdentifier is not None: @@ -18718,10 +18915,10 @@ def read(self, 
iprot): elif fid == 2: if ftype == TType.LIST: self.tblNames = [] - (_etype798, _size795) = iprot.readListBegin() - for _i799 in xrange(_size795): - _elem800 = iprot.readString() - self.tblNames.append(_elem800) + (_etype805, _size802) = iprot.readListBegin() + for _i806 in xrange(_size802): + _elem807 = iprot.readString() + self.tblNames.append(_elem807) iprot.readListEnd() else: iprot.skip(ftype) @@ -18739,10 +18936,10 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.processorCapabilities = [] - (_etype804, _size801) = iprot.readListBegin() - for _i805 in xrange(_size801): - _elem806 = iprot.readString() - self.processorCapabilities.append(_elem806) + (_etype811, _size808) = iprot.readListBegin() + for _i812 in xrange(_size808): + _elem813 = iprot.readString() + self.processorCapabilities.append(_elem813) iprot.readListEnd() else: iprot.skip(ftype) @@ -18768,8 +18965,8 @@ def write(self, oprot): if self.tblNames is not None: oprot.writeFieldBegin('tblNames', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.tblNames)) - for iter807 in self.tblNames: - oprot.writeString(iter807) + for iter814 in self.tblNames: + oprot.writeString(iter814) oprot.writeListEnd() oprot.writeFieldEnd() if self.capabilities is not None: @@ -18783,8 +18980,8 @@ def write(self, oprot): if self.processorCapabilities is not None: oprot.writeFieldBegin('processorCapabilities', TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.processorCapabilities)) - for iter808 in self.processorCapabilities: - oprot.writeString(iter808) + for iter815 in self.processorCapabilities: + oprot.writeString(iter815) oprot.writeListEnd() oprot.writeFieldEnd() if self.processorIdentifier is not None: @@ -18847,11 +19044,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.tables = [] - (_etype812, _size809) = iprot.readListBegin() - for _i813 in xrange(_size809): - _elem814 = Table() - _elem814.read(iprot) - self.tables.append(_elem814) + (_etype819, _size816) = iprot.readListBegin() + for _i820 in xrange(_size816): + _elem821 = Table() + _elem821.read(iprot) + self.tables.append(_elem821) iprot.readListEnd() else: iprot.skip(ftype) @@ -18868,8 +19065,8 @@ def write(self, oprot): if self.tables is not None: oprot.writeFieldBegin('tables', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.tables)) - for iter815 in self.tables: - iter815.write(oprot) + for iter822 in self.tables: + iter822.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -18966,10 +19163,10 @@ def read(self, iprot): elif fid == 6: if ftype == TType.LIST: self.processorCapabilities = [] - (_etype819, _size816) = iprot.readListBegin() - for _i820 in xrange(_size816): - _elem821 = iprot.readString() - self.processorCapabilities.append(_elem821) + (_etype826, _size823) = iprot.readListBegin() + for _i827 in xrange(_size823): + _elem828 = iprot.readString() + self.processorCapabilities.append(_elem828) iprot.readListEnd() else: iprot.skip(ftype) @@ -19011,8 +19208,8 @@ def write(self, oprot): if self.processorCapabilities is not None: oprot.writeFieldBegin('processorCapabilities', TType.LIST, 6) oprot.writeListBegin(TType.STRING, len(self.processorCapabilities)) - for iter822 in self.processorCapabilities: - oprot.writeString(iter822) + for iter829 in self.processorCapabilities: + oprot.writeString(iter829) oprot.writeListEnd() oprot.writeFieldEnd() if self.processorIdentifier is not None: @@ -19098,10 +19295,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: 
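# A point worth keeping in mind while reading these request/response hunks: the
# generated structs default every optional field to None, write() skips unset
# fields entirely, and read() skips unknown field ids. That convention is what
# makes appending a new optional field (such as validWriteIdList below)
# wire-compatible with older clients. A small standalone sketch, with
# hypothetical names rather than Hive's generated classes:

class RecordingProto(object):
    def __init__(self):
        self.fields = []
    def writeFieldBegin(self, name, ftype, fid):
        self.fields.append(name)
    def writeString(self, s):
        pass
    def writeFieldEnd(self):
        pass
    def writeFieldStop(self):
        self.fields.append('STOP')

class TinyRequest(object):
    def __init__(self, tblName=None, processorIdentifier=None):
        self.tblName = tblName
        self.processorIdentifier = processorIdentifier
    def write(self, oprot):
        if self.tblName is not None:                  # unset optionals are skipped
            oprot.writeFieldBegin('tblName', 11, 1)   # 11 assumed for TType.STRING
            oprot.writeString(self.tblName)
            oprot.writeFieldEnd()
        if self.processorIdentifier is not None:
            oprot.writeFieldBegin('processorIdentifier', 11, 2)
            oprot.writeString(self.processorIdentifier)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()

p = RecordingProto()
TinyRequest(tblName='t1').write(p)    # processorIdentifier stays off the wire
assert p.fields == ['tblName', 'STOP']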
self.processorCapabilities = [] - (_etype826, _size823) = iprot.readListBegin() - for _i827 in xrange(_size823): - _elem828 = iprot.readString() - self.processorCapabilities.append(_elem828) + (_etype833, _size830) = iprot.readListBegin() + for _i834 in xrange(_size830): + _elem835 = iprot.readString() + self.processorCapabilities.append(_elem835) iprot.readListEnd() else: iprot.skip(ftype) @@ -19126,8 +19323,8 @@ def write(self, oprot): if self.processorCapabilities is not None: oprot.writeFieldBegin('processorCapabilities', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.processorCapabilities)) - for iter829 in self.processorCapabilities: - oprot.writeString(iter829) + for iter836 in self.processorCapabilities: + oprot.writeString(iter836) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -20428,44 +20625,44 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.pools = [] - (_etype833, _size830) = iprot.readListBegin() - for _i834 in xrange(_size830): - _elem835 = WMPool() - _elem835.read(iprot) - self.pools.append(_elem835) + (_etype840, _size837) = iprot.readListBegin() + for _i841 in xrange(_size837): + _elem842 = WMPool() + _elem842.read(iprot) + self.pools.append(_elem842) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.LIST: self.mappings = [] - (_etype839, _size836) = iprot.readListBegin() - for _i840 in xrange(_size836): - _elem841 = WMMapping() - _elem841.read(iprot) - self.mappings.append(_elem841) + (_etype846, _size843) = iprot.readListBegin() + for _i847 in xrange(_size843): + _elem848 = WMMapping() + _elem848.read(iprot) + self.mappings.append(_elem848) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 4: if ftype == TType.LIST: self.triggers = [] - (_etype845, _size842) = iprot.readListBegin() - for _i846 in xrange(_size842): - _elem847 = WMTrigger() - _elem847.read(iprot) - self.triggers.append(_elem847) + (_etype852, _size849) = iprot.readListBegin() + for _i853 in xrange(_size849): + _elem854 = WMTrigger() + _elem854.read(iprot) + self.triggers.append(_elem854) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 5: if ftype == TType.LIST: self.poolTriggers = [] - (_etype851, _size848) = iprot.readListBegin() - for _i852 in xrange(_size848): - _elem853 = WMPoolTrigger() - _elem853.read(iprot) - self.poolTriggers.append(_elem853) + (_etype858, _size855) = iprot.readListBegin() + for _i859 in xrange(_size855): + _elem860 = WMPoolTrigger() + _elem860.read(iprot) + self.poolTriggers.append(_elem860) iprot.readListEnd() else: iprot.skip(ftype) @@ -20486,29 +20683,29 @@ def write(self, oprot): if self.pools is not None: oprot.writeFieldBegin('pools', TType.LIST, 2) oprot.writeListBegin(TType.STRUCT, len(self.pools)) - for iter854 in self.pools: - iter854.write(oprot) + for iter861 in self.pools: + iter861.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.mappings is not None: oprot.writeFieldBegin('mappings', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.mappings)) - for iter855 in self.mappings: - iter855.write(oprot) + for iter862 in self.mappings: + iter862.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.triggers is not None: oprot.writeFieldBegin('triggers', TType.LIST, 4) oprot.writeListBegin(TType.STRUCT, len(self.triggers)) - for iter856 in self.triggers: - iter856.write(oprot) + for iter863 in self.triggers: + iter863.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.poolTriggers is not None: oprot.writeFieldBegin('poolTriggers', 
TType.LIST, 5) oprot.writeListBegin(TType.STRUCT, len(self.poolTriggers)) - for iter857 in self.poolTriggers: - iter857.write(oprot) + for iter864 in self.poolTriggers: + iter864.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -21033,11 +21230,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.resourcePlans = [] - (_etype861, _size858) = iprot.readListBegin() - for _i862 in xrange(_size858): - _elem863 = WMResourcePlan() - _elem863.read(iprot) - self.resourcePlans.append(_elem863) + (_etype868, _size865) = iprot.readListBegin() + for _i869 in xrange(_size865): + _elem870 = WMResourcePlan() + _elem870.read(iprot) + self.resourcePlans.append(_elem870) iprot.readListEnd() else: iprot.skip(ftype) @@ -21054,8 +21251,8 @@ def write(self, oprot): if self.resourcePlans is not None: oprot.writeFieldBegin('resourcePlans', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.resourcePlans)) - for iter864 in self.resourcePlans: - iter864.write(oprot) + for iter871 in self.resourcePlans: + iter871.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -21385,20 +21582,20 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.errors = [] - (_etype868, _size865) = iprot.readListBegin() - for _i869 in xrange(_size865): - _elem870 = iprot.readString() - self.errors.append(_elem870) + (_etype875, _size872) = iprot.readListBegin() + for _i876 in xrange(_size872): + _elem877 = iprot.readString() + self.errors.append(_elem877) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.LIST: self.warnings = [] - (_etype874, _size871) = iprot.readListBegin() - for _i875 in xrange(_size871): - _elem876 = iprot.readString() - self.warnings.append(_elem876) + (_etype881, _size878) = iprot.readListBegin() + for _i882 in xrange(_size878): + _elem883 = iprot.readString() + self.warnings.append(_elem883) iprot.readListEnd() else: iprot.skip(ftype) @@ -21415,15 +21612,15 @@ def write(self, oprot): if self.errors is not None: oprot.writeFieldBegin('errors', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.errors)) - for iter877 in self.errors: - oprot.writeString(iter877) + for iter884 in self.errors: + oprot.writeString(iter884) oprot.writeListEnd() oprot.writeFieldEnd() if self.warnings is not None: oprot.writeFieldBegin('warnings', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.warnings)) - for iter878 in self.warnings: - oprot.writeString(iter878) + for iter885 in self.warnings: + oprot.writeString(iter885) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -22039,11 +22236,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.triggers = [] - (_etype882, _size879) = iprot.readListBegin() - for _i883 in xrange(_size879): - _elem884 = WMTrigger() - _elem884.read(iprot) - self.triggers.append(_elem884) + (_etype889, _size886) = iprot.readListBegin() + for _i890 in xrange(_size886): + _elem891 = WMTrigger() + _elem891.read(iprot) + self.triggers.append(_elem891) iprot.readListEnd() else: iprot.skip(ftype) @@ -22060,8 +22257,8 @@ def write(self, oprot): if self.triggers is not None: oprot.writeFieldBegin('triggers', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.triggers)) - for iter885 in self.triggers: - iter885.write(oprot) + for iter892 in self.triggers: + iter892.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -23271,11 +23468,11 @@ def read(self, iprot): elif fid == 4: if ftype == TType.LIST: 
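# The client-facing hunks further down (both the Python GetPartitionsRequest and
# the Ruby ThriftHiveMetastore client) add a trailing validWriteIdList/validTxnList
# argument to metastore read calls so readers can pin their transactional
# snapshot. A hedged sketch of the resulting calling convention, using a
# hypothetical stand-in client rather than the real generated one:

class MetastoreClientStub(object):
    def get_table(self, dbname, tbl_name, validWriteIdList):
        # A real client would send a Thrift message; the stub just echoes args.
        return (dbname, tbl_name, validWriteIdList)

def get_table_compat(client, dbname, tbl_name, validWriteIdList=None):
    """Thin wrapper letting pre-existing call sites omit the new argument."""
    return client.get_table(dbname, tbl_name, validWriteIdList)

client = MetastoreClientStub()
# Old-style call sites keep working; transactional readers pass their snapshot.
assert get_table_compat(client, 'default', 't1') == ('default', 't1', None)
snapshot = 'default.t1:42:42::'   # illustrative string only; real values come from ValidWriteIdList
assert get_table_compat(client, 'default', 't1', snapshot)[2] == snapshot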
self.cols = [] - (_etype889, _size886) = iprot.readListBegin() - for _i890 in xrange(_size886): - _elem891 = FieldSchema() - _elem891.read(iprot) - self.cols.append(_elem891) + (_etype896, _size893) = iprot.readListBegin() + for _i897 in xrange(_size893): + _elem898 = FieldSchema() + _elem898.read(iprot) + self.cols.append(_elem898) iprot.readListEnd() else: iprot.skip(ftype) @@ -23335,8 +23532,8 @@ def write(self, oprot): if self.cols is not None: oprot.writeFieldBegin('cols', TType.LIST, 4) oprot.writeListBegin(TType.STRUCT, len(self.cols)) - for iter892 in self.cols: - iter892.write(oprot) + for iter899 in self.cols: + iter899.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.state is not None: @@ -23591,11 +23788,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.schemaVersions = [] - (_etype896, _size893) = iprot.readListBegin() - for _i897 in xrange(_size893): - _elem898 = SchemaVersionDescriptor() - _elem898.read(iprot) - self.schemaVersions.append(_elem898) + (_etype903, _size900) = iprot.readListBegin() + for _i904 in xrange(_size900): + _elem905 = SchemaVersionDescriptor() + _elem905.read(iprot) + self.schemaVersions.append(_elem905) iprot.readListEnd() else: iprot.skip(ftype) @@ -23612,8 +23809,8 @@ def write(self, oprot): if self.schemaVersions is not None: oprot.writeFieldBegin('schemaVersions', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.schemaVersions)) - for iter899 in self.schemaVersions: - iter899.write(oprot) + for iter906 in self.schemaVersions: + iter906.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -24098,11 +24295,11 @@ def read(self, iprot): elif fid == 4: if ftype == TType.LIST: self.partitions = [] - (_etype903, _size900) = iprot.readListBegin() - for _i904 in xrange(_size900): - _elem905 = Partition() - _elem905.read(iprot) - self.partitions.append(_elem905) + (_etype910, _size907) = iprot.readListBegin() + for _i911 in xrange(_size907): + _elem912 = Partition() + _elem912.read(iprot) + self.partitions.append(_elem912) iprot.readListEnd() else: iprot.skip(ftype) @@ -24147,8 +24344,8 @@ def write(self, oprot): if self.partitions is not None: oprot.writeFieldBegin('partitions', TType.LIST, 4) oprot.writeListBegin(TType.STRUCT, len(self.partitions)) - for iter906 in self.partitions: - iter906.write(oprot) + for iter913 in self.partitions: + iter913.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.environmentContext is not None: @@ -24300,10 +24497,10 @@ def read(self, iprot): elif fid == 4: if ftype == TType.LIST: self.partVals = [] - (_etype910, _size907) = iprot.readListBegin() - for _i911 in xrange(_size907): - _elem912 = iprot.readString() - self.partVals.append(_elem912) + (_etype917, _size914) = iprot.readListBegin() + for _i918 in xrange(_size914): + _elem919 = iprot.readString() + self.partVals.append(_elem919) iprot.readListEnd() else: iprot.skip(ftype) @@ -24343,8 +24540,8 @@ def write(self, oprot): if self.partVals is not None: oprot.writeFieldBegin('partVals', TType.LIST, 4) oprot.writeListBegin(TType.STRING, len(self.partVals)) - for iter913 in self.partVals: - oprot.writeString(iter913) + for iter920 in self.partVals: + oprot.writeString(iter920) oprot.writeListEnd() oprot.writeFieldEnd() if self.newPart is not None: @@ -24666,10 +24863,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.fieldList = [] - (_etype917, _size914) = iprot.readListBegin() - for _i918 in xrange(_size914): - _elem919 = iprot.readString() - 
self.fieldList.append(_elem919) + (_etype924, _size921) = iprot.readListBegin() + for _i925 in xrange(_size921): + _elem926 = iprot.readString() + self.fieldList.append(_elem926) iprot.readListEnd() else: iprot.skip(ftype) @@ -24696,8 +24893,8 @@ def write(self, oprot): if self.fieldList is not None: oprot.writeFieldBegin('fieldList', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.fieldList)) - for iter920 in self.fieldList: - oprot.writeString(iter920) + for iter927 in self.fieldList: + oprot.writeString(iter927) oprot.writeListEnd() oprot.writeFieldEnd() if self.includeParamKeyPattern is not None: @@ -24773,10 +24970,10 @@ def read(self, iprot): elif fid == 8: if ftype == TType.LIST: self.filters = [] - (_etype924, _size921) = iprot.readListBegin() - for _i925 in xrange(_size921): - _elem926 = iprot.readString() - self.filters.append(_elem926) + (_etype931, _size928) = iprot.readListBegin() + for _i932 in xrange(_size928): + _elem933 = iprot.readString() + self.filters.append(_elem933) iprot.readListEnd() else: iprot.skip(ftype) @@ -24797,8 +24994,8 @@ def write(self, oprot): if self.filters is not None: oprot.writeFieldBegin('filters', TType.LIST, 8) oprot.writeListBegin(TType.STRING, len(self.filters)) - for iter927 in self.filters: - oprot.writeString(iter927) + for iter934 in self.filters: + oprot.writeString(iter934) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -24851,11 +25048,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.partitionSpec = [] - (_etype931, _size928) = iprot.readListBegin() - for _i932 in xrange(_size928): - _elem933 = PartitionSpec() - _elem933.read(iprot) - self.partitionSpec.append(_elem933) + (_etype938, _size935) = iprot.readListBegin() + for _i939 in xrange(_size935): + _elem940 = PartitionSpec() + _elem940.read(iprot) + self.partitionSpec.append(_elem940) iprot.readListEnd() else: iprot.skip(ftype) @@ -24872,8 +25069,8 @@ def write(self, oprot): if self.partitionSpec is not None: oprot.writeFieldBegin('partitionSpec', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.partitionSpec)) - for iter934 in self.partitionSpec: - iter934.write(oprot) + for iter941 in self.partitionSpec: + iter941.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -24912,6 +25109,7 @@ class GetPartitionsRequest: - filterSpec - processorCapabilities - processorIdentifier + - validWriteIdList """ thrift_spec = ( @@ -24926,9 +25124,10 @@ class GetPartitionsRequest: (8, TType.STRUCT, 'filterSpec', (GetPartitionsFilterSpec, GetPartitionsFilterSpec.thrift_spec), None, ), # 8 (9, TType.LIST, 'processorCapabilities', (TType.STRING,None), None, ), # 9 (10, TType.STRING, 'processorIdentifier', None, None, ), # 10 + (11, TType.STRING, 'validWriteIdList', None, None, ), # 11 ) - def __init__(self, catName=None, dbName=None, tblName=None, withAuth=None, user=None, groupNames=None, projectionSpec=None, filterSpec=None, processorCapabilities=None, processorIdentifier=None,): + def __init__(self, catName=None, dbName=None, tblName=None, withAuth=None, user=None, groupNames=None, projectionSpec=None, filterSpec=None, processorCapabilities=None, processorIdentifier=None, validWriteIdList=None,): self.catName = catName self.dbName = dbName self.tblName = tblName @@ -24939,6 +25138,7 @@ def __init__(self, catName=None, dbName=None, tblName=None, withAuth=None, user= self.filterSpec = filterSpec self.processorCapabilities = processorCapabilities self.processorIdentifier = processorIdentifier + 
self.validWriteIdList = validWriteIdList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -24977,10 +25177,10 @@ def read(self, iprot): elif fid == 6: if ftype == TType.LIST: self.groupNames = [] - (_etype938, _size935) = iprot.readListBegin() - for _i939 in xrange(_size935): - _elem940 = iprot.readString() - self.groupNames.append(_elem940) + (_etype945, _size942) = iprot.readListBegin() + for _i946 in xrange(_size942): + _elem947 = iprot.readString() + self.groupNames.append(_elem947) iprot.readListEnd() else: iprot.skip(ftype) @@ -24999,10 +25199,10 @@ def read(self, iprot): elif fid == 9: if ftype == TType.LIST: self.processorCapabilities = [] - (_etype944, _size941) = iprot.readListBegin() - for _i945 in xrange(_size941): - _elem946 = iprot.readString() - self.processorCapabilities.append(_elem946) + (_etype951, _size948) = iprot.readListBegin() + for _i952 in xrange(_size948): + _elem953 = iprot.readString() + self.processorCapabilities.append(_elem953) iprot.readListEnd() else: iprot.skip(ftype) @@ -25011,6 +25211,11 @@ def read(self, iprot): self.processorIdentifier = iprot.readString() else: iprot.skip(ftype) + elif fid == 11: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -25044,8 +25249,8 @@ def write(self, oprot): if self.groupNames is not None: oprot.writeFieldBegin('groupNames', TType.LIST, 6) oprot.writeListBegin(TType.STRING, len(self.groupNames)) - for iter947 in self.groupNames: - oprot.writeString(iter947) + for iter954 in self.groupNames: + oprot.writeString(iter954) oprot.writeListEnd() oprot.writeFieldEnd() if self.projectionSpec is not None: @@ -25059,14 +25264,18 @@ def write(self, oprot): if self.processorCapabilities is not None: oprot.writeFieldBegin('processorCapabilities', TType.LIST, 9) oprot.writeListBegin(TType.STRING, len(self.processorCapabilities)) - for iter948 in self.processorCapabilities: - oprot.writeString(iter948) + for iter955 in self.processorCapabilities: + oprot.writeString(iter955) oprot.writeListEnd() oprot.writeFieldEnd() if self.processorIdentifier is not None: oprot.writeFieldBegin('processorIdentifier', TType.STRING, 10) oprot.writeString(self.processorIdentifier) oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 11) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -25086,6 +25295,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.filterSpec) value = (value * 31) ^ hash(self.processorCapabilities) value = (value * 31) ^ hash(self.processorIdentifier) + value = (value * 31) ^ hash(self.validWriteIdList) return value def __repr__(self): diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb index e7a121a424..cd98e218dd 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb @@ -2176,6 +2176,7 @@ class PartitionsByExprRequest DEFAULTPARTITIONNAME = 4 MAXPARTS = 5 CATNAME = 6 + VALIDWRITEIDLIST = 7 FIELDS = { DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, @@ -2183,7 
+2184,8 @@ class PartitionsByExprRequest EXPR => {:type => ::Thrift::Types::STRING, :name => 'expr', :binary => true}, DEFAULTPARTITIONNAME => {:type => ::Thrift::Types::STRING, :name => 'defaultPartitionName', :optional => true}, MAXPARTS => {:type => ::Thrift::Types::I16, :name => 'maxParts', :default => -1, :optional => true}, - CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true}, + VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true} } def struct_fields; FIELDS; end @@ -2452,6 +2454,7 @@ class PartitionValuesRequest ASCENDING = 7 MAXPARTS = 8 CATNAME = 9 + VALIDWRITEIDLIST = 10 FIELDS = { DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, @@ -2462,7 +2465,8 @@ class PartitionValuesRequest PARTITIONORDER => {:type => ::Thrift::Types::LIST, :name => 'partitionOrder', :element => {:type => ::Thrift::Types::STRUCT, :class => ::FieldSchema}, :optional => true}, ASCENDING => {:type => ::Thrift::Types::BOOL, :name => 'ascending', :default => true, :optional => true}, MAXPARTS => {:type => ::Thrift::Types::I64, :name => 'maxParts', :default => -1, :optional => true}, - CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true}, + VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true} } def struct_fields; FIELDS; end @@ -2518,6 +2522,7 @@ class GetPartitionsByNamesRequest GET_COL_STATS = 4 PROCESSORCAPABILITIES = 5 PROCESSORIDENTIFIER = 6 + VALIDWRITEIDLIST = 7 FIELDS = { DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, @@ -2525,7 +2530,8 @@ class GetPartitionsByNamesRequest NAMES => {:type => ::Thrift::Types::LIST, :name => 'names', :element => {:type => ::Thrift::Types::STRING}, :optional => true}, GET_COL_STATS => {:type => ::Thrift::Types::BOOL, :name => 'get_col_stats', :optional => true}, PROCESSORCAPABILITIES => {:type => ::Thrift::Types::LIST, :name => 'processorCapabilities', :element => {:type => ::Thrift::Types::STRING}, :optional => true}, - PROCESSORIDENTIFIER => {:type => ::Thrift::Types::STRING, :name => 'processorIdentifier', :optional => true} + PROCESSORIDENTIFIER => {:type => ::Thrift::Types::STRING, :name => 'processorIdentifier', :optional => true}, + VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true} } def struct_fields; FIELDS; end @@ -2971,6 +2977,43 @@ class TableValidWriteIds ::Thrift::Struct.generate_accessors self end +class TableWriteId + include ::Thrift::Struct, ::Thrift::Struct_Union + FULLTABLENAME = 1 + WRITEID = 2 + + FIELDS = { + FULLTABLENAME => {:type => ::Thrift::Types::STRING, :name => 'fullTableName'}, + WRITEID => {:type => ::Thrift::Types::I64, :name => 'writeId'} + } + + def struct_fields; FIELDS; end + + def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field fullTableName is unset!') unless @fullTableName + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field writeId is unset!') unless @writeId + end + + ::Thrift::Struct.generate_accessors self +end + +class GetTxnTableWriteIdsResponse + include ::Thrift::Struct, ::Thrift::Struct_Union + TABLEWRITEIDS = 1 + + FIELDS = { + TABLEWRITEIDS => {:type => ::Thrift::Types::LIST, :name => 'tableWriteIds', 
:element => {:type => ::Thrift::Types::STRUCT, :class => ::TableWriteId}} + } + + def struct_fields; FIELDS; end + + def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tableWriteIds is unset!') unless @tableWriteIds + end + + ::Thrift::Struct.generate_accessors self +end + class GetValidWriteIdsResponse include ::Thrift::Struct, ::Thrift::Struct_Union TBLVALIDWRITEIDS = 1 @@ -5604,6 +5647,7 @@ class GetPartitionsRequest FILTERSPEC = 8 PROCESSORCAPABILITIES = 9 PROCESSORIDENTIFIER = 10 + VALIDWRITEIDLIST = 11 FIELDS = { CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true}, @@ -5615,7 +5659,8 @@ class GetPartitionsRequest PROJECTIONSPEC => {:type => ::Thrift::Types::STRUCT, :name => 'projectionSpec', :class => ::GetPartitionsProjectionSpec}, FILTERSPEC => {:type => ::Thrift::Types::STRUCT, :name => 'filterSpec', :class => ::GetPartitionsFilterSpec}, PROCESSORCAPABILITIES => {:type => ::Thrift::Types::LIST, :name => 'processorCapabilities', :element => {:type => ::Thrift::Types::STRING}, :optional => true}, - PROCESSORIDENTIFIER => {:type => ::Thrift::Types::STRING, :name => 'processorIdentifier', :optional => true} + PROCESSORIDENTIFIER => {:type => ::Thrift::Types::STRING, :name => 'processorIdentifier', :optional => true}, + VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true} } def struct_fields; FIELDS; end diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb index b8b725bbac..cd380875b7 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb @@ -294,13 +294,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_type_all failed: unknown result') end - def get_fields(db_name, table_name) - send_get_fields(db_name, table_name) + def get_fields(db_name, table_name, validWriteIdList) + send_get_fields(db_name, table_name, validWriteIdList) return recv_get_fields() end - def send_get_fields(db_name, table_name) - send_message('get_fields', Get_fields_args, :db_name => db_name, :table_name => table_name) + def send_get_fields(db_name, table_name, validWriteIdList) + send_message('get_fields', Get_fields_args, :db_name => db_name, :table_name => table_name, :validWriteIdList => validWriteIdList) end def recv_get_fields() @@ -312,13 +312,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_fields failed: unknown result') end - def get_fields_with_environment_context(db_name, table_name, environment_context) - send_get_fields_with_environment_context(db_name, table_name, environment_context) + def get_fields_with_environment_context(db_name, table_name, environment_context, validWriteIdList) + send_get_fields_with_environment_context(db_name, table_name, environment_context, validWriteIdList) return recv_get_fields_with_environment_context() end - def send_get_fields_with_environment_context(db_name, table_name, environment_context) - send_message('get_fields_with_environment_context', Get_fields_with_environment_context_args, :db_name => db_name, :table_name => table_name, :environment_context => environment_context) + def 
send_get_fields_with_environment_context(db_name, table_name, environment_context, validWriteIdList) + send_message('get_fields_with_environment_context', Get_fields_with_environment_context_args, :db_name => db_name, :table_name => table_name, :environment_context => environment_context, :validWriteIdList => validWriteIdList) end def recv_get_fields_with_environment_context() @@ -330,13 +330,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_fields_with_environment_context failed: unknown result') end - def get_schema(db_name, table_name) - send_get_schema(db_name, table_name) + def get_schema(db_name, table_name, validWriteIdList) + send_get_schema(db_name, table_name, validWriteIdList) return recv_get_schema() end - def send_get_schema(db_name, table_name) - send_message('get_schema', Get_schema_args, :db_name => db_name, :table_name => table_name) + def send_get_schema(db_name, table_name, validWriteIdList) + send_message('get_schema', Get_schema_args, :db_name => db_name, :table_name => table_name, :validWriteIdList => validWriteIdList) end def recv_get_schema() @@ -348,13 +348,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_schema failed: unknown result') end - def get_schema_with_environment_context(db_name, table_name, environment_context) - send_get_schema_with_environment_context(db_name, table_name, environment_context) + def get_schema_with_environment_context(db_name, table_name, environment_context, validWriteIdList) + send_get_schema_with_environment_context(db_name, table_name, environment_context, validWriteIdList) return recv_get_schema_with_environment_context() end - def send_get_schema_with_environment_context(db_name, table_name, environment_context) - send_message('get_schema_with_environment_context', Get_schema_with_environment_context_args, :db_name => db_name, :table_name => table_name, :environment_context => environment_context) + def send_get_schema_with_environment_context(db_name, table_name, environment_context, validWriteIdList) + send_message('get_schema_with_environment_context', Get_schema_with_environment_context_args, :db_name => db_name, :table_name => table_name, :environment_context => environment_context, :validWriteIdList => validWriteIdList) end def recv_get_schema_with_environment_context() @@ -691,13 +691,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_all_tables failed: unknown result') end - def get_table(dbname, tbl_name) - send_get_table(dbname, tbl_name) + def get_table(dbname, tbl_name, validWriteIdList) + send_get_table(dbname, tbl_name, validWriteIdList) return recv_get_table() end - def send_get_table(dbname, tbl_name) - send_message('get_table', Get_table_args, :dbname => dbname, :tbl_name => tbl_name) + def send_get_table(dbname, tbl_name, validWriteIdList) + send_message('get_table', Get_table_args, :dbname => dbname, :tbl_name => tbl_name, :validWriteIdList => validWriteIdList) end def recv_get_table() @@ -1139,13 +1139,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'drop_partitions_req failed: unknown result') end - def get_partition(db_name, tbl_name, part_vals) - send_get_partition(db_name, tbl_name, part_vals) + def get_partition(db_name, tbl_name, part_vals, validTxnList) + send_get_partition(db_name, tbl_name, part_vals, 
validTxnList) return recv_get_partition() end - def send_get_partition(db_name, tbl_name, part_vals) - send_message('get_partition', Get_partition_args, :db_name => db_name, :tbl_name => tbl_name, :part_vals => part_vals) + def send_get_partition(db_name, tbl_name, part_vals, validTxnList) + send_message('get_partition', Get_partition_args, :db_name => db_name, :tbl_name => tbl_name, :part_vals => part_vals, :validTxnList => validTxnList) end def recv_get_partition() @@ -1194,13 +1194,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'exchange_partitions failed: unknown result') end - def get_partition_with_auth(db_name, tbl_name, part_vals, user_name, group_names) - send_get_partition_with_auth(db_name, tbl_name, part_vals, user_name, group_names) + def get_partition_with_auth(db_name, tbl_name, part_vals, user_name, group_names, validTxnList) + send_get_partition_with_auth(db_name, tbl_name, part_vals, user_name, group_names, validTxnList) return recv_get_partition_with_auth() end - def send_get_partition_with_auth(db_name, tbl_name, part_vals, user_name, group_names) - send_message('get_partition_with_auth', Get_partition_with_auth_args, :db_name => db_name, :tbl_name => tbl_name, :part_vals => part_vals, :user_name => user_name, :group_names => group_names) + def send_get_partition_with_auth(db_name, tbl_name, part_vals, user_name, group_names, validTxnList) + send_message('get_partition_with_auth', Get_partition_with_auth_args, :db_name => db_name, :tbl_name => tbl_name, :part_vals => part_vals, :user_name => user_name, :group_names => group_names, :validTxnList => validTxnList) end def recv_get_partition_with_auth() @@ -1211,13 +1211,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_partition_with_auth failed: unknown result') end - def get_partition_by_name(db_name, tbl_name, part_name) - send_get_partition_by_name(db_name, tbl_name, part_name) + def get_partition_by_name(db_name, tbl_name, part_name, validTxnList) + send_get_partition_by_name(db_name, tbl_name, part_name, validTxnList) return recv_get_partition_by_name() end - def send_get_partition_by_name(db_name, tbl_name, part_name) - send_message('get_partition_by_name', Get_partition_by_name_args, :db_name => db_name, :tbl_name => tbl_name, :part_name => part_name) + def send_get_partition_by_name(db_name, tbl_name, part_name, validTxnList) + send_message('get_partition_by_name', Get_partition_by_name_args, :db_name => db_name, :tbl_name => tbl_name, :part_name => part_name, :validTxnList => validTxnList) end def recv_get_partition_by_name() @@ -1228,13 +1228,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_partition_by_name failed: unknown result') end - def get_partitions(db_name, tbl_name, max_parts) - send_get_partitions(db_name, tbl_name, max_parts) + def get_partitions(db_name, tbl_name, max_parts, validTxnList) + send_get_partitions(db_name, tbl_name, max_parts, validTxnList) return recv_get_partitions() end - def send_get_partitions(db_name, tbl_name, max_parts) - send_message('get_partitions', Get_partitions_args, :db_name => db_name, :tbl_name => tbl_name, :max_parts => max_parts) + def send_get_partitions(db_name, tbl_name, max_parts, validTxnList) + send_message('get_partitions', Get_partitions_args, :db_name => db_name, :tbl_name => tbl_name, :max_parts => max_parts, :validTxnList => 
validTxnList) end def recv_get_partitions() @@ -1245,13 +1245,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_partitions failed: unknown result') end - def get_partitions_with_auth(db_name, tbl_name, max_parts, user_name, group_names) - send_get_partitions_with_auth(db_name, tbl_name, max_parts, user_name, group_names) + def get_partitions_with_auth(db_name, tbl_name, max_parts, user_name, group_names, validTxnList) + send_get_partitions_with_auth(db_name, tbl_name, max_parts, user_name, group_names, validTxnList) return recv_get_partitions_with_auth() end - def send_get_partitions_with_auth(db_name, tbl_name, max_parts, user_name, group_names) - send_message('get_partitions_with_auth', Get_partitions_with_auth_args, :db_name => db_name, :tbl_name => tbl_name, :max_parts => max_parts, :user_name => user_name, :group_names => group_names) + def send_get_partitions_with_auth(db_name, tbl_name, max_parts, user_name, group_names, validTxnList) + send_message('get_partitions_with_auth', Get_partitions_with_auth_args, :db_name => db_name, :tbl_name => tbl_name, :max_parts => max_parts, :user_name => user_name, :group_names => group_names, :validTxnList => validTxnList) end def recv_get_partitions_with_auth() @@ -1262,13 +1262,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_partitions_with_auth failed: unknown result') end - def get_partitions_pspec(db_name, tbl_name, max_parts) - send_get_partitions_pspec(db_name, tbl_name, max_parts) + def get_partitions_pspec(db_name, tbl_name, max_parts, validTxnList) + send_get_partitions_pspec(db_name, tbl_name, max_parts, validTxnList) return recv_get_partitions_pspec() end - def send_get_partitions_pspec(db_name, tbl_name, max_parts) - send_message('get_partitions_pspec', Get_partitions_pspec_args, :db_name => db_name, :tbl_name => tbl_name, :max_parts => max_parts) + def send_get_partitions_pspec(db_name, tbl_name, max_parts, validTxnList) + send_message('get_partitions_pspec', Get_partitions_pspec_args, :db_name => db_name, :tbl_name => tbl_name, :max_parts => max_parts, :validTxnList => validTxnList) end def recv_get_partitions_pspec() @@ -1279,13 +1279,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_partitions_pspec failed: unknown result') end - def get_partition_names(db_name, tbl_name, max_parts) - send_get_partition_names(db_name, tbl_name, max_parts) + def get_partition_names(db_name, tbl_name, max_parts, validTxnList) + send_get_partition_names(db_name, tbl_name, max_parts, validTxnList) return recv_get_partition_names() end - def send_get_partition_names(db_name, tbl_name, max_parts) - send_message('get_partition_names', Get_partition_names_args, :db_name => db_name, :tbl_name => tbl_name, :max_parts => max_parts) + def send_get_partition_names(db_name, tbl_name, max_parts, validTxnList) + send_message('get_partition_names', Get_partition_names_args, :db_name => db_name, :tbl_name => tbl_name, :max_parts => max_parts, :validTxnList => validTxnList) end def recv_get_partition_names() @@ -1313,13 +1313,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_partition_values failed: unknown result') end - def get_partitions_ps(db_name, tbl_name, part_vals, max_parts) - send_get_partitions_ps(db_name, tbl_name, part_vals, max_parts) + def 
get_partitions_ps(db_name, tbl_name, part_vals, max_parts, validTxnList) + send_get_partitions_ps(db_name, tbl_name, part_vals, max_parts, validTxnList) return recv_get_partitions_ps() end - def send_get_partitions_ps(db_name, tbl_name, part_vals, max_parts) - send_message('get_partitions_ps', Get_partitions_ps_args, :db_name => db_name, :tbl_name => tbl_name, :part_vals => part_vals, :max_parts => max_parts) + def send_get_partitions_ps(db_name, tbl_name, part_vals, max_parts, validTxnList) + send_message('get_partitions_ps', Get_partitions_ps_args, :db_name => db_name, :tbl_name => tbl_name, :part_vals => part_vals, :max_parts => max_parts, :validTxnList => validTxnList) end def recv_get_partitions_ps() @@ -1330,13 +1330,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_partitions_ps failed: unknown result') end - def get_partitions_ps_with_auth(db_name, tbl_name, part_vals, max_parts, user_name, group_names) - send_get_partitions_ps_with_auth(db_name, tbl_name, part_vals, max_parts, user_name, group_names) + def get_partitions_ps_with_auth(db_name, tbl_name, part_vals, max_parts, user_name, group_names, validTxnList) + send_get_partitions_ps_with_auth(db_name, tbl_name, part_vals, max_parts, user_name, group_names, validTxnList) return recv_get_partitions_ps_with_auth() end - def send_get_partitions_ps_with_auth(db_name, tbl_name, part_vals, max_parts, user_name, group_names) - send_message('get_partitions_ps_with_auth', Get_partitions_ps_with_auth_args, :db_name => db_name, :tbl_name => tbl_name, :part_vals => part_vals, :max_parts => max_parts, :user_name => user_name, :group_names => group_names) + def send_get_partitions_ps_with_auth(db_name, tbl_name, part_vals, max_parts, user_name, group_names, validTxnList) + send_message('get_partitions_ps_with_auth', Get_partitions_ps_with_auth_args, :db_name => db_name, :tbl_name => tbl_name, :part_vals => part_vals, :max_parts => max_parts, :user_name => user_name, :group_names => group_names, :validTxnList => validTxnList) end def recv_get_partitions_ps_with_auth() @@ -1347,13 +1347,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_partitions_ps_with_auth failed: unknown result') end - def get_partition_names_ps(db_name, tbl_name, part_vals, max_parts) - send_get_partition_names_ps(db_name, tbl_name, part_vals, max_parts) + def get_partition_names_ps(db_name, tbl_name, part_vals, max_parts, validTxnList) + send_get_partition_names_ps(db_name, tbl_name, part_vals, max_parts, validTxnList) return recv_get_partition_names_ps() end - def send_get_partition_names_ps(db_name, tbl_name, part_vals, max_parts) - send_message('get_partition_names_ps', Get_partition_names_ps_args, :db_name => db_name, :tbl_name => tbl_name, :part_vals => part_vals, :max_parts => max_parts) + def send_get_partition_names_ps(db_name, tbl_name, part_vals, max_parts, validTxnList) + send_message('get_partition_names_ps', Get_partition_names_ps_args, :db_name => db_name, :tbl_name => tbl_name, :part_vals => part_vals, :max_parts => max_parts, :validTxnList => validTxnList) end def recv_get_partition_names_ps() @@ -1364,13 +1364,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_partition_names_ps failed: unknown result') end - def get_partitions_by_filter(db_name, tbl_name, filter, max_parts) - send_get_partitions_by_filter(db_name, tbl_name, 
filter, max_parts) + def get_partitions_by_filter(db_name, tbl_name, filter, max_parts, validTxnList) + send_get_partitions_by_filter(db_name, tbl_name, filter, max_parts, validTxnList) return recv_get_partitions_by_filter() end - def send_get_partitions_by_filter(db_name, tbl_name, filter, max_parts) - send_message('get_partitions_by_filter', Get_partitions_by_filter_args, :db_name => db_name, :tbl_name => tbl_name, :filter => filter, :max_parts => max_parts) + def send_get_partitions_by_filter(db_name, tbl_name, filter, max_parts, validTxnList) + send_message('get_partitions_by_filter', Get_partitions_by_filter_args, :db_name => db_name, :tbl_name => tbl_name, :filter => filter, :max_parts => max_parts, :validTxnList => validTxnList) end def recv_get_partitions_by_filter() @@ -1381,13 +1381,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_partitions_by_filter failed: unknown result') end - def get_part_specs_by_filter(db_name, tbl_name, filter, max_parts) - send_get_part_specs_by_filter(db_name, tbl_name, filter, max_parts) + def get_part_specs_by_filter(db_name, tbl_name, filter, max_parts, validTxnList) + send_get_part_specs_by_filter(db_name, tbl_name, filter, max_parts, validTxnList) return recv_get_part_specs_by_filter() end - def send_get_part_specs_by_filter(db_name, tbl_name, filter, max_parts) - send_message('get_part_specs_by_filter', Get_part_specs_by_filter_args, :db_name => db_name, :tbl_name => tbl_name, :filter => filter, :max_parts => max_parts) + def send_get_part_specs_by_filter(db_name, tbl_name, filter, max_parts, validTxnList) + send_message('get_part_specs_by_filter', Get_part_specs_by_filter_args, :db_name => db_name, :tbl_name => tbl_name, :filter => filter, :max_parts => max_parts, :validTxnList => validTxnList) end def recv_get_part_specs_by_filter() @@ -1415,13 +1415,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_partitions_by_expr failed: unknown result') end - def get_num_partitions_by_filter(db_name, tbl_name, filter) - send_get_num_partitions_by_filter(db_name, tbl_name, filter) + def get_num_partitions_by_filter(db_name, tbl_name, filter, validTxnList) + send_get_num_partitions_by_filter(db_name, tbl_name, filter, validTxnList) return recv_get_num_partitions_by_filter() end - def send_get_num_partitions_by_filter(db_name, tbl_name, filter) - send_message('get_num_partitions_by_filter', Get_num_partitions_by_filter_args, :db_name => db_name, :tbl_name => tbl_name, :filter => filter) + def send_get_num_partitions_by_filter(db_name, tbl_name, filter, validTxnList) + send_message('get_num_partitions_by_filter', Get_num_partitions_by_filter_args, :db_name => db_name, :tbl_name => tbl_name, :filter => filter, :validTxnList => validTxnList) end def recv_get_num_partitions_by_filter() @@ -1432,13 +1432,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_num_partitions_by_filter failed: unknown result') end - def get_partitions_by_names(db_name, tbl_name, names) - send_get_partitions_by_names(db_name, tbl_name, names) + def get_partitions_by_names(db_name, tbl_name, names, validTxnList) + send_get_partitions_by_names(db_name, tbl_name, names, validTxnList) return recv_get_partitions_by_names() end - def send_get_partitions_by_names(db_name, tbl_name, names) - send_message('get_partitions_by_names', Get_partitions_by_names_args, :db_name 
=> db_name, :tbl_name => tbl_name, :names => names) + def send_get_partitions_by_names(db_name, tbl_name, names, validTxnList) + send_message('get_partitions_by_names', Get_partitions_by_names_args, :db_name => db_name, :tbl_name => tbl_name, :names => names, :validTxnList => validTxnList) end def recv_get_partitions_by_names() @@ -1863,13 +1863,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'update_partition_column_statistics_req failed: unknown result') end - def get_table_column_statistics(db_name, tbl_name, col_name) - send_get_table_column_statistics(db_name, tbl_name, col_name) + def get_table_column_statistics(db_name, tbl_name, col_name, validWriteIdList) + send_get_table_column_statistics(db_name, tbl_name, col_name, validWriteIdList) return recv_get_table_column_statistics() end - def send_get_table_column_statistics(db_name, tbl_name, col_name) - send_message('get_table_column_statistics', Get_table_column_statistics_args, :db_name => db_name, :tbl_name => tbl_name, :col_name => col_name) + def send_get_table_column_statistics(db_name, tbl_name, col_name, validWriteIdList) + send_message('get_table_column_statistics', Get_table_column_statistics_args, :db_name => db_name, :tbl_name => tbl_name, :col_name => col_name, :validWriteIdList => validWriteIdList) end def recv_get_table_column_statistics() @@ -1882,13 +1882,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_table_column_statistics failed: unknown result') end - def get_partition_column_statistics(db_name, tbl_name, part_name, col_name) - send_get_partition_column_statistics(db_name, tbl_name, part_name, col_name) + def get_partition_column_statistics(db_name, tbl_name, part_name, col_name, validWriteIdList) + send_get_partition_column_statistics(db_name, tbl_name, part_name, col_name, validWriteIdList) return recv_get_partition_column_statistics() end - def send_get_partition_column_statistics(db_name, tbl_name, part_name, col_name) - send_message('get_partition_column_statistics', Get_partition_column_statistics_args, :db_name => db_name, :tbl_name => tbl_name, :part_name => part_name, :col_name => col_name) + def send_get_partition_column_statistics(db_name, tbl_name, part_name, col_name, validWriteIdList) + send_message('get_partition_column_statistics', Get_partition_column_statistics_args, :db_name => db_name, :tbl_name => tbl_name, :part_name => part_name, :col_name => col_name, :validWriteIdList => validWriteIdList) end def recv_get_partition_column_statistics() @@ -3975,7 +3975,7 @@ module ThriftHiveMetastore args = read_args(iprot, Get_fields_args) result = Get_fields_result.new() begin - result.success = @handler.get_fields(args.db_name, args.table_name) + result.success = @handler.get_fields(args.db_name, args.table_name, args.validWriteIdList) rescue ::MetaException => o1 result.o1 = o1 rescue ::UnknownTableException => o2 @@ -3990,7 +3990,7 @@ module ThriftHiveMetastore args = read_args(iprot, Get_fields_with_environment_context_args) result = Get_fields_with_environment_context_result.new() begin - result.success = @handler.get_fields_with_environment_context(args.db_name, args.table_name, args.environment_context) + result.success = @handler.get_fields_with_environment_context(args.db_name, args.table_name, args.environment_context, args.validWriteIdList) rescue ::MetaException => o1 result.o1 = o1 rescue ::UnknownTableException => o2 @@ -4005,7 +4005,7 @@ 
module ThriftHiveMetastore args = read_args(iprot, Get_schema_args) result = Get_schema_result.new() begin - result.success = @handler.get_schema(args.db_name, args.table_name) + result.success = @handler.get_schema(args.db_name, args.table_name, args.validWriteIdList) rescue ::MetaException => o1 result.o1 = o1 rescue ::UnknownTableException => o2 @@ -4020,7 +4020,7 @@ module ThriftHiveMetastore args = read_args(iprot, Get_schema_with_environment_context_args) result = Get_schema_with_environment_context_result.new() begin - result.success = @handler.get_schema_with_environment_context(args.db_name, args.table_name, args.environment_context) + result.success = @handler.get_schema_with_environment_context(args.db_name, args.table_name, args.environment_context, args.validWriteIdList) rescue ::MetaException => o1 result.o1 = o1 rescue ::UnknownTableException => o2 @@ -4291,7 +4291,7 @@ module ThriftHiveMetastore args = read_args(iprot, Get_table_args) result = Get_table_result.new() begin - result.success = @handler.get_table(args.dbname, args.tbl_name) + result.success = @handler.get_table(args.dbname, args.tbl_name, args.validWriteIdList) rescue ::MetaException => o1 result.o1 = o1 rescue ::NoSuchObjectException => o2 @@ -4647,7 +4647,7 @@ module ThriftHiveMetastore args = read_args(iprot, Get_partition_args) result = Get_partition_result.new() begin - result.success = @handler.get_partition(args.db_name, args.tbl_name, args.part_vals) + result.success = @handler.get_partition(args.db_name, args.tbl_name, args.part_vals, args.validTxnList) rescue ::MetaException => o1 result.o1 = o1 rescue ::NoSuchObjectException => o2 @@ -4694,7 +4694,7 @@ module ThriftHiveMetastore args = read_args(iprot, Get_partition_with_auth_args) result = Get_partition_with_auth_result.new() begin - result.success = @handler.get_partition_with_auth(args.db_name, args.tbl_name, args.part_vals, args.user_name, args.group_names) + result.success = @handler.get_partition_with_auth(args.db_name, args.tbl_name, args.part_vals, args.user_name, args.group_names, args.validTxnList) rescue ::MetaException => o1 result.o1 = o1 rescue ::NoSuchObjectException => o2 @@ -4707,7 +4707,7 @@ module ThriftHiveMetastore args = read_args(iprot, Get_partition_by_name_args) result = Get_partition_by_name_result.new() begin - result.success = @handler.get_partition_by_name(args.db_name, args.tbl_name, args.part_name) + result.success = @handler.get_partition_by_name(args.db_name, args.tbl_name, args.part_name, args.validTxnList) rescue ::MetaException => o1 result.o1 = o1 rescue ::NoSuchObjectException => o2 @@ -4720,7 +4720,7 @@ module ThriftHiveMetastore args = read_args(iprot, Get_partitions_args) result = Get_partitions_result.new() begin - result.success = @handler.get_partitions(args.db_name, args.tbl_name, args.max_parts) + result.success = @handler.get_partitions(args.db_name, args.tbl_name, args.max_parts, args.validTxnList) rescue ::NoSuchObjectException => o1 result.o1 = o1 rescue ::MetaException => o2 @@ -4733,7 +4733,7 @@ module ThriftHiveMetastore args = read_args(iprot, Get_partitions_with_auth_args) result = Get_partitions_with_auth_result.new() begin - result.success = @handler.get_partitions_with_auth(args.db_name, args.tbl_name, args.max_parts, args.user_name, args.group_names) + result.success = @handler.get_partitions_with_auth(args.db_name, args.tbl_name, args.max_parts, args.user_name, args.group_names, args.validTxnList) rescue ::NoSuchObjectException => o1 result.o1 = o1 rescue ::MetaException => o2 @@ -4746,7 
+4746,7 @@ module ThriftHiveMetastore args = read_args(iprot, Get_partitions_pspec_args) result = Get_partitions_pspec_result.new() begin - result.success = @handler.get_partitions_pspec(args.db_name, args.tbl_name, args.max_parts) + result.success = @handler.get_partitions_pspec(args.db_name, args.tbl_name, args.max_parts, args.validTxnList) rescue ::NoSuchObjectException => o1 result.o1 = o1 rescue ::MetaException => o2 @@ -4759,7 +4759,7 @@ module ThriftHiveMetastore args = read_args(iprot, Get_partition_names_args) result = Get_partition_names_result.new() begin - result.success = @handler.get_partition_names(args.db_name, args.tbl_name, args.max_parts) + result.success = @handler.get_partition_names(args.db_name, args.tbl_name, args.max_parts, args.validTxnList) rescue ::NoSuchObjectException => o1 result.o1 = o1 rescue ::MetaException => o2 @@ -4785,7 +4785,7 @@ module ThriftHiveMetastore args = read_args(iprot, Get_partitions_ps_args) result = Get_partitions_ps_result.new() begin - result.success = @handler.get_partitions_ps(args.db_name, args.tbl_name, args.part_vals, args.max_parts) + result.success = @handler.get_partitions_ps(args.db_name, args.tbl_name, args.part_vals, args.max_parts, args.validTxnList) rescue ::MetaException => o1 result.o1 = o1 rescue ::NoSuchObjectException => o2 @@ -4798,7 +4798,7 @@ module ThriftHiveMetastore args = read_args(iprot, Get_partitions_ps_with_auth_args) result = Get_partitions_ps_with_auth_result.new() begin - result.success = @handler.get_partitions_ps_with_auth(args.db_name, args.tbl_name, args.part_vals, args.max_parts, args.user_name, args.group_names) + result.success = @handler.get_partitions_ps_with_auth(args.db_name, args.tbl_name, args.part_vals, args.max_parts, args.user_name, args.group_names, args.validTxnList) rescue ::NoSuchObjectException => o1 result.o1 = o1 rescue ::MetaException => o2 @@ -4811,7 +4811,7 @@ module ThriftHiveMetastore args = read_args(iprot, Get_partition_names_ps_args) result = Get_partition_names_ps_result.new() begin - result.success = @handler.get_partition_names_ps(args.db_name, args.tbl_name, args.part_vals, args.max_parts) + result.success = @handler.get_partition_names_ps(args.db_name, args.tbl_name, args.part_vals, args.max_parts, args.validTxnList) rescue ::MetaException => o1 result.o1 = o1 rescue ::NoSuchObjectException => o2 @@ -4824,7 +4824,7 @@ module ThriftHiveMetastore args = read_args(iprot, Get_partitions_by_filter_args) result = Get_partitions_by_filter_result.new() begin - result.success = @handler.get_partitions_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts) + result.success = @handler.get_partitions_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts, args.validTxnList) rescue ::MetaException => o1 result.o1 = o1 rescue ::NoSuchObjectException => o2 @@ -4837,7 +4837,7 @@ module ThriftHiveMetastore args = read_args(iprot, Get_part_specs_by_filter_args) result = Get_part_specs_by_filter_result.new() begin - result.success = @handler.get_part_specs_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts) + result.success = @handler.get_part_specs_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts, args.validTxnList) rescue ::MetaException => o1 result.o1 = o1 rescue ::NoSuchObjectException => o2 @@ -4863,7 +4863,7 @@ module ThriftHiveMetastore args = read_args(iprot, Get_num_partitions_by_filter_args) result = Get_num_partitions_by_filter_result.new() begin - result.success = @handler.get_num_partitions_by_filter(args.db_name, 
args.tbl_name, args.filter) + result.success = @handler.get_num_partitions_by_filter(args.db_name, args.tbl_name, args.filter, args.validTxnList) rescue ::MetaException => o1 result.o1 = o1 rescue ::NoSuchObjectException => o2 @@ -4876,7 +4876,7 @@ module ThriftHiveMetastore args = read_args(iprot, Get_partitions_by_names_args) result = Get_partitions_by_names_result.new() begin - result.success = @handler.get_partitions_by_names(args.db_name, args.tbl_name, args.names) + result.success = @handler.get_partitions_by_names(args.db_name, args.tbl_name, args.names, args.validTxnList) rescue ::MetaException => o1 result.o1 = o1 rescue ::NoSuchObjectException => o2 @@ -5225,7 +5225,7 @@ module ThriftHiveMetastore args = read_args(iprot, Get_table_column_statistics_args) result = Get_table_column_statistics_result.new() begin - result.success = @handler.get_table_column_statistics(args.db_name, args.tbl_name, args.col_name) + result.success = @handler.get_table_column_statistics(args.db_name, args.tbl_name, args.col_name, args.validWriteIdList) rescue ::NoSuchObjectException => o1 result.o1 = o1 rescue ::MetaException => o2 @@ -5242,7 +5242,7 @@ module ThriftHiveMetastore args = read_args(iprot, Get_partition_column_statistics_args) result = Get_partition_column_statistics_result.new() begin - result.success = @handler.get_partition_column_statistics(args.db_name, args.tbl_name, args.part_name, args.col_name) + result.success = @handler.get_partition_column_statistics(args.db_name, args.tbl_name, args.part_name, args.col_name, args.validWriteIdList) rescue ::NoSuchObjectException => o1 result.o1 = o1 rescue ::MetaException => o2 @@ -7155,10 +7155,12 @@ module ThriftHiveMetastore include ::Thrift::Struct, ::Thrift::Struct_Union DB_NAME = 1 TABLE_NAME = 2 + VALIDWRITEIDLIST = 3 FIELDS = { DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, - TABLE_NAME => {:type => ::Thrift::Types::STRING, :name => 'table_name'} + TABLE_NAME => {:type => ::Thrift::Types::STRING, :name => 'table_name'}, + VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList'} } def struct_fields; FIELDS; end @@ -7196,11 +7198,13 @@ module ThriftHiveMetastore DB_NAME = 1 TABLE_NAME = 2 ENVIRONMENT_CONTEXT = 3 + VALIDWRITEIDLIST = 4 FIELDS = { DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, TABLE_NAME => {:type => ::Thrift::Types::STRING, :name => 'table_name'}, - ENVIRONMENT_CONTEXT => {:type => ::Thrift::Types::STRUCT, :name => 'environment_context', :class => ::EnvironmentContext} + ENVIRONMENT_CONTEXT => {:type => ::Thrift::Types::STRUCT, :name => 'environment_context', :class => ::EnvironmentContext}, + VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList'} } def struct_fields; FIELDS; end @@ -7237,10 +7241,12 @@ module ThriftHiveMetastore include ::Thrift::Struct, ::Thrift::Struct_Union DB_NAME = 1 TABLE_NAME = 2 + VALIDWRITEIDLIST = 3 FIELDS = { DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, - TABLE_NAME => {:type => ::Thrift::Types::STRING, :name => 'table_name'} + TABLE_NAME => {:type => ::Thrift::Types::STRING, :name => 'table_name'}, + VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList'} } def struct_fields; FIELDS; end @@ -7278,11 +7284,13 @@ module ThriftHiveMetastore DB_NAME = 1 TABLE_NAME = 2 ENVIRONMENT_CONTEXT = 3 + VALIDWRITEIDLIST = 4 FIELDS = { DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, TABLE_NAME => {:type => ::Thrift::Types::STRING, :name => 
'table_name'}, - ENVIRONMENT_CONTEXT => {:type => ::Thrift::Types::STRUCT, :name => 'environment_context', :class => ::EnvironmentContext} + ENVIRONMENT_CONTEXT => {:type => ::Thrift::Types::STRUCT, :name => 'environment_context', :class => ::EnvironmentContext}, + VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList'} } def struct_fields; FIELDS; end @@ -8046,10 +8054,12 @@ module ThriftHiveMetastore include ::Thrift::Struct, ::Thrift::Struct_Union DBNAME = 1 TBL_NAME = 2 + VALIDWRITEIDLIST = 3 FIELDS = { DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbname'}, - TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'} + TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'}, + VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList'} } def struct_fields; FIELDS; end @@ -9077,11 +9087,13 @@ module ThriftHiveMetastore DB_NAME = 1 TBL_NAME = 2 PART_VALS = 3 + VALIDTXNLIST = 4 FIELDS = { DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'}, - PART_VALS => {:type => ::Thrift::Types::LIST, :name => 'part_vals', :element => {:type => ::Thrift::Types::STRING}} + PART_VALS => {:type => ::Thrift::Types::LIST, :name => 'part_vals', :element => {:type => ::Thrift::Types::STRING}}, + VALIDTXNLIST => {:type => ::Thrift::Types::STRING, :name => 'validTxnList'} } def struct_fields; FIELDS; end @@ -9215,13 +9227,15 @@ module ThriftHiveMetastore PART_VALS = 3 USER_NAME = 4 GROUP_NAMES = 5 + VALIDTXNLIST = 6 FIELDS = { DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'}, PART_VALS => {:type => ::Thrift::Types::LIST, :name => 'part_vals', :element => {:type => ::Thrift::Types::STRING}}, USER_NAME => {:type => ::Thrift::Types::STRING, :name => 'user_name'}, - GROUP_NAMES => {:type => ::Thrift::Types::LIST, :name => 'group_names', :element => {:type => ::Thrift::Types::STRING}} + GROUP_NAMES => {:type => ::Thrift::Types::LIST, :name => 'group_names', :element => {:type => ::Thrift::Types::STRING}}, + VALIDTXNLIST => {:type => ::Thrift::Types::STRING, :name => 'validTxnList'} } def struct_fields; FIELDS; end @@ -9257,11 +9271,13 @@ module ThriftHiveMetastore DB_NAME = 1 TBL_NAME = 2 PART_NAME = 3 + VALIDTXNLIST = 4 FIELDS = { DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'}, - PART_NAME => {:type => ::Thrift::Types::STRING, :name => 'part_name'} + PART_NAME => {:type => ::Thrift::Types::STRING, :name => 'part_name'}, + VALIDTXNLIST => {:type => ::Thrift::Types::STRING, :name => 'validTxnList'} } def struct_fields; FIELDS; end @@ -9297,11 +9313,13 @@ module ThriftHiveMetastore DB_NAME = 1 TBL_NAME = 2 MAX_PARTS = 3 + VALIDTXNLIST = 4 FIELDS = { DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'}, - MAX_PARTS => {:type => ::Thrift::Types::I16, :name => 'max_parts', :default => -1} + MAX_PARTS => {:type => ::Thrift::Types::I16, :name => 'max_parts', :default => -1}, + VALIDTXNLIST => {:type => ::Thrift::Types::STRING, :name => 'validTxnList'} } def struct_fields; FIELDS; end @@ -9339,13 +9357,15 @@ module ThriftHiveMetastore MAX_PARTS = 3 USER_NAME = 4 GROUP_NAMES = 5 + VALIDTXNLIST = 6 FIELDS = { DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, TBL_NAME => {:type => 
::Thrift::Types::STRING, :name => 'tbl_name'}, MAX_PARTS => {:type => ::Thrift::Types::I16, :name => 'max_parts', :default => -1}, USER_NAME => {:type => ::Thrift::Types::STRING, :name => 'user_name'}, - GROUP_NAMES => {:type => ::Thrift::Types::LIST, :name => 'group_names', :element => {:type => ::Thrift::Types::STRING}} + GROUP_NAMES => {:type => ::Thrift::Types::LIST, :name => 'group_names', :element => {:type => ::Thrift::Types::STRING}}, + VALIDTXNLIST => {:type => ::Thrift::Types::STRING, :name => 'validTxnList'} } def struct_fields; FIELDS; end @@ -9381,11 +9401,13 @@ module ThriftHiveMetastore DB_NAME = 1 TBL_NAME = 2 MAX_PARTS = 3 + VALIDTXNLIST = 4 FIELDS = { DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'}, - MAX_PARTS => {:type => ::Thrift::Types::I32, :name => 'max_parts', :default => -1} + MAX_PARTS => {:type => ::Thrift::Types::I32, :name => 'max_parts', :default => -1}, + VALIDTXNLIST => {:type => ::Thrift::Types::STRING, :name => 'validTxnList'} } def struct_fields; FIELDS; end @@ -9421,11 +9443,13 @@ module ThriftHiveMetastore DB_NAME = 1 TBL_NAME = 2 MAX_PARTS = 3 + VALIDTXNLIST = 4 FIELDS = { DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'}, - MAX_PARTS => {:type => ::Thrift::Types::I16, :name => 'max_parts', :default => -1} + MAX_PARTS => {:type => ::Thrift::Types::I16, :name => 'max_parts', :default => -1}, + VALIDTXNLIST => {:type => ::Thrift::Types::STRING, :name => 'validTxnList'} } def struct_fields; FIELDS; end @@ -9498,12 +9522,14 @@ module ThriftHiveMetastore TBL_NAME = 2 PART_VALS = 3 MAX_PARTS = 4 + VALIDTXNLIST = 5 FIELDS = { DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'}, PART_VALS => {:type => ::Thrift::Types::LIST, :name => 'part_vals', :element => {:type => ::Thrift::Types::STRING}}, - MAX_PARTS => {:type => ::Thrift::Types::I16, :name => 'max_parts', :default => -1} + MAX_PARTS => {:type => ::Thrift::Types::I16, :name => 'max_parts', :default => -1}, + VALIDTXNLIST => {:type => ::Thrift::Types::STRING, :name => 'validTxnList'} } def struct_fields; FIELDS; end @@ -9542,6 +9568,7 @@ module ThriftHiveMetastore MAX_PARTS = 4 USER_NAME = 5 GROUP_NAMES = 6 + VALIDTXNLIST = 7 FIELDS = { DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, @@ -9549,7 +9576,8 @@ module ThriftHiveMetastore PART_VALS => {:type => ::Thrift::Types::LIST, :name => 'part_vals', :element => {:type => ::Thrift::Types::STRING}}, MAX_PARTS => {:type => ::Thrift::Types::I16, :name => 'max_parts', :default => -1}, USER_NAME => {:type => ::Thrift::Types::STRING, :name => 'user_name'}, - GROUP_NAMES => {:type => ::Thrift::Types::LIST, :name => 'group_names', :element => {:type => ::Thrift::Types::STRING}} + GROUP_NAMES => {:type => ::Thrift::Types::LIST, :name => 'group_names', :element => {:type => ::Thrift::Types::STRING}}, + VALIDTXNLIST => {:type => ::Thrift::Types::STRING, :name => 'validTxnList'} } def struct_fields; FIELDS; end @@ -9586,12 +9614,14 @@ module ThriftHiveMetastore TBL_NAME = 2 PART_VALS = 3 MAX_PARTS = 4 + VALIDTXNLIST = 5 FIELDS = { DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'}, PART_VALS => {:type => ::Thrift::Types::LIST, :name => 'part_vals', :element => {:type => ::Thrift::Types::STRING}}, - MAX_PARTS => {:type 
=> ::Thrift::Types::I16, :name => 'max_parts', :default => -1} + MAX_PARTS => {:type => ::Thrift::Types::I16, :name => 'max_parts', :default => -1}, + VALIDTXNLIST => {:type => ::Thrift::Types::STRING, :name => 'validTxnList'} } def struct_fields; FIELDS; end @@ -9628,12 +9658,14 @@ module ThriftHiveMetastore TBL_NAME = 2 FILTER = 3 MAX_PARTS = 4 + VALIDTXNLIST = 5 FIELDS = { DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'}, FILTER => {:type => ::Thrift::Types::STRING, :name => 'filter'}, - MAX_PARTS => {:type => ::Thrift::Types::I16, :name => 'max_parts', :default => -1} + MAX_PARTS => {:type => ::Thrift::Types::I16, :name => 'max_parts', :default => -1}, + VALIDTXNLIST => {:type => ::Thrift::Types::STRING, :name => 'validTxnList'} } def struct_fields; FIELDS; end @@ -9670,12 +9702,14 @@ module ThriftHiveMetastore TBL_NAME = 2 FILTER = 3 MAX_PARTS = 4 + VALIDTXNLIST = 5 FIELDS = { DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'}, FILTER => {:type => ::Thrift::Types::STRING, :name => 'filter'}, - MAX_PARTS => {:type => ::Thrift::Types::I32, :name => 'max_parts', :default => -1} + MAX_PARTS => {:type => ::Thrift::Types::I32, :name => 'max_parts', :default => -1}, + VALIDTXNLIST => {:type => ::Thrift::Types::STRING, :name => 'validTxnList'} } def struct_fields; FIELDS; end @@ -9747,11 +9781,13 @@ module ThriftHiveMetastore DB_NAME = 1 TBL_NAME = 2 FILTER = 3 + VALIDTXNLIST = 4 FIELDS = { DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'}, - FILTER => {:type => ::Thrift::Types::STRING, :name => 'filter'} + FILTER => {:type => ::Thrift::Types::STRING, :name => 'filter'}, + VALIDTXNLIST => {:type => ::Thrift::Types::STRING, :name => 'validTxnList'} } def struct_fields; FIELDS; end @@ -9787,11 +9823,13 @@ module ThriftHiveMetastore DB_NAME = 1 TBL_NAME = 2 NAMES = 3 + VALIDTXNLIST = 4 FIELDS = { DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'}, - NAMES => {:type => ::Thrift::Types::LIST, :name => 'names', :element => {:type => ::Thrift::Types::STRING}} + NAMES => {:type => ::Thrift::Types::LIST, :name => 'names', :element => {:type => ::Thrift::Types::STRING}}, + VALIDTXNLIST => {:type => ::Thrift::Types::STRING, :name => 'validTxnList'} } def struct_fields; FIELDS; end @@ -10751,11 +10789,13 @@ module ThriftHiveMetastore DB_NAME = 1 TBL_NAME = 2 COL_NAME = 3 + VALIDWRITEIDLIST = 4 FIELDS = { DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'}, - COL_NAME => {:type => ::Thrift::Types::STRING, :name => 'col_name'} + COL_NAME => {:type => ::Thrift::Types::STRING, :name => 'col_name'}, + VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList'} } def struct_fields; FIELDS; end @@ -10796,12 +10836,14 @@ module ThriftHiveMetastore TBL_NAME = 2 PART_NAME = 3 COL_NAME = 4 + VALIDWRITEIDLIST = 5 FIELDS = { DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'}, PART_NAME => {:type => ::Thrift::Types::STRING, :name => 'part_name'}, - COL_NAME => {:type => ::Thrift::Types::STRING, :name => 'col_name'} + COL_NAME => {:type => ::Thrift::Types::STRING, :name => 'col_name'}, + 
VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList'} } def struct_fields; FIELDS; end diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java index 153f4b8fd1..af92302eb5 100644 --- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java +++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java @@ -19,9 +19,7 @@ package org.apache.hadoop.hive.metastore; import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; -import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.CAT_NAME; import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog; -import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.parseDbName; import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.prependCatalogToDbName; import java.io.IOException; @@ -58,7 +56,9 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.common.StatsSetupConst; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidTxnWriteIdList; import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.metastore.api.*; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; @@ -145,6 +145,9 @@ private static String[] processorCapabilities; private static String processorIdentifier; + private ValidTxnWriteIdList txnWriteIdList; + private long tableId; + //copied from ErrorMsg.java private static final String REPL_EVENTS_MISSING_IN_METASTORE = "Notification events are missing in the meta store."; @@ -167,10 +170,8 @@ public HiveMetaStoreClient(Configuration conf, HiveMetaHookLoader hookLoader, Bo this.hookLoader = hookLoader; if (conf == null) { conf = MetastoreConf.newMetastoreConf(); - this.conf = conf; - } else { - this.conf = new Configuration(conf); } + this.conf = conf; version = MetastoreConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST) ? 
TEST_VERSION : VERSION; filterHook = loadFilterHooks(); isClientFilterEnabled = getIfClientFilterEnabled(); @@ -732,6 +733,10 @@ public static void setProcessorIdentifier(final String id) { processorIdentifier = id; } + public void setTableId(long id) { + tableId = id; + } + @Override public void setMetaConf(String key, String value) throws TException { client.setMetaConf(key, value); @@ -1674,7 +1679,7 @@ public boolean dropType(String type) throws NoSuchObjectException, MetaException int max_parts) throws TException { // TODO should we add capabilities here as well as it returns Partition objects List parts = client.get_partitions(prependCatalogToDbName(catName, db_name, conf), - tbl_name, shrinkMaxtoShort(max_parts)); + tbl_name, shrinkMaxtoShort(max_parts), getValidWriteIdList(TableName.getDbTable(db_name, tbl_name))); return deepCopyPartitions( FilterUtils.filterPartitionsIfEnabled(isClientFilterEnabled, filterHook, parts)); } @@ -1688,7 +1693,7 @@ public PartitionSpecProxy listPartitionSpecs(String dbName, String tableName, in public PartitionSpecProxy listPartitionSpecs(String catName, String dbName, String tableName, int maxParts) throws TException { List partitionSpecs = - client.get_partitions_pspec(prependCatalogToDbName(catName, dbName, conf), tableName, maxParts); + client.get_partitions_pspec(prependCatalogToDbName(catName, dbName, conf), tableName, maxParts, getValidWriteIdList(TableName.getDbTable(dbName, tableName))); partitionSpecs = FilterUtils.filterPartitionSpecsIfEnabled(isClientFilterEnabled, filterHook, partitionSpecs); return PartitionSpecProxy.Factory.get(partitionSpecs); } @@ -1705,7 +1710,7 @@ public PartitionSpecProxy listPartitionSpecs(String catName, String dbName, Stri List part_vals, int max_parts) throws TException { // TODO should we add capabilities here as well as it returns Partition objects List parts = client.get_partitions_ps(prependCatalogToDbName(catName, db_name, conf), - tbl_name, part_vals, shrinkMaxtoShort(max_parts)); + tbl_name, part_vals, shrinkMaxtoShort(max_parts), getValidWriteIdList(TableName.getDbTable(db_name, tbl_name))); return deepCopyPartitions(FilterUtils.filterPartitionsIfEnabled(isClientFilterEnabled, filterHook, parts)); } @@ -1724,7 +1729,7 @@ public PartitionSpecProxy listPartitionSpecs(String catName, String dbName, Stri List groupNames) throws TException { // TODO should we add capabilities here as well as it returns Partition objects List parts = client.get_partitions_with_auth(prependCatalogToDbName(catName, - dbName, conf), tableName, shrinkMaxtoShort(maxParts), userName, groupNames); + dbName, conf), tableName, shrinkMaxtoShort(maxParts), userName, groupNames, getValidWriteIdList(TableName.getDbTable(dbName, tableName))); return deepCopyPartitions(FilterUtils.filterPartitionsIfEnabled(isClientFilterEnabled, filterHook, parts)); } @@ -1745,7 +1750,7 @@ public PartitionSpecProxy listPartitionSpecs(String catName, String dbName, Stri throws TException { // TODO should we add capabilities here as well as it returns Partition objects List parts = client.get_partitions_ps_with_auth(prependCatalogToDbName(catName, - dbName, conf), tableName, partialPvals, shrinkMaxtoShort(maxParts), userName, groupNames); + dbName, conf), tableName, partialPvals, shrinkMaxtoShort(maxParts), userName, groupNames, getValidWriteIdList(TableName.getDbTable(dbName, tableName))); return deepCopyPartitions(FilterUtils.filterPartitionsIfEnabled(isClientFilterEnabled, filterHook, parts)); } @@ -1760,7 +1765,7 @@ public PartitionSpecProxy 
listPartitionSpecs(String catName, String dbName, Stri String filter, int max_parts) throws TException { // TODO should we add capabilities here as well as it returns Partition objects List parts =client.get_partitions_by_filter(prependCatalogToDbName( - catName, db_name, conf), tbl_name, filter, shrinkMaxtoShort(max_parts)); + catName, db_name, conf), tbl_name, filter, shrinkMaxtoShort(max_parts), getValidWriteIdList(TableName.getDbTable(db_name, tbl_name))); return deepCopyPartitions(FilterUtils.filterPartitionsIfEnabled(isClientFilterEnabled, filterHook, parts)); } @@ -1777,7 +1782,7 @@ public PartitionSpecProxy listPartitionSpecsByFilter(String catName, String db_n int max_parts) throws TException { List partitionSpecs = client.get_part_specs_by_filter(prependCatalogToDbName(catName, db_name, conf), tbl_name, filter, - max_parts); + max_parts, getValidWriteIdList(TableName.getDbTable(db_name, tbl_name))); return PartitionSpecProxy.Factory.get( FilterUtils.filterPartitionSpecsIfEnabled(isClientFilterEnabled, filterHook, partitionSpecs)); } @@ -1842,7 +1847,7 @@ public Partition getPartition(String db_name, String tbl_name, List part @Override public Partition getPartition(String catName, String dbName, String tblName, List partVals) throws TException { - Partition p = client.get_partition(prependCatalogToDbName(catName, dbName, conf), tblName, partVals); + Partition p = client.get_partition(prependCatalogToDbName(catName, dbName, conf), tblName, partVals, getValidWriteIdList(TableName.getDbTable(dbName, tblName))); return deepCopy(FilterUtils.filterPartitionIfEnabled(isClientFilterEnabled, filterHook, p)); } @@ -1911,7 +1916,7 @@ public Partition getPartitionWithAuthInfo(String catName, String dbName, String List pvals, String userName, List groupNames) throws TException { Partition p = client.get_partition_with_auth(prependCatalogToDbName(catName, dbName, conf), tableName, - pvals, userName, groupNames); + pvals, userName, groupNames, getValidWriteIdList(TableName.getDbTable(dbName, tableName))); return deepCopy(FilterUtils.filterPartitionIfEnabled(isClientFilterEnabled, filterHook, p)); } @@ -1931,17 +1936,8 @@ public Table getTable(String catName, String dbName, String tableName) throws TE } public Table getTable(String catName, String dbName, String tableName, boolean getColumnStats) throws TException { - GetTableRequest req = new GetTableRequest(dbName, tableName); - req.setCatName(catName); - req.setCapabilities(version); - req.setGetColumnStats(getColumnStats); - if (processorCapabilities != null) - req.setProcessorCapabilities(Arrays.asList(processorCapabilities)); - if (processorIdentifier != null) - req.setProcessorIdentifier(processorIdentifier); - - Table t = client.get_table_req(req).getTable(); - return deepCopy(FilterUtils.filterTableIfEnabled(isClientFilterEnabled, filterHook, t)); + String validWriteIdList = getValidWriteIdList(TableName.getDbTable(dbName, tableName)); + return getTable(catName, dbName, tableName, validWriteIdList, getColumnStats); } @Override @@ -1956,7 +1952,9 @@ public Table getTable(String catName, String dbName, String tableName, String va GetTableRequest req = new GetTableRequest(dbName, tableName); req.setCatName(catName); req.setCapabilities(version); - req.setValidWriteIdList(validWriteIdList); + if (validWriteIdList != null) { + req.setValidWriteIdList(validWriteIdList); + } req.setGetColumnStats(getColumnStats); if (processorCapabilities != null) req.setProcessorCapabilities(Arrays.asList(processorCapabilities)); @@ -2179,7 +2177,7 @@ public 
boolean tableExists(String catName, String dbName, String tableName) thro int maxParts) throws TException { List partNames = client.get_partition_names( - prependCatalogToDbName(catName, dbName, conf), tableName, shrinkMaxtoShort(maxParts)); + prependCatalogToDbName(catName, dbName, conf), tableName, shrinkMaxtoShort(maxParts), getValidWriteIdList(TableName.getDbTable(dbName, tableName))); return FilterUtils.filterPartitionNamesIfEnabled( isClientFilterEnabled, filterHook, catName, dbName, tableName, partNames); } @@ -2194,7 +2192,7 @@ public boolean tableExists(String catName, String dbName, String tableName) thro public List listPartitionNames(String catName, String db_name, String tbl_name, List part_vals, int max_parts) throws TException { List partNames = client.get_partition_names_ps(prependCatalogToDbName(catName, db_name, conf), tbl_name, - part_vals, shrinkMaxtoShort(max_parts)); + part_vals, shrinkMaxtoShort(max_parts), getValidWriteIdList(TableName.getDbTable(db_name, tbl_name))); return FilterUtils.filterPartitionNamesIfEnabled( isClientFilterEnabled, filterHook, catName, db_name, tbl_name, partNames); } @@ -2209,7 +2207,7 @@ public int getNumPartitionsByFilter(String db_name, String tbl_name, public int getNumPartitionsByFilter(String catName, String dbName, String tableName, String filter) throws TException { return client.get_num_partitions_by_filter(prependCatalogToDbName(catName, dbName, conf), tableName, - filter); + filter, getValidWriteIdList(TableName.getDbTable(dbName, tableName))); } @Override @@ -2305,7 +2303,7 @@ public void alterDatabase(String catName, String dbName, Database newDb) throws @Override public List getFields(String catName, String db, String tableName) throws TException { - List fields = client.get_fields(prependCatalogToDbName(catName, db, conf), tableName); + List fields = client.get_fields(prependCatalogToDbName(catName, db, conf), tableName, getValidWriteIdList(TableName.getDbTable(db, tableName))); return deepCopyFieldSchemas(fields); } @@ -2503,7 +2501,7 @@ public boolean deleteTableColumnStatistics(String catName, String dbName, String } List fields = client.get_schema_with_environment_context(prependCatalogToDbName( - catName, db, conf), tableName, envCxt); + catName, db, conf), tableName, envCxt, getValidWriteIdList(TableName.getDbTable(db, tableName))); return deepCopyFieldSchemas(fields); } @@ -2522,7 +2520,7 @@ public Partition getPartition(String db, String tableName, String partName) thro public Partition getPartition(String catName, String dbName, String tblName, String name) throws TException { Partition p = client.get_partition_by_name(prependCatalogToDbName(catName, dbName, conf), tblName, - name); + name, getValidWriteIdList(TableName.getDbTable(dbName, tblName))); return deepCopy(FilterUtils.filterPartitionIfEnabled(isClientFilterEnabled, filterHook, p)); } @@ -3841,6 +3839,12 @@ public SerDeInfo getSerDe(String serDeName) throws TException { return client.get_serde(new GetSerdeRequest(serDeName)); } + private String getValidWriteIdList(String fullTableName) { + ValidTxnWriteIdList validTxnWriteIdList = new ValidTxnWriteIdList(conf.get(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY)); + ValidWriteIdList writeIdList = validTxnWriteIdList.getTableValidWriteIdList(fullTableName); + return writeIdList!=null?writeIdList.toString():null; + } + private short shrinkMaxtoShort(int max) { if (max < 0) { return -1; @@ -3924,6 +3928,11 @@ public String getServerVersion() throws TException { return client.getVersion(); } + @Override + public 
void setValidWriteIdList(String txnWriteIdList) { + conf.set(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY, txnWriteIdList); + } + /** * Builder for requiredFields bitmask to be sent via GetTablesExtRequest */ diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java index aa7e8dfcbd..29c1ad9947 100644 --- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java +++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java @@ -29,6 +29,7 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidTxnWriteIdList; import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.common.classification.RetrySemantics; import org.apache.hadoop.hive.metastore.annotation.NoReconnect; @@ -3982,4 +3983,6 @@ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerNam * @return String representation of the version number of Metastore server (eg: 3.1.0-SNAPSHOT) */ String getServerVersion() throws TException; + + void setValidWriteIdList(String txnWriteIdList); } diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java index 927324e29e..0926dafe3a 100644 --- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java +++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java @@ -349,9 +349,6 @@ public static ConfVars getMetaConf(String name) { CATALOG_DEFAULT("metastore.catalog.default", "metastore.catalog.default", "hive", "The default catalog to use when a catalog is not specified. Default is 'hive' (the " + "default catalog)."), - CATALOGS_TO_CACHE("metastore.cached.rawstore.catalogs", "metastore.cached.rawstore.catalogs", - "hive", "Comma separated list of catalogs to cache in the CachedStore. Default is 'hive' " + - "(the default catalog). 
Empty string means all catalogs will be cached."), CLIENT_CONNECT_RETRY_DELAY("metastore.client.connect.retry.delay", "hive.metastore.client.connect.retry.delay", 1, TimeUnit.SECONDS, "Number of seconds for the client to wait between consecutive connection attempts"), @@ -990,8 +987,6 @@ public static ConfVars getMetaConf(String name) { "Time interval describing how often the reaper runs"), TOKEN_SIGNATURE("metastore.token.signature", "hive.metastore.token.signature", "", "The delegation token service name to match when selecting a token from the current user's tokens."), - METASTORE_CACHE_CAN_USE_EVENT("metastore.cache.can.use.event", "hive.metastore.cache.can.use.event", false, "Can notification events from notification log table be used for updating the metastore cache."), TRANSACTIONAL_EVENT_LISTENERS("metastore.transactional.event.listeners", "hive.metastore.transactional.event.listeners", "", "A comma separated list of Java classes that implement the org.apache.riven.MetaStoreEventListener" + diff --git a/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift b/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift index ca46a8bb3b..b32cd3b9dd 100644 --- a/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift +++ b/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift @@ -745,8 +745,9 @@ struct PartitionsByExprRequest { 2: required string tblName, 3: required binary expr, 4: optional string defaultPartitionName, - 5: optional i16 maxParts=-1 - 6: optional string catName + 5: optional i16 maxParts=-1, + 6: optional string catName, + 7: optional string validWriteIdList } struct TableStatsResult { @@ -831,7 +832,8 @@ struct PartitionValuesRequest { 6: optional list<FieldSchema> partitionOrder; 7: optional bool ascending = true; 8: optional i64 maxParts = -1; - 9: optional string catName + 9: optional string catName, + 10: optional string validWriteIdList } struct PartitionValuesRow { @@ -848,7 +850,8 @@ struct GetPartitionsByNamesRequest { 3: optional list<string> names, 4: optional bool get_col_stats, 5: optional list<string> processorCapabilities, - 6: optional string processorIdentifier + 6: optional string processorIdentifier, + 7: optional string validWriteIdList } struct GetPartitionsByNamesResult { @@ -1007,6 +1010,16 @@ struct TableValidWriteIds { 5: required binary abortedBits, // Bit array to identify the aborted write ids in invalidWriteIds list } +struct TableWriteId { + 1: required string fullTableName, // Full table name of format <db_name>.<table_name> + 2: required i64 writeId, // current write id of the table +} + +// Current Write ID for changed tables of the txn +struct GetTxnTableWriteIdsResponse { + 1: required list<TableWriteId> tableWriteIds, +} + // Valid Write ID list for all the input tables wrt to current txn struct GetValidWriteIdsResponse { 1: required list<TableValidWriteIds> tblValidWriteIds, @@ -1824,7 +1837,8 @@ struct GetPartitionsRequest { 7: GetPartitionsProjectionSpec projectionSpec 8: GetPartitionsFilterSpec filterSpec, // TODO not yet implemented. Must be present but ignored 9: optional list<string> processorCapabilities, - 10: optional string processorIdentifier + 10: optional string processorIdentifier, + 11: optional string validWriteIdList } // Exceptions.
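For context on the new string fields: the validWriteIdList / validTxnList values above carry the serialized form of a per-table write-id list, bundled per transaction and keyed by <db_name>.<table_name>. Below is a minimal round-trip sketch in Java, assuming the ValidReaderWriteIdList and ValidTxnWriteIdList classes from org.apache.hadoop.hive.common behave as in current Hive; the table name and ids are invented for illustration.

import org.apache.hadoop.hive.common.ValidReaderWriteIdList;
import org.apache.hadoop.hive.common.ValidTxnWriteIdList;
import org.apache.hadoop.hive.common.ValidWriteIdList;

public class WriteIdListRoundTrip {
  public static void main(String[] args) {
    // Per-table list for "db.tbl": high watermark 5, no open or aborted write ids.
    ValidWriteIdList forTable =
        new ValidReaderWriteIdList("db.tbl:5:" + Long.MAX_VALUE + "::");
    // Per-transaction bundle for txn 42; this is what ends up under
    // ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY in the client conf.
    ValidTxnWriteIdList forTxn = new ValidTxnWriteIdList(42L);
    forTxn.addTableValidWriteIdList(forTable);
    String wire = forTxn.toString();          // serialized form that travels in the new Thrift fields
    ValidWriteIdList back =
        new ValidTxnWriteIdList(wire).getTableValidWriteIdList("db.tbl");
    System.out.println(wire);
    System.out.println(back);                 // null for tables the txn did not touch
  }
}

This mirrors the client plumbing shown earlier: setValidWriteIdList stores the serialized bundle in the conf, and getValidWriteIdList re-parses it per table before each metastore call, which is why every listing RPC in this file grows a trailing string argument.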
@@ -1919,12 +1933,12 @@ service ThriftHiveMetastore extends fb303.FacebookService throws(1:MetaException o2) // Gets a list of FieldSchemas describing the columns of a particular table - list<FieldSchema> get_fields(1: string db_name, 2: string table_name) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3), - list<FieldSchema> get_fields_with_environment_context(1: string db_name, 2: string table_name, 3:EnvironmentContext environment_context) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3) + list<FieldSchema> get_fields(1: string db_name, 2: string table_name, 3: string validWriteIdList) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3), + list<FieldSchema> get_fields_with_environment_context(1: string db_name, 2: string table_name, 3: EnvironmentContext environment_context, 4: string validWriteIdList) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3) // Gets a list of FieldSchemas describing both the columns and the partition keys of a particular table - list<FieldSchema> get_schema(1: string db_name, 2: string table_name) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3) - list<FieldSchema> get_schema_with_environment_context(1: string db_name, 2: string table_name, 3:EnvironmentContext environment_context) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3) + list<FieldSchema> get_schema(1: string db_name, 2: string table_name, 3: string validWriteIdList) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3) + list<FieldSchema> get_schema_with_environment_context(1: string db_name, 2: string table_name, 3:EnvironmentContext environment_context, 4:string validWriteIdList) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3) // create a Hive table. Following fields must be set // tableName @@ -1980,7 +1994,7 @@ service ThriftHiveMetastore extends fb303.FacebookService throws (1: MetaException o1) list<string> get_all_tables(1: string db_name) throws (1: MetaException o1) - Table get_table(1:string dbname, 2:string tbl_name) + Table get_table(1:string dbname, 2:string tbl_name, 3: string validWriteIdList) throws (1:MetaException o1, 2:NoSuchObjectException o2) list<Table>
get_table_objects_by_name(1:string dbname, 2:list<string> tbl_names) list<ExtendedTableInfo> get_tables_ext(1: GetTablesExtRequest req) throws (1: MetaException o1) @@ -2081,7 +2095,7 @@ service ThriftHiveMetastore extends fb303.FacebookService DropPartitionsResult drop_partitions_req(1: DropPartitionsRequest req) throws(1:NoSuchObjectException o1, 2:MetaException o2) - Partition get_partition(1:string db_name, 2:string tbl_name, 3:list<string> part_vals) + Partition get_partition(1:string db_name, 2:string tbl_name, 3:list<string> part_vals, 4:string validTxnList) throws(1:MetaException o1, 2:NoSuchObjectException o2) Partition exchange_partition(1:map<string, string> partitionSpecs, 2:string source_db, 3:string source_table_name, 4:string dest_db, 5:string dest_table_name) @@ -2094,22 +2108,22 @@ service ThriftHiveMetastore extends fb303.FacebookService 4:InvalidInputException o4) Partition get_partition_with_auth(1:string db_name, 2:string tbl_name, 3:list<string> part_vals, - 4: string user_name, 5: list<string> group_names) throws(1:MetaException o1, 2:NoSuchObjectException o2) + 4: string user_name, 5: list<string> group_names, 6: string validTxnList) throws(1:MetaException o1, 2:NoSuchObjectException o2) - Partition get_partition_by_name(1:string db_name 2:string tbl_name, 3:string part_name) + Partition get_partition_by_name(1:string db_name 2:string tbl_name, 3:string part_name, 4:string validTxnList) throws(1:MetaException o1, 2:NoSuchObjectException o2) // returns all the partitions for this table in reverse chronological order. // If max parts is given then it will return only that many. - list<Partition> get_partitions(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1) + list<Partition> get_partitions(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1, 4:string validTxnList) throws(1:NoSuchObjectException o1, 2:MetaException o2) list<Partition> get_partitions_with_auth(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1, - 4: string user_name, 5: list<string> group_names) throws(1:NoSuchObjectException o1, 2:MetaException o2) + 4: string user_name, 5: list<string> group_names, 6: string validTxnList) throws(1:NoSuchObjectException o1, 2:MetaException o2) - list<PartitionSpec> get_partitions_pspec(1:string db_name, 2:string tbl_name, 3:i32 max_parts=-1) + list<PartitionSpec> get_partitions_pspec(1:string db_name, 2:string tbl_name, 3:i32 max_parts=-1, 4:string validTxnList) throws(1:NoSuchObjectException o1, 2:MetaException o2) - list<string> get_partition_names(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1) + list<string> get_partition_names(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1, 4:string validTxnList) throws(1:NoSuchObjectException o1, 2:MetaException o2) PartitionValuesResponse get_partition_values(1:PartitionValuesRequest request) @@ -2122,23 +2136,23 @@ service ThriftHiveMetastore extends fb303.FacebookService // number of partition columns - the unspecified values are considered the same // as "".
list<Partition> get_partitions_ps(1:string db_name 2:string tbl_name - 3:list<string> part_vals, 4:i16 max_parts=-1) + 3:list<string> part_vals, 4:i16 max_parts=-1, 5:string validTxnList) throws(1:MetaException o1, 2:NoSuchObjectException o2) list<Partition> get_partitions_ps_with_auth(1:string db_name, 2:string tbl_name, 3:list<string> part_vals, 4:i16 max_parts=-1, - 5: string user_name, 6: list<string> group_names) throws(1:NoSuchObjectException o1, 2:MetaException o2) + 5: string user_name, 6: list<string> group_names, 7: string validTxnList) throws(1:NoSuchObjectException o1, 2:MetaException o2) list<string> get_partition_names_ps(1:string db_name, - 2:string tbl_name, 3:list<string> part_vals, 4:i16 max_parts=-1) + 2:string tbl_name, 3:list<string> part_vals, 4:i16 max_parts=-1, 5:string validTxnList) throws(1:MetaException o1, 2:NoSuchObjectException o2) // get the partitions matching the given partition filter list<Partition> get_partitions_by_filter(1:string db_name 2:string tbl_name - 3:string filter, 4:i16 max_parts=-1) + 3:string filter, 4:i16 max_parts=-1, 5:string validTxnList) throws(1:MetaException o1, 2:NoSuchObjectException o2) // List partitions as PartitionSpec instances. list<PartitionSpec> get_part_specs_by_filter(1:string db_name 2:string tbl_name - 3:string filter, 4:i32 max_parts=-1) + 3:string filter, 4:i32 max_parts=-1, 5:string validTxnList) throws(1:MetaException o1, 2:NoSuchObjectException o2) // get the partitions matching the given partition filter @@ -2148,11 +2162,11 @@ service ThriftHiveMetastore extends fb303.FacebookService throws(1:MetaException o1, 2:NoSuchObjectException o2) // get the partitions matching the given partition filter - i32 get_num_partitions_by_filter(1:string db_name 2:string tbl_name 3:string filter) + i32 get_num_partitions_by_filter(1:string db_name 2:string tbl_name 3:string filter 4:string validTxnList) throws(1:MetaException o1, 2:NoSuchObjectException o2) // get partitions given a list of partition names - list<Partition> get_partitions_by_names(1:string db_name 2:string tbl_name 3:list<string> names) + list<Partition> get_partitions_by_names(1:string db_name 2:string tbl_name 3:list<string> names 4:string validTxnList) throws(1:MetaException o1, 2:NoSuchObjectException o2) GetPartitionsByNamesResult get_partitions_by_names_req(1:GetPartitionsByNamesRequest req) throws(1:MetaException o1, 2:NoSuchObjectException o2) @@ -2252,10 +2266,10 @@ service ThriftHiveMetastore extends fb303.FacebookService // such statistics exists.
If the required statistics doesn't exist, get APIs throw NoSuchObjectException // For instance, if get_table_column_statistics is called on a partitioned table for which only // partition level column stats exist, get_table_column_statistics will throw NoSuchObjectException - ColumnStatistics get_table_column_statistics(1:string db_name, 2:string tbl_name, 3:string col_name) throws + ColumnStatistics get_table_column_statistics(1:string db_name, 2:string tbl_name, 3:string col_name, 4:string validWriteIdList) throws (1:NoSuchObjectException o1, 2:MetaException o2, 3:InvalidInputException o3, 4:InvalidObjectException o4) ColumnStatistics get_partition_column_statistics(1:string db_name, 2:string tbl_name, 3:string part_name, - 4:string col_name) throws (1:NoSuchObjectException o1, 2:MetaException o2, + 4:string col_name, 5:string validWriteIdList) throws (1:NoSuchObjectException o1, 2:MetaException o2, 3:InvalidInputException o3, 4:InvalidObjectException o4) TableStatsResult get_table_statistics_req(1:TableStatsRequest request) throws (1:NoSuchObjectException o1, 2:MetaException o2) diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java index 97564255d2..6f5b30d2e0 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java @@ -288,7 +288,7 @@ public void alterTable(RawStore msdb, Warehouse wh, String catName, String dbnam String newTblLocPath = dataWasMoved ? destPath.toUri().getPath() : null; // also the location field in partition - parts = msdb.getPartitions(catName, dbname, name, -1); + parts = msdb.getPartitions(catName, dbname, name, -1, null); Map columnStatsNeedUpdated = new HashMap<>(); for (Partition part : parts) { String oldPartLoc = part.getSd().getLocation(); @@ -353,7 +353,7 @@ public void alterTable(RawStore msdb, Warehouse wh, String catName, String dbnam if (isPartitionedTable) { //Currently only column related changes can be cascaded in alter table if(!MetaStoreServerUtils.areSameColumns(oldt.getSd().getCols(), newt.getSd().getCols())) { - parts = msdb.getPartitions(catName, dbname, name, -1); + parts = msdb.getPartitions(catName, dbname, name, -1, null); for (Partition part : parts) { Partition oldPart = new Partition(part); List oldCols = part.getSd().getCols(); @@ -506,6 +506,7 @@ public Partition alterPartition(RawStore msdb, Warehouse wh, String catName, Str //alter partition if (part_vals == null || part_vals.size() == 0) { try { + msdb.openTransaction(); Table tbl = msdb.getTable(catName, dbname, name, null); @@ -513,7 +514,7 @@ public Partition alterPartition(RawStore msdb, Warehouse wh, String catName, Str throw new InvalidObjectException( "Unable to alter partition because table or database does not exist."); } - oldPart = msdb.getPartition(catName, dbname, name, new_part.getValues()); + oldPart = msdb.getPartition(catName, dbname, name, new_part.getValues(), null); if (MetaStoreServerUtils.requireCalStats(oldPart, new_part, tbl, environmentContext)) { // if stats are same, no need to update if (MetaStoreServerUtils.isFastStatsSame(oldPart, new_part)) { @@ -565,6 +566,7 @@ public Partition alterPartition(RawStore msdb, Warehouse wh, String catName, Str boolean dataWasMoved = false; Database db; try { + 
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
index 97564255d2..6f5b30d2e0 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
@@ -288,7 +288,7 @@ public void alterTable(RawStore msdb, Warehouse wh, String catName, String dbnam
       String newTblLocPath = dataWasMoved ? destPath.toUri().getPath() : null;
       // also the location field in partition
-      parts = msdb.getPartitions(catName, dbname, name, -1);
+      parts = msdb.getPartitions(catName, dbname, name, -1, null);
       Map<Partition, ColumnStatistics> columnStatsNeedUpdated = new HashMap<>();
       for (Partition part : parts) {
         String oldPartLoc = part.getSd().getLocation();
@@ -353,7 +353,7 @@ public void alterTable(RawStore msdb, Warehouse wh, String catName, String dbnam
       if (isPartitionedTable) {
         //Currently only column related changes can be cascaded in alter table
         if(!MetaStoreServerUtils.areSameColumns(oldt.getSd().getCols(), newt.getSd().getCols())) {
-          parts = msdb.getPartitions(catName, dbname, name, -1);
+          parts = msdb.getPartitions(catName, dbname, name, -1, null);
           for (Partition part : parts) {
             Partition oldPart = new Partition(part);
             List<FieldSchema> oldCols = part.getSd().getCols();
@@ -506,6 +506,7 @@ public Partition alterPartition(RawStore msdb, Warehouse wh, String catName, Str
     //alter partition
     if (part_vals == null || part_vals.size() == 0) {
       try {
+        msdb.openTransaction();
         Table tbl = msdb.getTable(catName, dbname, name, null);
@@ -513,7 +514,7 @@ public Partition alterPartition(RawStore msdb, Warehouse wh, String catName, Str
           throw new InvalidObjectException(
               "Unable to alter partition because table or database does not exist.");
         }
-        oldPart = msdb.getPartition(catName, dbname, name, new_part.getValues());
+        oldPart = msdb.getPartition(catName, dbname, name, new_part.getValues(), null);
         if (MetaStoreServerUtils.requireCalStats(oldPart, new_part, tbl, environmentContext)) {
           // if stats are same, no need to update
           if (MetaStoreServerUtils.isFastStatsSame(oldPart, new_part)) {
@@ -565,6 +566,7 @@ public Partition alterPartition(RawStore msdb, Warehouse wh, String catName, Str
     boolean dataWasMoved = false;
     Database db;
     try {
+      msdb.openTransaction();
       Table tbl = msdb.getTable(DEFAULT_CATALOG_NAME, dbname, name, null);
       if (tbl == null) {
@@ -572,7 +574,7 @@ public Partition alterPartition(RawStore msdb, Warehouse wh, String catName, Str
             "Unable to alter partition because table or database does not exist.");
       }
       try {
-        oldPart = msdb.getPartition(catName, dbname, name, part_vals);
+        oldPart = msdb.getPartition(catName, dbname, name, part_vals, null);
       } catch (NoSuchObjectException e) {
         // this means there is no existing partition
         throw new InvalidObjectException(
@@ -581,7 +583,7 @@ public Partition alterPartition(RawStore msdb, Warehouse wh, String catName, Str
       Partition check_part;
       try {
-        check_part = msdb.getPartition(catName, dbname, name, new_part.getValues());
+        check_part = msdb.getPartition(catName, dbname, name, new_part.getValues(), null);
       } catch(NoSuchObjectException e) {
         // this means there is no existing partition
         check_part = null;
@@ -739,6 +741,7 @@ public Partition alterPartition(RawStore msdb, Warehouse wh, String catName, Str
     boolean success = false;
     try {
+      msdb.openTransaction();
       // Note: should we pass in write ID here? We only update stats on parts so probably not.
@@ -760,7 +763,7 @@ public Partition alterPartition(RawStore msdb, Warehouse wh, String catName, Str
               .currentTimeMillis() / 1000));
         }
-        Partition oldTmpPart = msdb.getPartition(catName, dbname, name, tmpPart.getValues());
+        Partition oldTmpPart = msdb.getPartition(catName, dbname, name, tmpPart.getValues(), null);
         oldParts.add(oldTmpPart);
         partValsList.add(tmpPart.getValues());
@@ -955,7 +958,7 @@ private Path constructRenamedPath(Path defaultNewPath, Path currentPath) {
     // NOTE: this doesn't check stats being compliant, but the alterTable call below does.
     //       The worst we can do is delete the stats.
     // Collect column stats which need to be rewritten and remove old stats.
-    colStats = msdb.getTableColumnStatistics(catName, dbName, tableName, oldColNames);
+    colStats = msdb.getTableColumnStatistics(catName, dbName, tableName, oldColNames, null);
     if (colStats == null) {
       updateColumnStats = false;
     } else {
@@ -1044,7 +1047,7 @@ public static ColumnStatistics updateOrGetPartitionColumnStats(
       List<String> oldPartNames = Lists.newArrayList(oldPartName);
       // TODO: doesn't take txn stats into account. This method can only remove stats.
       List<ColumnStatistics> partsColStats = msdb.getPartitionColumnStatistics(catName, dbname, tblname,
-          oldPartNames, oldColNames);
+          oldPartNames, oldColNames, null);
       assert (partsColStats.size() <= 1);
       // for out para, this value is initialized by caller.
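
The msdb.openTransaction() lines added above rely on RawStore's nested-transaction convention: opens and commits are counted, and only the outermost pair takes real effect. A self-contained toy model (not Hive code) of that convention:

/** Toy model (not Hive code) of RawStore's nested transaction bracketing. */
public class NestedTxnSketch {
  private int openCalls = 0;
  private boolean rolledBack = false;

  boolean openTransaction() {
    openCalls++;                       // nested opens just increment a counter
    return openCalls == 1;             // only the outermost call begins the txn
  }

  boolean commitTransaction() {
    if (rolledBack) return false;      // a rollback poisons the whole stack
    openCalls--;
    return openCalls == 0;             // only the outermost commit is real
  }

  void rollbackTransaction() {
    rolledBack = true;
    openCalls = 0;
  }

  public static void main(String[] args) {
    NestedTxnSketch store = new NestedTxnSketch();
    store.openTransaction();           // e.g. alterPartition's new bracket
    store.openTransaction();           // a nested helper opens again
    store.commitTransaction();         // inner commit: no-op
    System.out.println(store.commitTransaction()); // true: the real commit
  }
}
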
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index 7e97f8d9dd..7a857cd03e 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -585,19 +585,6 @@ public void init() throws MetaException {
         listeners.add(new HMSMetricsListener(conf));
       }
-      boolean canCachedStoreCanUseEvent = false;
-      for (MetaStoreEventListener listener : transactionalListeners) {
-        if (listener.doesAddEventsToNotificationLogTable()) {
-          canCachedStoreCanUseEvent = true;
-          break;
-        }
-      }
-      if (conf.getBoolean(ConfVars.METASTORE_CACHE_CAN_USE_EVENT.getVarname(), false) &&
-          !canCachedStoreCanUseEvent) {
-        throw new MetaException("CahcedStore can not use events for invalidation as there is no " +
-            " TransactionalMetaStoreEventListener to add events to notification table");
-      }
-
       endFunctionListeners = MetaStoreServerUtils.getMetaStoreListeners(
           MetaStoreEndFunctionListener.class, conf,
           MetastoreConf.getVar(conf, ConfVars.END_FUNCTION_LISTENERS));
@@ -2648,7 +2635,7 @@ private boolean drop_table_core(final RawStore ms, final String catName, final S
         db = ms.getDatabase(catName, dbname);
         // drop any partitions
-        tbl = get_table_core(catName, dbname, name);
+        tbl = get_table_core(catName, dbname, name, null);
         if (tbl == null) {
           throw new NoSuchObjectException(name + " doesn't exist");
         }
@@ -2796,7 +2783,7 @@ private void deletePartitionData(List<Path> partPaths, boolean ifPurge, Database
       List<Path> partPaths = new ArrayList<>();
       while (true) {
         Map<String, String> partitionLocations = ms.getPartitionLocations(catName, dbName, tableName,
-            tableDnsPath, batchSize);
+            tableDnsPath, batchSize, null);
         if (partitionLocations == null || partitionLocations.isEmpty()) {
           // No more partitions left to drop. Return with the collected path list to delete.
           return partPaths;
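
The getPartitionLocations loop above drains partition locations in fixed-size batches until none remain. A self-contained toy model (not Hive code) of the same batch-draining shape:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/** Toy model of the batch-draining loop in drop_table's partition cleanup. */
public class BatchDrainSketch {
  // Stand-in for ms.getPartitionLocations(...): returns up to 'batch' entries
  // and removes them, so repeated calls eventually return an empty map.
  static Map<String, String> nextBatch(Map<String, String> all, int batch) {
    Map<String, String> out = new HashMap<>();
    for (String k : new ArrayList<>(all.keySet())) {
      if (out.size() == batch) break;
      out.put(k, all.remove(k));
    }
    return out;
  }

  public static void main(String[] args) {
    Map<String, String> locations = new HashMap<>();
    for (int i = 0; i < 5; i++) locations.put("p=" + i, "/warehouse/t/p=" + i);

    List<String> partPaths = new ArrayList<>();
    while (true) {
      Map<String, String> batch = nextBatch(locations, 2);
      if (batch.isEmpty()) break;      // no more partitions left to drop
      partPaths.addAll(batch.values());
    }
    System.out.println(partPaths.size()); // 5, collected two at a time
  }
}
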
@@ -2916,7 +2903,7 @@ private void alterTableStatsForTruncate(RawStore ms, String catName, String dbNa
         String validWriteIds, long writeId) throws Exception {
       if (partNames == null) {
         if (0 != table.getPartitionKeysSize()) {
-          for (Partition partition : ms.getPartitions(catName, dbName, tableName, Integer.MAX_VALUE)) {
+          for (Partition partition : ms.getPartitions(catName, dbName, tableName, Integer.MAX_VALUE, null)) {
             alterPartitionForTruncate(ms, catName, dbName, tableName, table, partition,
                 validWriteIds, writeId);
           }
@@ -2946,7 +2933,7 @@ private void alterTableStatsForTruncate(RawStore ms, String catName, String dbNa
               environmentContext, this, validWriteIds);
         }
       } else {
-        for (Partition partition : ms.getPartitionsByNames(catName, dbName, tableName, partNames)) {
+        for (Partition partition : ms.getPartitionsByNames(catName, dbName, tableName, partNames, null)) {
           alterPartitionForTruncate(ms, catName, dbName, tableName, table, partition,
               validWriteIds, writeId);
         }
@@ -2963,14 +2950,14 @@ private void alterTableStatsForTruncate(RawStore ms, String catName, String dbNa
       List<Path> locations = new ArrayList<>();
       if (partNames == null) {
         if (0 != table.getPartitionKeysSize()) {
-          for (Partition partition : ms.getPartitions(catName, dbName, tableName, Integer.MAX_VALUE)) {
+          for (Partition partition : ms.getPartitions(catName, dbName, tableName, Integer.MAX_VALUE, null)) {
             locations.add(new Path(partition.getSd().getLocation()));
           }
         } else {
           locations.add(new Path(table.getSd().getLocation()));
         }
       } else {
-        for (Partition partition : ms.getPartitionsByNames(catName, dbName, tableName, partNames)) {
+        for (Partition partition : ms.getPartitionsByNames(catName, dbName, tableName, partNames, null)) {
           locations.add(new Path(partition.getSd().getLocation()));
         }
       }
@@ -3002,7 +2989,7 @@ private void truncateTableInternal(String dbName, String tableName, List<String>
         String validWriteIds, long writeId) throws MetaException, NoSuchObjectException {
       try {
         String[] parsedDbName = parseDbName(dbName, conf);
-        Table tbl = get_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName);
+        Table tbl = get_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, null);
         boolean isAutopurge = (tbl.isSetParameters() && "true".equalsIgnoreCase(tbl.getParameters().get("auto.purge")));
         Database db = get_database_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME]);
@@ -3058,7 +3045,7 @@ private boolean isExternalTablePurge(Table table) {
     @Override
     @Deprecated
-    public Table get_table(final String dbname, final String name) throws MetaException,
+    public Table get_table(final String dbname, final String name, String validWriteIdList) throws MetaException,
         NoSuchObjectException {
       String[] parsedDbName = parseDbName(dbname, conf);
       return getTableInternal(
@@ -3212,15 +3199,6 @@ private Table getTableInternal(String catName, String dbname, String name,
       return t;
     }
-    @Override
-    public Table get_table_core(
-        final String catName,
-        final String dbname,
-        final String name)
-        throws MetaException, NoSuchObjectException {
-      return get_table_core(catName, dbname, name, null);
-    }
-
     @Override
     public Table get_table_core(
         final String catName,
@@ -3457,7 +3435,7 @@ private Partition append_partition_common(RawStore ms, String catName, String db
       Partition old_part;
       try {
         old_part = ms.getPartition(part.getCatName(), part.getDbName(), part
-            .getTableName(), part.getValues());
+            .getTableName(), part.getValues(), null);
       } catch (NoSuchObjectException e) {
         // this means there is no existing partition
        old_part = null;
@@ -4076,7 +4054,7 @@ private boolean startAddPartition(
       MetaStoreServerUtils.validatePartitionNameCharacters(part.getValues(),
           partitionValidationPattern);
       boolean doesExist = ms.doesPartitionExist(part.getCatName(),
-          part.getDbName(), part.getTableName(), partitionKeys, part.getValues());
+          part.getDbName(), part.getTableName(), partitionKeys, part.getValues(), null);
       if (doesExist && !ifNotExists) {
         throw new AlreadyExistsException("Partition already exists: " + part);
       }
@@ -4329,6 +4307,7 @@ public Partition exchange_partition(Map<String, String> partitionSpecs,
       boolean success = false;
       boolean pathCreated = false;
       RawStore ms = getMS();
+      ms.openTransaction();
       Table destinationTable =
@@ -4363,7 +4342,7 @@ public Partition exchange_partition(Map<String, String> partitionSpecs,
       }
       // Passed the unparsed DB name here, as get_partitions_ps expects to parse it
       List<Partition> partitionsToExchange = get_partitions_ps(sourceDbName, sourceTableName,
-          partVals, (short)-1);
+          partVals, (short)-1, null);
       if (partitionsToExchange == null || partitionsToExchange.isEmpty()) {
         throw new MetaException("No partition is found with the values " + partitionSpecs
             + " for the table " + sourceTableName);
@@ -4388,7 +4367,7 @@ public Partition exchange_partition(Map<String, String> partitionSpecs,
       // Check if any of the partitions already exists in destTable.
       List<String> destPartitionNames = ms.listPartitionNames(parsedDestDbName[CAT_NAME],
-          parsedDestDbName[DB_NAME], destTableName, (short) -1);
+          parsedDestDbName[DB_NAME], destTableName, (short) -1, null);
       if (destPartitionNames != null && !destPartitionNames.isEmpty()) {
         for (Partition partition : partitionsToExchange) {
           String partToExchangeName =
@@ -4517,7 +4496,7 @@ private boolean drop_partition_common(RawStore ms, String catName, String db_nam
       try {
         ms.openTransaction();
-        part = ms.getPartition(catName, db_name, tbl_name, part_vals);
+        part = ms.getPartition(catName, db_name, tbl_name, part_vals, null);
         tbl = get_table_core(catName, db_name, tbl_name, null);
         tableDataShouldBeDeleted = checkTableDataShouldBeDeleted(tbl, deleteData);
         firePreEvent(new PreDropPartitionEvent(tbl, part, deleteData, this));
@@ -4644,6 +4623,7 @@ public DropPartitionsResult drop_partitions_req(
           ? request.getEnvironmentContext() : null;
       boolean success = false;
+      ms.openTransaction();
       Table tbl = null;
       List<Partition> parts = null;
@@ -4654,7 +4634,7 @@ public DropPartitionsResult drop_partitions_req(
       try {
         // We need Partition-s for firing events and for result; DN needs MPartition-s to drop.
         // Great... Maybe we could bypass fetching MPartitions by issuing direct SQL deletes.
-        tbl = get_table_core(catName, dbName, tblName);
+        tbl = get_table_core(catName, dbName, tblName, null);
         isExternal(tbl);
         mustPurge = isMustPurge(envContext, tbl);
         int minCount = 0;
@@ -4667,7 +4647,7 @@ public DropPartitionsResult drop_partitions_req(
             ++minCount; // At least one partition per expression, if not ifExists
             List<Partition> result = new ArrayList<>();
             boolean hasUnknown = ms.getPartitionsByExpr(
-                catName, dbName, tblName, expr.getExpr(), null, (short)-1, result);
+                catName, dbName, tblName, expr.getExpr(), null, (short)-1, result, null);
             if (hasUnknown) {
               // Expr is built by DDLSA, it should only contain part cols and simple ops
               throw new MetaException("Unexpected unknown partitions to drop");
@@ -4688,7 +4668,7 @@ public DropPartitionsResult drop_partitions_req(
         } else if (spec.isSetNames()) {
           partNames = spec.getNames();
           minCount = partNames.size();
-          parts = ms.getPartitionsByNames(catName, dbName, tblName, partNames);
+          parts = ms.getPartitionsByNames(catName, dbName, tblName, partNames, null);
         } else {
           throw new MetaException("Partition spec is not set");
         }
@@ -4741,6 +4721,7 @@ public DropPartitionsResult drop_partitions_req(
         }
         success = ms.commitTransaction();
+
         DropPartitionsResult result = new DropPartitionsResult();
         if (needResult) {
           result.setPartitions(parts);
@@ -4832,7 +4813,7 @@ public boolean drop_partition_with_environment_context(final String db_name,
     @Override
     public Partition get_partition(final String db_name, final String tbl_name,
-        final List<String> part_vals) throws MetaException, NoSuchObjectException {
+        final List<String> part_vals, String validWriteIdList) throws MetaException, NoSuchObjectException {
       String[] parsedDbName = parseDbName(db_name, conf);
       startPartitionFunction("get_partition", parsedDbName[CAT_NAME], parsedDbName[DB_NAME],
           tbl_name, part_vals);
@@ -4841,8 +4822,8 @@ public Partition get_partition(final String db_name, final String tbl_name,
       Exception ex = null;
       try {
         authorizeTableForPartitionMetadata(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name);
-        fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name);
-        ret = getMS().getPartition(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, part_vals);
+        fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, validWriteIdList);
+        ret = getMS().getPartition(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, part_vals, validWriteIdList);
         ret = FilterUtils.filterPartitionIfEnabled(isServerFilterEnabled, filterHook, ret);
       } catch (Exception e) {
         ex = e;
@@ -4857,12 +4838,12 @@ public Partition get_partition(final String db_name, final String tbl_name,
      * Fire a pre-event for read table operation, if there are any
      * pre-event listeners registered
      */
-    private void fireReadTablePreEvent(String catName, String dbName, String tblName)
+    private void fireReadTablePreEvent(String catName, String dbName, String tblName, String validWriteIdList)
         throws MetaException, NoSuchObjectException {
       if(preListeners.size() > 0) {
         // do this only if there is a pre event listener registered (avoid unnecessary
         // metastore api call)
-        Table t = getMS().getTable(catName, dbName, tblName);
+        Table t = getMS().getTable(catName, dbName, tblName, validWriteIdList);
         if (t == null) {
           throw new NoSuchObjectException(TableName.getQualified(catName, dbName, tblName)
               + " table not found");
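
fireReadTablePreEvent, shown above, only fetches the table when at least one pre-event listener is registered, so the common no-listener case pays nothing extra. A self-contained toy model (not Hive code) of that guard:

import java.util.ArrayList;
import java.util.List;

/** Toy model of fireReadTablePreEvent's guard: skip the extra metadata
 *  lookup entirely when no pre-event listeners are registered. */
public class PreEventGuardSketch {
  interface PreListener { void onReadTable(String table); }

  private final List<PreListener> preListeners = new ArrayList<>();
  private int lookups = 0;

  void fireReadTablePreEvent(String table) {
    if (preListeners.size() > 0) {     // only pay for the lookup if needed
      lookups++;                       // stands in for getMS().getTable(...)
      for (PreListener l : preListeners) l.onReadTable(table);
    }
  }

  public static void main(String[] args) {
    PreEventGuardSketch h = new PreEventGuardSketch();
    h.fireReadTablePreEvent("db.tbl");
    System.out.println(h.lookups);     // 0: no listeners, no table fetch
    h.preListeners.add(t -> {});
    h.fireReadTablePreEvent("db.tbl");
    System.out.println(h.lookups);     // 1: listener present, table fetched
  }
}
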
@@ -4874,19 +4855,19 @@ private void fireReadTablePreEvent(String catName, String dbName, String tblName
     @Override
     public Partition get_partition_with_auth(final String db_name,
         final String tbl_name, final List<String> part_vals,
-        final String user_name, final List<String> group_names)
+        final String user_name, final List<String> group_names, String validWriteIdList)
         throws TException {
       String[] parsedDbName = parseDbName(db_name, conf);
       startPartitionFunction("get_partition_with_auth", parsedDbName[CAT_NAME],
           parsedDbName[DB_NAME], tbl_name, part_vals);
-      fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name);
+      fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, validWriteIdList);
       Partition ret = null;
       Exception ex = null;
       try {
         authorizeTableForPartitionMetadata(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name);
         ret = getMS().getPartitionWithAuth(parsedDbName[CAT_NAME], parsedDbName[DB_NAME],
-            tbl_name, part_vals, user_name, group_names);
+            tbl_name, part_vals, user_name, group_names, validWriteIdList);
         ret = FilterUtils.filterPartitionIfEnabled(isServerFilterEnabled, filterHook, ret);
       } catch (InvalidObjectException e) {
         ex = e;
@@ -4902,20 +4883,20 @@ public Partition get_partition_with_auth(final String db_name,
     @Override
     public List<Partition> get_partitions(final String db_name, final String tbl_name,
-        final short max_parts) throws NoSuchObjectException, MetaException {
+        final short max_parts, String validWriteIdList) throws NoSuchObjectException, MetaException {
       String[] parsedDbName = parseDbName(db_name, conf);
       startTableFunction("get_partitions", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name);
-      fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name);
+      fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, validWriteIdList);
       List<Partition> ret = null;
       Exception ex = null;
       try {
         checkLimitNumberOfPartitionsByFilter(parsedDbName[CAT_NAME], parsedDbName[DB_NAME],
-            tbl_name, NO_FILTER_STRING, max_parts);
+            tbl_name, NO_FILTER_STRING, max_parts, validWriteIdList);
         authorizeTableForPartitionMetadata(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name);
         ret = getMS().getPartitions(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name,
-            max_parts);
+            max_parts, validWriteIdList);
         ret = FilterUtils.filterPartitionsIfEnabled(isServerFilterEnabled, filterHook, ret);
       } catch (Exception e) {
         ex = e;
@@ -4930,7 +4911,7 @@ public Partition get_partition_with_auth(final String db_name,
     @Override
     public List<Partition> get_partitions_with_auth(final String dbName,
         final String tblName, final short maxParts, final String userName,
-        final List<String> groupNames) throws TException {
+        final List<String> groupNames, String validWriteIdList) throws TException {
       String[] parsedDbName = parseDbName(dbName, conf);
       startTableFunction("get_partitions_with_auth", parsedDbName[CAT_NAME],
           parsedDbName[DB_NAME], tblName);
@@ -4938,12 +4919,12 @@ public Partition get_partition_with_auth(final String db_name,
       Exception ex = null;
       try {
         checkLimitNumberOfPartitionsByFilter(parsedDbName[CAT_NAME], parsedDbName[DB_NAME],
-            tblName, NO_FILTER_STRING, maxParts);
+            tblName, NO_FILTER_STRING, maxParts, validWriteIdList);
         authorizeTableForPartitionMetadata(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tblName);
         ret = getMS().getPartitionsWithAuth(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tblName,
-            maxParts, userName, groupNames);
+            maxParts, userName, groupNames, validWriteIdList);
         ret = FilterUtils.filterPartitionsIfEnabled(isServerFilterEnabled, filterHook, ret);
       } catch (InvalidObjectException e) {
         ex = e;
@@ -4960,19 +4941,19 @@ public Partition get_partition_with_auth(final String db_name,
     private void checkLimitNumberOfPartitionsByFilter(String catName, String dbName,
         String tblName, String filterString,
-        int maxParts) throws TException {
+        int maxParts, String validWriteIdList) throws TException {
       if (isPartitionLimitEnabled()) {
         checkLimitNumberOfPartitions(tblName, get_num_partitions_by_filter(prependCatalogToDbName(
-            catName, dbName, conf), tblName, filterString), maxParts);
+            catName, dbName, conf), tblName, filterString, validWriteIdList), maxParts);
       }
     }

     private void checkLimitNumberOfPartitionsByExpr(String catName, String dbName, String tblName,
-        byte[] filterExpr, int maxParts)
+        byte[] filterExpr, int maxParts, String validWriteIdList)
         throws TException {
       if (isPartitionLimitEnabled()) {
         checkLimitNumberOfPartitions(tblName, get_num_partitions_by_expr(catName, dbName, tblName,
-            filterExpr), maxParts);
+            filterExpr, validWriteIdList), maxParts);
       }
     }
@@ -4994,7 +4975,7 @@ private void checkLimitNumberOfPartitions(String tblName, int numPartitions, int
     }

     @Override
-    public List<PartitionSpec> get_partitions_pspec(final String db_name, final String tbl_name, final int max_parts)
+    public List<PartitionSpec> get_partitions_pspec(final String db_name, final String tbl_name, final int max_parts, String validWriteIdList)
         throws NoSuchObjectException, MetaException  {
       String[] parsedDbName = parseDbName(db_name, conf);
@@ -5004,9 +4985,9 @@ private void checkLimitNumberOfPartitions(String tblName, int numPartitions, int
       List<PartitionSpec> partitionSpecs = null;
       try {
-        Table table = get_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName);
+        Table table = get_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, validWriteIdList);
         // get_partitions will parse out the catalog and db names itself
-        List<Partition> partitions = get_partitions(db_name, tableName, (short) max_parts);
+        List<Partition> partitions = get_partitions(db_name, tableName, (short) max_parts, validWriteIdList);
         if (is_partition_spec_grouping_enabled(table)) {
           partitionSpecs = MetaStoreServerUtils
@@ -5047,10 +5028,10 @@ public GetPartitionsResponse get_partitions_with_specs(GetPartitionsRequest requ
       GetPartitionsResponse response = null;
       Exception ex = null;
       try {
-        Table table = get_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName);
+        Table table = get_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, request.getValidWriteIdList());
         List<Partition> partitions = getMS()
             .getPartitionSpecsByFilterAndProjection(table, request.getProjectionSpec(),
-                request.getFilterSpec());
+                request.getFilterSpec(), request.getValidWriteIdList());
         List<String> processorCapabilities = request.getProcessorCapabilities();
         String processorId = request.getProcessorIdentifier();
         if (processorCapabilities == null || processorCapabilities.size() == 0 ||
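
checkLimitNumberOfPartitionsByFilter/ByExpr, shown above, now thread the write-id list into the partition count used for the limit check; the check itself just compares a count against a configured cap. A self-contained toy model of that cap check (the real limit comes from metastore configuration; the message text here is illustrative):

import org.apache.hadoop.hive.metastore.api.MetaException;

/** Toy model of checkLimitNumberOfPartitions: fail fast when a request
 *  would touch more partitions than the configured cap allows. */
public class PartitionLimitSketch {
  static void checkLimit(String tbl, int numPartitions, int limit)
      throws MetaException {
    if (limit >= 0 && numPartitions > limit) {
      throw new MetaException("Number of partitions scanned (" + numPartitions
          + ") on table '" + tbl + "' exceeds limit (" + limit + ")");
    }
  }

  public static void main(String[] args) throws MetaException {
    checkLimit("t", 100, 1000);   // fine, under the cap
    try {
      checkLimit("t", 5000, 1000);
    } catch (MetaException e) {
      System.out.println(e.getMessage());
    }
  }
}
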
@@ -5083,16 +5064,16 @@ private static boolean is_partition_spec_grouping_enabled(Table table) {
     @Override
     public List<String> get_partition_names(final String db_name, final String tbl_name,
-        final short max_parts) throws NoSuchObjectException, MetaException {
+        final short max_parts, String validWriteIdList) throws NoSuchObjectException, MetaException {
       String[] parsedDbName = parseDbName(db_name, conf);
       startTableFunction("get_partition_names", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name);
-      fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name);
+      fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, validWriteIdList);
       List<String> ret = null;
       Exception ex = null;
       try {
         authorizeTableForPartitionMetadata(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name);
         ret = getMS().listPartitionNames(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name,
-            max_parts);
+            max_parts, validWriteIdList);
         ret = FilterUtils.filterPartitionNamesIfEnabled(isServerFilterEnabled, filterHook,
             parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, ret);
       } catch (MetaException e) {
@@ -5123,7 +5104,7 @@ public PartitionValuesResponse get_partition_values(PartitionValuesRequest reque
         partCols.add(request.getPartitionKeys().get(0));
         return getMS().listPartitionValues(catName, dbName, tblName, request.getPartitionKeys(),
             request.isApplyDistinct(), request.getFilter(), request.isAscending(),
-            request.getPartitionOrder(), request.getMaxParts());
+            request.getPartitionOrder(), request.getMaxParts(), request.getValidWriteIdList());
       } catch (NoSuchObjectException e) {
         LOG.error(String.format("Unable to get partition for %s.%s.%s", catName, dbName, tblName), e);
         throw new MetaException(e.getMessage());
@@ -5422,7 +5403,7 @@ private void alter_table_core(String catName, String dbname, String name, Table
       boolean success = false;
       Exception ex = null;
       try {
-        Table oldt = get_table_core(catName, dbname, name);
+        Table oldt = get_table_core(catName, dbname, name, null);
         firePreEvent(new PreAlterTableEvent(oldt, newTable, this));
         alterHandler.alterTable(getMS(), wh, catName, dbname, name, newTable,
             envContext, this, validWriteIdList);
@@ -5578,14 +5559,14 @@ private void alter_table_core(String catName, String dbname, String name, Table
     }

     @Override
-    public List<FieldSchema> get_fields(String db, String tableName)
+    public List<FieldSchema> get_fields(String db, String tableName, String validWriteIdList)
         throws MetaException, UnknownTableException, UnknownDBException {
-      return get_fields_with_environment_context(db, tableName, null);
+      return get_fields_with_environment_context(db, tableName, null, validWriteIdList);
     }

     @Override
     public List<FieldSchema> get_fields_with_environment_context(String db, String tableName,
-        final EnvironmentContext envContext)
+        final EnvironmentContext envContext, String validWriteIdList)
         throws MetaException, UnknownTableException, UnknownDBException {
       startFunction("get_fields_with_environment_context", ": db=" + db + "tbl=" + tableName);
       String[] names = tableName.split("\\.");
@@ -5598,7 +5579,7 @@ private void alter_table_core(String catName, String dbname, String name, Table
       ClassLoader orgHiveLoader = null;
       try {
         try {
-          tbl = get_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], base_table_name);
+          tbl = get_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], base_table_name, validWriteIdList);
           firePreEvent(new PreReadTableEvent(tbl, this));
         } catch (NoSuchObjectException e) {
           throw new UnknownTableException(e.getMessage());
@@ -5658,9 +5639,9 @@ private StorageSchemaReader getStorageSchemaReader() throws MetaException {
      * @throws UnknownDBException
      */
     @Override
-    public List<FieldSchema> get_schema(String db, String tableName)
+    public List<FieldSchema> get_schema(String db, String tableName, String validWriteIdList)
         throws MetaException, UnknownTableException, UnknownDBException {
-      return get_schema_with_environment_context(db,tableName, null);
+      return get_schema_with_environment_context(db,tableName, null, validWriteIdList);
     }
@@ -5681,7 +5662,7 @@ private StorageSchemaReader getStorageSchemaReader() throws MetaException {
      */
     @Override
     public List<FieldSchema> get_schema_with_environment_context(String db, String tableName,
-        final EnvironmentContext envContext)
+        final EnvironmentContext envContext, String validWriteIdList)
         throws MetaException, UnknownTableException, UnknownDBException {
       startFunction("get_schema_with_environment_context", ": db=" + db + "tbl=" + tableName);
       boolean success = false;
@@ -5693,12 +5674,12 @@ private StorageSchemaReader getStorageSchemaReader() throws MetaException {
       Table tbl;
       try {
-        tbl = get_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], base_table_name);
+        tbl = get_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], base_table_name, validWriteIdList);
       } catch (NoSuchObjectException e) {
         throw new UnknownTableException(e.getMessage());
       }
       // Pass unparsed db name here
-      List<FieldSchema> fieldSchemas = get_fields_with_environment_context(db, base_table_name,envContext);
+      List<FieldSchema> fieldSchemas = get_fields_with_environment_context(db, base_table_name,envContext, validWriteIdList);
       if (tbl == null || fieldSchemas == null) {
         throw new UnknownTableException(tableName + " doesn't exist");
@@ -5809,15 +5790,15 @@ public String get_config_value(String name, String defaultValue)
     private Partition get_partition_by_name_core(final RawStore ms, final String catName,
         final String db_name, final String tbl_name,
-        final String part_name) throws TException {
-      fireReadTablePreEvent(catName, db_name, tbl_name);
+        final String part_name, String validWriteIdList) throws TException {
+      fireReadTablePreEvent(catName, db_name, tbl_name, validWriteIdList);
       List<String> partVals;
       try {
         partVals = getPartValsFromName(ms, catName, db_name, tbl_name, part_name);
       } catch (InvalidObjectException e) {
         throw new NoSuchObjectException(e.getMessage());
       }
-      Partition p = ms.getPartition(catName, db_name, tbl_name, partVals);
+      Partition p = ms.getPartition(catName, db_name, tbl_name, partVals, validWriteIdList);
       p = FilterUtils.filterPartitionIfEnabled(isServerFilterEnabled, filterHook, p);

       if (p == null) {
@@ -5829,7 +5810,7 @@ private Partition get_partition_by_name_core(final RawStore ms, final String cat
     @Override
     public Partition get_partition_by_name(final String db_name, final String tbl_name,
-        final String part_name) throws TException {
+        final String part_name, String validWriteIdList) throws TException {
       String[] parsedDbName = parseDbName(db_name, conf);
       startFunction("get_partition_by_name", ": tbl=" +
@@ -5839,7 +5820,7 @@ public Partition get_partition_by_name(final String db_name, final String tbl_na
       Exception ex = null;
       try {
         ret = get_partition_by_name_core(getMS(), parsedDbName[CAT_NAME],
-            parsedDbName[DB_NAME], tbl_name, part_name);
+            parsedDbName[DB_NAME], tbl_name, part_name, validWriteIdList);
         ret = FilterUtils.filterPartitionIfEnabled(isServerFilterEnabled, filterHook, ret);
       } catch (Exception e) {
         ex = e;
@@ -5936,7 +5917,7 @@ public boolean drop_partition_by_name_with_environment_context(final String db_n
     @Override
     public List<Partition> get_partitions_ps(final String db_name,
         final String tbl_name, final List<String> part_vals,
-        final short max_parts) throws TException {
+        final short max_parts, String validWriteIdList) throws TException {
       String[] parsedDbName = parseDbName(db_name, conf);
       startPartitionFunction("get_partitions_ps", parsedDbName[CAT_NAME], parsedDbName[DB_NAME],
           tbl_name, part_vals);
@@ -5947,7 +5928,7 @@ public boolean drop_partition_by_name_with_environment_context(final String db_n
         authorizeTableForPartitionMetadata(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name);
         // Don't send the parsedDbName, as this method will parse itself.
         ret = get_partitions_ps_with_auth(db_name, tbl_name, part_vals,
-            max_parts, null, null);
+            max_parts, null, null, validWriteIdList);
         ret = FilterUtils.filterPartitionsIfEnabled(isServerFilterEnabled, filterHook, ret);
       } catch (Exception e) {
         ex = e;
@@ -5963,17 +5944,17 @@ public boolean drop_partition_by_name_with_environment_context(final String db_n
     public List<Partition> get_partitions_ps_with_auth(final String db_name,
         final String tbl_name, final List<String> part_vals,
         final short max_parts, final String userName,
-        final List<String> groupNames) throws TException {
+        final List<String> groupNames, String validWriteIdList) throws TException {
       String[] parsedDbName = parseDbName(db_name, conf);
       startPartitionFunction("get_partitions_ps_with_auth", parsedDbName[CAT_NAME],
           parsedDbName[DB_NAME], tbl_name, part_vals);
-      fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name);
+      fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, validWriteIdList);
       List<Partition> ret = null;
       Exception ex = null;
       try {
         authorizeTableForPartitionMetadata(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name);
         ret = getMS().listPartitionsPsWithAuth(parsedDbName[CAT_NAME], parsedDbName[DB_NAME],
-            tbl_name, part_vals, max_parts, userName, groupNames);
+            tbl_name, part_vals, max_parts, userName, groupNames, validWriteIdList);
         ret = FilterUtils.filterPartitionsIfEnabled(isServerFilterEnabled, filterHook, ret);
       } catch (InvalidObjectException e) {
         ex = e;
@@ -5989,18 +5970,18 @@ public boolean drop_partition_by_name_with_environment_context(final String db_n
     @Override
     public List<String> get_partition_names_ps(final String db_name,
-        final String tbl_name, final List<String> part_vals, final short max_parts)
+        final String tbl_name, final List<String> part_vals, final short max_parts, String validWriteIdList)
         throws TException {
       String[] parsedDbName = parseDbName(db_name, conf);
       startPartitionFunction("get_partitions_names_ps", parsedDbName[CAT_NAME],
           parsedDbName[DB_NAME], tbl_name, part_vals);
-      fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name);
+      fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, validWriteIdList);
       List<String> ret = null;
       Exception ex = null;
       try {
         authorizeTableForPartitionMetadata(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name);
         ret = getMS().listPartitionNamesPs(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name,
-            part_vals, max_parts);
+            part_vals, max_parts, validWriteIdList);
         ret = FilterUtils.filterPartitionNamesIfEnabled(isServerFilterEnabled, filterHook,
             parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, ret);
       } catch (Exception e) {
@@ -6054,7 +6035,7 @@ private String lowerCaseConvertPartName(String partName) throws MetaException {
     @Deprecated
     @Override
     public ColumnStatistics get_table_column_statistics(String dbName, String tableName,
-        String colName) throws TException {
+        String colName, String validWriteIdList) throws TException {
       String[] parsedDbName = parseDbName(dbName, conf);
       parsedDbName[CAT_NAME] = parsedDbName[CAT_NAME].toLowerCase();
       parsedDbName[DB_NAME] = parsedDbName[DB_NAME].toLowerCase();
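
Both positional stats getters here are @Deprecated; the request-based get_table_statistics_req is the forward path, and it carries the snapshot as a request field rather than a trailing argument. A sketch of building such a request, assuming TableStatsRequest exposes a settable validWriteIdList field as in this line of work:

import java.util.Arrays;
import org.apache.hadoop.hive.metastore.api.TableStatsRequest;

public class StatsRequestSketch {
  public static void main(String[] args) {
    // Request column stats for two columns under a specific write-id snapshot
    // (the snapshot string is illustrative; see the write-id example earlier).
    TableStatsRequest req = new TableStatsRequest();
    req.setDbName("db1");
    req.setTblName("tbl1");
    req.setColNames(Arrays.asList("a", "b"));
    req.setValidWriteIdList("db1.tbl1:10:7:7:");
    System.out.println(req);
  }
}
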
@@ -6111,7 +6092,7 @@ public TableStatsResult get_table_statistics_req(TableStatsRequest request) thro
     @Deprecated
     @Override
     public ColumnStatistics get_partition_column_statistics(String dbName, String tableName,
-        String partName, String colName) throws TException {
+        String partName, String colName, String validWriteIdList) throws TException {
       // Note: this method appears to be unused within Hive.
       //       It doesn't take txn stats into account.
       dbName = dbName.toLowerCase();
@@ -6127,7 +6108,7 @@ public ColumnStatistics get_partition_column_statistics(String dbName, String ta
       try {
         List<ColumnStatistics> list = getMS().getPartitionColumnStatistics(
             parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName,
-            Lists.newArrayList(convertedPartName), Lists.newArrayList(colName));
+            Lists.newArrayList(convertedPartName), Lists.newArrayList(colName), validWriteIdList);
         if (list.isEmpty()) {
           return null;
         }
@@ -6215,6 +6196,7 @@ private boolean updateTableColumnStatsInternal(ColumnStatistics colStats,
           colStats.getStatsDesc().getTableName()));
       Map<String, String> parameters = null;
+      getMS().openTransaction();
       boolean committed = false;
       try {
@@ -6227,13 +6209,13 @@ private boolean updateTableColumnStatsInternal(ColumnStatistics colStats,
           MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
               EventType.UPDATE_TABLE_COLUMN_STAT,
               new UpdateTableColumnStatEvent(colStats, tableObj, parameters,
-                  writeId, this));
+                  writeId, validWriteIds, this));
         }
         if (!listeners.isEmpty()) {
           MetaStoreListenerNotifier.notifyEvent(listeners,
               EventType.UPDATE_TABLE_COLUMN_STAT,
               new UpdateTableColumnStatEvent(colStats, tableObj, parameters,
-                  writeId,this));
+                  writeId, validWriteIds, this));
         }
       }
       committed = getMS().commitTransaction();
@@ -6281,6 +6263,7 @@ private boolean updatePartitonColStatsInternal(Table tbl, ColumnStatistics colSt
       Map<String, String> parameters;
       List<String> partVals;
       boolean committed = false;
+      getMS().openTransaction();
       try {
         if (tbl == null) {
@@ -6293,13 +6276,13 @@ private boolean updatePartitonColStatsInternal(Table tbl, ColumnStatistics colSt
           MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
               EventType.UPDATE_PARTITION_COLUMN_STAT,
               new UpdatePartitionColumnStatEvent(colStats, partVals, parameters, tbl,
-                  writeId, this));
+                  writeId, validWriteIds, this));
         }
         if (!listeners.isEmpty()) {
           MetaStoreListenerNotifier.notifyEvent(listeners,
               EventType.UPDATE_PARTITION_COLUMN_STAT,
               new UpdatePartitionColumnStatEvent(colStats, partVals, parameters, tbl,
-                  writeId, this));
+                  writeId, validWriteIds, this));
         }
       }
       committed = getMS().commitTransaction();
@@ -6355,7 +6338,7 @@ public boolean delete_partition_column_statistics(String dbName, String tableNam
       getMS().openTransaction();
       try {
         List<String> partVals = getPartValsFromName(getMS(), parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, convertedPartName);
-        Table table = getMS().getTable(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName);
+        Table table = getMS().getTable(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, null);
        // This API looks unused; if it were used we'd need to update stats state and write ID.
        // We cannot just randomly nuke some txn stats.
        if (TxnUtils.isTransactionalTable(table)) {
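
The getMS().openTransaction() added to both column-stats update paths above means the stats write and the notification delivered to transactional listeners commit or roll back together. A self-contained toy model (not Hive code) of that atomicity:

import java.util.ArrayList;
import java.util.List;

/** Toy model (not Hive code) of why the patch opens the transaction before
 *  notifying transactional listeners: the stats update and the notification
 *  event either both commit or both disappear. */
public class TxnListenerOrderSketch {
  private final List<String> committed = new ArrayList<>();
  private List<String> pending;

  void openTransaction()      { pending = new ArrayList<>(); }
  void write(String op)       { pending.add(op); }
  void commitTransaction()    { committed.addAll(pending); pending = null; }
  void rollbackTransaction()  { pending = null; }

  public static void main(String[] args) {
    TxnListenerOrderSketch ms = new TxnListenerOrderSketch();
    ms.openTransaction();
    ms.write("update table column stats");       // the stats write
    ms.write("append UPDATE_TABLE_COLUMN_STAT"); // transactional listener event
    ms.commitTransaction();                      // both become visible together
    System.out.println(ms.committed);

    ms.openTransaction();
    ms.write("update partition column stats");
    ms.rollbackTransaction();                    // the event is discarded too
    System.out.println(ms.committed);            // unchanged
  }
}
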
@@ -6405,9 +6388,10 @@ public boolean delete_table_column_statistics(String dbName, String tableName, S
       boolean ret = false, committed = false;
+      getMS().openTransaction();
       try {
-        Table table = getMS().getTable(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName);
+        Table table = getMS().getTable(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, null);
         // This API looks unused; if it were used we'd need to update stats state and write ID.
         // We cannot just randomly nuke some txn stats.
         if (TxnUtils.isTransactionalTable(table)) {
@@ -6441,22 +6425,22 @@ public boolean delete_table_column_statistics(String dbName, String tableName, S
     @Override
     public List<Partition> get_partitions_by_filter(final String dbName, final String tblName,
-        final String filter, final short maxParts)
+        final String filter, final short maxParts, String validWriteIdList)
         throws TException {
       String[] parsedDbName = parseDbName(dbName, conf);
       startTableFunction("get_partitions_by_filter", parsedDbName[CAT_NAME], parsedDbName[DB_NAME],
           tblName);
-      fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tblName);
+      fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tblName, validWriteIdList);
       List<Partition> ret = null;
       Exception ex = null;
       try {
         checkLimitNumberOfPartitionsByFilter(parsedDbName[CAT_NAME], parsedDbName[DB_NAME],
-            tblName, filter, maxParts);
+            tblName, filter, maxParts, validWriteIdList);
         authorizeTableForPartitionMetadata(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tblName);
         ret = getMS().getPartitionsByFilter(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tblName,
-            filter, maxParts);
+            filter, maxParts, validWriteIdList);
         ret = FilterUtils.filterPartitionsIfEnabled(isServerFilterEnabled, filterHook, ret);
       } catch (Exception e) {
         ex = e;
@@ -6469,7 +6453,7 @@ public boolean delete_table_column_statistics(String dbName, String tableName, S
     @Override
     public List<PartitionSpec> get_part_specs_by_filter(final String dbName, final String tblName,
-        final String filter, final int maxParts)
+        final String filter, final int maxParts, String validWriteIdList)
         throws TException {
       String[] parsedDbName = parseDbName(dbName, conf);
@@ -6477,9 +6461,9 @@ public boolean delete_table_column_statistics(String dbName, String tableName, S
       List<PartitionSpec> partitionSpecs = null;
       try {
-        Table table = get_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tblName);
+        Table table = get_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tblName, validWriteIdList);
         // Don't pass the parsed db name, as get_partitions_by_filter will parse it itself
-        List<Partition> partitions = get_partitions_by_filter(dbName, tblName, filter, (short) maxParts);
+        List<Partition> partitions = get_partitions_by_filter(dbName, tblName, filter, (short) maxParts, validWriteIdList);
         if (is_partition_spec_grouping_enabled(table)) {
           partitionSpecs = MetaStoreServerUtils
@@ -6508,14 +6492,14 @@ public PartitionsByExprResult get_partitions_by_expr(
       String dbName = req.getDbName(), tblName = req.getTblName();
       String catName = req.isSetCatName() ? req.getCatName() : getDefaultCatalog(conf);
       startTableFunction("get_partitions_by_expr", catName, dbName, tblName);
-      fireReadTablePreEvent(catName, dbName, tblName);
+      fireReadTablePreEvent(catName, dbName, tblName, req.getValidWriteIdList());
       PartitionsByExprResult ret = null;
       Exception ex = null;
       try {
-        checkLimitNumberOfPartitionsByExpr(catName, dbName, tblName, req.getExpr(), UNLIMITED_MAX_PARTITIONS);
+        checkLimitNumberOfPartitionsByExpr(catName, dbName, tblName, req.getExpr(), UNLIMITED_MAX_PARTITIONS, req.getValidWriteIdList());
         List<Partition> partitions = new LinkedList<>();
         boolean hasUnknownPartitions = getMS().getPartitionsByExpr(catName, dbName, tblName,
-            req.getExpr(), req.getDefaultPartitionName(), req.getMaxParts(), partitions);
+            req.getExpr(), req.getDefaultPartitionName(), req.getMaxParts(), partitions, req.getValidWriteIdList());
         ret = new PartitionsByExprResult(partitions, hasUnknownPartitions);
       } catch (Exception e) {
         ex = e;
@@ -6541,7 +6525,7 @@ private void rethrowException(Exception e) throws TException {
     @Override
     public int get_num_partitions_by_filter(final String dbName,
-        final String tblName, final String filter)
+        final String tblName, final String filter, String validWriteIdList)
         throws TException {
       String[] parsedDbName = parseDbName(dbName, conf);
       if (parsedDbName[DB_NAME] == null || tblName == null) {
@@ -6554,7 +6538,7 @@ public int get_num_partitions_by_filter(final String dbName,
       Exception ex = null;
       try {
         ret = getMS().getNumPartitionsByFilter(parsedDbName[CAT_NAME], parsedDbName[DB_NAME],
-            tblName, filter);
+            tblName, filter, validWriteIdList);
       } catch (Exception e) {
         ex = e;
         rethrowException(e);
@@ -6565,12 +6549,12 @@ public int get_num_partitions_by_filter(final String dbName,
     }

     private int get_num_partitions_by_expr(final String catName, final String dbName,
-        final String tblName, final byte[] expr)
+        final String tblName, final byte[] expr, String validWriteIdList)
         throws TException {
       int ret = -1;
       Exception ex = null;
       try {
-        ret = getMS().getNumPartitionsByExpr(catName, dbName, tblName, expr);
+        ret = getMS().getNumPartitionsByExpr(catName, dbName, tblName, expr, validWriteIdList);
       } catch (Exception e) {
         ex = e;
         rethrowException(e);
@@ -6582,8 +6566,8 @@ private int get_num_partitions_by_expr(final String catName, final String dbName
     @Override
     public List<Partition> get_partitions_by_names(final String dbName, final String tblName,
-        final List<String> partNames) throws TException {
-      return get_partitions_by_names(dbName, tblName, partNames, false);
+        final List<String> partNames, String validWriteIdList) throws TException {
+      return get_partitions_by_names(dbName, tblName, partNames, false, validWriteIdList);
     }

     @Override
@@ -6592,18 +6576,18 @@ public GetPartitionsByNamesResult get_partitions_by_names_req(GetPartitionsByNam
       List<Partition> partitions = get_partitions_by_names(gpbnr.getDb_name(),
           gpbnr.getTbl_name(), gpbnr.getNames(),
           gpbnr.isSetGet_col_stats() && gpbnr.isGet_col_stats(), gpbnr.getProcessorCapabilities(),
-          gpbnr.getProcessorIdentifier());
+          gpbnr.getProcessorIdentifier(), gpbnr.getValidWriteIdList());
       return new GetPartitionsByNamesResult(partitions);
     }

     public List<Partition> get_partitions_by_names(final String dbName, final String tblName,
-        final List<String> partNames, boolean getColStats) throws TException {
-      return get_partitions_by_names(dbName, tblName, partNames, getColStats, null, null);
+        final List<String> partNames, boolean getColStats, String validWriteIdList) throws TException {
+      return get_partitions_by_names(dbName, tblName, partNames, getColStats, null, null, validWriteIdList);
     }
     public List<Partition> get_partitions_by_names(final String dbName, final String tblName,
         final List<String> partNames, boolean getColStats, List<String> processorCapabilities,
-        String processorId) throws TException {
+        String processorId, String validWriteIdList) throws TException {
       String[] dbNameParts = parseDbName(dbName, conf);
       String parsedCatName = dbNameParts[CAT_NAME];
@@ -6617,9 +6601,9 @@ public GetPartitionsByNamesResult get_partitions_by_names_req(GetPartitionsByNam
         getMS().openTransaction();
         authorizeTableForPartitionMetadata(parsedCatName, parsedDbName, tblName);
-        fireReadTablePreEvent(parsedCatName, parsedDbName, tblName);
+        fireReadTablePreEvent(parsedCatName, parsedDbName, tblName, validWriteIdList);
-        ret = getMS().getPartitionsByNames(parsedCatName, parsedDbName, tblName, partNames);
+        ret = getMS().getPartitionsByNames(parsedCatName, parsedDbName, tblName, partNames, validWriteIdList);
         ret = FilterUtils.filterPartitionsIfEnabled(isServerFilterEnabled, filterHook, ret);
         // If requested add column statistics in each of the partition objects
@@ -6632,7 +6616,7 @@ public GetPartitionsByNamesResult get_partitions_by_names_req(GetPartitionsByNam
           List<ColumnStatistics> partColStatsList =
               getMS().getPartitionColumnStatistics(parsedCatName, parsedDbName, tblName,
                   Collections.singletonList(partName),
-                  StatsSetupConst.getColumnsHavingStats(part.getParameters()));
+                  StatsSetupConst.getColumnsHavingStats(part.getParameters()), validWriteIdList);
           if (partColStatsList != null && !partColStatsList.isEmpty()) {
             ColumnStatistics partColStats = partColStatsList.get(0);
             if (partColStats != null) {
@@ -6697,7 +6681,7 @@ private String getPartName(HiveObjectRef hiveObject) throws MetaException {
           String catName = hiveObject.isSetCatName() ? hiveObject.getCatName() :
               getDefaultCatalog(conf);
           Table table = get_table_core(catName, hiveObject.getDbName(), hiveObject
-              .getObjectName());
+              .getObjectName(), null);
           partName = Warehouse
               .makePartName(table.getPartitionKeys(), partValue);
         } catch (NoSuchObjectException e) {
@@ -7135,7 +7119,7 @@ private PrincipalPrivilegeSet get_user_privilege_set(final String userName,
         if (dbName == null) {
           return getMS().listPrincipalPartitionColumnGrantsAll(principalName, principalType);
         }
-        Table tbl = get_table_core(catName, dbName, tableName);
+        Table tbl = get_table_core(catName, dbName, tableName, null);
         String partName = Warehouse.makePartName(tbl.getPartitionKeys(), partValues);
         if (principalName == null) {
           return getMS().listPartitionColumnGrantsAll(catName, dbName, tableName, partName, columnName);
@@ -7180,7 +7164,7 @@ private PrincipalPrivilegeSet get_user_privilege_set(final String userName,
         if (dbName == null) {
           return getMS().listPrincipalPartitionGrantsAll(principalName, principalType);
         }
-        Table tbl = get_table_core(catName, dbName, tableName);
+        Table tbl = get_table_core(catName, dbName, tableName, null);
         String partName = Warehouse.makePartName(tbl.getPartitionKeys(), partValues);
         if (principalName == null) {
           return getMS().listPartitionGrantsAll(catName, dbName, tableName, partName);
@@ -7451,6 +7435,7 @@ public void markPartitionForEvent(final String db_name, final String tbl_name,
       boolean success = false;
       try {
         String[] parsedDbName = parseDbName(db_name, conf);
+        ms.openTransaction();
         startPartitionFunction("markPartitionForEvent", parsedDbName[CAT_NAME],
             parsedDbName[DB_NAME], tbl_name, partName);
@@ -7470,6 +7455,7 @@ public void markPartitionForEvent(final String db_name, final String tbl_name,
         }
         success = ms.commitTransaction();
+
         for (MetaStoreEventListener listener : listeners) {
           listener.onLoadPartitionDone(new LoadPartitionDoneEvent(true, tbl, partName, this));
         }
@@ -7986,7 +7972,7 @@ private Table getTblObject(String db, String table) throws MetaException, NoSuch
     private Partition getPartitionObj(String db, String table, List<String> partitionVals,
         Table tableObj) throws MetaException, NoSuchObjectException {
       if (tableObj.isSetPartitionKeys() && !tableObj.getPartitionKeys().isEmpty()) {
-        return get_partition(db, table, partitionVals);
+        return get_partition(db, table, partitionVals, null);
       }
       return null;
     }
@@ -8191,6 +8177,7 @@ private boolean updatePartColumnStatsWithMerge(String catName, String dbName, St
         List<String> colNames, Map<String, ColumnStatistics> newStatsMap, SetPartitionsStatsRequest request)
         throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException {
       RawStore ms = getMS();
+      ms.openTransaction();
       boolean isCommitted = false, result = false;
       try {
@@ -8209,7 +8196,7 @@ private boolean updatePartColumnStatsWithMerge(String catName, String dbName, St
         }
         // another single call to get all the partition objects
-        List<Partition> partitions = ms.getPartitionsByNames(catName, dbName, tableName, partitionNames);
+        List<Partition> partitions = ms.getPartitionsByNames(catName, dbName, tableName, partitionNames, null);
         Map<String, Partition> mapToPart = new HashMap<>();
         for (int index = 0; index < partitionNames.size(); index++) {
           mapToPart.put(partitionNames.get(index), partitions.get(index));
@@ -8270,6 +8257,7 @@ private boolean updateTableColumnStatsWithMerge(String catName, String dbName, S
         NoSuchObjectException, InvalidObjectException, InvalidInputException {
       ColumnStatistics firstColStats = request.getColStats().get(0);
       RawStore ms = getMS();
+      ms.openTransaction();
       boolean isCommitted = false, result = false;
       try {
@@ -8535,7 +8523,7 @@ public CacheFileMetadataResult cache_file_metadata(
       ms.openTransaction();
       boolean success = false;
       try {
-        Table tbl = ms.getTable(DEFAULT_CATALOG_NAME, dbName, tblName);
+        Table tbl = ms.getTable(DEFAULT_CATALOG_NAME, dbName, tblName, null);
         if (tbl == null) {
           throw new NoSuchObjectException(dbName + "." + tblName + " not found");
         }
@@ -8560,7 +8548,7 @@ public CacheFileMetadataResult cache_file_metadata(
         if (partName != null) {
           partNames = Lists.newArrayList(partName);
         } else if (isAllPart) {
-          partNames = ms.listPartitionNames(DEFAULT_CATALOG_NAME, dbName, tblName, (short)-1);
+          partNames = ms.listPartitionNames(DEFAULT_CATALOG_NAME, dbName, tblName, (short)-1, null);
         } else {
           throw new MetaException("Table is partitioned");
         }
@@ -8573,7 +8561,7 @@ public CacheFileMetadataResult cache_file_metadata(
           int currentBatchSize = Math.min(batchSize, partNames.size() - index);
           List<String> nameBatch = partNames.subList(index, index + currentBatchSize);
           index += currentBatchSize;
-          List<Partition> parts = ms.getPartitionsByNames(DEFAULT_CATALOG_NAME, dbName, tblName, nameBatch);
+          List<Partition> parts = ms.getPartitionsByNames(DEFAULT_CATALOG_NAME, dbName, tblName, nameBatch, null);
           for (Partition part : parts) {
             if (!part.isSetSd() || !part.getSd().isSetLocation()) {
               throw new MetaException("Partition does not have storage location;" +
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
index cee357216f..1707883ab7 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
@@ -28,7 +28,6 @@
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
 import org.apache.hadoop.hive.metastore.txn.TxnStore;
-import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;

 /**
  * An interface wrapper for HMSHandler. This interface contains methods that need to be
@@ -83,16 +82,12 @@ Database get_database_core(final String catName, final String name)
    * @param catName catalog name
    * @param dbname database name
    * @param name table name
+   * @param validWriteIdList valid writeId to read
    * @return Table object
    * @throws NoSuchObjectException If the table does not exist.
    * @throws MetaException If another error occurs.
    */
-  Table get_table_core(final String catName, final String dbname, final String name)
-      throws MetaException, NoSuchObjectException;
-
-  Table get_table_core(final String catName, final String dbname,
-                       final String name,
-                       final String writeIdList)
+  Table get_table_core(final String catName, final String dbname, final String name, final String writeIdList)
       throws MetaException, NoSuchObjectException;

   /**
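
With the two get_table_core overloads collapsed into one, every caller states its snapshot explicitly and null keeps the old latest-state behavior. A toy model (not the real interface) of the consolidated shape:

/** Toy model of the IHMSHandler change: one get_table_core signature that
 *  always takes a writeIdList, where null means "no snapshot isolation". */
public class OverloadConsolidationSketch {
  interface Handler {
    String getTableCore(String cat, String db, String tbl, String writeIdList);
  }

  public static void main(String[] args) {
    Handler h = (cat, db, tbl, writeIds) ->
        tbl + (writeIds == null ? " (latest)" : " @ " + writeIds);

    // Former three-argument callers now pass null explicitly:
    System.out.println(h.getTableCore("hive", "db1", "tbl1", null));
    // Snapshot-aware callers forward the serialized write-id list:
    System.out.println(h.getTableCore("hive", "db1", "tbl1", "db1.tbl1:10:7:7:"));
  }
}
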
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetastoreDefaultTransformer.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetastoreDefaultTransformer.java
index 4b543c8da9..ab9186ceeb 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetastoreDefaultTransformer.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetastoreDefaultTransformer.java
@@ -381,7 +381,7 @@ public MetastoreDefaultTransformer(IHMSHandler handler) throws HiveMetaException
         table = tableCache.get(dbName + "." + tableName);
       } else {
         try {
-          table = hmsHandler.get_table_core(MetaStoreUtils.getDefaultCatalog(null), dbName, tableName);
+          table = hmsHandler.get_table_core(MetaStoreUtils.getDefaultCatalog(null), dbName, tableName, null);
         } catch (Exception e) {
           throw new MetaException("Could not load table " + tableName + ":" + e.getMessage());
         }
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index 509fcb25ad..bdc90994c5 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -465,6 +465,7 @@ public boolean openTransaction() {
   @Override
   @SuppressWarnings("nls")
   public boolean commitTransaction() {
+    pm.flush();
     if (TXN_STATUS.ROLLBACK == transactionStatus) {
       debugLog("Commit transaction: rollback");
       return false;
@@ -1236,13 +1237,6 @@ private static String getFullyQualifiedTableName(String dbName, String tblName)
         + "\"" + tblName + "\"";
   }

-  @Override
-  public Table getTable(String catName, String dbName, String tableName)
-      throws MetaException {
-    return getTable(catName, dbName, tableName, null);
-  }
-
   @Override
   public Table getTable(String catName, String dbName, String tableName,
                         String writeIdList) throws MetaException {
@@ -1807,6 +1801,7 @@ private Table convertToTable(MTable mtbl) throws MetaException {
     t.setRewriteEnabled(mtbl.isRewriteEnabled());
     t.setCatName(mtbl.getDatabase().getCatalogName());
     t.setWriteId(mtbl.getWriteId());
+    t.setTemporary(false);
     return t;
   }
@@ -2175,7 +2170,7 @@ private boolean isValidPartition(
     MetaStoreServerUtils.validatePartitionNameCharacters(part.getValues(),
         partitionValidationPattern);
     boolean doesExist = doesPartitionExist(part.getCatName(),
-        part.getDbName(), part.getTableName(), partitionKeys, part.getValues());
+        part.getDbName(), part.getTableName(), partitionKeys, part.getValues(), null);
     if (doesExist && !ifNotExists) {
       throw new MetaException("Partition already exists: " + part);
     }
@@ -2298,12 +2293,6 @@ public boolean addPartition(Partition part) throws InvalidObjectException,
     return success;
   }

-  @Override
-  public Partition getPartition(String catName, String dbName, String tableName,
-      List<String> part_vals) throws NoSuchObjectException, MetaException {
-    return getPartition(catName, dbName, tableName, part_vals, null);
-  }
-
   @Override
   public Partition getPartition(String catName, String dbName, String tableName,
                                 List<String> part_vals,
@@ -2646,13 +2635,13 @@ private boolean dropPartitionCommon(MPartition part) throws NoSuchObjectExceptio
   @Override
   public List<Partition> getPartitions(String catName, String dbName, String tableName,
-      int maxParts) throws MetaException, NoSuchObjectException {
+      int maxParts, String validWriteIdList) throws MetaException, NoSuchObjectException {
     return getPartitionsInternal(catName, dbName, tableName, maxParts, true, true);
   }

   @Override
   public Map<String, String> getPartitionLocations(String catName, String dbName, String tblName,
-      String baseLocationToNotShow, int max) {
+      String baseLocationToNotShow, int max, String validWriteIdList) {
     catName = normalizeIdentifier(catName);
     dbName = normalizeIdentifier(dbName);
     tblName = normalizeIdentifier(tblName);
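
The pm.flush() added to ObjectStore.commitTransaction() above pushes buffered JDO changes to the database before the commit decision is made, so SQL errors surface while the transaction can still roll back. A self-contained toy model (not Hive or JDO code) of why flushing first matters:

import java.util.ArrayList;
import java.util.List;

/** Toy model of flush-before-commit: dirty in-memory objects are written
 *  out first, then the commit decision is taken on the whole batch. */
public class FlushBeforeCommitSketch {
  private final List<String> database = new ArrayList<>();
  private final List<String> dirtyObjects = new ArrayList<>();

  void makeDirty(String change) { dirtyObjects.add(change); }

  void flush() {                       // analogous to pm.flush()
    database.addAll(dirtyObjects);     // issue the pending SQL now
    dirtyObjects.clear();              // objects clean, transaction still open
  }

  boolean commitTransaction() {
    flush();                           // the patch's addition: flush first
    return true;                       // then decide commit vs. rollback
  }

  public static void main(String[] args) {
    FlushBeforeCommitSketch pm = new FlushBeforeCommitSketch();
    pm.makeDirty("UPDATE TBLS SET WRITE_ID = 10");
    pm.commitTransaction();
    System.out.println(pm.database);
  }
}
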
@@ -2719,7 +2708,7 @@ private boolean dropPartitionCommon(MPartition part) throws NoSuchObjectExceptio
   @Override
   public List<Partition> getPartitionsWithAuth(String catName, String dbName, String tblName,
-      short max, String userName, List<String> groupNames)
+      short max, String userName, List<String> groupNames, String validWriteIdList)
       throws MetaException, InvalidObjectException {
     boolean success = false;
     QueryWrapper queryWrapper = new QueryWrapper();
@@ -2752,7 +2741,7 @@ private boolean dropPartitionCommon(MPartition part) throws NoSuchObjectExceptio
   @Override
   public Partition getPartitionWithAuth(String catName, String dbName, String tblName,
-      List<String> partVals, String user_name, List<String> group_names)
+      List<String> partVals, String user_name, List<String> group_names, String validWriteIdList)
       throws NoSuchObjectException, MetaException, InvalidObjectException {
     boolean success = false;
     try {
@@ -2815,7 +2804,7 @@ public Partition getPartitionWithAuth(String catName, String dbName, String tblN
   // TODO:pc implement max
   @Override
   public List<String> listPartitionNames(String catName, String dbName, String tableName,
-      short max) throws MetaException {
+      short max, String validWriteIdList) throws MetaException {
     List<String> pns = null;
     boolean success = false;
     try {
@@ -2875,7 +2864,7 @@ public PartitionValuesResponse listPartitionValues(String catName, String dbName
       String tableName, List<FieldSchema> cols,
       boolean applyDistinct, String filter, boolean ascending,
       List<FieldSchema> order,
-      long maxParts) throws MetaException {
+      long maxParts, String validWriteIdList) throws MetaException {
     catName = normalizeIdentifier(catName);
     dbName = dbName.toLowerCase().trim();
@@ -2919,7 +2908,7 @@ private PartitionValuesResponse extractPartitionNamesByFilter(
     }
     if (partitionNames == null) {
-      partitions = getPartitionsByFilter(catName, dbName, tableName, filter, (short) maxParts);
+      partitions = getPartitionsByFilter(catName, dbName, tableName, filter, (short) maxParts, null);
     }
     if (partitions != null) {
@@ -3149,7 +3138,7 @@ private Collection getPartitionPsQueryResults(String catName, String dbName, Str
   @Override
   public List<Partition> listPartitionsPsWithAuth(String catName, String db_name, String tbl_name,
-      List<String> part_vals, short max_parts, String userName, List<String> groupNames)
+      List<String> part_vals, short max_parts, String userName, List<String> groupNames, String validWriteIdList)
      throws MetaException, InvalidObjectException, NoSuchObjectException {
     List<Partition> partitions = new ArrayList<>();
     boolean success = false;
@@ -3183,7 +3172,7 @@ private Collection getPartitionPsQueryResults(String catName, String dbName, Str
   @Override
   public List<String> listPartitionNamesPs(String catName, String dbName, String tableName,
-      List<String> part_vals, short max_parts) throws MetaException, NoSuchObjectException {
+      List<String> part_vals, short max_parts, String validWriteIdList) throws MetaException, NoSuchObjectException {
     List<String> partitionNames = new ArrayList<>();
     boolean success = false;
     QueryWrapper queryWrapper = new QueryWrapper();
@@ -3286,7 +3275,7 @@ private Collection getPartitionPsQueryResults(String catName, String dbName, Str
   @Override
   public List<Partition> getPartitionsByNames(String catName, String dbName, String tblName,
-      List<String> partNames) throws MetaException, NoSuchObjectException {
+      List<String> partNames, String validWriteIdList) throws MetaException, NoSuchObjectException {
     return getPartitionsByNamesInternal(catName, dbName, tblName, partNames, true, true);
   }
@@ -3310,7 +3299,7 @@ private Collection getPartitionPsQueryResults(String catName, String dbName, Str
   @Override
   public boolean getPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr,
-      String defaultPartitionName, short maxParts, List<Partition> result) throws TException {
+      String defaultPartitionName, short maxParts, List<Partition> result, String validWriteIdList) throws TException {
String validWriteIdList) throws TException { return getPartitionsByExprInternal( catName, dbName, tblName, expr, defaultPartitionName, maxParts, result, true, true); } @@ -3562,7 +3551,7 @@ private String getJDOFilterStrForPartitionVals(Table table, List vals, @Override public List getPartitionsByFilter(String catName, String dbName, String tblName, - String filter, short maxParts) throws MetaException, NoSuchObjectException { + String filter, short maxParts, String validWriteIdList) throws MetaException, NoSuchObjectException { return getPartitionsByFilterInternal(catName, dbName, tblName, filter, maxParts, true, true); } @@ -3820,7 +3809,7 @@ protected String describeResult() { @Override public int getNumPartitionsByFilter(String catName, String dbName, String tblName, - String filter) throws MetaException, NoSuchObjectException { + String filter, String validWriteIdList) throws MetaException, NoSuchObjectException { final ExpressionTree exprTree = org.apache.commons.lang.StringUtils.isNotEmpty(filter) ? PartFilterExprUtil.getFilterParser(filter).tree : ExpressionTree.EMPTY_TREE; @@ -3851,7 +3840,7 @@ protected Integer getJdoResult( @Override public int getNumPartitionsByExpr(String catName, String dbName, String tblName, - byte[] expr) throws MetaException, NoSuchObjectException { + byte[] expr, String validWriteIdList) throws MetaException, NoSuchObjectException { final ExpressionTree exprTree = PartFilterExprUtil.makeExpressionTree(expressionProxy, expr, null); final byte[] tempExpr = expr; // Need to be final to pass it to an inner class @@ -3928,7 +3917,7 @@ protected boolean canUseDirectSql(GetHelper> ctx) throws MetaExc @Override public List getPartitionSpecsByFilterAndProjection(final Table table, GetPartitionsProjectionSpec partitionsProjectSpec, - final GetPartitionsFilterSpec filterSpec) throws MetaException, NoSuchObjectException { + final GetPartitionsFilterSpec filterSpec, String validWriteIdList) throws MetaException, NoSuchObjectException { List fieldList = null; String inputIncludePattern = null; String inputExcludePattern = null; @@ -8751,17 +8740,6 @@ public void validateTableCols(Table table, List colNames) throws MetaExc } } - @Override - public ColumnStatistics getTableColumnStatistics( - String catName, - String dbName, - String tableName, - List colNames) throws MetaException, NoSuchObjectException { - // Note: this will get stats without verifying ACID. - return getTableColumnStatisticsInternal( - catName, dbName, tableName, colNames, true, true); - } - @Override public ColumnStatistics getTableColumnStatistics( String catName, @@ -8825,14 +8803,6 @@ protected ColumnStatistics getJdoResult( }.run(true); } - @Override - public List getPartitionColumnStatistics(String catName, String dbName, String tableName, - List partNames, List colNames) throws MetaException, NoSuchObjectException { - // Note: this will get stats without verifying ACID. 
- return getPartitionColumnStatisticsInternal( - catName, dbName, tableName, partNames, colNames, true, true); - } - @Override public List getPartitionColumnStatistics( String catName, String dbName, String tableName, @@ -8930,7 +8900,7 @@ public AggrStats get_aggr_stats_for(String catName, String dbName, String tblNam return null; } - Table table = getTable(catName, dbName, tblName); + Table table = getTable(catName, dbName, tblName, writeIdList); boolean isTxn = TxnUtils.isTransactionalTable(table.getParameters()); if (isTxn && !areTxnStatsSupported) { return null; @@ -8941,7 +8911,7 @@ public AggrStats get_aggr_stats_for(String catName, String dbName, String tblNam GetPartitionsProjectionSpec ps = new GetPartitionsProjectionSpec(); ps.setIncludeParamKeyPattern(StatsSetupConst.COLUMN_STATS_ACCURATE + '%'); ps.setFieldList(Lists.newArrayList("writeId", "parameters", "values")); - List parts = getPartitionSpecsByFilterAndProjection(table, ps, fs); + List parts = getPartitionSpecsByFilterAndProjection(table, ps, fs, writeIdList); // Loop through the given "partNames" list // checking isolation-level-compliance of each partition column stats. @@ -8955,13 +8925,7 @@ public AggrStats get_aggr_stats_for(String catName, String dbName, String tblNam } } } - return get_aggr_stats_for(catName, dbName, tblName, partNames, colNames); - } - @Override - public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, - final List partNames, final List colNames) - throws MetaException, NoSuchObjectException { final boolean useDensityFunctionForNDVEstimation = MetastoreConf.getBoolVar(getConf(), ConfVars.STATS_NDV_DENSITY_FUNCTION); final double ndvTuner = MetastoreConf.getDoubleVar(getConf(), ConfVars.STATS_NDV_TUNER); @@ -9616,7 +9580,7 @@ public void setMetaStoreSchemaVersion(String schemaVersion, String comment) thro @Override public boolean doesPartitionExist(String catName, String dbName, String tableName, - List partKeys, List partVals) + List partKeys, List partVals, String validWriteIdList) throws MetaException { String name = Warehouse.makePartName(partKeys, partVals); return this.getMPartition(catName, dbName, tableName, name) != null; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java index c5e1a10869..22cae21ef5 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java @@ -198,17 +198,6 @@ void createTable(Table tbl) throws InvalidObjectException, boolean dropTable(String catalogName, String dbName, String tableName) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException; - /** - * Get a table object. - * @param catalogName catalog the table is in. - * @param dbName database the table is in. - * @param tableName table name. - * @return table object, or null if no such table exists (wow it would be nice if we either - * consistently returned null or consistently threw NoSuchObjectException). - * @throws MetaException something went wrong in the RDBMS - */ - Table getTable(String catalogName, String dbName, String tableName) throws MetaException; - /** * Get a table object. * @param catalogName catalog the table is in. 
@@ -262,18 +251,6 @@ boolean addPartitions(String catName, String dbName, String tblName, PartitionSpecProxy partitionSpec, boolean ifNotExists) throws InvalidObjectException, MetaException; - /** - * Get a partition. - * @param catName catalog name. - * @param dbName database name. - * @param tableName table name. - * @param part_vals partition values for this table. - * @return the partition. - * @throws MetaException error reading from RDBMS. - * @throws NoSuchObjectException no partition matching this specification exists. - */ - Partition getPartition(String catName, String dbName, String tableName, - List part_vals) throws MetaException, NoSuchObjectException; /** * Get a partition. * @param catName catalog name. @@ -297,12 +274,13 @@ Partition getPartition(String catName, String dbName, String tableName, * @param tableName table name. * @param partKeys list of partition keys used to generate the partition name. * @param part_vals list of partition values. + * @param validWriteIdList valid write ID list of the table for transactionally consistent reads * @return true if the partition exists, false otherwise. * @throws MetaException failure reading RDBMS * @throws NoSuchObjectException this is never thrown. */ boolean doesPartitionExist(String catName, String dbName, String tableName, - List partKeys, List part_vals) + List partKeys, List part_vals, String validWriteIdList) throws MetaException, NoSuchObjectException; /** @@ -327,12 +305,13 @@ boolean dropPartition(String catName, String dbName, String tableName, * @param dbName database name. * @param tableName table name * @param max maximum number of partitions, or -1 to get all partitions. + * @param validWriteIdList valid write ID list of the table for transactionally consistent reads * @return list of partitions * @throws MetaException error access the RDBMS. * @throws NoSuchObjectException no such table exists */ List getPartitions(String catName, String dbName, - String tableName, int max) throws MetaException, NoSuchObjectException; + String tableName, int max, String validWriteIdList) throws MetaException, NoSuchObjectException; /** * Get the location for every partition of a given table. If a partition location is a child of @@ -343,11 +322,12 @@ boolean dropPartition(String catName, String dbName, String tableName, * @param tblName table name. * @param baseLocationToNotShow Partition locations which are child of this path are omitted, and * null value returned instead. + * @param validWriteIdList valid write ID list of the table for transactionally consistent reads * @param max The maximum number of partition locations returned, or -1 for all * @return The map of the partitionName, location pairs */ Map getPartitionLocations(String catName, String dbName, String tblName, - String baseLocationToNotShow, int max); + String baseLocationToNotShow, int max, String validWriteIdList); /** * Alter a table. @@ -476,11 +456,12 @@ void updateCreationMetadata(String catName, String dbname, String tablename, Cre * @param db_name database name. * @param tbl_name table name. * @param max_parts maximum number of partitions to retrieve, -1 for all. + * @param validWriteIdList valid write ID list of the table for transactionally consistent reads * @return list of partition names. * @throws MetaException there was an error accessing the RDBMS */ List listPartitionNames(String catName, String db_name, - String tbl_name, short max_parts) throws MetaException; + String tbl_name, short max_parts, String validWriteIdList) throws MetaException; /** * Get a list of partition values as one big struct.
@@ -492,13 +473,14 @@ void updateCreationMetadata(String catName, String dbname, String tablename, Cre * @param filter filter to apply to the partition names * @param ascending whether to put in ascending order * @param order whether to order + * @param validWriteIdList valid write ID list of the table for transactionally consistent reads * @param maxParts maximum number of parts to return, or -1 for all * @return struct with all of the partition value information * @throws MetaException error access the RDBMS */ PartitionValuesResponse listPartitionValues(String catName, String db_name, String tbl_name, List cols, boolean applyDistinct, String filter, boolean ascending, - List order, long maxParts) throws MetaException; + List order, long maxParts, String validWriteIdList) throws MetaException; /** * Alter a partition. @@ -544,12 +526,13 @@ Partition alterPartition(String catName, String db_name, String tbl_name, List getPartitionsByFilter( - String catName, String dbName, String tblName, String filter, short maxParts) + String catName, String dbName, String tblName, String filter, short maxParts, String validWriteIdList) throws MetaException, NoSuchObjectException; /** @@ -576,12 +559,13 @@ Partition alterPartition(String catName, String db_name, String tbl_name, List getPartitionSpecsByFilterAndProjection(Table table, - GetPartitionsProjectionSpec projectionSpec, GetPartitionsFilterSpec filterSpec) + GetPartitionsProjectionSpec projectionSpec, GetPartitionsFilterSpec filterSpec, String validWriteIdList) throws MetaException, NoSuchObjectException; /** * Get partitions using an already parsed expression. @@ -592,11 +576,12 @@ Partition alterPartition(String catName, String db_name, String tbl_name, List result) + byte[] expr, String defaultPartitionName, short maxParts, List result, String validWriteIdList) throws TException; /** @@ -605,11 +590,12 @@ boolean getPartitionsByExpr(String catName, String dbName, String tblName, * @param dbName database name. * @param tblName table name. * @param filter filter from Hive's SQL where clause + * @param validWriteIdList valid write ID list of the table for transactionally consistent reads * @return number of matching partitions. * @throws MetaException error accessing the RDBMS or executing the filter * @throws NoSuchObjectException no such table */ - int getNumPartitionsByFilter(String catName, String dbName, String tblName, String filter) + int getNumPartitionsByFilter(String catName, String dbName, String tblName, String filter, String validWriteIdList) throws MetaException, NoSuchObjectException; /** @@ -618,11 +604,12 @@ int getNumPartitionsByFilter(String catName, String dbName, String tblName, Stri * @param dbName database name. * @param tblName table name. * @param expr an already parsed Hive expression + * @param validWriteIdList valid write ID list of the table for transactionally consistent reads * @return number of matching partitions. * @throws MetaException error accessing the RDBMS or working with the expression. * @throws NoSuchObjectException no such table. */ - int getNumPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr) + int getNumPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr, String validWriteIdList) throws MetaException, NoSuchObjectException; /** @@ -632,12 +619,13 @@ int getNumPartitionsByExpr(String catName, String dbName, String tblName, byte[] * @param tblName table name. * @param partNames list of partition names. These are names not values, so they will include * both the key and the value.
+ * @param validWriteIdList valid write ID list of the table for transactionally consistent reads * @return list of matching partitions * @throws MetaException error accessing the RDBMS. * @throws NoSuchObjectException No such table. */ List getPartitionsByNames(String catName, String dbName, String tblName, - List partNames) + List partNames, String validWriteIdList) throws MetaException, NoSuchObjectException; Table markPartitionForEvent(String catName, String dbName, String tblName, Map partVals, PartitionEventType evtType) throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException; @@ -824,13 +812,14 @@ boolean refreshPrivileges(HiveObjectRef objToRefresh, String authorizer, Privile * @param partVals partition values * @param user_name user to get privilege information for. * @param group_names groups to get privilege information for. + * @param validWriteIdList valid write ID list of the table for transactionally consistent reads * @return a partition * @throws MetaException error accessing the RDBMS. * @throws NoSuchObjectException no such partition exists * @throws InvalidObjectException error fetching privilege information */ Partition getPartitionWithAuth(String catName, String dbName, String tblName, - List partVals, String user_name, List group_names) + List partVals, String user_name, List group_names, String validWriteIdList) throws MetaException, NoSuchObjectException, InvalidObjectException; /** @@ -842,13 +831,14 @@ Partition getPartitionWithAuth(String catName, String dbName, String tblName, * @param maxParts maximum number of partitions to fetch, -1 for all partitions. * @param userName user to get privilege information for. * @param groupNames groups to get privilege information for. + * @param validWriteIdList valid write ID list of the table for transactionally consistent reads * @return list of partitions. * @throws MetaException error access the RDBMS. * @throws NoSuchObjectException no such table exists * @throws InvalidObjectException error fetching privilege information. */ List getPartitionsWithAuth(String catName, String dbName, - String tblName, short maxParts, String userName, List groupNames) + String tblName, short maxParts, String userName, List groupNames, String validWriteIdList) throws MetaException, NoSuchObjectException, InvalidObjectException; /** @@ -863,12 +853,13 @@ Partition getPartitionWithAuth(String catName, String dbName, String tblName, * Entries can be empty if you only want to specify latter partitions. * @param max_parts * The maximum number of partitions to return + * @param validWriteIdList valid write ID list of the table for transactionally consistent reads * @return A list of partition names that match the partial spec. * @throws MetaException error accessing RDBMS * @throws NoSuchObjectException No such table exists */ List listPartitionNamesPs(String catName, String db_name, String tbl_name, - List part_vals, short max_parts) + List part_vals, short max_parts, String validWriteIdList) throws MetaException, NoSuchObjectException; /** @@ -888,13 +879,14 @@ Partition getPartitionWithAuth(String catName, String dbName, String tblName, * The user name for the partition for authentication privileges * @param groupNames * The groupNames for the partition for authentication privileges + * @param validWriteIdList valid write ID list of the table for transactionally consistent reads * @return A list of partitions that match the partial spec.
* @throws MetaException error access RDBMS * @throws NoSuchObjectException No such table exists * @throws InvalidObjectException error access privilege information */ List listPartitionsPsWithAuth(String catName, String db_name, String tbl_name, - List part_vals, short max_parts, String userName, List groupNames) + List part_vals, short max_parts, String userName, List groupNames, String validWriteIdList) throws MetaException, InvalidObjectException, NoSuchObjectException; /** Persists the given column statistics object to the metastore @@ -921,21 +913,6 @@ Partition getPartitionWithAuth(String catName, String dbName, String tblName, List partVals, String validWriteIds, long writeId) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException; - /** - * Returns the relevant column statistics for a given column in a given table in a given database - * if such statistics exist. - * @param catName catalog name. - * @param dbName name of the database, defaults to current database - * @param tableName name of the table - * @param colName names of the columns for which statistics is requested - * @return Relevant column statistics for the column for the given table - * @throws NoSuchObjectException No such table - * @throws MetaException error accessing the RDBMS - * - */ - ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tableName, - List colName) throws MetaException, NoSuchObjectException; - /** * Returns the relevant column statistics for a given column in a given table in a given database * if such statistics exist. @@ -954,21 +931,6 @@ ColumnStatistics getTableColumnStatistics( List colName, String writeIdList) throws MetaException, NoSuchObjectException; - /** - * Get statistics for a partition for a set of columns. - * @param catName catalog name. - * @param dbName database name. - * @param tblName table name. - * @param partNames list of partition names. These are names so must be key1=val1[/key2=val2...] - * @param colNames list of columns to get stats for - * @return list of statistics objects - * @throws MetaException error accessing the RDBMS - * @throws NoSuchObjectException no such partition. - */ - List getPartitionColumnStatistics( - String catName, String dbName, String tblName, List partNames, List colNames) - throws MetaException, NoSuchObjectException; - /** * Get statistics for a partition for a set of columns. * @param catName catalog name. @@ -1215,21 +1177,6 @@ void dropFunction(String catName, String dbName, String funcName) */ List getFunctions(String catName, String dbName, String pattern) throws MetaException; - /** - * Get aggregated stats for a table or partition(s). - * @param catName catalog name. - * @param dbName database name. - * @param tblName table name. - * @param partNames list of partition names. These are the names of the partitions, not - * values. - * @param colNames list of column names - * @return aggregated stats - * @throws MetaException error accessing RDBMS - * @throws NoSuchObjectException no such table or partition - */ - AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, - List partNames, List colNames) throws MetaException, NoSuchObjectException; - /** * Get aggregated stats for a table or partition(s). * @param catName catalog name. 
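Taken together, the RawStore.java changes above thread a serialized ValidWriteIdList string through every partition and statistics read path, so that implementations such as CachedStore can check whether cached data is consistent with the caller's transactional snapshot. As a rough sketch of how a caller might produce that string for the new signatures (not part of the patch; the catalog, database, and table names below are placeholders):

```java
import java.util.BitSet;
import java.util.List;

import org.apache.hadoop.hive.common.ValidReaderWriteIdList;
import org.apache.hadoop.hive.metastore.RawStore;
import org.apache.hadoop.hive.metastore.api.Partition;

public class ValidWriteIdListSketch {
  // Reads all partitions of a hypothetical ACID table as of write-id
  // high-watermark 5, with no open or aborted write ids below it.
  static List<Partition> readPartitions(RawStore store) throws Exception {
    ValidReaderWriteIdList writeIds =
        new ValidReaderWriteIdList("default.streamingnobuckets", new long[0], new BitSet(), 5);
    // The serialized form is what the new String parameters carry.
    String validWriteIdList = writeIds.writeToString();
    return store.getPartitions("hive", "default", "streamingnobuckets", -1, validWriteIdList);
  }
}
```

In production code the list would come from the transaction system (as the prewarm change below does via TxnCommonUtils.createValidReaderWriteIdList) rather than being constructed by hand.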
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java index 511e6c1f64..ac935a716a 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java @@ -20,7 +20,7 @@ import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collection; +import java.util.BitSet; import java.util.EmptyStackException; import java.util.HashMap; import java.util.LinkedList; @@ -42,6 +42,9 @@ import org.apache.hadoop.hive.common.DatabaseName; import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.common.TableName; +import org.apache.hadoop.hive.common.ValidReaderWriteIdList; +import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.metastore.Deadline; import org.apache.hadoop.hive.metastore.FileMetadataHandler; import org.apache.hadoop.hive.metastore.ObjectStore; @@ -51,21 +54,25 @@ import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.HiveAlterHandler; +import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler; import org.apache.hadoop.hive.metastore.api.*; import org.apache.hadoop.hive.metastore.cache.SharedCache.StatsType; import org.apache.hadoop.hive.metastore.columnstats.aggr.ColumnStatsAggregator; import org.apache.hadoop.hive.metastore.columnstats.aggr.ColumnStatsAggregatorFactory; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; -import org.apache.hadoop.hive.metastore.messaging.AlterDatabaseMessage; -import org.apache.hadoop.hive.metastore.messaging.CreateDatabaseMessage; import org.apache.hadoop.hive.metastore.messaging.CreateTableMessage; import org.apache.hadoop.hive.metastore.messaging.DropTableMessage; import org.apache.hadoop.hive.metastore.messaging.AlterTableMessage; +import org.apache.hadoop.hive.metastore.messaging.CommitTxnMessage; +import org.apache.hadoop.hive.metastore.messaging.AbortTxnMessage; import org.apache.hadoop.hive.metastore.messaging.AddPartitionMessage; +import org.apache.hadoop.hive.metastore.messaging.AllocWriteIdMessage; import org.apache.hadoop.hive.metastore.messaging.AlterPartitionMessage; import org.apache.hadoop.hive.metastore.messaging.DropPartitionMessage; import org.apache.hadoop.hive.metastore.messaging.UpdateTableColumnStatMessage; +import org.apache.hadoop.hive.metastore.metrics.Metrics; +import org.apache.hadoop.hive.metastore.metrics.MetricsConstants; import org.apache.hadoop.hive.metastore.messaging.DeleteTableColumnStatMessage; import org.apache.hadoop.hive.metastore.messaging.UpdatePartitionColumnStatMessage; import org.apache.hadoop.hive.metastore.messaging.DeletePartitionColumnStatMessage; @@ -73,6 +80,9 @@ import org.apache.hadoop.hive.metastore.messaging.MessageDeserializer; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; import org.apache.hadoop.hive.metastore.messaging.MessageFactory; +import org.apache.hadoop.hive.metastore.messaging.OpenTxnMessage; +import org.apache.hadoop.hive.metastore.txn.TxnCommonUtils; +import org.apache.hadoop.hive.metastore.txn.TxnStore; 
import org.apache.hadoop.hive.metastore.txn.TxnUtils; import org.apache.hadoop.hive.metastore.utils.FileUtils; import org.apache.hadoop.hive.metastore.utils.JavaUtils; @@ -84,10 +94,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import com.codahale.metrics.Counter; import com.google.common.annotations.VisibleForTesting; -import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; -import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog; import static org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier; // TODO filter->expr @@ -118,15 +127,30 @@ private Configuration conf; private static boolean areTxnStatsSupported; private PartitionExpressionProxy expressionProxy = null; + private static String startUpdateServiceLock = "L"; private static String lock = "L"; private static boolean sharedCacheInited = false; private static SharedCache sharedCache = new SharedCache(); - private static boolean canUseEvents = false; private static long lastEventId; + private static Map<Long, Map<String, Long>> txnIdToWriteId = new HashMap<>(); + private static boolean counterInited = false; + private static Counter cacheHit; + private static Counter cacheMiss; private static final Logger LOG = LoggerFactory.getLogger(CachedStore.class.getName()); @Override public void setConf(Configuration conf) { + if (MetastoreConf.getVar(conf, ConfVars.TRANSACTIONAL_EVENT_LISTENERS) == null || + MetastoreConf.getVar(conf, ConfVars.TRANSACTIONAL_EVENT_LISTENERS).isEmpty()) { + throw new RuntimeException("CachedStore cannot use events for invalidation as there is no " + + "TransactionalMetaStoreEventListener to add events to the notification table"); + } + if (!counterInited && MetastoreConf.getBoolVar(conf, ConfVars.METRICS_ENABLED)) { + Metrics.initialize(conf); + cacheHit = Metrics.getOrCreateCounter(MetricsConstants.METADATA_CACHE_HIT); + cacheMiss = Metrics.getOrCreateCounter(MetricsConstants.METADATA_CACHE_MISS); + counterInited = true; + } setConfInternal(conf); initBlackListWhiteList(conf); initSharedCache(conf); @@ -148,7 +172,7 @@ void setConfForTestExceptSharedCache(Configuration conf) { initBlackListWhiteList(conf); } - private static synchronized void triggerUpdateUsingEvent(RawStore rawStore) { + private static synchronized void triggerUpdateUsingEvent(RawStore rawStore, Configuration conf) { if (!isCachePrewarmed.get()) { LOG.error("cache update should be done only after prewarm"); throw new RuntimeException("cache update should be done only after prewarm"); @@ -156,7 +180,7 @@ private static synchronized void triggerUpdateUsingEvent(RawStore rawStore) { long startTime = System.nanoTime(); long preEventId = lastEventId; try { - lastEventId = updateUsingNotificationEvents(rawStore, lastEventId); + lastEventId = updateUsingNotificationEvents(rawStore, lastEventId, conf); } catch (Exception e) { LOG.error("Cache update failed for start event id " + lastEventId + " with error ", e); throw new RuntimeException(e.getMessage()); @@ -167,19 +191,12 @@ private static synchronized void triggerUpdateUsingEvent(RawStore rawStore) { } } - private static synchronized void triggerPreWarm(RawStore rawStore) { + private static synchronized void triggerPreWarm(RawStore rawStore, Configuration conf) { lastEventId = rawStore.getCurrentNotificationEventId().getEventId(); - prewarm(rawStore); + prewarm(rawStore, conf); } private void setConfInternal(Configuration conf) { - if (MetastoreConf.getBoolVar(conf, ConfVars.METASTORE_CACHE_CAN_USE_EVENT)) { - canUseEvents = true; - }
else { - canUseEvents = false; - } - LOG.info("canUseEvents is set to " + canUseEvents + " in cached Store"); - String rawStoreClassName = MetastoreConf.getVar(conf, ConfVars.CACHED_RAW_STORE_IMPL, ObjectStore.class.getName()); if (rawStore == null) { try { @@ -251,17 +268,14 @@ private static void updateStatsForAlterTable(RawStore rawStore, Table tblBefore, } } - @VisibleForTesting public static long updateUsingNotificationEvents(RawStore rawStore, long lastEventId) + @VisibleForTesting public static long updateUsingNotificationEvents(RawStore rawStore, long lastEventId, Configuration conf) throws Exception { + Deadline.registerIfNot(1000000); LOG.debug("updating cache using notification events starting from event id " + lastEventId); NotificationEventRequest rqst = new NotificationEventRequest(lastEventId); //Add the events which are not related to metadata update rqst.addToEventTypeSkipList(MessageBuilder.INSERT_EVENT); - rqst.addToEventTypeSkipList(MessageBuilder.OPEN_TXN_EVENT); - rqst.addToEventTypeSkipList(MessageBuilder.COMMIT_TXN_EVENT); - rqst.addToEventTypeSkipList(MessageBuilder.ABORT_TXN_EVENT); - rqst.addToEventTypeSkipList(MessageBuilder.ALLOC_WRITE_ID_EVENT); rqst.addToEventTypeSkipList(MessageBuilder.ACID_WRITE_EVENT); rqst.addToEventTypeSkipList(MessageBuilder.CREATE_FUNCTION_EVENT); rqst.addToEventTypeSkipList(MessageBuilder.DROP_FUNCTION_EVENT); @@ -327,7 +341,7 @@ private static void updateStatsForAlterTable(RawStore rawStore, Table tblBefore, break; case MessageBuilder.CREATE_TABLE_EVENT: CreateTableMessage createTableMessage = deserializer.getCreateTableMessage(message); - sharedCache.addTableToCache(catalogName, dbName, tableName, createTableMessage.getTableObj()); + sharedCache.addTableToCache(catalogName, dbName, tableName, createTableMessage.getTableObj(), newTableWriteIds(dbName, tableName)); break; case MessageBuilder.ALTER_TABLE_EVENT: AlterTableMessage alterTableMessage = deserializer.getAlterTableMessage(message); @@ -340,14 +354,15 @@ private static void updateStatsForAlterTable(RawStore rawStore, Table tblBefore, DropTableMessage dropTableMessage = deserializer.getDropTableMessage(message); int batchSize = MetastoreConf.getIntVar(rawStore.getConf(), ConfVars.BATCH_RETRIEVE_OBJECTS_MAX); String tableDnsPath = null; - Path tablePath = new Path(dropTableMessage.getTableObj().getSd().getLocation()); + Path tablePath = dropTableMessage.getTableObj().getSd().getLocation()!=null? 
+ new Path(dropTableMessage.getTableObj().getSd().getLocation()):null; if (tablePath != null) { tableDnsPath = new Warehouse(rawStore.getConf()).getDnsPath(tablePath).toString(); } while (true) { - Map partitionLocations = - rawStore.getPartitionLocations(catalogName, dbName, tableName, tableDnsPath, batchSize); + Map partitionLocations = rawStore.getPartitionLocations(catalogName, dbName, tableName, + tableDnsPath, batchSize, null); if (partitionLocations == null || partitionLocations.isEmpty()) { break; } @@ -356,25 +371,34 @@ private static void updateStatsForAlterTable(RawStore rawStore, Table tblBefore, } sharedCache.removeTableFromCache(catalogName, dbName, tableName); break; - case MessageBuilder.CREATE_DATABASE_EVENT: - CreateDatabaseMessage createDatabaseMessage = deserializer.getCreateDatabaseMessage(message); - sharedCache.addDatabaseToCache(createDatabaseMessage.getDatabaseObject()); - break; - case MessageBuilder.ALTER_DATABASE_EVENT: - AlterDatabaseMessage alterDatabaseMessage = deserializer.getAlterDatabaseMessage(message); - sharedCache.alterDatabaseInCache(catalogName, dbName, alterDatabaseMessage.getDbObjAfter()); - break; - case MessageBuilder.DROP_DATABASE_EVENT: - sharedCache.removeDatabaseFromCache(catalogName, dbName); - break; - case MessageBuilder.CREATE_CATALOG_EVENT: - case MessageBuilder.DROP_CATALOG_EVENT: - case MessageBuilder.ALTER_CATALOG_EVENT: - // TODO : Need to add cache invalidation for catalog events - LOG.error("catalog Events are not supported for cache invalidation : " + event.getEventType()); - break; case MessageBuilder.UPDATE_TBL_COL_STAT_EVENT: UpdateTableColumnStatMessage msg = deserializer.getUpdateTableColumnStatMessage(message); + Table tbl = msg.getTableObject(); + Map newParams = new HashMap<>(tbl.getParameters()); + List colNames = new ArrayList<>(); + for (ColumnStatisticsObj statsObj : msg.getColumnStatistics().getStatsObj()) { + colNames.add(statsObj.getColName()); + } + StatsSetupConst.setColumnStatsState(newParams, colNames); + long writeId = msg.getWriteId(); + String validWriteIds = msg.getWriteIds(); + if (validWriteIds != null) { + if (!areTxnStatsSupported) { + StatsSetupConst.setBasicStatsState(newParams, StatsSetupConst.FALSE); + } else { + String errorMsg = ObjectStore.verifyStatsChangeCtx(TableName.getDbTable(dbName, tableName), + tbl.getParameters(), newParams, writeId, validWriteIds, true); + if (errorMsg != null) { + throw new MetaException(errorMsg); + } + if (!ObjectStore.isCurrentStatsValidForTheQuery(newParams, writeId, validWriteIds, true)) { + // Make sure we set the flag to invalid regardless of the current value. + StatsSetupConst.setBasicStatsState(newParams, StatsSetupConst.FALSE); + LOG.info("Removed COLUMN_STATS_ACCURATE from the parameters of the table " + + dbName + "." 
+ tableName); + } + } + } sharedCache.alterTableAndStatsInCache(catalogName, dbName, tableName, msg.getWriteId(), msg.getColumnStatistics().getStatsObj(), msg.getParameters()); break; @@ -384,6 +408,32 @@ private static void updateStatsForAlterTable(RawStore rawStore, Table tblBefore, break; case MessageBuilder.UPDATE_PART_COL_STAT_EVENT: UpdatePartitionColumnStatMessage msgPartUpdate = deserializer.getUpdatePartitionColumnStatMessage(message); + Partition partition = sharedCache.getPartitionFromCache(catalogName, dbName, tableName, msgPartUpdate.getPartVals()); + newParams = new HashMap<>(partition.getParameters()); + colNames = new ArrayList<>(); + for (ColumnStatisticsObj statsObj : msgPartUpdate.getColumnStatistics().getStatsObj()) { + colNames.add(statsObj.getColName()); + } + StatsSetupConst.setColumnStatsState(newParams, colNames); + writeId = msgPartUpdate.getWriteId(); + validWriteIds = msgPartUpdate.getWriteIds(); + if (validWriteIds != null) { + if (!areTxnStatsSupported) { + StatsSetupConst.setBasicStatsState(newParams, StatsSetupConst.FALSE); + } else { + String errorMsg = ObjectStore.verifyStatsChangeCtx(TableName.getDbTable(dbName, tableName), + partition.getParameters(), newParams, writeId, validWriteIds, true); + if (errorMsg != null) { + throw new MetaException(errorMsg); + } + if (!ObjectStore.isCurrentStatsValidForTheQuery(newParams, writeId, validWriteIds, true)) { + // Make sure we set the flag to invalid regardless of the current value. + StatsSetupConst.setBasicStatsState(newParams, StatsSetupConst.FALSE); + LOG.info("Removed COLUMN_STATS_ACCURATE from the parameters of the partition " + + dbName + "." + tableName + "." + msgPartUpdate.getPartVals()); + } + } + } sharedCache.alterPartitionAndStatsInCache(catalogName, dbName, tableName, msgPartUpdate.getWriteId(), msgPartUpdate.getPartVals(), msgPartUpdate.getParameters(), msgPartUpdate.getColumnStatistics().getStatsObj()); @@ -393,6 +443,48 @@ private static void updateStatsForAlterTable(RawStore rawStore, Table tblBefore, sharedCache.removePartitionColStatsFromCache(catalogName, dbName, tableName, msgPart.getPartValues(), msgPart.getColName()); break; + case MessageBuilder.OPEN_TXN_EVENT: + OpenTxnMessage openTxn = deserializer.getOpenTxnMessage(message); + for (long txnId : openTxn.getTxnIds()) { + txnIdToWriteId.put(txnId, new HashMap<>()); + } + break; + case MessageBuilder.ABORT_TXN_EVENT: + AbortTxnMessage abortTxn = deserializer.getAbortTxnMessage(message); + txnIdToWriteId.put(abortTxn.getTxnId(), new HashMap<>()); + break; + case MessageBuilder.ALLOC_WRITE_ID_EVENT: + AllocWriteIdMessage allocWriteId = deserializer.getAllocWriteIdMessage(message); + List txnToWriteIdList = allocWriteId.getTxnToWriteIdList(); + for (TxnToWriteId txnToWriteId : txnToWriteIdList) { + long txnId = txnToWriteId.getTxnId(); + if (txnIdToWriteId.containsKey(txnId)) { + Map m = txnIdToWriteId.get(txnId); + String fullTableName = TableName.getDbTable(dbName, tableName); + m.put(fullTableName, txnToWriteId.getWriteId()); + } + } + break; + case MessageBuilder.COMMIT_TXN_EVENT: + CommitTxnMessage commitTxn = deserializer.getCommitTxnMessage(message); + if (txnIdToWriteId.containsKey(commitTxn.getTxnId())) { + Map m = txnIdToWriteId.get(commitTxn.getTxnId()); + for (Map.Entry entry : m.entrySet()) { + String tblNameToFlag = entry.getKey(); + long writeIdToCommit = entry.getValue(); + TableName tname = TableName.fromString(tblNameToFlag, Warehouse.DEFAULT_CATALOG_NAME, Warehouse.DEFAULT_DATABASE_NAME); + 
sharedCache.commitWriteId(tname.getCat(), tname.getDb(), tname.getTable(), writeIdToCommit); + } + txnIdToWriteId.remove(commitTxn.getTxnId()); + } else { + GetTxnTableWriteIdsResponse getTxnTableWriteIdsResponse = HMSHandler.getMsThreadTxnHandler(conf) + .getTxnTableWriteIds(commitTxn.getTxnId()); + for (TableWriteId tableWriteId : getTxnTableWriteIdsResponse.getTableWriteIds()) { + TableName tname = TableName.fromString(tableWriteId.getFullTableName(), Warehouse.DEFAULT_CATALOG_NAME, Warehouse.DEFAULT_DATABASE_NAME); + sharedCache.commitWriteId(tname.getCat(), tname.getDb(), tname.getTable(), tableWriteId.getWriteId()); + } + } + break; default: LOG.error("Event is not supported for cache invalidation : " + event.getEventType()); } @@ -405,27 +497,23 @@ private static void updateStatsForAlterTable(RawStore rawStore, Table tblBefore, * This initializes the caches in SharedCache by getting the objects from Metastore DB via * ObjectStore and populating the respective caches */ - static void prewarm(RawStore rawStore) { + static void prewarm(RawStore rawStore, Configuration conf) { if (isCachePrewarmed.get()) { return; } long startTime = System.nanoTime(); LOG.info("Prewarming CachedStore"); long sleepTime = 100; + TxnStore txn = TxnUtils.getTxnStore(conf); while (!isCachePrewarmed.get()) { // Prevents throwing exceptions in our raw store calls since we're not using RawStoreProxy Deadline.registerIfNot(1000000); - Collection catalogsToCache; + + List catNames = new ArrayList<>(); try { - catalogsToCache = catalogsToCache(rawStore); - LOG.info("Going to cache catalogs: " + org.apache.commons.lang.StringUtils.join(catalogsToCache, ", ")); - List catalogs = new ArrayList<>(catalogsToCache.size()); - for (String catName : catalogsToCache) { - catalogs.add(rawStore.getCatalog(catName)); - } - sharedCache.populateCatalogsInCache(catalogs); - } catch (MetaException | NoSuchObjectException e) { - LOG.warn("Failed to populate catalogs in cache, going to try again", e); + catNames = rawStore.getCatalogs(); + } catch (MetaException e) { + LOG.warn("Failed to get catalogs, going to try again", e); try { Thread.sleep(sleepTime); sleepTime = sleepTime * 2; @@ -435,12 +523,11 @@ static void prewarm(RawStore rawStore) { // try again continue; } - LOG.info("Finished prewarming catalogs, starting on databases"); + List databases = new ArrayList<>(); - for (String catName : catalogsToCache) { - try { + try { + for (String catName : catNames) { List dbNames = rawStore.getAllDatabases(catName); - LOG.info("Number of databases to prewarm in catalog {}: {}", catName, dbNames.size()); for (String dbName : dbNames) { try { databases.add(rawStore.getDatabase(catName, dbName)); @@ -449,11 +536,10 @@ static void prewarm(RawStore rawStore) { LOG.warn("Failed to cache database " + DatabaseName.getQualified(catName, dbName) + ", moving on", e); } } - } catch (MetaException e) { - LOG.warn("Failed to cache databases in catalog " + catName + ", moving on", e); } + } catch (MetaException e) { + LOG.warn("Failed to fetch databases, moving on", e); } - sharedCache.populateDatabasesInCache(databases); LOG.info("Databases cache is now prewarmed. 
Now adding tables, partitions and statistics to the cache"); int numberOfDatabasesCachedSoFar = 0; for (Database db : databases) { @@ -477,13 +563,21 @@ static void prewarm(RawStore rawStore) { continue; } Table table; + ValidWriteIdList writeIds; try { - table = rawStore.getTable(catName, dbName, tblName); + ValidTxnList currentTxnList = TxnCommonUtils.createValidReadTxnList(txn.getOpenTxns(), 0); + GetValidWriteIdsRequest rqst = new GetValidWriteIdsRequest(Arrays.asList(TableName.getDbTable(dbName, tblName))); + rqst.setValidTxnList(currentTxnList.toString()); + writeIds = TxnCommonUtils.createValidReaderWriteIdList(txn.getValidWriteIds(rqst).getTblValidWriteIds().get(0)); + table = rawStore.getTable(catName, dbName, tblName, null); } catch (MetaException e) { LOG.debug(ExceptionUtils.getStackTrace(e)); // It is possible the table is deleted during fetching tables of the database, // in that case, continue with the next table continue; + } catch (NoSuchTxnException e) { + LOG.warn("Cannot find transaction", e); + continue; } List colNames = MetaStoreUtils.getColumnNamesForTable(table); try { @@ -494,7 +588,7 @@ static void prewarm(RawStore rawStore) { AggrStats aggrStatsAllButDefaultPartition = null; if (!table.getPartitionKeys().isEmpty()) { Deadline.startTimer("getPartitions"); - partitions = rawStore.getPartitions(catName, dbName, tblName, -1); + partitions = rawStore.getPartitions(catName, dbName, tblName, -1, null); Deadline.stopTimer(); List partNames = new ArrayList<>(partitions.size()); for (Partition p : partitions) { @@ -504,12 +598,12 @@ static void prewarm(RawStore rawStore) { // Get partition column stats for this table Deadline.startTimer("getPartitionColumnStatistics"); partitionColStats = - rawStore.getPartitionColumnStatistics(catName, dbName, tblName, partNames, colNames); + rawStore.getPartitionColumnStatistics(catName, dbName, tblName, partNames, colNames, null); Deadline.stopTimer(); // Get aggregate stats for all partitions of a table and for all but default // partition Deadline.startTimer("getAggrPartitionColumnStatistics"); - aggrStatsAllPartitions = rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames); + aggrStatsAllPartitions = rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames, null); Deadline.stopTimer(); // Remove default partition from partition names and get aggregate // stats again @@ -526,18 +620,18 @@ static void prewarm(RawStore rawStore) { partNames.remove(defaultPartitionName); Deadline.startTimer("getAggrPartitionColumnStatistics"); aggrStatsAllButDefaultPartition = - rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames); + rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames, null); Deadline.stopTimer(); } } else { Deadline.startTimer("getTableColumnStatistics"); - tableColStats = rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames); + tableColStats = rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames, null); Deadline.stopTimer(); } // If the table could not cached due to memory limit, stop prewarm boolean isSuccess = sharedCache .populateTableInCache(table, tableColStats, partitions, partitionColStats, aggrStatsAllPartitions, - aggrStatsAllButDefaultPartition); + aggrStatsAllButDefaultPartition, writeIds); if (isSuccess) { LOG.trace("Cached Database: {}'s Table: {}.", dbName, tblName); } else { @@ -572,7 +666,7 @@ static void prewarm(RawStore rawStore) { * a singleton. 
*/ @VisibleForTesting - static void clearSharedCache() { + public static void clearSharedCache() { synchronized (lock) { sharedCacheInited = false; } @@ -625,15 +719,6 @@ private static void initBlackListWhiteList(Configuration conf) { MetastoreConf.getAsString(conf, MetastoreConf.ConfVars.CACHED_RAW_STORE_CACHED_OBJECTS_BLACKLIST)); } - private static Collection catalogsToCache(RawStore rs) throws MetaException { - Collection confValue = MetastoreConf.getStringCollection(rs.getConf(), ConfVars.CATALOGS_TO_CACHE); - if (confValue == null || confValue.isEmpty() || (confValue.size() == 1 && confValue.contains(""))) { - return rs.getCatalogs(); - } else { - return confValue; - } - } - @VisibleForTesting /** * This starts a background thread, which initially populates the SharedCache and later @@ -642,32 +727,34 @@ private static void initBlackListWhiteList(Configuration conf) { * @param conf * @param runOnlyOnce * @param shouldRunPrewarm - */ static synchronized void startCacheUpdateService(Configuration conf, boolean runOnlyOnce, + */ static void startCacheUpdateService(Configuration conf, boolean runOnlyOnce, boolean shouldRunPrewarm) { - if (cacheUpdateMaster == null) { - initBlackListWhiteList(conf); - if (!MetastoreConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST)) { - cacheRefreshPeriodMS = - MetastoreConf.getTimeVar(conf, ConfVars.CACHED_RAW_STORE_CACHE_UPDATE_FREQUENCY, TimeUnit.MILLISECONDS); - } - LOG.info("CachedStore: starting cache update service (run every {} ms)", cacheRefreshPeriodMS); - cacheUpdateMaster = Executors.newScheduledThreadPool(1, new ThreadFactory() { - @Override public Thread newThread(Runnable r) { - Thread t = Executors.defaultThreadFactory().newThread(r); - t.setName("CachedStore-CacheUpdateService: Thread-" + t.getId()); - t.setDaemon(true); - return t; + synchronized (startUpdateServiceLock) { + if (cacheUpdateMaster == null) { + initBlackListWhiteList(conf); + if (!MetastoreConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST)) { + cacheRefreshPeriodMS = + MetastoreConf.getTimeVar(conf, ConfVars.CACHED_RAW_STORE_CACHE_UPDATE_FREQUENCY, TimeUnit.MILLISECONDS); + } + LOG.info("CachedStore: starting cache update service (run every {} ms)", cacheRefreshPeriodMS); + cacheUpdateMaster = Executors.newScheduledThreadPool(1, new ThreadFactory() { + @Override public Thread newThread(Runnable r) { + Thread t = Executors.defaultThreadFactory().newThread(r); + t.setName("CachedStore-CacheUpdateService: Thread-" + t.getId()); + t.setDaemon(true); + return t; + } + }); + if (!runOnlyOnce) { + cacheUpdateMaster + .scheduleAtFixedRate(new CacheUpdateMasterWork(conf, shouldRunPrewarm), 0, cacheRefreshPeriodMS, + TimeUnit.MILLISECONDS); } - }); - if (!runOnlyOnce) { - cacheUpdateMaster - .scheduleAtFixedRate(new CacheUpdateMasterWork(conf, shouldRunPrewarm), 0, cacheRefreshPeriodMS, - TimeUnit.MILLISECONDS); } - } - if (runOnlyOnce) { - // Some tests control the execution of the background update thread - cacheUpdateMaster.schedule(new CacheUpdateMasterWork(conf, shouldRunPrewarm), 0, TimeUnit.MILLISECONDS); + if (runOnlyOnce) { + // Some tests control the execution of the background update thread + cacheUpdateMaster.schedule(new CacheUpdateMasterWork(conf, shouldRunPrewarm), 0, TimeUnit.MILLISECONDS); + } } } @@ -694,14 +781,16 @@ private static void initBlackListWhiteList(Configuration conf) { static class CacheUpdateMasterWork implements Runnable { private boolean shouldRunPrewarm = true; private final RawStore rawStore; + private Configuration conf; CacheUpdateMasterWork(Configuration 
conf, boolean shouldRunPrewarm) { this.shouldRunPrewarm = shouldRunPrewarm; + this.conf = new Configuration(conf); String rawStoreClassName = MetastoreConf.getVar(conf, ConfVars.CACHED_RAW_STORE_IMPL, ObjectStore.class.getName()); try { rawStore = JavaUtils.getClass(rawStoreClassName, RawStore.class).newInstance(); - rawStore.setConf(conf); + rawStore.setConf(this.conf); } catch (InstantiationException | IllegalAccessException | MetaException e) { // MetaException here really means ClassNotFound (see the utility method). // So, if any of these happen, that means we can never succeed. @@ -711,23 +800,15 @@ private static void initBlackListWhiteList(Configuration conf) { @Override public void run() { if (!shouldRunPrewarm) { - if (canUseEvents) { - try { - triggerUpdateUsingEvent(rawStore); - } catch (Exception e) { - LOG.error("failed to update cache using events ", e); - } - } else { - // TODO: prewarm and update can probably be merged. - try { - update(); - } catch (Exception e) { - LOG.error("periodical refresh fail ", e); - } + try { + triggerUpdateUsingEvent(rawStore, conf); + } catch (Exception e) { + LOG.error("failed to update cache using events ", e); } + sharedCache.incrementUpdateCount(); } else { try { - triggerPreWarm(rawStore); + triggerPreWarm(rawStore, conf); shouldRunPrewarm = false; } catch (Exception e) { LOG.error("Prewarm failure", e); @@ -735,226 +816,6 @@ private static void initBlackListWhiteList(Configuration conf) { } } } - - void update() { - Deadline.registerIfNot(1000000); - LOG.debug("CachedStore: updating cached objects. Shared cache has been update {} times so far.", - sharedCache.getUpdateCount()); - try { - for (String catName : catalogsToCache(rawStore)) { - List dbNames = rawStore.getAllDatabases(catName); - // Update the database in cache - updateDatabases(rawStore, catName, dbNames); - for (String dbName : dbNames) { - // Update the tables in cache - updateTables(rawStore, catName, dbName); - List tblNames; - try { - tblNames = rawStore.getAllTables(catName, dbName); - } catch (MetaException e) { - LOG.debug(ExceptionUtils.getStackTrace(e)); - // Continue with next database - continue; - } - for (String tblName : tblNames) { - if (!shouldCacheTable(catName, dbName, tblName)) { - continue; - } - // Update the table column stats for a table in cache - updateTableColStats(rawStore, catName, dbName, tblName); - // Update the partitions for a table in cache - updateTablePartitions(rawStore, catName, dbName, tblName); - // Update the partition col stats for a table in cache - updateTablePartitionColStats(rawStore, catName, dbName, tblName); - // Update aggregate partition column stats for a table in cache - updateTableAggregatePartitionColStats(rawStore, catName, dbName, tblName); - } - } - } - sharedCache.incrementUpdateCount(); - LOG.debug("CachedStore: updated cached objects. 
Shared cache update count is: {}", - sharedCache.getUpdateCount()); - } catch (MetaException e) { - LOG.error("Updating CachedStore: error happen when refresh; skipping this iteration", e); - } - } - - private void updateDatabases(RawStore rawStore, String catName, List dbNames) { - LOG.debug("CachedStore: updating cached database objects for catalog: {}", catName); - boolean success = false; - // Try MAX_RETRIES times, then move to next method - int maxTries = MAX_RETRIES; - while (!success && (maxTries-- > 0)) { - // Prepare the list of databases - List databases = new ArrayList<>(); - for (String dbName : dbNames) { - Database db; - try { - db = rawStore.getDatabase(catName, dbName); - databases.add(db); - } catch (NoSuchObjectException e) { - LOG.info("Updating CachedStore: database: " + catName + "." + dbName + " does not exist.", e); - } - } - success = sharedCache.refreshDatabasesInCache(databases); - LOG.debug("CachedStore: updated cached database objects for catalog: {}", catName); - } - } - - private void updateTables(RawStore rawStore, String catName, String dbName) { - LOG.debug("CachedStore: updating cached table objects for catalog: {}, database: {}", catName, dbName); - boolean success = false; - // Try MAX_RETRIES times, then move to next method - int maxTries = MAX_RETRIES; - while (!success && (maxTries-- > 0)) { - List
tables = new ArrayList<>(); - try { - List tblNames = rawStore.getAllTables(catName, dbName); - for (String tblName : tblNames) { - if (!shouldCacheTable(catName, dbName, tblName)) { - continue; - } - Table table = rawStore - .getTable(StringUtils.normalizeIdentifier(catName), StringUtils.normalizeIdentifier(dbName), - StringUtils.normalizeIdentifier(tblName)); - tables.add(table); - } - success = sharedCache.refreshTablesInCache(catName, dbName, tables); - LOG.debug("CachedStore: updated cached table objects for catalog: {}, database: {}", catName, dbName); - } catch (MetaException e) { - LOG.debug("Unable to refresh cached tables for database: " + dbName, e); - } - } - } - - private void updateTableColStats(RawStore rawStore, String catName, String dbName, String tblName) { - LOG.debug("CachedStore: updating cached table col stats objects for catalog: {}, database: {}", catName, dbName); - boolean committed = false; - rawStore.openTransaction(); - try { - Table table = rawStore.getTable(catName, dbName, tblName); - if (table != null && !table.isSetPartitionKeys()) { - List colNames = MetaStoreUtils.getColumnNamesForTable(table); - Deadline.startTimer("getTableColumnStatistics"); - ColumnStatistics tableColStats = rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames); - Deadline.stopTimer(); - if (tableColStats != null) { - sharedCache.refreshTableColStatsInCache(StringUtils.normalizeIdentifier(catName), - StringUtils.normalizeIdentifier(dbName), StringUtils.normalizeIdentifier(tblName), - tableColStats.getStatsObj()); - // Update the table to get consistent stats state. - sharedCache.alterTableInCache(catName, dbName, tblName, table); - } - } - committed = rawStore.commitTransaction(); - LOG.debug("CachedStore: updated cached table col stats objects for catalog: {}, database: {}", catName, dbName); - } catch (MetaException | NoSuchObjectException e) { - LOG.info("Unable to refresh table column stats for table: " + tblName, e); - } finally { - if (!committed) { - sharedCache.removeAllTableColStatsFromCache(catName, dbName, tblName); - rawStore.rollbackTransaction(); - } - } - } - - private void updateTablePartitions(RawStore rawStore, String catName, String dbName, String tblName) { - LOG.debug("CachedStore: updating cached partition objects for catalog: {}, database: {}, table: {}", catName, - dbName, tblName); - try { - Deadline.startTimer("getPartitions"); - List partitions = rawStore.getPartitions(catName, dbName, tblName, -1); - Deadline.stopTimer(); - sharedCache - .refreshPartitionsInCache(StringUtils.normalizeIdentifier(catName), StringUtils.normalizeIdentifier(dbName), - StringUtils.normalizeIdentifier(tblName), partitions); - LOG.debug("CachedStore: updated cached partition objects for catalog: {}, database: {}, table: {}", catName, - dbName, tblName); - } catch (MetaException | NoSuchObjectException e) { - LOG.info("Updating CachedStore: unable to read partitions of table: " + tblName, e); - } - } - - private void updateTablePartitionColStats(RawStore rawStore, String catName, String dbName, String tblName) { - LOG.debug("CachedStore: updating cached partition col stats objects for catalog: {}, database: {}, table: {}", - catName, dbName, tblName); - boolean committed = false; - rawStore.openTransaction(); - try { - Table table = rawStore.getTable(catName, dbName, tblName); - if (table != null) { - List colNames = MetaStoreUtils.getColumnNamesForTable(table); - List partNames = rawStore.listPartitionNames(catName, dbName, tblName, (short) -1); - // Get partition 
column stats for this table - Deadline.startTimer("getPartitionColumnStatistics"); - List partitionColStats = - rawStore.getPartitionColumnStatistics(catName, dbName, tblName, partNames, colNames); - Deadline.stopTimer(); - sharedCache.refreshPartitionColStatsInCache(catName, dbName, tblName, partitionColStats); - Deadline.startTimer("getPartitionsByNames"); - List parts = rawStore.getPartitionsByNames(catName, dbName, tblName, partNames); - Deadline.stopTimer(); - // Also save partitions for consistency as they have the stats state. - for (Partition part : parts) { - sharedCache.alterPartitionInCache(catName, dbName, tblName, part.getValues(), part); - } - } - committed = rawStore.commitTransaction(); - LOG.debug("CachedStore: updated cached partition col stats objects for catalog: {}, database: {}, table: {}", - catName, dbName, tblName); - } catch (MetaException | NoSuchObjectException e) { - LOG.info("Updating CachedStore: unable to read partitions of table: " + tblName, e); - } finally { - if (!committed) { - sharedCache.removeAllPartitionColStatsFromCache(catName, dbName, tblName); - rawStore.rollbackTransaction(); - } - } - } - - // Update cached aggregate stats for all partitions of a table and for all - // but default partition - private static void updateTableAggregatePartitionColStats(RawStore rawStore, String catName, String dbName, - String tblName) { - LOG.debug( - "CachedStore: updating cached aggregate partition col stats objects for catalog: {}, database: {}, table: {}", - catName, dbName, tblName); - try { - Table table = rawStore.getTable(catName, dbName, tblName); - if (table == null) { - return; - } - List partNames = rawStore.listPartitionNames(catName, dbName, tblName, (short) -1); - List colNames = MetaStoreUtils.getColumnNamesForTable(table); - if ((partNames != null) && (partNames.size() > 0)) { - Deadline.startTimer("getAggregareStatsForAllPartitions"); - AggrStats aggrStatsAllPartitions = rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames); - Deadline.stopTimer(); - // Remove default partition from partition names and get aggregate stats again - List partKeys = table.getPartitionKeys(); - String defaultPartitionValue = MetastoreConf.getVar(rawStore.getConf(), ConfVars.DEFAULTPARTITIONNAME); - List partCols = new ArrayList(); - List partVals = new ArrayList(); - for (FieldSchema fs : partKeys) { - partCols.add(fs.getName()); - partVals.add(defaultPartitionValue); - } - String defaultPartitionName = FileUtils.makePartName(partCols, partVals); - partNames.remove(defaultPartitionName); - Deadline.startTimer("getAggregareStatsForAllPartitionsExceptDefault"); - AggrStats aggrStatsAllButDefaultPartition = - rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames); - Deadline.stopTimer(); - sharedCache.refreshAggregateStatsInCache(StringUtils.normalizeIdentifier(catName), - StringUtils.normalizeIdentifier(dbName), StringUtils.normalizeIdentifier(tblName), aggrStatsAllPartitions, - aggrStatsAllButDefaultPartition, null); - LOG.debug("CachedStore: updated cached aggregate partition col stats objects for catalog:" - + " {}, database: {}, table: {}", catName, dbName, tblName); - } - } catch (MetaException | NoSuchObjectException e) { - LOG.info("Updating CachedStore: unable to read aggregate column stats of table: " + tblName, e); - } - } } @Override public Configuration getConf() { @@ -970,27 +831,7 @@ private static void updateTableAggregatePartitionColStats(RawStore rawStore, Str } @Override public boolean commitTransaction() { - 
if (!rawStore.commitTransaction()) { - return false; - } - - // In case of event-based update, the shared cache is not updated directly, to avoid inconsistency. - // For example, metastore B adds a partition and metastore A drops it later. On metastore A, - // the drop request arrives first, and the partition is then re-created from the notification. If there is no tombstone - // entry in the partition cache recording that the drop happened after the creation, we end up consuming the creation - // event; although the drop notification eventually arrives, in the interim the later event takes precedence. - // So we do not update the cache during the raw store operation, but wait until commit, to make sure that - // the events related to the current transaction are applied to the cache; thus we can support strong - // consistency when there is only one metastore. - if (canUseEvents) { - try { - triggerUpdateUsingEvent(rawStore); - } catch (Exception e) { - // TODO: Not sure how to handle this, as the commit is already done in the object store. - LOG.error("Failed to update cache", e); - } - } - return true; + return rawStore.commitTransaction(); } @Override public boolean isActiveTransaction() { @@ -1003,107 +844,47 @@ private static void updateTableAggregatePartitionColStats(RawStore rawStore, Str @Override public void createCatalog(Catalog cat) throws MetaException { rawStore.createCatalog(cat); - // in case of event based cache update, cache will not be updated for catalog. - if (!canUseEvents) { - sharedCache.addCatalogToCache(cat); - } } @Override public void alterCatalog(String catName, Catalog cat) throws MetaException, InvalidOperationException { rawStore.alterCatalog(catName, cat); - // in case of event based cache update, cache will not be updated for catalog. - if (!canUseEvents) { - sharedCache.alterCatalogInCache(StringUtils.normalizeIdentifier(catName), cat); - } } @Override public Catalog getCatalog(String catalogName) throws NoSuchObjectException, MetaException { - // in case of event based cache update, cache will not be updated for catalog. - if (!sharedCache.isCatalogCachePrewarmed() || canUseEvents) { - return rawStore.getCatalog(catalogName); - } - Catalog cat = sharedCache.getCatalogFromCache(normalizeIdentifier(catalogName)); - if (cat == null) { - throw new NoSuchObjectException(); - } - return cat; + return rawStore.getCatalog(catalogName); } @Override public List getCatalogs() throws MetaException { - // in case of event based cache update, cache will not be updated for catalog. - if (!sharedCache.isCatalogCachePrewarmed() || canUseEvents) { - return rawStore.getCatalogs(); - } - return sharedCache.listCachedCatalogs(); + return rawStore.getCatalogs(); } @Override public void dropCatalog(String catalogName) throws NoSuchObjectException, MetaException { rawStore.dropCatalog(catalogName); - - // in case of event based cache update, cache will not be updated for catalog. - if (!canUseEvents) { - catalogName = catalogName.toLowerCase(); - sharedCache.removeCatalogFromCache(catalogName); - } } @Override public void createDatabase(Database db) throws InvalidObjectException, MetaException { rawStore.createDatabase(db); - // in case of event based cache update, cache will be updated during commit. - if (!canUseEvents) { - sharedCache.addDatabaseToCache(db); - } } @Override public Database getDatabase(String catName, String dbName) throws NoSuchObjectException { - // in case of event based cache update, cache will be updated during commit.
So within active transaction, read - // directly from rawStore to avoid reading stale data as the data updated during same transaction will not be - // updated in the cache. - if (!sharedCache.isDatabaseCachePrewarmed() || (canUseEvents && rawStore.isActiveTransaction())) { - return rawStore.getDatabase(catName, dbName); - } - dbName = dbName.toLowerCase(); - Database db = sharedCache - .getDatabaseFromCache(StringUtils.normalizeIdentifier(catName), StringUtils.normalizeIdentifier(dbName)); - if (db == null) { - throw new NoSuchObjectException(); - } - return db; + return rawStore.getDatabase(catName, dbName); } @Override public boolean dropDatabase(String catName, String dbName) throws NoSuchObjectException, MetaException { - boolean succ = rawStore.dropDatabase(catName, dbName); - if (succ && !canUseEvents) { - // in case of event based cache update, cache will be updated during commit. - sharedCache - .removeDatabaseFromCache(StringUtils.normalizeIdentifier(catName), StringUtils.normalizeIdentifier(dbName)); - } - return succ; + return rawStore.dropDatabase(catName, dbName); } @Override public boolean alterDatabase(String catName, String dbName, Database db) throws NoSuchObjectException, MetaException { - boolean succ = rawStore.alterDatabase(catName, dbName, db); - if (succ && !canUseEvents) { - // in case of event based cache update, cache will be updated during commit. - sharedCache - .alterDatabaseInCache(StringUtils.normalizeIdentifier(catName), StringUtils.normalizeIdentifier(dbName), db); - } - return succ; + return rawStore.alterDatabase(catName, dbName, db); } @Override public List getDatabases(String catName, String pattern) throws MetaException { - if (!sharedCache.isDatabaseCachePrewarmed() || (canUseEvents && rawStore.isActiveTransaction())) { - return rawStore.getDatabases(catName, pattern); - } - return sharedCache.listCachedDatabases(catName, pattern); + return rawStore.getDatabases(catName, pattern); } @Override public List getAllDatabases(String catName) throws MetaException { - if (!sharedCache.isDatabaseCachePrewarmed() || (canUseEvents && rawStore.isActiveTransaction())) { - return rawStore.getAllDatabases(catName); - } - return sharedCache.listCachedDatabases(catName); + return rawStore.getAllDatabases(catName); } @Override public boolean createType(Type type) { @@ -1138,38 +919,11 @@ private void validateTableType(Table tbl) { @Override public void createTable(Table tbl) throws InvalidObjectException, MetaException { rawStore.createTable(tbl); - // in case of event based cache update, cache will be updated during commit. - if (canUseEvents) { - return; - } - String catName = normalizeIdentifier(tbl.getCatName()); - String dbName = normalizeIdentifier(tbl.getDbName()); - String tblName = normalizeIdentifier(tbl.getTableName()); - if (!shouldCacheTable(catName, dbName, tblName)) { - return; - } - validateTableType(tbl); - sharedCache.addTableToCache(catName, dbName, tblName, tbl); } @Override public boolean dropTable(String catName, String dbName, String tblName) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException { - boolean succ = rawStore.dropTable(catName, dbName, tblName); - // in case of event based cache update, cache will be updated during commit. 
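The catalog and database methods above lose their cache branches outright instead of gaining a write-id check: unlike tables and partitions, these objects carry no writeId to validate a cached copy against, so the patch settles on plain delegation. A minimal sketch of the resulting shape, using hypothetical simplified types rather than the real RawStore contract:

```java
// Sketch only: a caching store that, for object kinds it cannot
// version-check, degrades to pure pass-through delegation.
interface BackingStore {
  String getDatabase(String catName, String dbName);
}

class DelegatingStore implements BackingStore {
  private final BackingStore rawStore;

  DelegatingStore(BackingStore rawStore) {
    this.rawStore = rawStore;
  }

  @Override
  public String getDatabase(String catName, String dbName) {
    // A Database object has no write-id, so there is no cheap way to prove
    // a cached copy is fresh; always ask the backing store.
    return rawStore.getDatabase(catName, dbName);
  }
}
```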
- if (succ && !canUseEvents) { - catName = normalizeIdentifier(catName); - dbName = normalizeIdentifier(dbName); - tblName = normalizeIdentifier(tblName); - if (!shouldCacheTable(catName, dbName, tblName)) { - return succ; - } - sharedCache.removeTableFromCache(catName, dbName, tblName); - } - return succ; - } - - @Override public Table getTable(String catName, String dbName, String tblName) throws MetaException { - return getTable(catName, dbName, tblName, null); + return rawStore.dropTable(catName, dbName, tblName); } @Override public Table getTable(String catName, String dbName, String tblName, String validWriteIds) @@ -1177,23 +931,31 @@ private void validateTableType(Table tbl) { catName = normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(catName, dbName, tblName) || (canUseEvents && rawStore.isActiveTransaction())) { + ValidWriteIdList writeIdsToRead = validWriteIds!=null?new ValidReaderWriteIdList(validWriteIds):null; + if (writeIdsToRead == null || !shouldCacheTable(catName, dbName, tblName)) { return rawStore.getTable(catName, dbName, tblName, validWriteIds); } - Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName); + Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName, writeIdsToRead); if (tbl == null) { - // This table is not yet loaded in cache + // no valid entry in cache // If the prewarm thread is working on this table's database, // let's move this table to the top of tblNamesBeingPrewarmed stack, // so that it gets loaded to the cache faster and is available for subsequent requests tblsPendingPrewarm.prioritizeTableForPrewarm(tblName); + if (cacheMiss!=null) cacheMiss.inc(); Table t = rawStore.getTable(catName, dbName, tblName, validWriteIds); if (t != null) { - sharedCache.addTableToCache(catName, dbName, tblName, t); + sharedCache.addTableToCacheIfNotExists(catName, dbName, tblName, t, writeIdsToRead); } return t; } + + if (!isTransactionalTable(tbl)) { + return rawStore.getTable(catName, dbName, tblName, validWriteIds); + } + + if (cacheHit!=null) cacheHit.inc(); if (validWriteIds != null) { tbl.setParameters( adjustStatsParamsForGet(tbl.getParameters(), tbl.getParameters(), tbl.getWriteId(), validWriteIds)); @@ -1221,59 +983,17 @@ private void validateTableType(Table tbl) { } @Override public boolean addPartition(Partition part) throws InvalidObjectException, MetaException { - boolean succ = rawStore.addPartition(part); - // in case of event based cache update, cache will be updated during commit. - if (succ && !canUseEvents) { - String dbName = normalizeIdentifier(part.getDbName()); - String tblName = normalizeIdentifier(part.getTableName()); - String catName = part.isSetCatName() ? normalizeIdentifier(part.getCatName()) : DEFAULT_CATALOG_NAME; - if (!shouldCacheTable(catName, dbName, tblName)) { - return succ; - } - sharedCache.addPartitionToCache(catName, dbName, tblName, part); - } - return succ; + return rawStore.addPartition(part); } @Override public boolean addPartitions(String catName, String dbName, String tblName, List parts) throws InvalidObjectException, MetaException { - boolean succ = rawStore.addPartitions(catName, dbName, tblName, parts); - // in case of event based cache update, cache will be updated during commit. 
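The rewritten getTable() above sets the pattern that every read path in this patch follows: no write-id list means no cache, a cache miss falls through to the raw store and backfills the cache, and hits and misses are counted. A self-contained sketch of that flow with simplified stand-in types; note that string equality below stands in for the real write-id compatibility check:

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

// Sketch of the write-id-gated read path: consult the cache only when the
// caller supplies a write-id snapshot, fall back to the raw store (and
// backfill the cache) on a miss. CachedTable and loadFromRawStore are
// stand-ins, not metastore types.
class WriteIdGatedReader {
  record CachedTable(String payload, String validWriteIds) {}

  private final Map<String, CachedTable> cache = new ConcurrentHashMap<>();
  private final AtomicLong cacheHit = new AtomicLong();
  private final AtomicLong cacheMiss = new AtomicLong();

  String getTable(String key, String validWriteIds) {
    if (validWriteIds == null) {
      // No snapshot to validate against: the cache cannot prove freshness.
      return loadFromRawStore(key);
    }
    CachedTable cached = cache.get(key);
    if (cached == null || !cached.validWriteIds().equals(validWriteIds)) {
      cacheMiss.incrementAndGet();
      String fresh = loadFromRawStore(key);
      cache.putIfAbsent(key, new CachedTable(fresh, validWriteIds));
      return fresh;
    }
    cacheHit.incrementAndGet();
    return cached.payload();
  }

  private String loadFromRawStore(String key) {
    return "table:" + key; // placeholder for the JDO/SQL lookup
  }
}
```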
- if (succ && !canUseEvents) { - catName = normalizeIdentifier(catName); - dbName = normalizeIdentifier(dbName); - tblName = normalizeIdentifier(tblName); - if (!shouldCacheTable(catName, dbName, tblName)) { - return succ; - } - sharedCache.addPartitionsToCache(catName, dbName, tblName, parts); - } - return succ; + return rawStore.addPartitions(catName, dbName, tblName, parts); } @Override public boolean addPartitions(String catName, String dbName, String tblName, PartitionSpecProxy partitionSpec, boolean ifNotExists) throws InvalidObjectException, MetaException { - boolean succ = rawStore.addPartitions(catName, dbName, tblName, partitionSpec, ifNotExists); - // in case of event based cache update, cache will be updated during commit. - if (succ && !canUseEvents) { - catName = normalizeIdentifier(catName); - dbName = normalizeIdentifier(dbName); - tblName = normalizeIdentifier(tblName); - if (!shouldCacheTable(catName, dbName, tblName)) { - return succ; - } - PartitionSpecProxy.PartitionIterator iterator = partitionSpec.getPartitionIterator(); - while (iterator.hasNext()) { - Partition part = iterator.next(); - sharedCache.addPartitionToCache(catName, dbName, tblName, part); - } - } - return succ; - } - - @Override public Partition getPartition(String catName, String dbName, String tblName, List partVals) - throws MetaException, NoSuchObjectException { - return getPartition(catName, dbName, tblName, partVals, null); + return rawStore.addPartitions(catName, dbName, tblName, partitionSpec, ifNotExists); } @Override public Partition getPartition(String catName, String dbName, String tblName, List partVals, @@ -1281,20 +1001,30 @@ private void validateTableType(Table tbl) { catName = normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(catName, dbName, tblName) || (canUseEvents && rawStore.isActiveTransaction())) { + ValidWriteIdList writeIdsToRead = validWriteIds!=null?new ValidReaderWriteIdList(validWriteIds):null; + if (writeIdsToRead == null || !shouldCacheTable(catName, dbName, tblName)) { + return rawStore.getPartition(catName, dbName, tblName, partVals, validWriteIds); + } + Table table = sharedCache.getTableFromCache(catName, dbName, tblName, writeIdsToRead); + + if (table == null) { + // no valid entry in cache + if (cacheMiss!=null) cacheMiss.inc(); + return rawStore.getPartition(catName, dbName, tblName, partVals, validWriteIds); + } + + + if (!isTransactionalTable(table)) { return rawStore.getPartition(catName, dbName, tblName, partVals, validWriteIds); } + + if (cacheHit!=null) cacheHit.inc(); Partition part = sharedCache.getPartitionFromCache(catName, dbName, tblName, partVals); if (part == null) { // The partition is not in the cache; the table's partitions may not have been loaded yet return rawStore.getPartition(catName, dbName, tblName, partVals, validWriteIds); } if (validWriteIds != null) { - Table table = sharedCache.getTableFromCache(catName, dbName, tblName); - if (table == null) { - // The table containing the partition is not yet loaded in cache - return rawStore.getPartition(catName, dbName, tblName, partVals, validWriteIds); - } part.setParameters( adjustStatsParamsForGet(table.getParameters(), part.getParameters(), part.getWriteId(), validWriteIds)); } @@ -1303,109 +1033,73 @@ private void validateTableType(Table tbl) { } @Override public boolean doesPartitionExist(String catName, String dbName, String tblName, List partKeys, - List partVals) throws MetaException, NoSuchObjectException { +
List partVals, String validWriteIdList) throws MetaException, NoSuchObjectException { catName = normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(catName, dbName, tblName) || (canUseEvents && rawStore.isActiveTransaction())) { - return rawStore.doesPartitionExist(catName, dbName, tblName, partKeys, partVals); + ValidWriteIdList writeIdsToRead = validWriteIdList!=null?new ValidReaderWriteIdList(validWriteIdList):null; + if (validWriteIdList == null || !shouldCacheTable(catName, dbName, tblName)) { + return rawStore.doesPartitionExist(catName, dbName, tblName, partKeys, partVals, validWriteIdList); } - Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName); + Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName, writeIdsToRead); + if (tbl == null) { - // The table containing the partition is not yet loaded in cache - return rawStore.doesPartitionExist(catName, dbName, tblName, partKeys, partVals); + // no valid entry in cache + if (cacheMiss!=null) cacheMiss.inc(); + return rawStore.doesPartitionExist(catName, dbName, tblName, partKeys, partVals, validWriteIdList); + } + + if (!isTransactionalTable(tbl)) { + return rawStore.doesPartitionExist(catName, dbName, tblName, partKeys, partVals, validWriteIdList); } + return sharedCache.existPartitionFromCache(catName, dbName, tblName, partVals); } @Override public boolean dropPartition(String catName, String dbName, String tblName, List partVals) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException { - boolean succ = rawStore.dropPartition(catName, dbName, tblName, partVals); - // in case of event based cache update, cache will be updated during commit. - if (succ && !canUseEvents) { - catName = normalizeIdentifier(catName); - dbName = normalizeIdentifier(dbName); - tblName = normalizeIdentifier(tblName); - if (!shouldCacheTable(catName, dbName, tblName)) { - return succ; - } - sharedCache.removePartitionFromCache(catName, dbName, tblName, partVals); - } - return succ; + return rawStore.dropPartition(catName, dbName, tblName, partVals); } @Override public void dropPartitions(String catName, String dbName, String tblName, List partNames) throws MetaException, NoSuchObjectException { rawStore.dropPartitions(catName, dbName, tblName, partNames); - // in case of event based cache update, cache will be updated during commit. 
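All of these signatures now thread a serialized validWriteIdList string down to the store, and the patch turns it back into an object with new ValidReaderWriteIdList(String). Assuming the standard serialization format (table:highWatermark:minOpenWriteId:openIds:abortedIds) and hive-storage-api on the classpath, its visibility semantics can be demonstrated directly:

```java
import org.apache.hadoop.hive.common.ValidReaderWriteIdList;
import org.apache.hadoop.hive.common.ValidWriteIdList;

// Illustration of the write-id list strings threaded through these methods.
public class WriteIdListDemo {
  public static void main(String[] args) {
    // "default.t" with high watermark 5 and no open/aborted write ids:
    // every write id up to 5 is visible to this reader.
    ValidWriteIdList ids =
        new ValidReaderWriteIdList("default.t:5:" + Long.MAX_VALUE + "::");
    System.out.println(ids.isWriteIdValid(3));  // true: committed and visible
    System.out.println(ids.isWriteIdValid(6));  // false: above the high watermark
    System.out.println(ids.getHighWatermark()); // 5
  }
}
```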
- if (canUseEvents) { - return; - } - catName = normalizeIdentifier(catName); - dbName = StringUtils.normalizeIdentifier(dbName); - tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(catName, dbName, tblName)) { - return; - } - List> partVals = new ArrayList<>(); - for (String partName : partNames) { - partVals.add(partNameToVals(partName)); - } - sharedCache.removePartitionsFromCache(catName, dbName, tblName, partVals); } - @Override public List getPartitions(String catName, String dbName, String tblName, int max) + @Override public List getPartitions(String catName, String dbName, String tblName, int max, String validWriteIdList) throws MetaException, NoSuchObjectException { catName = normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(catName, dbName, tblName) || (canUseEvents && rawStore.isActiveTransaction())) { - return rawStore.getPartitions(catName, dbName, tblName, max); + ValidWriteIdList writeIdsToRead = validWriteIdList!=null?new ValidReaderWriteIdList(validWriteIdList):null; + if (writeIdsToRead==null || !shouldCacheTable(catName, dbName, tblName)) { + return rawStore.getPartitions(catName, dbName, tblName, max, validWriteIdList); } - Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName); + Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName, writeIdsToRead); + if (tbl == null) { - // The table containing the partitions is not yet loaded in cache - return rawStore.getPartitions(catName, dbName, tblName, max); + // no valid entry in cache + if (cacheMiss!=null) cacheMiss.inc(); + return rawStore.getPartitions(catName, dbName, tblName, max, validWriteIdList); + } + + if (!isTransactionalTable(tbl)) { + return rawStore.getPartitions(catName, dbName, tblName, max, validWriteIdList); } + + if (cacheHit!=null) cacheHit.inc(); List parts = sharedCache.listCachedPartitions(catName, dbName, tblName, max); return parts; } @Override public Map getPartitionLocations(String catName, String dbName, String tblName, - String baseLocationToNotShow, int max) { - return rawStore.getPartitionLocations(catName, dbName, tblName, baseLocationToNotShow, max); + String baseLocationToNotShow, int max, String validWriteIdList) { + return rawStore.getPartitionLocations(catName, dbName, tblName, baseLocationToNotShow, max, validWriteIdList); } @Override public Table alterTable(String catName, String dbName, String tblName, Table newTable, String validWriteIds) throws InvalidObjectException, MetaException { - newTable = rawStore.alterTable(catName, dbName, tblName, newTable, validWriteIds); - // in case of event based cache update, cache will be updated during commit. 
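With alterTable() and the other mutators reduced to delegation, a cached entry is no longer eagerly patched on write; instead each TableWrapper records the ValidWriteIdList it was populated under (see the SharedCache hunk below) and is validated lazily against the reader's snapshot. The SharedCache changes import org.apache.hive.common.util.TxnIdUtils for this; the sketch below shows the intended comparison, though the exact method name and signature are inferred from that import rather than shown in this hunk:

```java
import org.apache.hadoop.hive.common.ValidReaderWriteIdList;
import org.apache.hadoop.hive.common.ValidWriteIdList;
import org.apache.hive.common.util.TxnIdUtils;

// Decide whether a cached entry, tagged with the write-id snapshot it was
// loaded under, can be served to a reader with its own snapshot.
public class CacheValidityDemo {
  public static void main(String[] args) {
    ValidWriteIdList cachedAt =
        new ValidReaderWriteIdList("default.t:5:" + Long.MAX_VALUE + "::");
    ValidWriteIdList reader =
        new ValidReaderWriteIdList("default.t:5:" + Long.MAX_VALUE + "::");
    // Equivalent snapshots: the cached entry can be served as-is.
    System.out.println(TxnIdUtils.checkEquivalentWriteIds(cachedAt, reader));
  }
}
```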
- if (canUseEvents) { - return newTable; - } - catName = normalizeIdentifier(catName); - dbName = normalizeIdentifier(dbName); - tblName = normalizeIdentifier(tblName); - String newTblName = normalizeIdentifier(newTable.getTableName()); - if (!shouldCacheTable(catName, dbName, tblName) && !shouldCacheTable(catName, dbName, newTblName)) { - return newTable; - } - Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName); - if (tbl == null) { - // The table is not yet loaded in cache - return newTable; - } - if (shouldCacheTable(catName, dbName, tblName) && shouldCacheTable(catName, dbName, newTblName)) { - // If old table is in the cache and the new table can also be cached - sharedCache.alterTableInCache(catName, dbName, tblName, newTable); - } else if (!shouldCacheTable(catName, dbName, tblName) && shouldCacheTable(catName, dbName, newTblName)) { - // If old table is *not* in the cache but the new table can be cached - sharedCache.addTableToCache(catName, dbName, newTblName, newTable); - } else if (shouldCacheTable(catName, dbName, tblName) && !shouldCacheTable(catName, dbName, newTblName)) { - // If old table is in the cache but the new table *cannot* be cached - sharedCache.removeTableFromCache(catName, dbName, tblName); - } - return newTable; + return rawStore.alterTable(catName, dbName, tblName, newTable, validWriteIds); } @Override public void updateCreationMetadata(String catName, String dbname, String tablename, CreationMetadata cm) @@ -1439,40 +1133,7 @@ private void validateTableType(Table tbl) { @Override public List
getTableObjectsByName(String catName, String dbName, List tblNames) throws MetaException, UnknownDBException { - if (canUseEvents && rawStore.isActiveTransaction()) { - return rawStore.getTableObjectsByName(catName, dbName, tblNames); - } - dbName = normalizeIdentifier(dbName); - catName = normalizeIdentifier(catName); - boolean missSomeInCache = false; - for (String tblName : tblNames) { - tblName = normalizeIdentifier(tblName); - if (!shouldCacheTable(catName, dbName, tblName)) { - missSomeInCache = true; - break; - } - } - if (!isCachePrewarmed.get() || missSomeInCache) { - return rawStore.getTableObjectsByName(catName, dbName, tblNames); - } - Database db = sharedCache.getDatabaseFromCache(catName, dbName); - if (db == null) { - throw new UnknownDBException("Could not find database " + dbName); - } - List
tables = new ArrayList<>(); - for (String tblName : tblNames) { - tblName = normalizeIdentifier(tblName); - Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName); - if (tbl == null) { - tbl = rawStore.getTable(catName, dbName, tblName); - sharedCache.addTableToCache(catName, dbName, tblName, tbl); - } - if (tbl != null) { - tables.add(tbl); - } - tables.add(tbl); - } - return tables; + return rawStore.getTableObjectsByName(catName, dbName, tblNames); } @Override public List getAllTables(String catName, String dbName) throws MetaException { @@ -1486,19 +1147,28 @@ private void validateTableType(Table tbl) { return rawStore.listTableNamesByFilter(catName, dbName, filter, maxTables); } - @Override public List listPartitionNames(String catName, String dbName, String tblName, short maxParts) + @Override public List listPartitionNames(String catName, String dbName, String tblName, short maxParts, String validWriteIdList) throws MetaException { catName = StringUtils.normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(catName, dbName, tblName) || (canUseEvents && rawStore.isActiveTransaction())) { - return rawStore.listPartitionNames(catName, dbName, tblName, maxParts); + ValidWriteIdList writeIdsToRead = validWriteIdList!=null?new ValidReaderWriteIdList(validWriteIdList):null; + if (writeIdsToRead==null || !shouldCacheTable(catName, dbName, tblName)) { + return rawStore.listPartitionNames(catName, dbName, tblName, maxParts, validWriteIdList); } - Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName); + Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName, writeIdsToRead); + if (tbl == null) { - // The table is not yet loaded in cache - return rawStore.listPartitionNames(catName, dbName, tblName, maxParts); + // no valid entry in cache + if (cacheMiss!=null) cacheMiss.inc(); + return rawStore.listPartitionNames(catName, dbName, tblName, maxParts, validWriteIdList); } + + if (!isTransactionalTable(tbl)) { + return rawStore.listPartitionNames(catName, dbName, tblName, maxParts, validWriteIdList); + } + + if (cacheHit!=null) cacheHit.inc(); List partitionNames = new ArrayList<>(); int count = 0; for (Partition part : sharedCache.listCachedPartitions(catName, dbName, tblName, maxParts)) { @@ -1511,43 +1181,19 @@ private void validateTableType(Table tbl) { @Override public PartitionValuesResponse listPartitionValues(String catName, String dbName, String tblName, List cols, boolean applyDistinct, String filter, boolean ascending, List order, - long maxParts) throws MetaException { + long maxParts, String validWriteIdList) throws MetaException { throw new UnsupportedOperationException(); } @Override public Partition alterPartition(String catName, String dbName, String tblName, List partVals, Partition newPart, String validWriteIds) throws InvalidObjectException, MetaException { - newPart = rawStore.alterPartition(catName, dbName, tblName, partVals, newPart, validWriteIds); - // in case of event based cache update, cache will be updated during commit. 
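The null-guarded cacheHit/cacheMiss counters sprinkled through these methods suggest that metrics may be disabled by configuration. One plausible wiring with Dropwizard counters; the registry handling and metric names here are assumptions, not the metastore's actual Metrics plumbing:

```java
import com.codahale.metrics.Counter;
import com.codahale.metrics.MetricRegistry;

// Counters stay null when metrics are off, hence the null guards at each
// increment site.
public class CacheCounters {
  private final Counter cacheHit;  // may remain null if metrics are disabled
  private final Counter cacheMiss;

  public CacheCounters(MetricRegistry registry) {
    this.cacheHit = registry == null ? null : registry.counter("cachedstore.hit");
    this.cacheMiss = registry == null ? null : registry.counter("cachedstore.miss");
  }

  void onHit()  { if (cacheHit  != null) cacheHit.inc();  }
  void onMiss() { if (cacheMiss != null) cacheMiss.inc(); }
}
```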
- if (canUseEvents) { - return newPart; - } - catName = normalizeIdentifier(catName); - dbName = normalizeIdentifier(dbName); - tblName = normalizeIdentifier(tblName); - if (!shouldCacheTable(catName, dbName, tblName)) { - return newPart; - } - sharedCache.alterPartitionInCache(catName, dbName, tblName, partVals, newPart); - return newPart; + return rawStore.alterPartition(catName, dbName, tblName, partVals, newPart, validWriteIds); } @Override public List alterPartitions(String catName, String dbName, String tblName, List> partValsList, List newParts, long writeId, String validWriteIds) throws InvalidObjectException, MetaException { - newParts = rawStore.alterPartitions(catName, dbName, tblName, partValsList, newParts, writeId, validWriteIds); - // in case of event based cache update, cache will be updated during commit. - if (canUseEvents) { - return newParts; - } - catName = normalizeIdentifier(catName); - dbName = normalizeIdentifier(dbName); - tblName = normalizeIdentifier(tblName); - if (!shouldCacheTable(catName, dbName, tblName)) { - return newParts; - } - sharedCache.alterPartitionsInCache(catName, dbName, tblName, partValsList, newParts); - return newParts; + return rawStore.alterPartitions(catName, dbName, tblName, partValsList, newParts, writeId, validWriteIds); } private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr, String defaultPartName, short maxParts, @@ -1567,33 +1213,42 @@ private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr, Str @Override // TODO: implement using SharedCache public List getPartitionsByFilter(String catName, String dbName, String tblName, String filter, - short maxParts) throws MetaException, NoSuchObjectException { - return rawStore.getPartitionsByFilter(catName, dbName, tblName, filter, maxParts); + short maxParts, String validWriteIdList) throws MetaException, NoSuchObjectException { + return rawStore.getPartitionsByFilter(catName, dbName, tblName, filter, maxParts, validWriteIdList); } @Override /** * getPartitionSpecsByFilterAndProjection interface is currently non-cacheable. 
*/ public List getPartitionSpecsByFilterAndProjection(Table table, - GetPartitionsProjectionSpec projectionSpec, GetPartitionsFilterSpec filterSpec) + GetPartitionsProjectionSpec projectionSpec, GetPartitionsFilterSpec filterSpec, String validWriteIdList) throws MetaException, NoSuchObjectException { - return rawStore.getPartitionSpecsByFilterAndProjection(table, projectionSpec, filterSpec); + return rawStore.getPartitionSpecsByFilterAndProjection(table, projectionSpec, filterSpec, validWriteIdList); } @Override public boolean getPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr, - String defaultPartitionName, short maxParts, List result) throws TException { + String defaultPartitionName, short maxParts, List result, String validWriteIdList) throws TException { catName = StringUtils.normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(catName, dbName, tblName) || (canUseEvents && rawStore.isActiveTransaction())) { - return rawStore.getPartitionsByExpr(catName, dbName, tblName, expr, defaultPartitionName, maxParts, result); + ValidWriteIdList writeIdsToRead = validWriteIdList!=null?new ValidReaderWriteIdList(validWriteIdList):null; + if (writeIdsToRead==null || !shouldCacheTable(catName, dbName, tblName)) { + return rawStore.getPartitionsByExpr(catName, dbName, tblName, expr, defaultPartitionName, maxParts, result, validWriteIdList); } List partNames = new LinkedList<>(); - Table table = sharedCache.getTableFromCache(catName, dbName, tblName); + Table table = sharedCache.getTableFromCache(catName, dbName, tblName, writeIdsToRead); + if (table == null) { - // The table is not yet loaded in cache - return rawStore.getPartitionsByExpr(catName, dbName, tblName, expr, defaultPartitionName, maxParts, result); + // no valid entry in cache + if (cacheMiss!=null) cacheMiss.inc(); + return rawStore.getPartitionsByExpr(catName, dbName, tblName, expr, defaultPartitionName, maxParts, result, validWriteIdList); } + + if (!isTransactionalTable(table)) { + return rawStore.getPartitionsByExpr(catName, dbName, tblName, expr, defaultPartitionName, maxParts, result, validWriteIdList); + } + + if (cacheHit!=null) cacheHit.inc(); boolean hasUnknownPartitions = getPartitionNamesPrunedByExprNoTxn(table, expr, defaultPartitionName, maxParts, partNames, sharedCache); for (String partName : partNames) { @@ -1604,26 +1259,34 @@ private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr, Str return hasUnknownPartitions; } - @Override public int getNumPartitionsByFilter(String catName, String dbName, String tblName, String filter) + @Override public int getNumPartitionsByFilter(String catName, String dbName, String tblName, String filter, String validWriteIdList) throws MetaException, NoSuchObjectException { - return rawStore.getNumPartitionsByFilter(catName, dbName, tblName, filter); + return rawStore.getNumPartitionsByFilter(catName, dbName, tblName, filter, validWriteIdList); } - @Override public int getNumPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr) + @Override public int getNumPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr, String validWriteIdList) throws MetaException, NoSuchObjectException { catName = normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(catName, dbName, tblName) || (canUseEvents && 
rawStore.isActiveTransaction())) { - return rawStore.getNumPartitionsByExpr(catName, dbName, tblName, expr); + ValidWriteIdList writeIdsToRead = validWriteIdList!=null?new ValidReaderWriteIdList(validWriteIdList):null; + if (writeIdsToRead==null || !shouldCacheTable(catName, dbName, tblName)) { + return rawStore.getNumPartitionsByExpr(catName, dbName, tblName, expr, validWriteIdList); } String defaultPartName = MetastoreConf.getVar(getConf(), ConfVars.DEFAULTPARTITIONNAME); List partNames = new LinkedList<>(); - Table table = sharedCache.getTableFromCache(catName, dbName, tblName); + Table table = sharedCache.getTableFromCache(catName, dbName, tblName, writeIdsToRead); + if (table == null) { - // The table is not yet loaded in cache - return rawStore.getNumPartitionsByExpr(catName, dbName, tblName, expr); + // no valid entry in cache + if (cacheMiss!=null) cacheMiss.inc(); + return rawStore.getNumPartitionsByExpr(catName, dbName, tblName, expr, validWriteIdList); + } + + if (!isTransactionalTable(table)) { + return rawStore.getNumPartitionsByExpr(catName, dbName, tblName, expr, validWriteIdList); } + getPartitionNamesPrunedByExprNoTxn(table, expr, defaultPartName, Short.MAX_VALUE, partNames, sharedCache); return partNames.size(); } @@ -1641,18 +1304,27 @@ private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr, Str } @Override public List getPartitionsByNames(String catName, String dbName, String tblName, - List partNames) throws MetaException, NoSuchObjectException { + List partNames, String validWriteIdList) throws MetaException, NoSuchObjectException { catName = StringUtils.normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(catName, dbName, tblName) || (canUseEvents && rawStore.isActiveTransaction())) { - return rawStore.getPartitionsByNames(catName, dbName, tblName, partNames); + ValidWriteIdList writeIdsToRead = validWriteIdList!=null?new ValidReaderWriteIdList(validWriteIdList):null; + if (writeIdsToRead==null || !shouldCacheTable(catName, dbName, tblName)) { + return rawStore.getPartitionsByNames(catName, dbName, tblName, partNames, validWriteIdList); } - Table table = sharedCache.getTableFromCache(catName, dbName, tblName); + Table table = sharedCache.getTableFromCache(catName, dbName, tblName, writeIdsToRead); + if (table == null) { - // The table is not yet loaded in cache - return rawStore.getPartitionsByNames(catName, dbName, tblName, partNames); + // no valid entry in cache + if (cacheMiss!=null) cacheMiss.inc(); + return rawStore.getPartitionsByNames(catName, dbName, tblName, partNames, validWriteIdList); } + + if (!isTransactionalTable(table)) { + return rawStore.getPartitionsByNames(catName, dbName, tblName, partNames, validWriteIdList); + } + + if (cacheHit!=null) cacheHit.inc(); List partitions = new ArrayList<>(); for (String partName : partNames) { Partition part = sharedCache.getPartitionFromCache(catName, dbName, tblName, partNameToVals(partName)); @@ -1793,18 +1465,27 @@ private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr, Str } @Override public Partition getPartitionWithAuth(String catName, String dbName, String tblName, List partVals, - String userName, List groupNames) throws MetaException, NoSuchObjectException, InvalidObjectException { + String userName, List groupNames, String validWriteIdList) throws MetaException, NoSuchObjectException, InvalidObjectException { catName = StringUtils.normalizeIdentifier(catName); 
dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(catName, dbName, tblName) || (canUseEvents && rawStore.isActiveTransaction())) { - return rawStore.getPartitionWithAuth(catName, dbName, tblName, partVals, userName, groupNames); + ValidWriteIdList writeIdsToRead = validWriteIdList!=null?new ValidReaderWriteIdList(validWriteIdList):null; + if (writeIdsToRead==null || !shouldCacheTable(catName, dbName, tblName)) { + return rawStore.getPartitionWithAuth(catName, dbName, tblName, partVals, userName, groupNames, validWriteIdList); } - Table table = sharedCache.getTableFromCache(catName, dbName, tblName); + Table table = sharedCache.getTableFromCache(catName, dbName, tblName, writeIdsToRead); + if (table == null) { - // The table is not yet loaded in cache - return rawStore.getPartitionWithAuth(catName, dbName, tblName, partVals, userName, groupNames); + // no valid entry in cache + if (cacheMiss!=null) cacheMiss.inc(); + return rawStore.getPartitionWithAuth(catName, dbName, tblName, partVals, userName, groupNames, validWriteIdList); } + + if (!isTransactionalTable(table)) { + return rawStore.getPartitionWithAuth(catName, dbName, tblName, partVals, userName, groupNames, validWriteIdList); + } + + if (cacheHit!=null) cacheHit.inc(); Partition p = sharedCache.getPartitionFromCache(catName, dbName, tblName, partVals); if (p != null) { String partName = Warehouse.makePartName(table.getPartitionKeys(), partVals); @@ -1817,18 +1498,27 @@ private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr, Str } @Override public List getPartitionsWithAuth(String catName, String dbName, String tblName, short maxParts, - String userName, List groupNames) throws MetaException, NoSuchObjectException, InvalidObjectException { + String userName, List groupNames, String validWriteIdList) throws MetaException, NoSuchObjectException, InvalidObjectException { catName = StringUtils.normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(catName, dbName, tblName) || (canUseEvents && rawStore.isActiveTransaction())) { - return rawStore.getPartitionsWithAuth(catName, dbName, tblName, maxParts, userName, groupNames); + ValidWriteIdList writeIdsToRead = validWriteIdList!=null?new ValidReaderWriteIdList(validWriteIdList):null; + if (writeIdsToRead==null || !shouldCacheTable(catName, dbName, tblName)) { + return rawStore.getPartitionsWithAuth(catName, dbName, tblName, maxParts, userName, groupNames, validWriteIdList); } - Table table = sharedCache.getTableFromCache(catName, dbName, tblName); + Table table = sharedCache.getTableFromCache(catName, dbName, tblName, writeIdsToRead); + if (table == null) { - // The table is not yet loaded in cache - return rawStore.getPartitionsWithAuth(catName, dbName, tblName, maxParts, userName, groupNames); + // no valid entry in cache + if (cacheMiss!=null) cacheMiss.inc(); + return rawStore.getPartitionsWithAuth(catName, dbName, tblName, maxParts, userName, groupNames, validWriteIdList); + } + + if (!isTransactionalTable(table)) { + return rawStore.getPartitionsWithAuth(catName, dbName, tblName, maxParts, userName, groupNames, validWriteIdList); } + + if (cacheHit!=null) cacheHit.inc(); List partitions = new ArrayList<>(); int count = 0; for (Partition part : sharedCache.listCachedPartitions(catName, dbName, tblName, maxParts)) { @@ -1845,18 +1535,26 @@ private boolean 
getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr, Str } @Override public List listPartitionNamesPs(String catName, String dbName, String tblName, - List partSpecs, short maxParts) throws MetaException, NoSuchObjectException { + List partSpecs, short maxParts, String validWriteIdList) throws MetaException, NoSuchObjectException { catName = StringUtils.normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(catName, dbName, tblName) || (canUseEvents && rawStore.isActiveTransaction())) { - return rawStore.listPartitionNamesPs(catName, dbName, tblName, partSpecs, maxParts); + ValidWriteIdList writeIdsToRead = validWriteIdList!=null?new ValidReaderWriteIdList(validWriteIdList):null; + if (writeIdsToRead==null || !shouldCacheTable(catName, dbName, tblName)) { + return rawStore.listPartitionNamesPs(catName, dbName, tblName, partSpecs, maxParts, validWriteIdList); } - Table table = sharedCache.getTableFromCache(catName, dbName, tblName); + Table table = sharedCache.getTableFromCache(catName, dbName, tblName, writeIdsToRead); + if (table == null) { - // The table is not yet loaded in cache - return rawStore.listPartitionNamesPs(catName, dbName, tblName, partSpecs, maxParts); + // no valid entry in cache + if (cacheMiss!=null) cacheMiss.inc(); + return rawStore.listPartitionNamesPs(catName, dbName, tblName, partSpecs, maxParts, validWriteIdList); + } + + if (!isTransactionalTable(table)) { + return rawStore.listPartitionNamesPs(catName, dbName, tblName, partSpecs, maxParts, validWriteIdList); } + String partNameMatcher = getPartNameMatcher(table, partSpecs); List partitionNames = new ArrayList<>(); List allPartitions = sharedCache.listCachedPartitions(catName, dbName, tblName, maxParts); @@ -1872,19 +1570,28 @@ private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr, Str } @Override public List listPartitionsPsWithAuth(String catName, String dbName, String tblName, - List partSpecs, short maxParts, String userName, List groupNames) + List partSpecs, short maxParts, String userName, List groupNames, String validWriteIdList) throws MetaException, InvalidObjectException, NoSuchObjectException { catName = StringUtils.normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(catName, dbName, tblName) || (canUseEvents && rawStore.isActiveTransaction())) { - return rawStore.listPartitionsPsWithAuth(catName, dbName, tblName, partSpecs, maxParts, userName, groupNames); + ValidWriteIdList writeIdsToRead = validWriteIdList!=null?new ValidReaderWriteIdList(validWriteIdList):null; + if (writeIdsToRead==null || !shouldCacheTable(catName, dbName, tblName)) { + return rawStore.listPartitionsPsWithAuth(catName, dbName, tblName, partSpecs, maxParts, userName, groupNames, validWriteIdList); } - Table table = sharedCache.getTableFromCache(catName, dbName, tblName); + Table table = sharedCache.getTableFromCache(catName, dbName, tblName, writeIdsToRead); + if (table == null) { - // The table is not yet loaded in cache - return rawStore.listPartitionsPsWithAuth(catName, dbName, tblName, partSpecs, maxParts, userName, groupNames); + // no valid entry in cache + if (cacheMiss!=null) cacheMiss.inc(); + return rawStore.listPartitionsPsWithAuth(catName, dbName, tblName, partSpecs, maxParts, userName, groupNames, validWriteIdList); } + + if (!isTransactionalTable(table)) { + return 
rawStore.listPartitionsPsWithAuth(catName, dbName, tblName, partSpecs, maxParts, userName, groupNames, validWriteIdList); + } + + if (cacheHit!=null) cacheHit.inc(); String partNameMatcher = getPartNameMatcher(table, partSpecs); List partitions = new ArrayList<>(); List allPartitions = sharedCache.listCachedPartitions(catName, dbName, tblName, maxParts); @@ -1959,60 +1666,9 @@ public static ColumnStatistics adjustColStatForGet(Map tablePara return colStat; } - private static void updateTableColumnsStatsInternal(Configuration conf, ColumnStatistics colStats, - Map newParams, String validWriteIds, long writeId) throws MetaException { - String catName = colStats.getStatsDesc().isSetCatName() ? normalizeIdentifier( - colStats.getStatsDesc().getCatName()) : getDefaultCatalog(conf); - String dbName = normalizeIdentifier(colStats.getStatsDesc().getDbName()); - String tblName = normalizeIdentifier(colStats.getStatsDesc().getTableName()); - if (!shouldCacheTable(catName, dbName, tblName)) { - return; - } - Table table = sharedCache.getTableFromCache(catName, dbName, tblName); - if (table == null) { - // The table is not yet loaded in cache - return; - } - - boolean isTxn = TxnUtils.isTransactionalTable(table.getParameters()); - if (isTxn && validWriteIds != null) { - if (!areTxnStatsSupported) { - StatsSetupConst.setBasicStatsState(newParams, StatsSetupConst.FALSE); - } else { - String errorMsg = ObjectStore - .verifyStatsChangeCtx(TableName.getDbTable(dbName, tblName), table.getParameters(), newParams, writeId, - validWriteIds, true); - if (errorMsg != null) { - throw new MetaException(errorMsg); - } - if (!ObjectStore.isCurrentStatsValidForTheQuery(newParams, table.getWriteId(), validWriteIds, true)) { - // Make sure we set the flag to invalid regardless of the current value. - StatsSetupConst.setBasicStatsState(newParams, StatsSetupConst.FALSE); - LOG.info("Removed COLUMN_STATS_ACCURATE from the parameters of the table " + table.getDbName() + "." + table - .getTableName()); - } - } - } - - table.setWriteId(writeId); - table.setParameters(newParams); - sharedCache.alterTableInCache(catName, dbName, tblName, table); - sharedCache.updateTableColStatsInCache(catName, dbName, tblName, colStats.getStatsObj()); - } - @Override public Map updateTableColumnStatistics(ColumnStatistics colStats, String validWriteIds, long writeId) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { - Map newParams = rawStore.updateTableColumnStatistics(colStats, validWriteIds, writeId); - // in case of event based cache update, cache will be updated during commit. 
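Every read path above funnels through isTransactionalTable(), the static helper added at the end of CachedStore: only transactional tables have write ids, so only they can be served from the write-id-validated cache. A self-contained restatement of that parameter check ("transactional" table property, tolerating an upper-cased key, value compared case-insensitively):

```java
import java.util.Map;

public class TxnTableCheck {
  static final String TABLE_IS_TRANSACTIONAL = "transactional";

  static boolean isTransactional(Map<String, String> params) {
    if (params == null) {
      return false;
    }
    String v = params.get(TABLE_IS_TRANSACTIONAL);
    if (v == null) {
      // Some writers store the property with an upper-cased key.
      v = params.get(TABLE_IS_TRANSACTIONAL.toUpperCase());
    }
    return "true".equalsIgnoreCase(v);
  }

  public static void main(String[] args) {
    System.out.println(isTransactional(Map.of("transactional", "TRUE"))); // true
    System.out.println(isTransactional(Map.of()));                        // false
  }
}
```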
- if (newParams != null && !canUseEvents) { - updateTableColumnsStatsInternal(conf, colStats, newParams, null, writeId); - } - return newParams; - } - - @Override public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tblName, - List colNames) throws MetaException, NoSuchObjectException { - return getTableColumnStatistics(catName, dbName, tblName, colNames, null); + return rawStore.updateTableColumnStatistics(colStats, validWriteIds, writeId); } @Override public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tblName, @@ -2020,14 +1676,23 @@ private static void updateTableColumnsStatsInternal(Configuration conf, ColumnSt catName = StringUtils.normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(catName, dbName, tblName)) { + ValidWriteIdList writeIdsToRead = validWriteIds!=null?new ValidReaderWriteIdList(validWriteIds):null; + if (validWriteIds==null || !shouldCacheTable(catName, dbName, tblName)) { return rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames, validWriteIds); } - Table table = sharedCache.getTableFromCache(catName, dbName, tblName); + Table table = sharedCache.getTableFromCache(catName, dbName, tblName, writeIdsToRead); + if (table == null) { - // The table is not yet loaded in cache + // no valid entry in cache + if (cacheMiss!=null) cacheMiss.inc(); return rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames, validWriteIds); } + + if (!isTransactionalTable(table)) { + return rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames, validWriteIds); + } + + if (cacheHit!=null) cacheHit.inc(); ColumnStatistics columnStatistics = sharedCache.getTableColStatsFromCache(catName, dbName, tblName, colNames, validWriteIds, areTxnStatsSupported); if (columnStatistics == null) { @@ -2040,50 +1705,40 @@ private static void updateTableColumnsStatsInternal(Configuration conf, ColumnSt @Override public boolean deleteTableColumnStatistics(String catName, String dbName, String tblName, String colName) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { - boolean succ = rawStore.deleteTableColumnStatistics(catName, dbName, tblName, colName); - // in case of event based cache update, cache is updated during commit txn - if (succ && !canUseEvents) { - catName = normalizeIdentifier(catName); - dbName = normalizeIdentifier(dbName); - tblName = normalizeIdentifier(tblName); - if (!shouldCacheTable(catName, dbName, tblName)) { - return succ; - } - sharedCache.removeTableColStatsFromCache(catName, dbName, tblName, colName); - } - return succ; + return rawStore.deleteTableColumnStatistics(catName, dbName, tblName, colName); } @Override public Map updatePartitionColumnStatistics(ColumnStatistics colStats, List partVals, String validWriteIds, long writeId) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { - Map newParams = - rawStore.updatePartitionColumnStatistics(colStats, partVals, validWriteIds, writeId); - // in case of event based cache update, cache is updated during commit txn - if (newParams != null && !canUseEvents) { - String catName = colStats.getStatsDesc().isSetCatName() ? 
normalizeIdentifier( - colStats.getStatsDesc().getCatName()) : DEFAULT_CATALOG_NAME; - String dbName = normalizeIdentifier(colStats.getStatsDesc().getDbName()); - String tblName = normalizeIdentifier(colStats.getStatsDesc().getTableName()); - if (!shouldCacheTable(catName, dbName, tblName)) { - return newParams; - } - Partition part = getPartition(catName, dbName, tblName, partVals); - part.setParameters(newParams); - sharedCache.alterPartitionInCache(catName, dbName, tblName, partVals, part); - sharedCache.updatePartitionColStatsInCache(catName, dbName, tblName, partVals, colStats.getStatsObj()); - } - return newParams; - } - - @Override public List getPartitionColumnStatistics(String catName, String dbName, String tblName, - List partNames, List colNames) throws MetaException, NoSuchObjectException { - return getPartitionColumnStatistics(catName, dbName, tblName, partNames, colNames, null); + return rawStore.updatePartitionColumnStatistics(colStats, partVals, validWriteIds, writeId); } @Override public List getPartitionColumnStatistics(String catName, String dbName, String tblName, List partNames, List colNames, String writeIdList) throws MetaException, NoSuchObjectException { + catName = StringUtils.normalizeIdentifier(catName); + dbName = StringUtils.normalizeIdentifier(dbName); + tblName = StringUtils.normalizeIdentifier(tblName); + ValidWriteIdList writeIdsToRead = writeIdList!=null?new ValidReaderWriteIdList(writeIdList):null; + if (writeIdsToRead==null || !shouldCacheTable(catName, dbName, tblName)) { + return rawStore.getPartitionColumnStatistics(catName, dbName, tblName, partNames, colNames, writeIdList); + } + + Table table = sharedCache.getTableFromCache(catName, dbName, tblName, writeIdsToRead); + + if (table == null) { + // no valid entry in cache + if (cacheMiss!=null) cacheMiss.inc(); + return rawStore.getPartitionColumnStatistics(catName, dbName, tblName, partNames, colNames, writeIdList); + } + + if (!isTransactionalTable(table)) { + return rawStore.getPartitionColumnStatistics(catName, dbName, tblName, partNames, colNames, writeIdList); + } + + if (cacheHit!=null) cacheHit.inc(); + // If writeIdList is not null, that means stats are requested within a txn context. So set stats compliant to false, // if areTxnStatsSupported is false or the write id which has updated the stats is not compatible with writeIdList. // This is done within table lock as the number of partitions may be more than one and we need a consistent view @@ -2100,23 +1755,7 @@ private static void updateTableColumnsStatsInternal(Configuration conf, ColumnSt @Override public boolean deletePartitionColumnStatistics(String catName, String dbName, String tblName, String partName, List partVals, String colName) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { - boolean succ = rawStore.deletePartitionColumnStatistics(catName, dbName, tblName, partName, partVals, colName); - // in case of event based cache update, cache is updated during commit txn.
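mergeColStatsForPartitions() (in the next hunk) becomes static and takes the NDV-estimation knobs as explicit parameters instead of reading them from the store's conf, which makes the merge testable in isolation. A rough sketch of what the ndvTuner knob does, per my reading of Hive's stats aggregators; treat the exact formula as an assumption:

```java
// Aggregating per-partition NDV (number of distinct values) estimates:
// the true table-level NDV lies between the max of the per-partition
// values (full overlap) and their sum (no overlap).
class AggrStatsMerger {
  static double mergeNdv(long[] perPartitionNdv,
                         boolean useDensityFunctionForNDVEstimation,
                         double ndvTuner) {
    long max = 0, sum = 0;
    for (long ndv : perPartitionNdv) {
      max = Math.max(max, ndv);
      sum += ndv;
    }
    // ndvTuner in [0,1] interpolates between the two bounds; the density
    // function path is a separate estimator and is not sketched here.
    return useDensityFunctionForNDVEstimation
        ? max
        : max + (sum - max) * ndvTuner;
  }
}
```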
- if (succ && !canUseEvents) { - catName = normalizeIdentifier(catName); - dbName = normalizeIdentifier(dbName); - tblName = normalizeIdentifier(tblName); - if (!shouldCacheTable(catName, dbName, tblName)) { - return succ; - } - sharedCache.removePartitionColStatsFromCache(catName, dbName, tblName, partVals, colName); - } - return succ; - } - - @Override public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, List partNames, - List colNames) throws MetaException, NoSuchObjectException { - return get_aggr_stats_for(catName, dbName, tblName, partNames, colNames, null); + return rawStore.deletePartitionColumnStatistics(catName, dbName, tblName, partName, partVals, colName); } @Override public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, List partNames, @@ -2125,19 +1764,27 @@ private static void updateTableColumnsStatsInternal(Configuration conf, ColumnSt catName = normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); + ValidWriteIdList writeIdsToRead = writeIdList!=null?new ValidReaderWriteIdList(writeIdList):null; // TODO: we currently cannot do transactional checks for stats here // (incl. due to lack of sync w.r.t. the below rawStore call). // In case the cache is updated using events, aggregate is calculated locally and thus can be read from cache. - if (!shouldCacheTable(catName, dbName, tblName) || (writeIdList != null && !canUseEvents)) { - return rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames, writeIdList); + if (writeIdsToRead==null || !shouldCacheTable(catName, dbName, tblName)) { + return rawStore.get_aggr_stats_for( + catName, dbName, tblName, partNames, colNames, writeIdList); } - Table table = sharedCache.getTableFromCache(catName, dbName, tblName); + Table table = sharedCache.getTableFromCache(catName, dbName, tblName, writeIdsToRead); + + if (table == null) { - // The table is not yet loaded in cache + // no valid entry in cache + if (cacheMiss!=null) cacheMiss.inc(); + return rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames, writeIdList); + } + + if (!isTransactionalTable(table)) { return rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames, writeIdList); } - List allPartNames = rawStore.listPartitionNames(catName, dbName, tblName, (short) -1); + List allPartNames = rawStore.listPartitionNames(catName, dbName, tblName, (short) -1, writeIdList); StatsType type = StatsType.PARTIAL; if (partNames.size() == allPartNames.size()) { colStats = sharedCache.getAggrStatsFromCache(catName, dbName, tblName, colNames, StatsType.ALL); @@ -2159,7 +1806,9 @@ private static void updateTableColumnsStatsInternal(Configuration conf, ColumnSt LOG.debug("Didn't find aggr stats in cache. Merging them. tblName= {}, parts= {}, cols= {}", tblName, partNames, colNames); MergedColumnStatsForPartitions mergedColStats = - mergeColStatsForPartitions(catName, dbName, tblName, partNames, colNames, sharedCache, type, writeIdList); + mergeColStatsForPartitions(catName, dbName, tblName, partNames, colNames, sharedCache, type, writeIdList, + MetastoreConf.getBoolVar(getConf(), ConfVars.STATS_NDV_DENSITY_FUNCTION), + MetastoreConf.getDoubleVar(getConf(), ConfVars.STATS_NDV_TUNER)); if (mergedColStats == null) { LOG.info("Aggregate stats of partition " + TableName.getQualified(catName, dbName, tblName) + "." + partNames + " for columns " + colNames + " is not present in cache.
Getting it from raw store"); @@ -2168,12 +1817,10 @@ private static void updateTableColumnsStatsInternal(Configuration conf, ColumnSt return new AggrStats(mergedColStats.getColStats(), mergedColStats.getPartsFound()); } - private MergedColumnStatsForPartitions mergeColStatsForPartitions(String catName, String dbName, String tblName, - List partNames, List colNames, SharedCache sharedCache, StatsType type, String writeIdList) + static MergedColumnStatsForPartitions mergeColStatsForPartitions(String catName, String dbName, String tblName, + List partNames, List colNames, SharedCache sharedCache, StatsType type, String writeIdList, + boolean useDensityFunctionForNDVEstimation, double ndvTuner) throws MetaException { - final boolean useDensityFunctionForNDVEstimation = - MetastoreConf.getBoolVar(getConf(), ConfVars.STATS_NDV_DENSITY_FUNCTION); - final double ndvTuner = MetastoreConf.getDoubleVar(getConf(), ConfVars.STATS_NDV_TUNER); Map> colStatsMap = new HashMap<>(); long partsFound = partNames.size(); Map, Long> partNameToWriteId = writeIdList != null ? new HashMap<>() : null; @@ -2234,21 +1881,19 @@ private MergedColumnStatsForPartitions mergeColStatsForPartitions(String catName .aggrPartitionStats(colStatsMap, partNames, partsFound == partNames.size(), useDensityFunctionForNDVEstimation, ndvTuner); - if (canUseEvents) { - if (type == StatsType.ALL) { - sharedCache.refreshAggregateStatsInCache(StringUtils.normalizeIdentifier(catName), - StringUtils.normalizeIdentifier(dbName), StringUtils.normalizeIdentifier(tblName), - new AggrStats(colAggrStats, partsFound), null, partNameToWriteId); - } else if (type == StatsType.ALLBUTDEFAULT) { - sharedCache.refreshAggregateStatsInCache(StringUtils.normalizeIdentifier(catName), - StringUtils.normalizeIdentifier(dbName), StringUtils.normalizeIdentifier(tblName), null, - new AggrStats(colAggrStats, partsFound), partNameToWriteId); - } + if (type == StatsType.ALL) { + sharedCache.refreshAggregateStatsInCache(StringUtils.normalizeIdentifier(catName), + StringUtils.normalizeIdentifier(dbName), StringUtils.normalizeIdentifier(tblName), + new AggrStats(colAggrStats, partsFound), null, partNameToWriteId); + } else if (type == StatsType.ALLBUTDEFAULT) { + sharedCache.refreshAggregateStatsInCache(StringUtils.normalizeIdentifier(catName), + StringUtils.normalizeIdentifier(dbName), StringUtils.normalizeIdentifier(tblName), null, + new AggrStats(colAggrStats, partsFound), partNameToWriteId); } return new MergedColumnStatsForPartitions(colAggrStats, partsFound); } - class MergedColumnStatsForPartitions { + static class MergedColumnStatsForPartitions { List colStats = new ArrayList(); long partsFound; @@ -2496,23 +2141,8 @@ long getPartsFound() { List foreignKeys, List uniqueConstraints, List notNullConstraints, List defaultConstraints, List checkConstraints) throws InvalidObjectException, MetaException { - // TODO constraintCache - List constraintNames = rawStore - .createTableWithConstraints(tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, - defaultConstraints, checkConstraints); - // in case of event based cache update, cache is updated during commit. - if (canUseEvents) { - return constraintNames; - } - String dbName = normalizeIdentifier(tbl.getDbName()); - String tblName = normalizeIdentifier(tbl.getTableName()); - String catName = tbl.isSetCatName() ? 
normalizeIdentifier(tbl.getCatName()) : DEFAULT_CATALOG_NAME; - if (!shouldCacheTable(catName, dbName, tblName)) { - return constraintNames; - } - sharedCache.addTableToCache(StringUtils.normalizeIdentifier(tbl.getCatName()), - StringUtils.normalizeIdentifier(tbl.getDbName()), StringUtils.normalizeIdentifier(tbl.getTableName()), tbl); - return constraintNames; + return rawStore.createTableWithConstraints(tbl, primaryKeys, + foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints); } @Override public void dropConstraint(String catName, String dbName, String tableName, String constraintName, @@ -2799,11 +2429,6 @@ static boolean isBlacklistWhitelistEmpty(Configuration conf) { .isEmpty(); } - @VisibleForTesting void resetCatalogCache() { - sharedCache.resetCatalogCache(); - setCachePrewarmedState(false); - } - @Override public void addRuntimeStat(RuntimeStat stat) throws MetaException { rawStore.addRuntimeStat(stat); } @@ -2828,4 +2453,22 @@ static boolean isBlacklistWhitelistEmpty(Configuration conf) { throws MetaException, NoSuchObjectException { return rawStore.getPartitionColsWithStats(catName, dbName, tableName); } + + public static boolean isTransactionalTable(org.apache.hadoop.hive.metastore.api.Table table) { + return table != null && table.getParameters() != null && + isTablePropertyTransactional(table.getParameters()); + } + + public static boolean isTablePropertyTransactional(Map parameters) { + String resultStr = parameters.get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL); + if (resultStr == null) { + resultStr = parameters.get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL.toUpperCase()); + } + return resultStr != null && resultStr.equalsIgnoreCase("true"); + } + + public static ValidWriteIdList newTableWriteIds(String dbName, String tableName) { + String fullTableName = TableName.getDbTable(dbName, tableName); + return new ValidReaderWriteIdList(fullTableName, new long[]{1}, new BitSet(), 1); + } } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java index 45b1b0d0bf..a5cecef181 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java @@ -21,7 +21,6 @@ import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; import java.util.ArrayList; -import java.util.Collection; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; @@ -51,26 +50,23 @@ import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.metastore.ObjectStore; import org.apache.hadoop.hive.metastore.StatObjectConverter; -import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.AggrStats; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; -import org.apache.hadoop.hive.metastore.api.Catalog; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; -import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.Partition; import 
org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.metastore.api.TableMeta; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.txn.TxnUtils; import org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils; import org.apache.hadoop.hive.metastore.utils.StringUtils; import org.apache.hadoop.hive.ql.util.IncrementalObjectSizeEstimator; import org.apache.hadoop.hive.ql.util.IncrementalObjectSizeEstimator.ObjectEstimator; +import org.apache.hive.common.util.TxnIdUtils; import org.eclipse.jetty.util.ConcurrentHashSet; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -82,16 +78,6 @@ public class SharedCache { private static ReentrantReadWriteLock cacheLock = new ReentrantReadWriteLock(true); private static final long MAX_DEFAULT_CACHE_SIZE = 1024 * 1024; - private boolean isCatalogCachePrewarmed = false; - private Map catalogCache = new TreeMap<>(); - private HashSet catalogsDeletedDuringPrewarm = new HashSet<>(); - private AtomicBoolean isCatalogCacheDirty = new AtomicBoolean(false); - - // For caching Database objects. Key is database name - private Map databaseCache = new TreeMap<>(); - private boolean isDatabaseCachePrewarmed = false; - private HashSet databasesDeletedDuringPrewarm = new HashSet<>(); - private AtomicBoolean isDatabaseCacheDirty = new AtomicBoolean(false); // For caching TableWrapper objects. Key is aggregate of database name and table name private Cache tableCache = null; @@ -286,12 +272,16 @@ public int getObjectSize(Class clazz, Object obj) { private Map> aggrColStatsCache = new ConcurrentHashMap>(); private AtomicBoolean isAggrPartitionColStatsCacheDirty = new AtomicBoolean(false); + private ValidWriteIdList writeIds; + private boolean valid = false; - TableWrapper(Table t, byte[] sdHash, String location, Map parameters) { + TableWrapper(Table t, byte[] sdHash, String location, Map parameters, ValidWriteIdList writeIds) { this.t = t; this.sdHash = sdHash; this.location = location; this.parameters = parameters; + this.writeIds = writeIds; + this.valid = false; this.tableColStatsCacheSize = 0; this.partitionCacheSize = 0; this.partitionColStatsCacheSize = 0; @@ -580,12 +570,11 @@ public void alterPartitionAndStats(List partVals, SharedCache sharedCach Map parameters, List colStatsObjs) { try { tableLock.writeLock().lock(); - PartitionWrapper partitionWrapper = partitionCache.get(CacheUtils.buildPartitionCacheKey(partVals)); - if (partitionWrapper == null) { + Partition newPart = getPartition(partVals, sharedCache); + if (newPart == null) { LOG.info("Partition " + partVals + " is missing from cache. 
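[Sketch, not part of the patch] The two fields added to TableWrapper drive a simple validity protocol, inferred from the methods later in this file: an entry starts invalid, prewarm or a write-ID commit marks it valid, and every in-place mutation marks it invalid again:

    TableWrapper w = new TableWrapper(tbl, sdHash, location, params, writeIds); // valid == false
    w.setValid(true);   // done by populateTableInCache (prewarm) or SharedCache.commitWriteId
    w.setValid(false);  // done by the alter/add/remove partition and stats mutators below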
Cannot update the partition stats in cache."); return; } - Partition newPart = partitionWrapper.getPartition(); newPart.setParameters(parameters); newPart.setWriteId(writeId); removePartition(partVals, sharedCache); @@ -609,35 +598,6 @@ public void alterPartitions(List> partValsList, List new } } - public void refreshPartitions(List partitions, SharedCache sharedCache) { - Map newPartitionCache = new HashMap(); - try { - tableLock.writeLock().lock(); - int size = 0; - for (Partition part : partitions) { - if (isPartitionCacheDirty.compareAndSet(true, false)) { - LOG.debug("Skipping partition cache update for table: " + getTable().getTableName() - + "; the partition list we have is dirty."); - return; - } - String key = CacheUtils.buildPartitionCacheKey(part.getValues()); - PartitionWrapper wrapper = partitionCache.get(key); - if (wrapper != null) { - if (wrapper.getSdHash() != null) { - sharedCache.decrSd(wrapper.getSdHash()); - } - } - wrapper = makePartitionWrapper(part, sharedCache); - newPartitionCache.put(key, wrapper); - size += getObjectSize(PartitionWrapper.class, wrapper); - } - partitionCache = newPartitionCache; - updateMemberSize(MemberName.PARTITION_CACHE, size, SizeMode.Snapshot); - } finally { - tableLock.writeLock().unlock(); - } - } - public boolean updateTableColStats(List colStatsForTable) { try { tableLock.writeLock().lock(); @@ -665,29 +625,6 @@ public boolean updateTableColStats(List colStatsForTable) { } } - public void refreshTableColStats(List colStatsForTable) { - Map newTableColStatsCache = new HashMap(); - try { - tableLock.writeLock().lock(); - int statsSize = 0; - for (ColumnStatisticsObj colStatObj : colStatsForTable) { - if (isTableColStatsCacheDirty.compareAndSet(true, false)) { - LOG.debug("Skipping table col stats cache update for table: " + getTable().getTableName() - + "; the table col stats list we have is dirty."); - return; - } - String key = colStatObj.getColName(); - // TODO: get rid of deepCopy after making sure callers don't use references - newTableColStatsCache.put(key, colStatObj.deepCopy()); - statsSize += getObjectSize(ColumnStatisticsObj.class, colStatObj); - } - tableColStatsCache = newTableColStatsCache; - updateMemberSize(MemberName.TABLE_COL_STATS_CACHE, statsSize, SizeMode.Snapshot); - } finally { - tableLock.writeLock().unlock(); - } - } - public ColumnStatistics getCachedTableColStats(ColumnStatisticsDesc csd, List colNames, String validWriteIds, boolean areTxnStatsSupported) throws MetaException { List colStatObjs = new ArrayList(); @@ -882,43 +819,6 @@ public void removeAllPartitionColStats() { } } - public void refreshPartitionColStats(List partitionColStats) { - Map newPartitionColStatsCache = new HashMap(); - try { - tableLock.writeLock().lock(); - String tableName = StringUtils.normalizeIdentifier(getTable().getTableName()); - int statsSize = 0; - for (ColumnStatistics cs : partitionColStats) { - if (isPartitionColStatsCacheDirty.compareAndSet(true, false)) { - LOG.debug("Skipping partition column stats cache update for table: " + getTable().getTableName() - + "; the partition column stats list we have is dirty"); - return; - } - List partVal; - try { - partVal = Warehouse.makeValsFromName(cs.getStatsDesc().getPartName(), null); - List colStatsObjs = cs.getStatsObj(); - for (ColumnStatisticsObj colStatObj : colStatsObjs) { - if (isPartitionColStatsCacheDirty.compareAndSet(true, false)) { - LOG.debug("Skipping partition column stats cache update for table: " + getTable().getTableName() - + "; the partition column list we have is 
dirty"); - return; - } - String key = CacheUtils.buildPartitonColStatsCacheKey(partVal, colStatObj.getColName()); - newPartitionColStatsCache.put(key, colStatObj.deepCopy()); - statsSize += getObjectSize(ColumnStatisticsObj.class, colStatObj); - } - } catch (MetaException e) { - LOG.debug("Unable to cache partition column stats for table: " + tableName, e); - } - } - partitionColStatsCache = newPartitionColStatsCache; - updateMemberSize(MemberName.PARTITION_COL_STATS_CACHE, statsSize, SizeMode.Snapshot); - } finally { - tableLock.writeLock().unlock(); - } - } - public List getAggrPartitionColStats(List colNames, StatsType statsType) { List colStats = new ArrayList(); try { @@ -1082,6 +982,18 @@ private PartitionWrapper makePartitionWrapper(Partition part, SharedCache shared } return wrapper; } + + ValidWriteIdList getWriteIds() { + return writeIds; + } + + boolean isValid() { + return valid; + } + + void setValid(boolean valid) { + this.valid = valid; + } } static class PartitionWrapper { @@ -1150,247 +1062,10 @@ public ColumnStatisticsObj getColumnStatisticsObj() { } } - public void populateCatalogsInCache(Collection catalogs) { - for (Catalog cat : catalogs) { - Catalog catCopy = cat.deepCopy(); - // ObjectStore also stores db name in lowercase - catCopy.setName(catCopy.getName().toLowerCase()); - try { - cacheLock.writeLock().lock(); - // Since we allow write operations on cache while prewarm is happening: - // 1. Don't add databases that were deleted while we were preparing list for prewarm - // 2. Skip overwriting exisiting db object - // (which is present because it was added after prewarm started) - if (catalogsDeletedDuringPrewarm.contains(catCopy.getName())) { - continue; - } - catalogCache.putIfAbsent(catCopy.getName(), catCopy); - catalogsDeletedDuringPrewarm.clear(); - isCatalogCachePrewarmed = true; - } finally { - cacheLock.writeLock().unlock(); - } - } - } - - public Catalog getCatalogFromCache(String name) { - Catalog cat = null; - try { - cacheLock.readLock().lock(); - if (catalogCache.get(name) != null) { - cat = catalogCache.get(name).deepCopy(); - } - } finally { - cacheLock.readLock().unlock(); - } - return cat; - } - - public void addCatalogToCache(Catalog cat) { - try { - cacheLock.writeLock().lock(); - Catalog catCopy = cat.deepCopy(); - // ObjectStore also stores db name in lowercase - catCopy.setName(catCopy.getName().toLowerCase()); - catalogCache.put(cat.getName(), catCopy); - isCatalogCacheDirty.set(true); - } finally { - cacheLock.writeLock().unlock(); - } - } - - public void alterCatalogInCache(String catName, Catalog newCat) { - try { - cacheLock.writeLock().lock(); - removeCatalogFromCache(catName); - addCatalogToCache(newCat.deepCopy()); - } finally { - cacheLock.writeLock().unlock(); - } - } - - public void removeCatalogFromCache(String name) { - name = normalizeIdentifier(name); - try { - cacheLock.writeLock().lock(); - // If db cache is not yet prewarmed, add this to a set which the prewarm thread can check - // so that the prewarm thread does not add it back - if (!isCatalogCachePrewarmed) { - catalogsDeletedDuringPrewarm.add(name); - } - if (catalogCache.remove(name) != null) { - isCatalogCacheDirty.set(true); - } - } finally { - cacheLock.writeLock().unlock(); - } - } - - public List listCachedCatalogs() { - try { - cacheLock.readLock().lock(); - return new ArrayList<>(catalogCache.keySet()); - } finally { - cacheLock.readLock().unlock(); - } - } - - public boolean isCatalogCachePrewarmed() { - return isCatalogCachePrewarmed; - } - - public Database 
getDatabaseFromCache(String catName, String name) { - Database db = null; - try { - cacheLock.readLock().lock(); - String key = CacheUtils.buildDbKey(catName, name); - if (databaseCache.get(key) != null) { - db = databaseCache.get(key).deepCopy(); - } - } finally { - cacheLock.readLock().unlock(); - } - return db; - } - - public void populateDatabasesInCache(List databases) { - for (Database db : databases) { - Database dbCopy = db.deepCopy(); - // ObjectStore also stores db name in lowercase - dbCopy.setName(dbCopy.getName().toLowerCase()); - try { - cacheLock.writeLock().lock(); - // Since we allow write operations on cache while prewarm is happening: - // 1. Don't add databases that were deleted while we were preparing list for prewarm - // 2. Skip overwriting exisiting db object - // (which is present because it was added after prewarm started) - String key = CacheUtils.buildDbKey(dbCopy.getCatalogName().toLowerCase(), dbCopy.getName().toLowerCase()); - if (databasesDeletedDuringPrewarm.contains(key)) { - continue; - } - databaseCache.putIfAbsent(key, dbCopy); - databasesDeletedDuringPrewarm.clear(); - isDatabaseCachePrewarmed = true; - } finally { - cacheLock.writeLock().unlock(); - } - } - } - - public boolean isDatabaseCachePrewarmed() { - return isDatabaseCachePrewarmed; - } - - public void addDatabaseToCache(Database db) { - try { - cacheLock.writeLock().lock(); - Database dbCopy = db.deepCopy(); - // ObjectStore also stores db name in lowercase - dbCopy.setName(dbCopy.getName().toLowerCase()); - dbCopy.setCatalogName(dbCopy.getCatalogName().toLowerCase()); - databaseCache.put(CacheUtils.buildDbKey(dbCopy.getCatalogName(), dbCopy.getName()), dbCopy); - isDatabaseCacheDirty.set(true); - } finally { - cacheLock.writeLock().unlock(); - } - } - - public void removeDatabaseFromCache(String catName, String dbName) { - try { - cacheLock.writeLock().lock(); - // If db cache is not yet prewarmed, add this to a set which the prewarm thread can check - // so that the prewarm thread does not add it back - String key = CacheUtils.buildDbKey(catName, dbName); - if (!isDatabaseCachePrewarmed) { - databasesDeletedDuringPrewarm.add(key); - } - if (databaseCache.remove(key) != null) { - isDatabaseCacheDirty.set(true); - } - } finally { - cacheLock.writeLock().unlock(); - } - } - - public List listCachedDatabases(String catName) { - List results = new ArrayList<>(); - try { - cacheLock.readLock().lock(); - for (String pair : databaseCache.keySet()) { - String[] n = CacheUtils.splitDbName(pair); - if (catName.equals(n[0])) { - results.add(n[1]); - } - } - } finally { - cacheLock.readLock().unlock(); - } - return results; - } - - public List listCachedDatabases(String catName, String pattern) { - List results = new ArrayList<>(); - try { - cacheLock.readLock().lock(); - for (String pair : databaseCache.keySet()) { - String[] n = CacheUtils.splitDbName(pair); - if (catName.equals(n[0])) { - n[1] = StringUtils.normalizeIdentifier(n[1]); - if (CacheUtils.matches(n[1], pattern)) { - results.add(n[1]); - } - } - } - } finally { - cacheLock.readLock().unlock(); - } - return results; - } - - /** - * Replaces the old db object with the new one. This will add the new database to cache if it does - * not exist. 
- */ - public void alterDatabaseInCache(String catName, String dbName, Database newDb) { - try { - cacheLock.writeLock().lock(); - removeDatabaseFromCache(catName, dbName); - addDatabaseToCache(newDb.deepCopy()); - isDatabaseCacheDirty.set(true); - } finally { - cacheLock.writeLock().unlock(); - } - } - - public boolean refreshDatabasesInCache(List databases) { - if (isDatabaseCacheDirty.compareAndSet(true, false)) { - LOG.debug("Skipping database cache update; the database list we have is dirty."); - return false; - } - try { - cacheLock.writeLock().lock(); - databaseCache.clear(); - for (Database db : databases) { - addDatabaseToCache(db); - } - return true; - } finally { - cacheLock.writeLock().unlock(); - } - } - - public int getCachedDatabaseCount() { - try { - cacheLock.readLock().lock(); - return databaseCache.size(); - } finally { - cacheLock.readLock().unlock(); - } - } - + // This is called during prewarm public boolean populateTableInCache(Table table, ColumnStatistics tableColStats, List partitions, List partitionColStats, AggrStats aggrStatsAllPartitions, - AggrStats aggrStatsAllButDefaultPartition) { + AggrStats aggrStatsAllButDefaultPartition, ValidWriteIdList writeIds) { String catName = StringUtils.normalizeIdentifier(table.getCatName()); String dbName = StringUtils.normalizeIdentifier(table.getDbName()); String tableName = StringUtils.normalizeIdentifier(table.getTableName()); @@ -1399,7 +1074,7 @@ public boolean populateTableInCache(Table table, ColumnStatistics tableColStats, if (tablesDeletedDuringPrewarm.contains(CacheUtils.buildTableKey(catName, dbName, tableName))) { return false; } - TableWrapper tblWrapper = createTableWrapper(catName, dbName, tableName, table); + TableWrapper tblWrapper = createTableWrapper(catName, dbName, tableName, table, writeIds); if (!table.isSetPartitionKeys() && (tableColStats != null)) { if (table.getPartitionKeys().isEmpty() && (tableColStats != null)) { return false; @@ -1431,6 +1106,8 @@ public boolean populateTableInCache(Table table, ColumnStatistics tableColStats, tblWrapper.isTableColStatsCacheDirty.set(false); tblWrapper.isPartitionColStatsCacheDirty.set(false); tblWrapper.isAggrPartitionColStatsCacheDirty.set(false); + tblWrapper.writeIds = writeIds; + tblWrapper.valid = true; try { cacheLock.writeLock().lock(); // 2. 
Skip overwriting exisiting table object @@ -1452,13 +1129,16 @@ public void completeTableCachePrewarm() { } } - public Table getTableFromCache(String catName, String dbName, String tableName) { + public Table getTableFromCache(String catName, String dbName, String tableName, ValidWriteIdList validWriteIdList) { Table t = null; try { cacheLock.readLock().lock(); TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tableName)); if (tblWrapper != null) { - t = CacheUtils.assemble(tblWrapper, this); + // If the request writeIds is newer than the cached version + if (tblWrapper.isValid() && TxnIdUtils.compare(tblWrapper.getWriteIds(), validWriteIdList) >= 0) { + t = CacheUtils.assemble(tblWrapper, this); + } } } finally { cacheLock.readLock().unlock(); @@ -1466,10 +1146,17 @@ public Table getTableFromCache(String catName, String dbName, String tableName) return t; } - public TableWrapper addTableToCache(String catName, String dbName, String tblName, Table tbl) { + public void addTableToCacheIfNotExists(String catName, String dbName, String tblName, Table tbl, ValidWriteIdList writeIds) { + TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); + if (tblWrapper == null) { + addTableToCache(catName, dbName, tblName, tbl, writeIds); + } + } + + public TableWrapper addTableToCache(String catName, String dbName, String tblName, Table tbl, ValidWriteIdList writeIds) { try { cacheLock.writeLock().lock(); - TableWrapper wrapper = createTableWrapper(catName, dbName, tblName, tbl); + TableWrapper wrapper = createTableWrapper(catName, dbName, tblName, tbl, writeIds); tableCache.put(CacheUtils.buildTableKey(catName, dbName, tblName), wrapper); isTableCacheDirty.set(true); return wrapper; @@ -1478,7 +1165,21 @@ public TableWrapper addTableToCache(String catName, String dbName, String tblNam } } - private TableWrapper createTableWrapper(String catName, String dbName, String tblName, Table tbl) { + public void commitWriteId(String catName, String dbName, String tblName, long writeId) { + try { + cacheLock.readLock().lock(); + TableWrapper tblWrapper = + tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); + if (tblWrapper != null) { + tblWrapper.getWriteIds().commitWriteId(writeId); + tblWrapper.setValid(true); + } + } finally { + cacheLock.readLock().unlock(); + } + } + + private TableWrapper createTableWrapper(String catName, String dbName, String tblName, Table tbl, ValidWriteIdList writeIds) { TableWrapper wrapper; Table tblCopy = tbl.deepCopy(); tblCopy.setCatName(normalizeIdentifier(catName)); @@ -1494,9 +1195,9 @@ private TableWrapper createTableWrapper(String catName, String dbName, String tb StorageDescriptor sd = tbl.getSd(); increSd(sd, sdHash); tblCopy.setSd(null); - wrapper = new TableWrapper(tblCopy, sdHash, sd.getLocation(), sd.getParameters()); + wrapper = new TableWrapper(tblCopy, sdHash, sd.getLocation(), sd.getParameters(), writeIds); } else { - wrapper = new TableWrapper(tblCopy, null, null, null); + wrapper = new TableWrapper(tblCopy, null, null, null, writeIds); } return wrapper; } @@ -1532,6 +1233,7 @@ public void alterTableInCache(String catName, String dbName, String tblName, Tab cacheLock.writeLock().lock(); TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { + tblWrapper.valid = false; tblWrapper.updateTableObj(newTable, this); String newDbName = 
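[Sketch, not part of the patch] A hypothetical reader tying this hunk together. TxnIdUtils.compare(cached, requested) >= 0 means the cached snapshot is at least as fresh as the reader's, so the cached copy can be served; any miss, stale entry, or invalidated entry falls through to the RawStore:

    Table t = sharedCache.getTableFromCache(catName, dbName, tblName, readerWriteIds);
    if (t == null) { // not cached, marked invalid, or older than the reader's snapshot
      t = rawStore.getTable(catName, dbName, tblName,
          readerWriteIds == null ? null : readerWriteIds.writeToString());
      if (t != null) {
        sharedCache.addTableToCacheIfNotExists(catName, dbName, tblName, t, readerWriteIds);
      }
    }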
StringUtils.normalizeIdentifier(newTable.getDbName()); String newTblName = StringUtils.normalizeIdentifier(newTable.getTableName()); @@ -1552,6 +1254,7 @@ public void alterTableAndStatsInCache(String catName, String dbName, String tblN LOG.info("Table " + tblName + " is missing from cache. Cannot update table stats in cache"); return; } + tblWrapper.valid = false; Table newTable = tblWrapper.getTable(); newTable.setWriteId(writeId); newTable.setParameters(newParams); @@ -1596,75 +1299,6 @@ public void alterTableAndStatsInCache(String catName, String dbName, String tblN return tableNames; } - public List listCachedTableNames(String catName, String dbName, String pattern, int maxTables) { - List tableNames = new ArrayList<>(); - try { - cacheLock.readLock().lock(); - int count = 0; - for (TableWrapper wrapper : tableCache.asMap().values()) { - if (wrapper.sameDatabase(catName, dbName) && CacheUtils.matches(wrapper.getTable().getTableName(), pattern) && ( - maxTables == -1 || count < maxTables)) { - tableNames.add(StringUtils.normalizeIdentifier(wrapper.getTable().getTableName())); - count++; - } - } - } finally { - cacheLock.readLock().unlock(); - } - return tableNames; - } - - public List listCachedTableNames(String catName, String dbName, String pattern, TableType tableType, - int limit) { - List tableNames = new ArrayList<>(); - try { - cacheLock.readLock().lock(); - int count = 0; - for (TableWrapper wrapper : tableCache.asMap().values()) { - if (wrapper.sameDatabase(catName, dbName) && CacheUtils.matches(wrapper.getTable().getTableName(), pattern) - && wrapper.getTable().getTableType().equals(tableType.toString()) && (limit == -1 || count < limit)) { - tableNames.add(StringUtils.normalizeIdentifier(wrapper.getTable().getTableName())); - count++; - } - } - } finally { - cacheLock.readLock().unlock(); - } - return tableNames; - } - - public boolean refreshTablesInCache(String catName, String dbName, List
tables) { - if (isTableCacheDirty.compareAndSet(true, false)) { - LOG.debug("Skipping table cache update; the table list we have is dirty."); - return false; - } - Map newCacheForDB = new TreeMap<>(); - for (Table tbl : tables) { - String tblName = StringUtils.normalizeIdentifier(tbl.getTableName()); - TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); - if (tblWrapper != null) { - tblWrapper.updateTableObj(tbl, this); - } else { - tblWrapper = createTableWrapper(catName, dbName, tblName, tbl); - } - newCacheForDB.put(CacheUtils.buildTableKey(catName, dbName, tblName), tblWrapper); - } - try { - cacheLock.writeLock().lock(); - Iterator> entryIterator = tableCache.asMap().entrySet().iterator(); - while (entryIterator.hasNext()) { - String key = entryIterator.next().getKey(); - if (key.startsWith(CacheUtils.buildDbKeyWithDelimiterSuffix(catName, dbName))) { - entryIterator.remove(); - } - } - tableCache.putAll(newCacheForDB); - return true; - } finally { - cacheLock.writeLock().unlock(); - } - } - public ColumnStatistics getTableColStatsFromCache(String catName, String dbName, String tblName, List colNames, String validWriteIds, boolean areTxnStatsSupported) throws MetaException { try { @@ -1686,6 +1320,7 @@ public void removeTableColStatsFromCache(String catName, String dbName, String t cacheLock.readLock().lock(); TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { + tblWrapper.valid = false; tblWrapper.removeTableColStats(colName); } else { LOG.info("Table " + tblName + " is missing from cache."); @@ -1695,26 +1330,13 @@ public void removeTableColStatsFromCache(String catName, String dbName, String t } } - public void removeAllTableColStatsFromCache(String catName, String dbName, String tblName) { - try { - cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); - if (tblWrapper != null) { - tblWrapper.removeAllTableColStats(); - } else { - LOG.info("Table " + tblName + " is missing from cache."); - } - } finally { - cacheLock.readLock().unlock(); - } - } - public void updateTableColStatsInCache(String catName, String dbName, String tableName, List colStatsForTable) { try { cacheLock.readLock().lock(); TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tableName)); if (tblWrapper != null) { + tblWrapper.valid = false; tblWrapper.updateTableColStats(colStatsForTable); } else { LOG.info("Table " + tableName + " is missing from cache."); @@ -1724,21 +1346,6 @@ public void updateTableColStatsInCache(String catName, String dbName, String tab } } - public void refreshTableColStatsInCache(String catName, String dbName, String tableName, - List colStatsForTable) { - try { - cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tableName)); - if (tblWrapper != null) { - tblWrapper.refreshTableColStats(colStatsForTable); - } else { - LOG.info("Table " + tableName + " is missing from cache."); - } - } finally { - cacheLock.readLock().unlock(); - } - } - public int getCachedTableCount() { try { cacheLock.readLock().lock(); @@ -1748,36 +1355,13 @@ public int getCachedTableCount() { } } - public List getTableMeta(String catName, String dbNames, String tableNames, List tableTypes) { - List tableMetas = new ArrayList<>(); - try { - cacheLock.readLock().lock(); - for (String dbName : 
listCachedDatabases(catName)) { - if (CacheUtils.matches(dbName, dbNames)) { - for (Table table : listCachedTables(catName, dbName)) { - if (CacheUtils.matches(table.getTableName(), tableNames)) { - if (tableTypes == null || tableTypes.contains(table.getTableType())) { - TableMeta metaData = new TableMeta(dbName, table.getTableName(), table.getTableType()); - metaData.setCatName(catName); - metaData.setComments(table.getParameters().get("comment")); - tableMetas.add(metaData); - } - } - } - } - } - } finally { - cacheLock.readLock().unlock(); - } - return tableMetas; - } - public void addPartitionToCache(String catName, String dbName, String tblName, Partition part) { try { cacheLock.readLock().lock(); String tblKey = CacheUtils.buildTableKey(catName, dbName, tblName); TableWrapper tblWrapper = tableCache.getIfPresent(tblKey); if (tblWrapper != null) { + tblWrapper.valid = false; tblWrapper.cachePartition(part, this); } } finally { @@ -1790,6 +1374,7 @@ public void addPartitionsToCache(String catName, String dbName, String tblName, cacheLock.readLock().lock(); TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { + tblWrapper.valid = false; tblWrapper.cachePartitions(parts, this, false); } } finally { @@ -1802,9 +1387,7 @@ public Partition getPartitionFromCache(String catName, String dbName, String tbl try { cacheLock.readLock().lock(); TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); - if (tblWrapper != null) { - part = tblWrapper.getPartition(partVals, this); - } + part = tblWrapper.getPartition(partVals, this); } finally { cacheLock.readLock().unlock(); } @@ -1831,6 +1414,7 @@ public Partition removePartitionFromCache(String catName, String dbName, String cacheLock.readLock().lock(); TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { + tblWrapper.valid = false; part = tblWrapper.removePartition(partVals, this); } else { LOG.warn("This is abnormal"); @@ -1846,6 +1430,7 @@ public void removePartitionsFromCache(String catName, String dbName, String tblN cacheLock.readLock().lock(); TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { + tblWrapper.valid = false; tblWrapper.removePartitions(partVals, this); } } finally { @@ -1873,6 +1458,7 @@ public void alterPartitionInCache(String catName, String dbName, String tblName, cacheLock.readLock().lock(); TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { + tblWrapper.valid = false; tblWrapper.alterPartition(partVals, newPart, this); } } finally { @@ -1886,6 +1472,7 @@ public void alterPartitionAndStatsInCache(String catName, String dbName, String cacheLock.readLock().lock(); TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { + tblWrapper.valid = false; tblWrapper.alterPartitionAndStats(partVals, this, writeId, parameters, colStatsObjs); } } finally { @@ -1899,6 +1486,7 @@ public void alterPartitionsInCache(String catName, String dbName, String tblName cacheLock.readLock().lock(); TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { + tblWrapper.valid = false; tblWrapper.alterPartitions(partValsList, newParts, this); } } finally { @@ 
-1906,24 +1494,13 @@ public void alterPartitionsInCache(String catName, String dbName, String tblName } } - public void refreshPartitionsInCache(String catName, String dbName, String tblName, List partitions) { - try { - cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); - if (tblWrapper != null) { - tblWrapper.refreshPartitions(partitions, this); - } - } finally { - cacheLock.readLock().unlock(); - } - } - public void removePartitionColStatsFromCache(String catName, String dbName, String tblName, List partVals, String colName) { try { cacheLock.readLock().lock(); TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { + tblWrapper.valid = false; tblWrapper.removePartitionColStats(partVals, colName); } } finally { @@ -1931,24 +1508,13 @@ public void removePartitionColStatsFromCache(String catName, String dbName, Stri } } - public void removeAllPartitionColStatsFromCache(String catName, String dbName, String tblName) { - try { - cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); - if (tblWrapper != null) { - tblWrapper.removeAllPartitionColStats(); - } - } finally { - cacheLock.readLock().unlock(); - } - } - public void updatePartitionColStatsInCache(String catName, String dbName, String tableName, List partVals, List colStatsObjs) { try { cacheLock.readLock().lock(); TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tableName)); if (tblWrapper != null) { + tblWrapper.valid = false; tblWrapper.updatePartitionColStats(partVals, colStatsObjs); } } finally { @@ -1988,19 +1554,6 @@ public ColumStatsWithWriteId getPartitionColStatsFromCache(String catName, Strin return colStatObjs; } - public void refreshPartitionColStatsInCache(String catName, String dbName, String tblName, - List partitionColStats) { - try { - cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); - if (tblWrapper != null) { - tblWrapper.refreshPartitionColStats(partitionColStats); - } - } finally { - cacheLock.readLock().unlock(); - } - } - public List getAggrStatsFromCache(String catName, String dbName, String tblName, List colNames, StatsType statsType) { try { @@ -2069,11 +1622,6 @@ public synchronized StorageDescriptor getSdFromCache(byte[] sdHash) { return sdWrapper.getSd(); } - @VisibleForTesting - Map getDatabaseCache() { - return databaseCache; - } - @VisibleForTesting void clearTableCache() { tableCache.invalidateAll(); @@ -2084,19 +1632,7 @@ void clearTableCache() { return sdCache; } - /** - * This resets the contents of the cataog cache so that we can re-fill it in another test. 
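[Sketch, not part of the patch] The tblWrapper.valid = false lines repeated through these hunks all implement one rule: an in-place mutation makes the entry unservable until the owning write ID commits. A hypothetical round trip:

    sharedCache.updatePartitionColStatsInCache(cat, db, tbl, partVals, colStatsObjs); // valid = false
    Table t = sharedCache.getTableFromCache(cat, db, tbl, readerIds); // now returns null
    sharedCache.commitWriteId(cat, db, tbl, writeId); // valid = true; reads hit the cache again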
- */ - void resetCatalogCache() { - isCatalogCachePrewarmed = false; - catalogCache.clear(); - catalogsDeletedDuringPrewarm.clear(); - isCatalogCacheDirty.set(false); - } - void clearDirtyFlags() { - isCatalogCacheDirty.set(false); - isDatabaseCacheDirty.set(false); isTableCacheDirty.set(false); } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/InsertEvent.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/InsertEvent.java index 60ad7db60e..a0c2bef4f0 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/InsertEvent.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/InsertEvent.java @@ -71,7 +71,7 @@ public InsertEvent(String catName, String db, String table, List partVal this.tableObj = handler.get_table_req(req).getTable(); if (partVals != null) { this.ptnObj = handler.get_partition(MetaStoreUtils.prependNotNullCatToDbName(catName, db), - table, partVals); + table, partVals, null); } else { this.ptnObj = null; } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/UpdatePartitionColumnStatEvent.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/UpdatePartitionColumnStatEvent.java index ba61a08173..0e57835360 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/UpdatePartitionColumnStatEvent.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/UpdatePartitionColumnStatEvent.java @@ -39,6 +39,7 @@ private Map parameters; private List partVals; private Table tableObj; + private String writeIds; /** * @param statsObj Columns statistics Info. 
@@ -49,13 +50,14 @@ * @param handler handler that is firing the event */ public UpdatePartitionColumnStatEvent(ColumnStatistics statsObj, List partVals, Map parameters, - Table tableObj, long writeId, IHMSHandler handler) { + Table tableObj, long writeId, String writeIds, IHMSHandler handler) { super(true, handler); this.partColStats = statsObj; this.writeId = writeId; this.parameters = parameters; this.partVals = partVals; this.tableObj = tableObj; + this.writeIds = writeIds; } /** @@ -64,13 +66,14 @@ public UpdatePartitionColumnStatEvent(ColumnStatistics statsObj, List pa * @param handler handler that is firing the event */ public UpdatePartitionColumnStatEvent(ColumnStatistics statsObj, List partVals, - Table tableObj, IHMSHandler handler) { + Table tableObj, String writeIds, IHMSHandler handler) { super(true, handler); this.partColStats = statsObj; this.partVals = partVals; this.writeId = 0; this.parameters = null; this.tableObj = tableObj; + this.writeIds = writeIds; } public ColumnStatistics getPartColStats() { @@ -90,4 +93,9 @@ public long getWriteId() { } public Table getTableObj() { return tableObj; } + + public String getWriteIds() + { + return writeIds; + } } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/UpdateTableColumnStatEvent.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/UpdateTableColumnStatEvent.java index 71300abf4e..48d1206786 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/UpdateTableColumnStatEvent.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/UpdateTableColumnStatEvent.java @@ -38,22 +38,25 @@ private long writeId; private Map parameters; private Table tableObj; + private String writeIds; /** * @param colStats Columns statistics Info. * @param tableObj table object * @param parameters table parameters to be updated after stats are updated. * @param writeId writeId for the query. 
+ * @param writeIds writeIds for the query * @param handler handler that is firing the event */ public UpdateTableColumnStatEvent(ColumnStatistics colStats, Table tableObj, Map parameters, - long writeId, IHMSHandler handler) { + long writeId, String writeIds, IHMSHandler handler) { super(true, handler); this.colStats = colStats; this.writeId = writeId; this.parameters = parameters; this.tableObj = tableObj; + this.writeIds = writeIds; } /** @@ -76,6 +79,10 @@ public long getWriteId() { return writeId; } + public String getWriteIds() { + return writeIds; + } + public Map getTableParameters() { return parameters; } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/MessageBuilder.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/MessageBuilder.java index aa83da4ed5..4518d79b1c 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/MessageBuilder.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/MessageBuilder.java @@ -289,9 +289,9 @@ public AcidWriteMessage buildAcidWriteMessage(AcidWriteEvent acidWriteEvent, public JSONUpdateTableColumnStatMessage buildUpdateTableColumnStatMessage(ColumnStatistics colStats, Table tableObj, Map parameters, - long writeId) { + long writeId, String writeIds) { return new JSONUpdateTableColumnStatMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, now(), - colStats, tableObj, parameters, writeId); + colStats, tableObj, parameters, writeId, writeIds); } public JSONDeleteTableColumnStatMessage buildDeleteTableColumnStatMessage(String dbName, String colName) { @@ -300,9 +300,9 @@ public JSONDeleteTableColumnStatMessage buildDeleteTableColumnStatMessage(String public JSONUpdatePartitionColumnStatMessage buildUpdatePartitionColumnStatMessage(ColumnStatistics colStats, List partVals, Map parameters, - Table tableObj, long writeId) { + Table tableObj, long writeId, String writeIds) { return new JSONUpdatePartitionColumnStatMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, now(), colStats, partVals, - parameters, tableObj, writeId); + parameters, tableObj, writeId, writeIds); } public JSONDeletePartitionColumnStatMessage buildDeletePartitionColumnStatMessage(String dbName, String colName, diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/UpdatePartitionColumnStatMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/UpdatePartitionColumnStatMessage.java index e92a0dc9a3..f685bc4f8a 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/UpdatePartitionColumnStatMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/UpdatePartitionColumnStatMessage.java @@ -41,4 +41,6 @@ protected UpdatePartitionColumnStatMessage() { public abstract List getPartVals(); public abstract Table getTableObject() throws Exception; + + public abstract String getWriteIds(); } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/UpdateTableColumnStatMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/UpdateTableColumnStatMessage.java index e3f049c48c..e5f7ef8100 100644 --- 
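[Sketch, not part of the patch] The new writeIds string (a serialized ValidWriteIdList) rides from the stat events into the JSON notification messages. Listener-side plumbing, assuming the event getters match the fields added above:

    UpdateTableColumnStatMessage msg = MessageBuilder.getInstance()
        .buildUpdateTableColumnStatMessage(event.getColStats(), event.getTableObj(),
            event.getTableParameters(), event.getWriteId(), event.getWriteIds());
    // a consumer can rebuild the snapshot with new ValidReaderWriteIdList(msg.getWriteIds())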
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/UpdateTableColumnStatMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/UpdateTableColumnStatMessage.java @@ -38,4 +38,6 @@ protected UpdateTableColumnStatMessage() { public abstract Map getParameters(); public abstract Table getTableObject() throws Exception; + + public abstract String getWriteIds() throws Exception; } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONUpdatePartitionColumnStatMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONUpdatePartitionColumnStatMessage.java index fd7fe00419..2e4d9de9fd 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONUpdatePartitionColumnStatMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONUpdatePartitionColumnStatMessage.java @@ -52,6 +52,9 @@ @JsonProperty private String tableObjJson; + @JsonProperty + private String writeIds; + /** * Default constructor, needed for Jackson. */ @@ -61,7 +64,7 @@ public JSONUpdatePartitionColumnStatMessage() { public JSONUpdatePartitionColumnStatMessage(String server, String servicePrincipal, Long timestamp, ColumnStatistics colStats, List partVals, Map parameters, - Table tableObj, long writeId) { + Table tableObj, long writeId, String writeIds) { this.timestamp = timestamp; this.server = server; this.servicePrincipal = servicePrincipal; @@ -75,6 +78,7 @@ public JSONUpdatePartitionColumnStatMessage(String server, String servicePrincip throw new IllegalArgumentException("Could not serialize JSONUpdatePartitionColumnStatMessage : ", e); } this.parameters = parameters; + this.writeIds = writeIds; } @Override @@ -126,6 +130,11 @@ public Table getTableObject() throws Exception { return (Table) MessageBuilder.getTObj(tableObjJson, Table.class); } + @Override + public String getWriteIds() { + return writeIds; + } + @Override public String toString() { try { diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONUpdateTableColumnStatMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONUpdateTableColumnStatMessage.java index 275d204957..40636c8ad2 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONUpdateTableColumnStatMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONUpdateTableColumnStatMessage.java @@ -47,6 +47,9 @@ @JsonProperty private String tableObjJson; + @JsonProperty + private String writeIds; + /** * Default constructor, needed for Jackson. 
*/ @@ -55,7 +58,7 @@ public JSONUpdateTableColumnStatMessage() { public JSONUpdateTableColumnStatMessage(String server, String servicePrincipal, Long timestamp, ColumnStatistics colStats, Table tableObj, Map parameters, - long writeId) { + long writeId, String writeIds) { this.timestamp = timestamp; this.server = server; this.servicePrincipal = servicePrincipal; @@ -68,6 +71,7 @@ public JSONUpdateTableColumnStatMessage(String server, String servicePrincipal, throw new IllegalArgumentException("Could not serialize JSONUpdateTableColumnStatMessage : ", e); } this.parameters = parameters; + this.writeIds = writeIds; } @Override @@ -114,6 +118,11 @@ public Long getWriteId() { return parameters; } + @Override + public String getWriteIds() { + return writeIds; + } + @Override public String toString() { try { diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metrics/MetricsConstants.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metrics/MetricsConstants.java index 24c8c4cc3a..a8baf155c5 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metrics/MetricsConstants.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metrics/MetricsConstants.java @@ -45,4 +45,7 @@ public static final String TOTAL_DATABASES = "total_count_dbs"; public static final String TOTAL_TABLES = "total_count_tables"; public static final String TOTAL_PARTITIONS = "total_count_partitions"; + + public static final String METADATA_CACHE_HIT = "metadata_cache_hit"; + public static final String METADATA_CACHE_MISS = "metadata_cache_miss"; } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java index 83306bf653..d5ded9bafe 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java @@ -56,6 +56,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.common.ValidReadTxnList; import org.apache.hadoop.hive.common.ValidReaderWriteIdList; import org.apache.hadoop.hive.common.ValidTxnList; @@ -1504,6 +1505,46 @@ private ValidTxnList getValidTxnList(Connection dbConn, String fullTableName, Lo } } + @Override + @RetrySemantics.ReadOnly + public GetTxnTableWriteIdsResponse getTxnTableWriteIds(long txnId) throws MetaException { + try { + PreparedStatement pst = null; + ResultSet rs = null; + Connection dbConn = null; + try { + /** + * This runs at READ_COMMITTED for exactly the same reason as {@link #getOpenTxnsInfo()} + */ + dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); + + List params = Arrays.asList(Long.toString(txnId)); + String s = "select t2w_database, t2w_table, t2w_writeid from TXN_TO_WRITE_ID where t2w_txnid = ?"; + pst = sqlGenerator.prepareStmtWithParameters(dbConn, s, params); + LOG.debug("Going to execute query <" + s.replaceAll("\\?", "{}") + ">", txnId); + rs = pst.executeQuery(); + List tableWriteIds = new ArrayList<>(); + while (rs.next()) { + tableWriteIds.add(new TableWriteId(TableName.getDbTable(rs.getString(1),
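[Sketch, not part of the patch] The two new constants suggest counting lookups the write-ID check lets the cache serve versus those it must pass through; the actual wiring is not shown in this hunk. Hypothetical instrumentation using the existing metastore Metrics helper:

    if (cached != null) {
      Metrics.getOrCreateCounter(MetricsConstants.METADATA_CACHE_HIT).inc();
    } else {
      Metrics.getOrCreateCounter(MetricsConstants.METADATA_CACHE_MISS).inc();
    }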
rs.getString(2)), rs.getLong(3))); + } + return new GetTxnTableWriteIdsResponse(tableWriteIds); + } catch (SQLException e) { + LOG.debug("Going to rollback"); + rollbackDBConn(dbConn); + checkRetryable(dbConn, e, "getTxnTableWriteIds(" + txnId + ")"); + throw new MetaException("Unable to get table write ids for txn " + txnId + " " + + StringUtils.stringifyException(e)); + } finally { + closeStmt(pst); + close(rs); + closeDbConn(dbConn); + unlockInternal(); + } + } catch (RetryException e) { + return getTxnTableWriteIds(txnId); + } + } + @Override @RetrySemantics.ReadOnly public GetValidWriteIdsResponse getValidWriteIds(GetValidWriteIdsRequest rqst) throws MetaException { diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java index e840758c9d..ae66663c5c 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java @@ -23,6 +23,7 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidTxnWriteIdList; import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.common.classification.RetrySemantics; import org.apache.hadoop.hive.metastore.api.*; @@ -170,6 +171,9 @@ long cleanupMaterializationRebuildLocks(ValidTxnList validTxnList, long timeout) GetValidWriteIdsResponse getValidWriteIds(GetValidWriteIdsRequest rqst) throws NoSuchTxnException, MetaException; + @RetrySemantics.ReadOnly + public GetTxnTableWriteIdsResponse getTxnTableWriteIds(long txnId) throws MetaException; + /** * Allocate a write ID for the given table and associate it with a transaction * @param rqst info on transaction and table to allocate write id diff --git a/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql b/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql index e8af9a1b11..c02c38abb0 100644 --- a/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql +++ b/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql @@ -1118,6 +1118,7 @@ CREATE TABLE TXN_TO_WRITE_ID ( CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_TXNID); CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_WRITEID); +CREATE INDEX TBL_TO_WRITE_ID_IDX2 ON TXN_TO_WRITE_ID (T2W_WRITEID); CREATE TABLE NEXT_WRITE_ID ( NWI_DATABASE varchar(128) NOT NULL, diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java index da3c42a1d5..b9681384f6 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java @@ -256,11 +256,6 @@ public boolean dropTable(String catName, String dbName, String tableName) return objectStore.dropTable(catName, dbName, tableName); } - @Override - public Table getTable(String
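[Sketch, not part of the patch] getTxnTableWriteIds gives commit processing the full set of (table, write ID) pairs a transaction touched, which is exactly what SharedCache.commitWriteId needs to revalidate entries. A hypothetical commit hook, assuming a TxnStore handle; the TableWriteId accessor names are assumptions, since that class is introduced elsewhere in this patch:

    GetTxnTableWriteIdsResponse resp = txnStore.getTxnTableWriteIds(txnId);
    for (TableWriteId tw : resp.getTableWriteIds()) { // assumed Thrift-style getter
      String[] dbTbl = tw.getFullTableName().split("\\."); // "db.table" per TableName.getDbTable
      sharedCache.commitWriteId(DEFAULT_CATALOG_NAME, dbTbl[0], dbTbl[1], tw.getWriteId());
    }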
catName, String dbName, String tableName) throws MetaException { - return objectStore.getTable(catName, dbName, tableName); - } - @Override public Table getTable(String catName, String dbName, String tableName, String writeIdList) throws MetaException { @@ -273,12 +268,6 @@ public boolean addPartition(Partition part) return objectStore.addPartition(part); } - @Override - public Partition getPartition(String catName, String dbName, String tableName, List partVals) - throws MetaException, NoSuchObjectException { - return objectStore.getPartition(catName, dbName, tableName, partVals); - } - @Override public Partition getPartition(String catName, String dbName, String tableName, List partVals, String writeIdList) @@ -294,15 +283,15 @@ public boolean dropPartition(String catName, String dbName, String tableName, Li } @Override - public List getPartitions(String catName, String dbName, String tableName, int max) + public List getPartitions(String catName, String dbName, String tableName, int max, String validWriteIdList) throws MetaException, NoSuchObjectException { - return objectStore.getPartitions(catName, dbName, tableName, max); + return objectStore.getPartitions(catName, dbName, tableName, max, validWriteIdList); } @Override public Map getPartitionLocations(String catName, String dbName, String tblName, - String baseLocationToNotShow, int max) { - return objectStore.getPartitionLocations(catName, dbName, tblName, baseLocationToNotShow, max); + String baseLocationToNotShow, int max, String validWriteIdList) { + return objectStore.getPartitionLocations(catName, dbName, tblName, baseLocationToNotShow, max, validWriteIdList); } @Override @@ -363,15 +352,15 @@ public void updateCreationMetadata(String catName, String dbname, String tablena } @Override - public List listPartitionNames(String catName, String dbName, String tblName, short maxParts) + public List listPartitionNames(String catName, String dbName, String tblName, short maxParts, String validWriteIdList) throws MetaException { - return objectStore.listPartitionNames(catName, dbName, tblName, maxParts); + return objectStore.listPartitionNames(catName, dbName, tblName, maxParts, validWriteIdList); } @Override public PartitionValuesResponse listPartitionValues(String catName, String db_name, String tbl_name, List cols, boolean applyDistinct, String filter, - boolean ascending, List order, long maxParts) throws MetaException { + boolean ascending, List order, long maxParts, String validWriteIdList) throws MetaException { return null; } @@ -391,40 +380,40 @@ public Partition alterPartition(String catName, String dbName, String tblName, L @Override public List getPartitionsByFilter(String catName, String dbName, String tblName, - String filter, short maxParts) throws MetaException, NoSuchObjectException { - return objectStore.getPartitionsByFilter(catName, dbName, tblName, filter, maxParts); + String filter, short maxParts, String validWriteIdList) throws MetaException, NoSuchObjectException { + return objectStore.getPartitionsByFilter(catName, dbName, tblName, filter, maxParts, validWriteIdList); } @Override public List getPartitionSpecsByFilterAndProjection(Table table, - GetPartitionsProjectionSpec projectionSpec, GetPartitionsFilterSpec filterSpec) + GetPartitionsProjectionSpec projectionSpec, GetPartitionsFilterSpec filterSpec, String validWriteIdList) throws MetaException, NoSuchObjectException { - return objectStore.getPartitionSpecsByFilterAndProjection(table, projectionSpec, filterSpec); + return 
objectStore.getPartitionSpecsByFilterAndProjection(table, projectionSpec, filterSpec, validWriteIdList); } @Override public int getNumPartitionsByFilter(String catName, String dbName, String tblName, - String filter) throws MetaException, NoSuchObjectException { - return objectStore.getNumPartitionsByFilter(catName, dbName, tblName, filter); + String filter, String validWriteIdList) throws MetaException, NoSuchObjectException { + return objectStore.getNumPartitionsByFilter(catName, dbName, tblName, filter, validWriteIdList); } @Override public int getNumPartitionsByExpr(String catName, String dbName, String tblName, - byte[] expr) throws MetaException, NoSuchObjectException { - return objectStore.getNumPartitionsByExpr(catName, dbName, tblName, expr); + byte[] expr, String validWriteIdList) throws MetaException, NoSuchObjectException { + return objectStore.getNumPartitionsByExpr(catName, dbName, tblName, expr, validWriteIdList); } @Override public List getPartitionsByNames(String catName, String dbName, String tblName, - List partNames) throws MetaException, NoSuchObjectException { - return objectStore.getPartitionsByNames(catName, dbName, tblName, partNames); + List partNames, String validWriteIdList) throws MetaException, NoSuchObjectException { + return objectStore.getPartitionsByNames(catName, dbName, tblName, partNames, validWriteIdList); } @Override public boolean getPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr, - String defaultPartitionName, short maxParts, List result) throws TException { + String defaultPartitionName, short maxParts, List result, String validWriteIdList) throws TException { return objectStore.getPartitionsByExpr(catName, - dbName, tblName, expr, defaultPartitionName, maxParts, result); + dbName, tblName, expr, defaultPartitionName, maxParts, result, validWriteIdList); } @Override @@ -591,33 +580,33 @@ public Role getRole(String roleName) throws NoSuchObjectException { @Override public Partition getPartitionWithAuth(String catName, String dbName, String tblName, - List partVals, String userName, List groupNames) + List partVals, String userName, List groupNames, String validWriteIdList) throws MetaException, NoSuchObjectException, InvalidObjectException { return objectStore.getPartitionWithAuth(catName, dbName, tblName, partVals, userName, - groupNames); + groupNames, validWriteIdList); } @Override public List getPartitionsWithAuth(String catName, String dbName, String tblName, - short maxParts, String userName, List groupNames) + short maxParts, String userName, List groupNames, String validWriteIdList) throws MetaException, NoSuchObjectException, InvalidObjectException { return objectStore.getPartitionsWithAuth(catName, dbName, tblName, maxParts, userName, - groupNames); + groupNames, validWriteIdList); } @Override public List listPartitionNamesPs(String catName, String dbName, String tblName, - List partVals, short maxParts) + List partVals, short maxParts, String validWriteIdList) throws MetaException, NoSuchObjectException { - return objectStore.listPartitionNamesPs(catName, dbName, tblName, partVals, maxParts); + return objectStore.listPartitionNamesPs(catName, dbName, tblName, partVals, maxParts, validWriteIdList); } @Override public List listPartitionsPsWithAuth(String catName, String dbName, String tblName, - List partVals, short maxParts, String userName, List groupNames) + List partVals, short maxParts, String userName, List groupNames, String validWriteIdList) throws MetaException, InvalidObjectException, 
NoSuchObjectException { return objectStore.listPartitionsPsWithAuth(catName, dbName, tblName, partVals, maxParts, - userName, groupNames); + userName, groupNames, validWriteIdList); } @Override @@ -690,17 +679,8 @@ public long cleanupEvents() { @Override public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tableName, - List colNames) throws MetaException, NoSuchObjectException { - return objectStore.getTableColumnStatistics(catName, dbName, tableName, colNames); - } - - @Override - public ColumnStatistics getTableColumnStatistics(String catName, String dbName, - String tableName, List colNames, - String writeIdList) - throws MetaException, NoSuchObjectException { - return objectStore.getTableColumnStatistics( - catName, dbName, tableName, colNames, writeIdList); + List colNames, String validWriteIdList) throws MetaException, NoSuchObjectException { + return objectStore.getTableColumnStatistics(catName, dbName, tableName, colNames, validWriteIdList); } @Override @@ -788,13 +768,6 @@ public void setMetaStoreSchemaVersion(String schemaVersion, String comment) thro } - @Override - public List getPartitionColumnStatistics(String catName, String dbName, - String tblName, List colNames, List partNames) - throws MetaException, NoSuchObjectException { - return objectStore.getPartitionColumnStatistics(catName, dbName, tblName , colNames, partNames); - } - @Override public List getPartitionColumnStatistics( String catName, String dbName, String tblName, List partNames, @@ -806,9 +779,9 @@ public void setMetaStoreSchemaVersion(String schemaVersion, String comment) thro @Override public boolean doesPartitionExist(String catName, String dbName, String tableName, - List partKeys, List partVals) + List partKeys, List partVals, String validWriteIdList) throws MetaException, NoSuchObjectException { - return objectStore.doesPartitionExist(catName, dbName, tableName, partKeys, partVals); + return objectStore.doesPartitionExist(catName, dbName, tableName, partKeys, partVals, validWriteIdList); } @Override @@ -865,13 +838,6 @@ public Function getFunction(String catName, String dbName, String funcName) return objectStore.getFunctions(catName, dbName, pattern); } - @Override - public AggrStats get_aggr_stats_for(String catName, String dbName, - String tblName, List partNames, List colNames) - throws MetaException { - return null; - } - @Override public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, List partNames, diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java index a018c503d1..5226db3b13 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java @@ -239,12 +239,6 @@ public boolean dropTable(String catName, String dbName, String tableName) throws return false; } - @Override - public Table getTable(String catName, String dbName, String tableName) throws MetaException { - - return null; - } - @Override public Table getTable(String catalogName, String dbName, String tableName, String writeIdList) throws MetaException { @@ -257,13 +251,6 @@ public boolean addPartition(Partition part) throws InvalidObjectException, MetaE return false; } - @Override - 
public Partition getPartition(String catName, String dbName, String tableName, List part_vals) - throws MetaException, NoSuchObjectException { - - return null; - } - @Override public Partition getPartition(String catName, String dbName, String tableName, List part_vals, String writeIdList) @@ -279,7 +266,7 @@ public boolean dropPartition(String catName, String dbName, String tableName, Li } @Override - public List getPartitions(String catName, String dbName, String tableName, int max) + public List getPartitions(String catName, String dbName, String tableName, int max, String validWriteIdList) throws MetaException { return Collections.emptyList(); @@ -287,7 +274,7 @@ public boolean dropPartition(String catName, String dbName, String tableName, Li @Override public Map getPartitionLocations(String catName, String dbName, String tblName, - String baseLocationToNotShow, int max) { + String baseLocationToNotShow, int max, String validWriteIdList) { return Collections.emptyMap(); } @@ -350,7 +337,7 @@ public void updateCreationMetadata(String catName, String dbname, String tablena } @Override - public List listPartitionNames(String catName, String db_name, String tbl_name, short max_parts) + public List listPartitionNames(String catName, String db_name, String tbl_name, short max_parts, String validWriteIdList) throws MetaException { return Collections.emptyList(); @@ -361,7 +348,7 @@ public PartitionValuesResponse listPartitionValues(String catName, String db_nam String tbl_name, List cols, boolean applyDistinct, String filter, boolean ascending, List order, - long maxParts) throws MetaException { + long maxParts, String validWriteIdList) throws MetaException { return null; } @@ -380,7 +367,7 @@ public Partition alterPartition(String catName, String db_name, String tbl_name, @Override public List getPartitionsByFilter(String catName, String dbName, String tblName, - String filter, short maxParts) + String filter, short maxParts, String validWriteIdList) throws MetaException, NoSuchObjectException { return Collections.emptyList(); @@ -388,32 +375,32 @@ public Partition alterPartition(String catName, String db_name, String tbl_name, @Override public List getPartitionSpecsByFilterAndProjection(Table table, - GetPartitionsProjectionSpec projectSpec, GetPartitionsFilterSpec filterSpec) + GetPartitionsProjectionSpec projectSpec, GetPartitionsFilterSpec filterSpec, String validWriteIdList) throws MetaException, NoSuchObjectException { return Collections.emptyList(); } @Override public List getPartitionsByNames(String catName, String dbName, String tblName, - List partNames) throws MetaException, NoSuchObjectException { + List partNames, String validWriteIdList) throws MetaException, NoSuchObjectException { return Collections.emptyList(); } @Override public boolean getPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr, - String defaultPartitionName, short maxParts, List result) throws TException { + String defaultPartitionName, short maxParts, List result, String validWriteIdList) throws TException { return false; } @Override - public int getNumPartitionsByFilter(String catName, String dbName, String tblName, String filter) + public int getNumPartitionsByFilter(String catName, String dbName, String tblName, String filter, String validWriteIdList) throws MetaException, NoSuchObjectException { return -1; } @Override - public int getNumPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr) + public int getNumPartitionsByExpr(String catName, String dbName, 
String tblName, byte[] expr, String validWriteIdList) throws MetaException, NoSuchObjectException { return -1; } @@ -594,7 +581,7 @@ public Role getRole(String roleName) throws NoSuchObjectException { @Override public Partition getPartitionWithAuth(String catName, String dbName, String tblName, List partVals, - String user_name, List group_names) throws MetaException, NoSuchObjectException, + String user_name, List group_names, String validWriteIdList) throws MetaException, NoSuchObjectException, InvalidObjectException { return null; @@ -602,7 +589,7 @@ public Partition getPartitionWithAuth(String catName, String dbName, String tblN @Override public List getPartitionsWithAuth(String catName, String dbName, String tblName, short maxParts, - String userName, List groupNames) throws MetaException, NoSuchObjectException, + String userName, List groupNames, String validWriteIdList) throws MetaException, NoSuchObjectException, InvalidObjectException { return Collections.emptyList(); @@ -610,14 +597,14 @@ public Partition getPartitionWithAuth(String catName, String dbName, String tblN @Override public List listPartitionNamesPs(String catName, String db_name, String tbl_name, List part_vals, - short max_parts) throws MetaException, NoSuchObjectException { + short max_parts, String validWriteIdList) throws MetaException, NoSuchObjectException { return Collections.emptyList(); } @Override public List listPartitionsPsWithAuth(String catName, String db_name, String tbl_name, - List part_vals, short max_parts, String userName, List groupNames) + List part_vals, short max_parts, String userName, List groupNames, String validWriteIdList) throws MetaException, InvalidObjectException, NoSuchObjectException { return Collections.emptyList(); @@ -728,12 +715,6 @@ public boolean removeMasterKey(Integer keySeq) { return Collections.emptyList(); } - @Override - public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tableName, - List colName) throws MetaException, NoSuchObjectException { - return null; - } - @Override public ColumnStatistics getTableColumnStatistics( String catName, String dbName, String tableName, List colName, @@ -786,13 +767,6 @@ public String getMetaStoreSchemaVersion() throws MetaException { public void setMetaStoreSchemaVersion(String version, String comment) throws MetaException { } - @Override - public List getPartitionColumnStatistics(String catName, String dbName, - String tblName, List colNames, List partNames) - throws MetaException, NoSuchObjectException { - return Collections.emptyList(); - } - @Override public List getPartitionColumnStatistics( String catName, String dbName, String tblName, List partNames, @@ -803,7 +777,7 @@ public void setMetaStoreSchemaVersion(String version, String comment) throws Met @Override public boolean doesPartitionExist(String catName, String dbName, String tableName, - List partKeys, List partVals) + List partKeys, List partVals, String validWriteIdList) throws MetaException, NoSuchObjectException { return false; } @@ -857,13 +831,6 @@ public Function getFunction(String catName, String dbName, String funcName) return Collections.emptyList(); } - @Override - public AggrStats get_aggr_stats_for(String catName, String dbName, - String tblName, List partNames, List colNames) - throws MetaException { - return null; - } - @Override public AggrStats get_aggr_stats_for( String catName, String dbName, String tblName, List partNames, diff --git 
a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java index 4878a47b2d..eddca3d97c 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java @@ -57,6 +57,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidTxnWriteIdList; import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.metastore.api.*; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; @@ -1253,21 +1254,21 @@ public boolean dropType(String type) throws NoSuchObjectException, MetaException @Override public List listPartitions(String db_name, String tbl_name, short max_parts) throws NoSuchObjectException, MetaException, TException { - List parts = client.get_partitions(db_name, tbl_name, max_parts); + List parts = client.get_partitions(db_name, tbl_name, max_parts, null); return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts)); } @Override public PartitionSpecProxy listPartitionSpecs(String dbName, String tableName, int maxParts) throws TException { return PartitionSpecProxy.Factory.get(filterHook.filterPartitionSpecs( - client.get_partitions_pspec(dbName, tableName, maxParts))); + client.get_partitions_pspec(dbName, tableName, maxParts, null))); } @Override public List listPartitions(String db_name, String tbl_name, List part_vals, short max_parts) throws NoSuchObjectException, MetaException, TException { - List parts = client.get_partitions_ps(db_name, tbl_name, part_vals, max_parts); + List parts = client.get_partitions_ps(db_name, tbl_name, part_vals, max_parts, null); return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts)); } @@ -1276,7 +1277,7 @@ public PartitionSpecProxy listPartitionSpecs(String dbName, String tableName, in String tbl_name, short max_parts, String user_name, List group_names) throws NoSuchObjectException, MetaException, TException { List parts = client.get_partitions_with_auth(db_name, tbl_name, max_parts, - user_name, group_names); + user_name, group_names, null); return fastpath ? parts :deepCopyPartitions(filterHook.filterPartitions(parts)); } @@ -1286,7 +1287,7 @@ public PartitionSpecProxy listPartitionSpecs(String dbName, String tableName, in String user_name, List group_names) throws NoSuchObjectException, MetaException, TException { List parts = client.get_partitions_ps_with_auth(db_name, - tbl_name, part_vals, max_parts, user_name, group_names); + tbl_name, part_vals, max_parts, user_name, group_names, null); return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts)); } @@ -1308,7 +1309,7 @@ public PartitionSpecProxy listPartitionSpecs(String dbName, String tableName, in public List listPartitionsByFilter(String db_name, String tbl_name, String filter, short max_parts) throws MetaException, NoSuchObjectException, TException { - List parts = client.get_partitions_by_filter(db_name, tbl_name, filter, max_parts); + List parts = client.get_partitions_by_filter(db_name, tbl_name, filter, max_parts, null); return fastpath ? 
parts :deepCopyPartitions(filterHook.filterPartitions(parts)); } @@ -1317,7 +1318,7 @@ public PartitionSpecProxy listPartitionSpecsByFilter(String db_name, String tbl_ String filter, int max_parts) throws MetaException, NoSuchObjectException, TException { return PartitionSpecProxy.Factory.get(filterHook.filterPartitionSpecs( - client.get_part_specs_by_filter(db_name, tbl_name, filter, max_parts))); + client.get_part_specs_by_filter(db_name, tbl_name, filter, max_parts, null))); } @Override @@ -1383,7 +1384,7 @@ public Database getDatabase(String name) throws NoSuchObjectException, @Override public Partition getPartition(String db_name, String tbl_name, List part_vals) throws NoSuchObjectException, MetaException, TException { - Partition p = client.get_partition(db_name, tbl_name, part_vals); + Partition p = client.get_partition(db_name, tbl_name, part_vals, null); return fastpath ? p : deepCopy(filterHook.filterPartition(p)); } @@ -1416,7 +1417,7 @@ public Partition getPartitionWithAuthInfo(String db_name, String tbl_name, throws MetaException, UnknownTableException, NoSuchObjectException, TException { Partition p = client.get_partition_with_auth(db_name, tbl_name, part_vals, user_name, - group_names); + group_names, null); return fastpath ? p : deepCopy(filterHook.filterPartition(p)); } @@ -1597,7 +1598,7 @@ public boolean tableExists(String databaseName, String tableName) throws MetaExc public List listPartitionNames(String dbName, String tblName, short max) throws NoSuchObjectException, MetaException, TException { return filterHook.filterPartitionNames(null, dbName, tblName, - client.get_partition_names(dbName, tblName, max)); + client.get_partition_names(dbName, tblName, max, null)); } @Override @@ -1605,7 +1606,7 @@ public boolean tableExists(String databaseName, String tableName) throws MetaExc List part_vals, short max_parts) throws MetaException, TException, NoSuchObjectException { return filterHook.filterPartitionNames(null, db_name, tbl_name, - client.get_partition_names_ps(db_name, tbl_name, part_vals, max_parts)); + client.get_partition_names_ps(db_name, tbl_name, part_vals, max_parts, null)); } /** @@ -1624,7 +1625,7 @@ public boolean tableExists(String databaseName, String tableName) throws MetaExc public int getNumPartitionsByFilter(String db_name, String tbl_name, String filter) throws MetaException, NoSuchObjectException, TException { - return client.get_num_partitions_by_filter(db_name, tbl_name, filter); + return client.get_num_partitions_by_filter(db_name, tbl_name, filter, null); } @Override @@ -1689,7 +1690,7 @@ public void alterDatabase(String dbName, Database db) public List getFields(String db, String tableName) throws MetaException, TException, UnknownTableException, UnknownDBException { - List fields = client.get_fields(db, tableName); + List fields = client.get_fields(db, tableName, null); return fastpath ? fields : deepCopyFieldSchemas(fields); } @@ -1846,7 +1847,7 @@ public boolean deleteTableColumnStatistics(String dbName, String tableName, Stri envCxt = new EnvironmentContext(props); } - List fields = client.get_schema_with_environment_context(db, tableName, envCxt); + List fields = client.get_schema_with_environment_context(db, tableName, envCxt, null); return fastpath ? 
fields : deepCopyFieldSchemas(fields); } @@ -1859,7 +1860,7 @@ public String getConfigValue(String name, String defaultValue) @Override public Partition getPartition(String db, String tableName, String partName) throws MetaException, TException, UnknownTableException, NoSuchObjectException { - Partition p = client.get_partition_by_name(db, tableName, partName); + Partition p = client.get_partition_by_name(db, tableName, partName, null); return fastpath ? p : deepCopy(filterHook.filterPartition(p)); } @@ -3669,4 +3670,9 @@ public void setHadoopJobid(String jobId, long cqId) throws MetaException, TExcep public String getServerVersion() throws TException { return client.getVersion(); } + + @Override + public void setValidWriteIdList(String txnWriteIdList) { + throw new UnsupportedOperationException(); + } } diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java index 6c7fe116cc..4f314012c0 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java @@ -153,11 +153,6 @@ public static void resetAlterTableModifier() { } // ObjectStore methods to be overridden with injected behavior - @Override - public Table getTable(String catName, String dbName, String tableName) throws MetaException { - return getTableModifier.apply(super.getTable(catName, dbName, tableName)); - } - @Override public Table getTable(String catName, String dbName, String tableName, String writeIdList) throws MetaException { return getTableModifier.apply(super.getTable(catName, dbName, tableName, writeIdList)); @@ -165,14 +160,14 @@ public Table getTable(String catName, String dbName, String tableName, String wr @Override public Partition getPartition(String catName, String dbName, String tableName, - List partVals) throws NoSuchObjectException, MetaException { - return getPartitionModifier.apply(super.getPartition(catName, dbName, tableName, partVals)); + List partVals, String validWriteIdList) throws NoSuchObjectException, MetaException { + return getPartitionModifier.apply(super.getPartition(catName, dbName, tableName, partVals, validWriteIdList)); } @Override - public List listPartitionNames(String catName, String dbName, String tableName, short max) + public List listPartitionNames(String catName, String dbName, String tableName, short max, String validWriteIdList) throws MetaException { - return listPartitionNamesModifier.apply(super.listPartitionNames(catName, dbName, tableName, max)); + return listPartitionNamesModifier.apply(super.listPartitionNames(catName, dbName, tableName, max, validWriteIdList)); } @Override diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveAlterHandler.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveAlterHandler.java index 88d5e716e1..9ab8890d4b 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveAlterHandler.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveAlterHandler.java @@ -57,7 +57,7 @@ public void testAlterTableAddColNotUpdateStats() throws MetaException, InvalidOb RawStore msdb = 
Mockito.mock(RawStore.class); Mockito.doThrow(new RuntimeException("shouldn't be called")).when(msdb).getTableColumnStatistics( - getDefaultCatalog(conf), oldTable.getDbName(), oldTable.getTableName(), Arrays.asList("col1", "col2", "col3")); + getDefaultCatalog(conf), oldTable.getDbName(), oldTable.getTableName(), Arrays.asList("col1", "col2", "col3"), null); HiveAlterHandler handler = new HiveAlterHandler(); handler.setConf(conf); handler.alterTableUpdateTableColumnStats(msdb, oldTable, newTable, null, null, conf, null); @@ -93,7 +93,7 @@ public void testAlterTableDelColUpdateStats() throws Exception { throw t; } Mockito.verify(msdb, Mockito.times(1)).getTableColumnStatistics( - getDefaultCatalog(conf), oldTable.getDbName(), oldTable.getTableName(), Arrays.asList("col1", "col2", "col3", "col4") + getDefaultCatalog(conf), oldTable.getDbName(), oldTable.getTableName(), Arrays.asList("col1", "col2", "col3", "col4"), null ); } @@ -118,7 +118,7 @@ public void testAlterTableChangePosNotUpdateStats() throws MetaException, Invali RawStore msdb = Mockito.mock(RawStore.class); Mockito.doThrow(new RuntimeException("shouldn't be called")).when(msdb).getTableColumnStatistics( - getDefaultCatalog(conf), oldTable.getDbName(), oldTable.getTableName(), Arrays.asList("col1", "col2", "col3", "col4")); + getDefaultCatalog(conf), oldTable.getDbName(), oldTable.getTableName(), Arrays.asList("col1", "col2", "col3", "col4"), null); HiveAlterHandler handler = new HiveAlterHandler(); handler.setConf(conf); handler.alterTableUpdateTableColumnStats(msdb, oldTable, newTable, null, null, conf, null); diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java index 1f7f69a86a..2157310766 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java @@ -282,7 +282,7 @@ public void testTableOps() throws MetaException, InvalidObjectException, NoSuchO Assert.assertEquals("new" + TABLE1, tables.get(0)); // Verify fields were altered during the alterTable operation - Table alteredTable = objectStore.getTable(DEFAULT_CATALOG_NAME, DB1, "new" + TABLE1); + Table alteredTable = objectStore.getTable(DEFAULT_CATALOG_NAME, DB1, "new" + TABLE1, null); Assert.assertEquals("Owner of table was not altered", newTbl1.getOwner(), alteredTable.getOwner()); Assert.assertEquals("Owner type of table was not altered", newTbl1.getOwnerType(), alteredTable.getOwnerType()); @@ -370,19 +370,19 @@ public void testPartitionOps() throws MetaException, InvalidObjectException, objectStore.addPartition(part2); Deadline.startTimer("getPartition"); - List partitions = objectStore.getPartitions(DEFAULT_CATALOG_NAME, DB1, TABLE1, 10); + List partitions = objectStore.getPartitions(DEFAULT_CATALOG_NAME, DB1, TABLE1, 10, null); Assert.assertEquals(2, partitions.size()); Assert.assertEquals(111, partitions.get(0).getCreateTime()); Assert.assertEquals(222, partitions.get(1).getCreateTime()); - int numPartitions = objectStore.getNumPartitionsByFilter(DEFAULT_CATALOG_NAME, DB1, TABLE1, ""); + int numPartitions = objectStore.getNumPartitionsByFilter(DEFAULT_CATALOG_NAME, DB1, TABLE1, "", null); Assert.assertEquals(partitions.size(), numPartitions); - numPartitions = objectStore.getNumPartitionsByFilter(DEFAULT_CATALOG_NAME, DB1, 
TABLE1, "country = \"US\""); + numPartitions = objectStore.getNumPartitionsByFilter(DEFAULT_CATALOG_NAME, DB1, TABLE1, "country = \"US\"", null); Assert.assertEquals(2, numPartitions); objectStore.dropPartition(DEFAULT_CATALOG_NAME, DB1, TABLE1, value1); - partitions = objectStore.getPartitions(DEFAULT_CATALOG_NAME, DB1, TABLE1, 10); + partitions = objectStore.getPartitions(DEFAULT_CATALOG_NAME, DB1, TABLE1, 10, null); Assert.assertEquals(1, partitions.size()); Assert.assertEquals(222, partitions.get(0).getCreateTime()); @@ -788,7 +788,7 @@ private static void dropAllStoreObjects(RawStore store) List tbls = store.getAllTables(DEFAULT_CATALOG_NAME, db); for (String tbl : tbls) { Deadline.startTimer("getPartition"); - List parts = store.getPartitions(DEFAULT_CATALOG_NAME, db, tbl, 100); + List parts = store.getPartitions(DEFAULT_CATALOG_NAME, db, tbl, 100, null); for (Partition part : parts) { store.dropPartition(DEFAULT_CATALOG_NAME, db, tbl, part.getValues()); } diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java index 27c5bba5f7..669213b727 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java @@ -199,7 +199,7 @@ public void checkStats(AggrStats aggrStats) throws Exception { partNames.add("ds=" + i); } AggrStats aggrStats = store.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tableName, partNames, - Arrays.asList("col1")); + Arrays.asList("col1"), null); statChecker.checkStats(aggrStats); } @@ -218,7 +218,7 @@ private static void dropAllStoreObjects(RawStore store) throws MetaException, String db = dbs.get(i); List tbls = store.getAllTables(DEFAULT_CATALOG_NAME, db); for (String tbl : tbls) { - List parts = store.getPartitions(DEFAULT_CATALOG_NAME, db, tbl, 100); + List parts = store.getPartitions(DEFAULT_CATALOG_NAME, db, tbl, 100, null); for (Partition part : parts) { store.dropPartition(DEFAULT_CATALOG_NAME, db, tbl, part.getValues()); } diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/VerifyingObjectStore.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/VerifyingObjectStore.java index c9a6a471cb..9339309553 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/VerifyingObjectStore.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/VerifyingObjectStore.java @@ -52,7 +52,7 @@ public VerifyingObjectStore() { @Override public List getPartitionsByFilter(String catName, String dbName, String tblName, - String filter, short maxParts) + String filter, short maxParts, String validWriteIdList) throws MetaException, NoSuchObjectException { List sqlResults = getPartitionsByFilterInternal( catName, dbName, tblName, filter, maxParts, true, false); @@ -64,7 +64,7 @@ public VerifyingObjectStore() { @Override public List getPartitionsByNames(String catName, String dbName, String tblName, - List partNames) throws MetaException, NoSuchObjectException { + List partNames, String validWriteIdList) throws MetaException, NoSuchObjectException { List sqlResults = getPartitionsByNamesInternal( catName, dbName, tblName, partNames, true, false); List ormResults = getPartitionsByNamesInternal( @@ 
-75,7 +75,7 @@ public VerifyingObjectStore() { @Override public boolean getPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr, - String defaultPartitionName, short maxParts, List result) throws TException { + String defaultPartitionName, short maxParts, List result, String validWriteIdList) throws TException { List ormParts = new LinkedList<>(); boolean sqlResult = getPartitionsByExprInternal( catName, dbName, tblName, expr, defaultPartitionName, maxParts, result, true, false); @@ -92,7 +92,7 @@ public boolean getPartitionsByExpr(String catName, String dbName, String tblName @Override public List getPartitions( - String catName, String dbName, String tableName, int maxParts) throws MetaException, NoSuchObjectException { + String catName, String dbName, String tableName, int maxParts, String validWriteIdList) throws MetaException, NoSuchObjectException { List sqlResults = getPartitionsInternal(catName, dbName, tableName, maxParts, true, false); List ormResults = getPartitionsInternal(catName, dbName, tableName, maxParts, false, true); verifyLists(sqlResults, ormResults, Partition.class); @@ -101,7 +101,7 @@ public boolean getPartitionsByExpr(String catName, String dbName, String tblName @Override public ColumnStatistics getTableColumnStatistics(String catName, String dbName, - String tableName, List colNames) throws MetaException, NoSuchObjectException { + String tableName, List colNames, String validWriteIdList) throws MetaException, NoSuchObjectException { ColumnStatistics sqlResult = getTableColumnStatisticsInternal( catName, dbName, tableName, colNames, true, false); ColumnStatistics jdoResult = getTableColumnStatisticsInternal( @@ -112,7 +112,7 @@ public ColumnStatistics getTableColumnStatistics(String catName, String dbName, @Override public List getPartitionColumnStatistics(String catName, String dbName, - String tableName, List partNames, List colNames) + String tableName, List partNames, List colNames, String validWriteIdList) throws MetaException, NoSuchObjectException { List sqlResult = getPartitionColumnStatisticsInternal( catName, dbName, tableName, partNames, colNames, true, false); diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java index 420369d792..ce84c0f506 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java @@ -19,6 +19,7 @@ import java.util.ArrayList; import java.util.Arrays; +import java.util.BitSet; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -28,6 +29,9 @@ import java.util.concurrent.ThreadFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.common.TableName; +import org.apache.hadoop.hive.common.ValidReaderWriteIdList; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.common.ndv.hll.HyperLogLog; import org.apache.hadoop.hive.metastore.Deadline; import org.apache.hadoop.hive.metastore.HiveMetaStore; @@ -164,7 +168,7 @@ ObjectStore objectStore = (ObjectStore) cachedStore.getRawStore(); // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); - CachedStore.prewarm(objectStore); + CachedStore.prewarm(objectStore, conf); List allDatabases = 
cachedStore.getAllDatabases(DEFAULT_CATALOG_NAME); Assert.assertEquals(2, allDatabases.size()); Assert.assertTrue(allDatabases.contains(db1.getName())); @@ -179,19 +183,23 @@ Assert.assertTrue(db2Tables.contains(db2Ptbl1.getTableName())); // cs_db1_ptntbl1 List db1Ptbl1Partitions = - cachedStore.getPartitions(DEFAULT_CATALOG_NAME, db1.getName(), db1Ptbl1.getTableName(), -1); + cachedStore.getPartitions(DEFAULT_CATALOG_NAME, db1.getName(), db1Ptbl1.getTableName(), -1, + fromCache(db1.getName(), db1Ptbl1.getTableName()).toString()); Assert.assertEquals(25, db1Ptbl1Partitions.size()); Deadline.startTimer(""); List db1Ptbl1PartitionsOS = - objectStore.getPartitions(DEFAULT_CATALOG_NAME, db2.getName(), db1Ptbl1.getTableName(), -1); + objectStore.getPartitions(DEFAULT_CATALOG_NAME, db2.getName(), db1Ptbl1.getTableName(), -1, + fromCache(db2.getName(), db1Ptbl1.getTableName()).toString()); Assert.assertTrue(db1Ptbl1Partitions.containsAll(db1Ptbl1PartitionsOS)); // cs_db2_ptntbl1 List db2Ptbl1Partitions = - cachedStore.getPartitions(DEFAULT_CATALOG_NAME, db2.getName(), db2Ptbl1.getTableName(), -1); + cachedStore.getPartitions(DEFAULT_CATALOG_NAME, db2.getName(), db2Ptbl1.getTableName(), -1, + fromCache(db2.getName(), db2Ptbl1.getTableName()).toString()); Assert.assertEquals(25, db2Ptbl1Partitions.size()); Deadline.startTimer(""); List db2Ptbl1PartitionsOS = - objectStore.getPartitions(DEFAULT_CATALOG_NAME, db2.getName(), db2Ptbl1.getTableName(), -1); + objectStore.getPartitions(DEFAULT_CATALOG_NAME, db2.getName(), db2Ptbl1.getTableName(), -1, + fromCache(db2.getName(), db2Ptbl1.getTableName()).toString()); Assert.assertTrue(db2Ptbl1Partitions.containsAll(db2Ptbl1PartitionsOS)); cachedStore.shutdown(); } @@ -209,7 +217,7 @@ ObjectStore objectStore = (ObjectStore) cachedStore.getRawStore(); // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); - CachedStore.prewarm(objectStore); + CachedStore.prewarm(objectStore, conf); SharedCache sharedCache = CachedStore.getSharedCache(); // cachedStore.getAllTables falls back to objectStore when whitelist/blacklist is set List db1Tables = sharedCache.listCachedTableNames(DEFAULT_CATALOG_NAME, db1.getName()); @@ -232,7 +240,7 @@ ObjectStore objectStore = (ObjectStore) cachedStore.getRawStore(); // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); - CachedStore.prewarm(objectStore); + CachedStore.prewarm(objectStore, conf); SharedCache sharedCache = CachedStore.getSharedCache(); // cachedStore.getAllTables falls back to objectStore when whitelist/blacklist is set List db1Tables = sharedCache.listCachedTableNames(DEFAULT_CATALOG_NAME, db1.getName()); @@ -256,7 +264,7 @@ public void testPrewarmMemoryEstimation() throws Exception { ObjectStore objectStore = (ObjectStore) cachedStore.getRawStore(); // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); - CachedStore.prewarm(objectStore); + CachedStore.prewarm(objectStore, conf); SharedCache sharedCache = CachedStore.getSharedCache(); List db1Tables = sharedCache.listCachedTableNames(DEFAULT_CATALOG_NAME, db1.getName()); Assert.assertEquals(2, db1Tables.size()); @@ -265,88 +273,6 @@ public void testPrewarmMemoryEstimation() throws Exception { cachedStore.shutdown(); } - @Test public void testCacheUpdate() throws Exception { - Configuration conf = MetastoreConf.newMetastoreConf(); - MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true); - MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CACHED_RAW_STORE_MAX_CACHE_MEMORY, "-1Kb"); - 
MetaStoreTestUtils.setConfForStandloneMode(conf); - CachedStore cachedStore = new CachedStore(); - CachedStore.clearSharedCache(); - cachedStore.setConfForTest(conf); - ObjectStore objectStore = (ObjectStore) cachedStore.getRawStore(); - // Prewarm CachedStore - CachedStore.setCachePrewarmedState(false); - CachedStore.prewarm(objectStore); - // Drop basedb1's unpartitioned table - objectStore.dropTable(DEFAULT_CATALOG_NAME, db1Utbl1.getDbName(), db1Utbl1.getTableName()); - Deadline.startTimer(""); - // Drop a partitions of basedb1's partitioned table - objectStore.dropPartitions(DEFAULT_CATALOG_NAME, db1Ptbl1.getDbName(), db1Ptbl1.getTableName(), db1Ptbl1PtnNames); - // Update SharedCache - updateCache(cachedStore); - List allDatabases = cachedStore.getAllDatabases(DEFAULT_CATALOG_NAME); - Assert.assertEquals(2, allDatabases.size()); - Assert.assertTrue(allDatabases.contains(db1.getName())); - Assert.assertTrue(allDatabases.contains(db2.getName())); - // cs_db1_ptntbl1 - List db1Tbls = cachedStore.getAllTables(DEFAULT_CATALOG_NAME, db1.getName()); - Assert.assertEquals(1, db1Tbls.size()); - Assert.assertTrue(db1Tbls.contains(db1Ptbl1.getTableName())); - List db1Ptns = - cachedStore.getPartitions(DEFAULT_CATALOG_NAME, db1.getName(), db1Ptbl1.getTableName(), -1); - Assert.assertEquals(0, db1Ptns.size()); - // cs_db2_ptntbl1 - List db2Tbls = cachedStore.getAllTables(DEFAULT_CATALOG_NAME, db2.getName()); - Assert.assertEquals(2, db2Tbls.size()); - Assert.assertTrue(db2Tbls.contains(db2Utbl1.getTableName())); - Assert.assertTrue(db2Tbls.contains(db2Ptbl1.getTableName())); - List db2Ptns = - cachedStore.getPartitions(DEFAULT_CATALOG_NAME, db2.getName(), db2Ptbl1.getTableName(), -1); - Assert.assertEquals(25, db2Ptns.size()); - Deadline.startTimer(""); - List db2PtnsOS = - objectStore.getPartitions(DEFAULT_CATALOG_NAME, db2.getName(), db2Ptbl1.getTableName(), -1); - Assert.assertTrue(db2Ptns.containsAll(db2PtnsOS)); - // Create a new unpartitioned table under basedb1 - Table db1Utbl2 = createUnpartitionedTableObject(db1); - db1Utbl2.setTableName(db1.getName() + "_unptntbl2"); - objectStore.createTable(db1Utbl2); - // Add a new partition to db1PartitionedTable - // Create partitions for cs_db1's partitioned table - db1Ptbl1Ptns = createPartitionObjects(db1Ptbl1).getPartitions(); - Deadline.startTimer(""); - objectStore.addPartition(db1Ptbl1Ptns.get(0)); - objectStore.addPartition(db1Ptbl1Ptns.get(1)); - objectStore.addPartition(db1Ptbl1Ptns.get(2)); - objectStore.addPartition(db1Ptbl1Ptns.get(3)); - objectStore.addPartition(db1Ptbl1Ptns.get(4)); - updateCache(cachedStore); - allDatabases = cachedStore.getAllDatabases(DEFAULT_CATALOG_NAME); - Assert.assertEquals(2, allDatabases.size()); - Assert.assertTrue(allDatabases.contains(db1.getName())); - Assert.assertTrue(allDatabases.contains(db2.getName())); - db1Tbls = cachedStore.getAllTables(DEFAULT_CATALOG_NAME, db1.getName()); - Assert.assertEquals(2, db1Tbls.size()); - Assert.assertTrue(db1Tbls.contains(db1Ptbl1.getTableName())); - Assert.assertTrue(db1Tbls.contains(db1Utbl2.getTableName())); - db2Tbls = cachedStore.getAllTables(DEFAULT_CATALOG_NAME, db2.getName()); - Assert.assertEquals(2, db2Tbls.size()); - Assert.assertTrue(db2Tbls.contains(db2Utbl1.getTableName())); - Assert.assertTrue(db2Tbls.contains(db2Ptbl1.getTableName())); - // cs_db1_ptntbl1 - db1Ptns = cachedStore.getPartitions(DEFAULT_CATALOG_NAME, db1.getName(), db1Ptbl1.getTableName(), -1); - Assert.assertEquals(5, db1Ptns.size()); - // cs_db2_ptntbl1 - db2Ptns = 
cachedStore.getPartitions(DEFAULT_CATALOG_NAME, db2.getName(), db2Ptbl1.getTableName(), -1); - Assert.assertEquals(25, db2Ptns.size()); - Deadline.startTimer(""); - db2PtnsOS = objectStore.getPartitions(DEFAULT_CATALOG_NAME, db2.getName(), db2Ptbl1.getTableName(), -1); - Assert.assertTrue(db2Ptns.containsAll(db2PtnsOS)); - // Clean up - objectStore.dropTable(DEFAULT_CATALOG_NAME, db1Utbl2.getDbName(), db1Utbl2.getTableName()); - cachedStore.shutdown(); - } - @Test public void testCreateAndGetDatabase() throws Exception { Configuration conf = MetastoreConf.newMetastoreConf(); MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true); @@ -364,7 +290,7 @@ public void testPrewarmMemoryEstimation() throws Exception { db = objectStore.getDatabase(DEFAULT_CATALOG_NAME, dbName); // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); - CachedStore.prewarm(objectStore); + CachedStore.prewarm(objectStore, conf); // Read database via CachedStore Database dbRead = cachedStore.getDatabase(DEFAULT_CATALOG_NAME, dbName); Assert.assertEquals(db, dbRead); @@ -403,7 +329,7 @@ public void testPrewarmMemoryEstimation() throws Exception { db = objectStore.getDatabase(DEFAULT_CATALOG_NAME, dbName); // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); - CachedStore.prewarm(objectStore); + CachedStore.prewarm(objectStore, conf); // Read database via CachedStore Database dbRead = cachedStore.getDatabase(DEFAULT_CATALOG_NAME, dbName); Assert.assertEquals(db, dbRead); @@ -424,11 +350,7 @@ public void testPrewarmMemoryEstimation() throws Exception { Assert.assertEquals(localDb1, dbRead); allDatabases = cachedStore.getAllDatabases(DEFAULT_CATALOG_NAME); Assert.assertEquals(3, allDatabases.size()); - objectStore.dropDatabase(DEFAULT_CATALOG_NAME, dbName1); - updateCache(cachedStore); - updateCache(cachedStore); - allDatabases = cachedStore.getAllDatabases(DEFAULT_CATALOG_NAME); - Assert.assertEquals(2, allDatabases.size()); + cachedStore.dropDatabase(DEFAULT_CATALOG_NAME, dbName1); cachedStore.shutdown(); } @@ -443,7 +365,7 @@ public void testPrewarmMemoryEstimation() throws Exception { ObjectStore objectStore = (ObjectStore) cachedStore.getRawStore(); // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); - CachedStore.prewarm(objectStore); + CachedStore.prewarm(objectStore, conf); // Read database via CachedStore List allDatabases = cachedStore.getAllDatabases(DEFAULT_CATALOG_NAME); Assert.assertEquals(2, allDatabases.size()); @@ -457,17 +379,6 @@ public void testPrewarmMemoryEstimation() throws Exception { // Read db via ObjectStore Database dbRead = objectStore.getDatabase(DEFAULT_CATALOG_NAME, dbName); Assert.assertEquals(db, dbRead); - // Alter db via ObjectStore - dbOwner = "user3"; - db = new Database(db1); - db.setOwnerName(dbOwner); - objectStore.alterDatabase(DEFAULT_CATALOG_NAME, dbName, db); - db = objectStore.getDatabase(DEFAULT_CATALOG_NAME, dbName); - updateCache(cachedStore); - updateCache(cachedStore); - // Read db via CachedStore - dbRead = cachedStore.getDatabase(DEFAULT_CATALOG_NAME, dbName); - Assert.assertEquals(db, dbRead); cachedStore.shutdown(); } @@ -482,7 +393,7 @@ public void testPrewarmMemoryEstimation() throws Exception { ObjectStore objectStore = (ObjectStore) cachedStore.getRawStore(); // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); - CachedStore.prewarm(objectStore); + CachedStore.prewarm(objectStore, conf); // Read database via CachedStore List allDatabases = cachedStore.getAllDatabases(DEFAULT_CATALOG_NAME); 
Assert.assertEquals(2, allDatabases.size()); @@ -497,23 +408,14 @@ public void testPrewarmMemoryEstimation() throws Exception { cachedStore.createTable(db1Utbl2); db1Tables = cachedStore.getAllTables(DEFAULT_CATALOG_NAME, db1.getName()); Assert.assertEquals(3, db1Tables.size()); - db1Utbl2 = cachedStore.getTable(DEFAULT_CATALOG_NAME, db1Utbl2.getDbName(), db1Utbl2.getTableName()); - Table tblRead = objectStore.getTable(DEFAULT_CATALOG_NAME, db1Utbl2.getDbName(), db1Utbl2.getTableName()); + db1Utbl2 = cachedStore.getTable(DEFAULT_CATALOG_NAME, db1Utbl2.getDbName(), db1Utbl2.getTableName(), + fromCache(db1Utbl2.getDbName(), db1Utbl2.getTableName()).toString()); + Table tblRead = objectStore.getTable(DEFAULT_CATALOG_NAME, db1Utbl2.getDbName(), db1Utbl2.getTableName(), + fromCache(db1Utbl2.getDbName(), db1Utbl2.getTableName()).toString()); Assert.assertEquals(db1Utbl2, tblRead); - // Create a new unpartitioned table under basedb2 via ObjectStore - Table db2Utbl2 = createUnpartitionedTableObject(db2); - db2Utbl2.setTableName(db2.getName() + "_unptntbl2"); - objectStore.createTable(db2Utbl2); - db2Utbl2 = objectStore.getTable(DEFAULT_CATALOG_NAME, db2Utbl2.getDbName(), db2Utbl2.getTableName()); - updateCache(cachedStore); - db2Tables = cachedStore.getAllTables(DEFAULT_CATALOG_NAME, db2.getName()); - Assert.assertEquals(3, db2Tables.size()); - tblRead = cachedStore.getTable(DEFAULT_CATALOG_NAME, db2Utbl2.getDbName(), db2Utbl2.getTableName()); - Assert.assertEquals(db2Utbl2, tblRead); - // Clean up objectStore.dropTable(DEFAULT_CATALOG_NAME, db1Utbl2.getDbName(), db1Utbl2.getTableName()); - db1Utbl2 = cachedStore.getTable(DEFAULT_CATALOG_NAME, db1Utbl2.getDbName(), db1Utbl2.getTableName()); - objectStore.dropTable(DEFAULT_CATALOG_NAME, db2Utbl2.getDbName(), db2Utbl2.getTableName()); + db1Utbl2 = cachedStore.getTable(DEFAULT_CATALOG_NAME, db1Utbl2.getDbName(), db1Utbl2.getTableName(), + fromCache(db1Utbl2.getDbName(), db1Utbl2.getTableName()).toString()); cachedStore.shutdown(); } @@ -530,7 +432,7 @@ public void testGetAllTablesPrewarmMemoryLimit() throws Exception { ObjectStore objectStore = (ObjectStore) cachedStore.getRawStore(); // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); - CachedStore.prewarm(objectStore); + CachedStore.prewarm(objectStore, conf); SharedCache sharedCache = CachedStore.getSharedCache(); List db1Tables = sharedCache.listCachedTableNames(DEFAULT_CATALOG_NAME, db1.getName()); Assert.assertEquals(2, db1Tables.size()); @@ -555,7 +457,7 @@ public void testGetAllTablesPrewarmMemoryLimit() throws Exception { ObjectStore objectStore = (ObjectStore) cachedStore.getRawStore(); // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); - CachedStore.prewarm(objectStore); + CachedStore.prewarm(objectStore, conf); SharedCache sharedCache = CachedStore.getSharedCache(); // cachedStore.getAllTables falls back to objectStore when whitelist/blacklist is set List db1Tables = sharedCache.listCachedTableNames(DEFAULT_CATALOG_NAME, db1.getName()); @@ -580,7 +482,7 @@ public void testGetAllTablesPrewarmMemoryLimit() throws Exception { ObjectStore objectStore = (ObjectStore) cachedStore.getRawStore(); // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); - CachedStore.prewarm(objectStore); + CachedStore.prewarm(objectStore, conf); SharedCache sharedCache = CachedStore.getSharedCache(); // cachedStore.getAllTables falls back to objectStore when whitelist/blacklist is set List db1Tables = sharedCache.listCachedTableNames(DEFAULT_CATALOG_NAME, db1.getName()); @@ 
-603,7 +505,7 @@ public void testGetAllTablesPrewarmMemoryLimit() throws Exception { ObjectStore objectStore = (ObjectStore) cachedStore.getRawStore(); // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); - CachedStore.prewarm(objectStore); + CachedStore.prewarm(objectStore, conf); List db1Tables = cachedStore.getTables(DEFAULT_CATALOG_NAME, db1.getName(), "cs_db1.*"); Assert.assertEquals(2, db1Tables.size()); db1Tables = cachedStore.getTables(DEFAULT_CATALOG_NAME, db1.getName(), "cs_db1.un*"); @@ -628,35 +530,26 @@ public void testGetAllTablesPrewarmMemoryLimit() throws Exception { ObjectStore objectStore = (ObjectStore) cachedStore.getRawStore(); // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); - CachedStore.prewarm(objectStore); + CachedStore.prewarm(objectStore, conf); List db1Tables = cachedStore.getAllTables(DEFAULT_CATALOG_NAME, db1.getName()); Assert.assertEquals(2, db1Tables.size()); List db2Tables = cachedStore.getAllTables(DEFAULT_CATALOG_NAME, db2.getName()); Assert.assertEquals(2, db2Tables.size()); // Alter table db1Utbl1 via CachedStore and read via ObjectStore - Table db1Utbl1Read = cachedStore.getTable(DEFAULT_CATALOG_NAME, db1Utbl1.getDbName(), db1Utbl1.getTableName()); + Table db1Utbl1Read = cachedStore.getTable(DEFAULT_CATALOG_NAME, db1Utbl1.getDbName(), db1Utbl1.getTableName(), + fromCache(db1Utbl1.getDbName(), db1Utbl1.getTableName()).toString()); String newOwner = "newOwner"; Table db1Utbl1ReadAlt = new Table(db1Utbl1Read); db1Utbl1ReadAlt.setOwner(newOwner); cachedStore .alterTable(DEFAULT_CATALOG_NAME, db1Utbl1Read.getDbName(), db1Utbl1Read.getTableName(), db1Utbl1ReadAlt, "0"); db1Utbl1Read = - cachedStore.getTable(DEFAULT_CATALOG_NAME, db1Utbl1ReadAlt.getDbName(), db1Utbl1ReadAlt.getTableName()); + cachedStore.getTable(DEFAULT_CATALOG_NAME, db1Utbl1ReadAlt.getDbName(), db1Utbl1ReadAlt.getTableName(), + fromCache(db1Utbl1ReadAlt.getDbName(), db1Utbl1ReadAlt.getTableName()).toString()); Table db1Utbl1ReadOS = - objectStore.getTable(DEFAULT_CATALOG_NAME, db1Utbl1ReadAlt.getDbName(), db1Utbl1ReadAlt.getTableName()); + objectStore.getTable(DEFAULT_CATALOG_NAME, db1Utbl1ReadAlt.getDbName(), db1Utbl1ReadAlt.getTableName(), + fromCache(db1Utbl1ReadAlt.getDbName(), db1Utbl1ReadAlt.getTableName()).toString()); Assert.assertEquals(db1Utbl1Read, db1Utbl1ReadOS); - // Alter table db2Utbl1 via ObjectStore and read via CachedStore - Table db2Utbl1Read = objectStore.getTable(DEFAULT_CATALOG_NAME, db2Utbl1.getDbName(), db2Utbl1.getTableName()); - Table db2Utbl1ReadAlt = new Table(db2Utbl1Read); - db2Utbl1ReadAlt.setOwner(newOwner); - objectStore - .alterTable(DEFAULT_CATALOG_NAME, db2Utbl1Read.getDbName(), db2Utbl1Read.getTableName(), db2Utbl1ReadAlt, "0"); - updateCache(cachedStore); - db2Utbl1Read = - objectStore.getTable(DEFAULT_CATALOG_NAME, db2Utbl1ReadAlt.getDbName(), db2Utbl1ReadAlt.getTableName()); - Table d21Utbl1ReadCS = - cachedStore.getTable(DEFAULT_CATALOG_NAME, db2Utbl1ReadAlt.getDbName(), db2Utbl1ReadAlt.getTableName()); - Assert.assertEquals(db2Utbl1Read, d21Utbl1ReadCS); cachedStore.shutdown(); } @@ -671,65 +564,21 @@ public void testGetAllTablesPrewarmMemoryLimit() throws Exception { ObjectStore objectStore = (ObjectStore) cachedStore.getRawStore(); // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); - CachedStore.prewarm(objectStore); + CachedStore.prewarm(objectStore, conf); List db1Tables = cachedStore.getAllTables(DEFAULT_CATALOG_NAME, db1.getName()); Assert.assertEquals(2, db1Tables.size()); List db2Tables = 
cachedStore.getAllTables(DEFAULT_CATALOG_NAME, db2.getName()); Assert.assertEquals(2, db2Tables.size()); // Drop table db1Utbl1 via CachedStore and read via ObjectStore - Table db1Utbl1Read = cachedStore.getTable(DEFAULT_CATALOG_NAME, db1Utbl1.getDbName(), db1Utbl1.getTableName()); + Table db1Utbl1Read = cachedStore.getTable(DEFAULT_CATALOG_NAME, db1Utbl1.getDbName(), db1Utbl1.getTableName(), + fromCache(db1Utbl1.getDbName(), db1Utbl1.getTableName()).toString()); cachedStore.dropTable(DEFAULT_CATALOG_NAME, db1Utbl1Read.getDbName(), db1Utbl1Read.getTableName()); db1Tables = cachedStore.getAllTables(DEFAULT_CATALOG_NAME, db1.getName()); Assert.assertEquals(1, db1Tables.size()); Table db1Utbl1ReadOS = - objectStore.getTable(DEFAULT_CATALOG_NAME, db1Utbl1Read.getDbName(), db1Utbl1Read.getTableName()); + objectStore.getTable(DEFAULT_CATALOG_NAME, db1Utbl1Read.getDbName(), db1Utbl1Read.getTableName(), + fromCache(db1Utbl1Read.getDbName(), db1Utbl1Read.getTableName()).toString()); Assert.assertNull(db1Utbl1ReadOS); - // Drop table db2Utbl1 via ObjectStore and read via CachedStore - Table db2Utbl1Read = objectStore.getTable(DEFAULT_CATALOG_NAME, db2Utbl1.getDbName(), db2Utbl1.getTableName()); - objectStore.dropTable(DEFAULT_CATALOG_NAME, db2Utbl1Read.getDbName(), db2Utbl1Read.getTableName()); - db2Tables = objectStore.getAllTables(DEFAULT_CATALOG_NAME, db2.getName()); - Assert.assertEquals(1, db2Tables.size()); - updateCache(cachedStore); - db2Tables = cachedStore.getAllTables(DEFAULT_CATALOG_NAME, db2.getName()); - Assert.assertEquals(1, db2Tables.size()); - Table db2Utbl1ReadCS = - cachedStore.getTable(DEFAULT_CATALOG_NAME, db2Utbl1Read.getDbName(), db2Utbl1Read.getTableName()); - Assert.assertNull(db2Utbl1ReadCS); - cachedStore.shutdown(); - } - - /********************************************************************************************** - * Methods that test SharedCache - * @throws MetaException - * @throws NoSuchObjectException - *********************************************************************************************/ - - @Test public void testSharedStoreDb() throws NoSuchObjectException, MetaException { - Configuration conf = MetastoreConf.newMetastoreConf(); - MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true); - MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CACHED_RAW_STORE_MAX_CACHE_MEMORY, "-1Kb"); - MetaStoreTestUtils.setConfForStandloneMode(conf); - CachedStore cachedStore = new CachedStore(); - CachedStore.clearSharedCache(); - cachedStore.setConfForTest(conf); - SharedCache sharedCache = CachedStore.getSharedCache(); - - Database localDb1 = createDatabaseObject("db1", "user1"); - Database localDb2 = createDatabaseObject("db2", "user1"); - Database localDb3 = createDatabaseObject("db3", "user1"); - Database newDb1 = createDatabaseObject("newdb1", "user1"); - sharedCache.addDatabaseToCache(localDb1); - sharedCache.addDatabaseToCache(localDb2); - sharedCache.addDatabaseToCache(localDb3); - Assert.assertEquals(sharedCache.getCachedDatabaseCount(), 3); - sharedCache.alterDatabaseInCache(DEFAULT_CATALOG_NAME, "db1", newDb1); - Assert.assertEquals(sharedCache.getCachedDatabaseCount(), 3); - sharedCache.removeDatabaseFromCache(DEFAULT_CATALOG_NAME, "db2"); - Assert.assertEquals(sharedCache.getCachedDatabaseCount(), 2); - List dbs = sharedCache.listCachedDatabases(DEFAULT_CATALOG_NAME); - Assert.assertEquals(dbs.size(), 2); - Assert.assertTrue(dbs.contains("newdb1")); - Assert.assertTrue(dbs.contains("db3")); cachedStore.shutdown(); } @@ -793,15 +642,15 @@ 
public void testGetAllTablesPrewarmMemoryLimit() throws Exception { newTbl1.setSd(newSd1); newTbl1.setPartitionKeys(new ArrayList<>()); - sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db1", "tbl1", tbl1); - sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db1", "tbl2", tbl2); - sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db1", "tbl3", tbl3); - sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db2", "tbl1", tbl1); + sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db1", "tbl1", tbl1, null); + sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db1", "tbl2", tbl2, null); + sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db1", "tbl3", tbl3, null); + sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db2", "tbl1", tbl1, null); Assert.assertEquals(sharedCache.getCachedTableCount(), 4); Assert.assertEquals(sharedCache.getSdCache().size(), 2); - Table t = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, "db1", "tbl1"); + Table t = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, "db1", "tbl1", fromCache("db1", "tbl1")); Assert.assertEquals(t.getSd().getLocation(), "loc1"); sharedCache.removeTableFromCache(DEFAULT_CATALOG_NAME, "db1", "tbl1"); @@ -831,8 +680,6 @@ public void testGetAllTablesPrewarmMemoryLimit() throws Exception { String tbl1Name = "tbl1"; String tbl2Name = "tbl2"; String owner = "user1"; - Database db = createDatabaseObject(dbName, owner); - sharedCache.addDatabaseToCache(db); FieldSchema col1 = new FieldSchema("col1", "int", "integer column"); FieldSchema col2 = new FieldSchema("col2", "string", "string column"); List cols = new ArrayList(); @@ -840,9 +687,9 @@ public void testGetAllTablesPrewarmMemoryLimit() throws Exception { cols.add(col2); List ptnCols = new ArrayList(); Table tbl1 = createTestTbl(dbName, tbl1Name, owner, cols, ptnCols); - sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, tbl1); + sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, tbl1, null); Table tbl2 = createTestTbl(dbName, tbl2Name, owner, cols, ptnCols); - sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, dbName, tbl2Name, tbl2); + sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, dbName, tbl2Name, tbl2, null); Partition part1 = new Partition(); StorageDescriptor sd1 = new StorageDescriptor(); @@ -979,9 +826,11 @@ public void testAggrStatsRepeatedRead() throws Exception { List aggrPartVals = new ArrayList<>(); aggrPartVals.add("1"); aggrPartVals.add("2"); - AggrStats aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames); + AggrStats aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames, + fromCache(dbName, tblName).toString()); Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100); - aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames); + aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames, + fromCache(dbName, tblName).toString()); Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100); objectStore.deletePartitionColumnStatistics(DEFAULT_CATALOG_NAME, db.getName(), tbl.getTableName(), @@ -1064,10 +913,12 @@ public void testPartitionAggrStats() throws Exception { List aggrPartVals = new ArrayList<>(); aggrPartVals.add("1"); aggrPartVals.add("2"); - AggrStats aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, 
aggrPartVals, colNames); + AggrStats aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames, + fromCache(dbName, tblName).toString()); Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100); Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumDVs(), 40); - aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames); + aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames, + fromCache(dbName, tblName).toString()); Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100); Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumDVs(), 40); cachedStore.shutdown(); @@ -1157,10 +1008,12 @@ public void testPartitionAggrStatsBitVector() throws Exception { List aggrPartVals = new ArrayList<>(); aggrPartVals.add("1"); aggrPartVals.add("2"); - AggrStats aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames); + AggrStats aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames, + fromCache(dbName, tblName).toString()); Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100); Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumDVs(), 5); - aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames); + aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames, + fromCache(dbName, tblName).toString()); Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100); Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumDVs(), 5); cachedStore.shutdown(); @@ -1186,24 +1039,6 @@ public void testPartitionAggrStatsBitVector() throws Exception { } }); - // Create 5 dbs - for (String dbName : dbNames) { - Callable c = new Callable() { - public Object call() { - Database db = createDatabaseObject(dbName, "user1"); - sharedCache.addDatabaseToCache(db); - return null; - } - }; - tasks.add(c); - } - executor.invokeAll(tasks); - for (String dbName : dbNames) { - Database db = sharedCache.getDatabaseFromCache(DEFAULT_CATALOG_NAME, dbName); - Assert.assertNotNull(db); - Assert.assertEquals(dbName, db.getName()); - } - // Created 5 tables under "db1" List tblNames = new ArrayList(Arrays.asList("tbl1", "tbl2", "tbl3", "tbl4", "tbl5")); tasks.clear(); @@ -1219,7 +1054,7 @@ public Object call() { Callable c = new Callable() { public Object call() { Table tbl = createTestTbl(dbNames.get(0), tblName, "user1", cols, ptnCols); - sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, tbl); + sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, tbl, null); return null; } }; @@ -1227,7 +1062,8 @@ public Object call() { } executor.invokeAll(tasks); for (String tblName : tblNames) { - Table tbl = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName); + Table tbl = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, + fromCache(dbNames.get(0), tblName)); Assert.assertNotNull(tbl); Assert.assertEquals(tblName, tbl.getTableName()); } @@ -1236,7 +1072,8 @@ public Object call() { List ptnVals = new 
ArrayList(Arrays.asList("aaa", "bbb", "ccc", "ddd", "eee")); tasks.clear(); for (String tblName : tblNames) { - Table tbl = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName); + Table tbl = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, + fromCache(dbNames.get(0), tblName)); for (String ptnVal : ptnVals) { Map partParams = new HashMap(); Callable c = new Callable() { @@ -1279,7 +1116,8 @@ public Object call() { } } for (String tblName : addPtnTblNames) { - Table tbl = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName); + Table tbl = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, + fromCache(dbNames.get(0), tblName)); for (String ptnVal : newPtnVals) { Map partParams = new HashMap(); Callable c = new Callable() { @@ -1396,9 +1234,8 @@ public Object call() { sharedCache.setTableSizeMap(tableSizeMap); sharedCache.initialize(conf); - sharedCache.addDatabaseToCache(db); - sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, tbl1); - sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, dbName, tbl2Name, tbl2); + sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, tbl1, null); + sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, dbName, tbl2Name, tbl2, null); sharedCache.addPartitionToCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, part1); sharedCache.addPartitionToCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, part2); @@ -1445,7 +1282,7 @@ public Object call() { // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); - CachedStore.prewarm(objectStore); + CachedStore.prewarm(objectStore, conf); List db1Tables = cachedStore.getAllTables(DEFAULT_CATALOG_NAME, db1.getName()); Assert.assertEquals(2, db1Tables.size()); @@ -1479,22 +1316,20 @@ public Object call() { tableSizeMap.put(db1Ptbl1TblKey, 4000); tableSizeMap.put(db2Utbl1TblKey, 4000); tableSizeMap.put(db2Ptbl1TblKey, 4000); - Table tblDb1Utbl1 = objectStore.getTable(DEFAULT_CATALOG_NAME, db1Utbl1.getDbName(), db1Utbl1.getTableName()); - Table tblDb1Ptbl1 = objectStore.getTable(DEFAULT_CATALOG_NAME, db1Ptbl1.getDbName(), db1Ptbl1.getTableName()); - Table tblDb2Utbl1 = objectStore.getTable(DEFAULT_CATALOG_NAME, db2Utbl1.getDbName(), db2Utbl1.getTableName()); - Table tblDb2Ptbl1 = objectStore.getTable(DEFAULT_CATALOG_NAME, db2Ptbl1.getDbName(), db2Ptbl1.getTableName()); + Table tblDb1Utbl1 = objectStore.getTable(DEFAULT_CATALOG_NAME, db1Utbl1.getDbName(), db1Utbl1.getTableName(), null); + Table tblDb1Ptbl1 = objectStore.getTable(DEFAULT_CATALOG_NAME, db1Ptbl1.getDbName(), db1Ptbl1.getTableName(), null); + Table tblDb2Utbl1 = objectStore.getTable(DEFAULT_CATALOG_NAME, db2Utbl1.getDbName(), db2Utbl1.getTableName(), null); + Table tblDb2Ptbl1 = objectStore.getTable(DEFAULT_CATALOG_NAME, db2Ptbl1.getDbName(), db2Ptbl1.getTableName(), null); SharedCache sc = cachedStore.getSharedCache(); sc.setConcurrencyLevel(1); sc.setTableSizeMap(tableSizeMap); sc.initialize(conf); - sc.addDatabaseToCache(db1); - sc.addDatabaseToCache(db2); - sc.addTableToCache(DEFAULT_CATALOG_NAME, db1Utbl1.getDbName(), db1Utbl1.getTableName(), tblDb1Utbl1); - sc.addTableToCache(DEFAULT_CATALOG_NAME, db1Ptbl1.getDbName(), db1Ptbl1.getTableName(), tblDb1Ptbl1); - sc.addTableToCache(DEFAULT_CATALOG_NAME, db2Utbl1.getDbName(), db2Utbl1.getTableName(), tblDb2Utbl1); - sc.addTableToCache(DEFAULT_CATALOG_NAME, db2Ptbl1.getDbName(), db2Ptbl1.getTableName(), tblDb2Ptbl1); + sc.addTableToCache(DEFAULT_CATALOG_NAME, db1Utbl1.getDbName(), 
db1Utbl1.getTableName(), tblDb1Utbl1, null); + sc.addTableToCache(DEFAULT_CATALOG_NAME, db1Ptbl1.getDbName(), db1Ptbl1.getTableName(), tblDb1Ptbl1, null); + sc.addTableToCache(DEFAULT_CATALOG_NAME, db2Utbl1.getDbName(), db2Utbl1.getTableName(), tblDb2Utbl1, null); + sc.addTableToCache(DEFAULT_CATALOG_NAME, db2Ptbl1.getDbName(), db2Ptbl1.getTableName(), tblDb2Ptbl1, null); List db1Tables = sc.listCachedTableNames(DEFAULT_CATALOG_NAME, db1.getName()); Assert.assertEquals(0, db1Tables.size()); @@ -1781,18 +1616,8 @@ private PartitionObjectsAndNames createPartitionObjects(Table table) { } } - // This method will return only after the cache has updated once - private void updateCache(CachedStore cachedStore) throws Exception { - int maxTries = 100; - long updateCountBefore = cachedStore.getCacheUpdateCount(); - // Start the CachedStore update service - CachedStore.startCacheUpdateService(cachedStore.getConf(), true, false); - while ((cachedStore.getCacheUpdateCount() != (updateCountBefore + 1)) && (maxTries-- > 0)) { - Thread.sleep(1000); - } - if (maxTries <= 0) { - throw new Exception("Unable to update SharedCache in 100 attempts; possibly some bug"); - } - CachedStore.stopCacheUpdateService(100); + public static ValidWriteIdList fromCache(String dbName, String tableName) { + String fullTableName = TableName.getDbTable(dbName, tableName); + return new ValidReaderWriteIdList(fullTableName, new long[0], new BitSet(), Long.MAX_VALUE); } } diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCatalogCaching.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCatalogCaching.java deleted file mode 100644 index 423dce8a68..0000000000 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCatalogCaching.java +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hive.metastore.cache; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hive.metastore.HiveMetaStore; -import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; -import org.apache.hadoop.hive.metastore.ObjectStore; -import org.apache.hadoop.hive.metastore.Warehouse; -import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; -import org.apache.hadoop.hive.metastore.api.Catalog; -import org.apache.hadoop.hive.metastore.api.InvalidOperationException; -import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; -import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; -import org.apache.hadoop.hive.metastore.conf.MetastoreConf; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -import java.util.Comparator; -import java.util.List; - -/** - * Tests that catalogs are properly cached. - */ -@Category(MetastoreCheckinTest.class) -public class TestCatalogCaching { - private static final String CAT1_NAME = "cat1"; - private static final String CAT2_NAME = "cat2"; - - private ObjectStore objectStore; - private Configuration conf; - private CachedStore cachedStore; - - @Before - public void createObjectStore() throws MetaException, InvalidOperationException { - conf = MetastoreConf.newMetastoreConf(); - MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true); - MetaStoreTestUtils.setConfForStandloneMode(conf); - objectStore = new ObjectStore(); - objectStore.setConf(conf); - - // Create three catalogs - HiveMetaStore.HMSHandler.createDefaultCatalog(objectStore, new Warehouse(conf)); - - Catalog cat1 = new CatalogBuilder() - .setName(CAT1_NAME) - .setLocation("/tmp/cat1") - .build(); - objectStore.createCatalog(cat1); - Catalog cat2 = new CatalogBuilder() - .setName(CAT2_NAME) - .setLocation("/tmp/cat2") - .build(); - objectStore.createCatalog(cat2); - } - - @After - public void clearCatalogCache() throws MetaException, NoSuchObjectException { - List catalogs = objectStore.getCatalogs(); - for (String catalog : catalogs) objectStore.dropCatalog(catalog); - } - - @Test - public void defaultHiveOnly() throws Exception { - // By default just the Hive catalog should be cached. - cachedStore = new CachedStore(); - cachedStore.setConf(conf); - CachedStore.stopCacheUpdateService(1); - cachedStore.resetCatalogCache(); - - CachedStore.prewarm(objectStore); - - // Only the hive catalog should be cached - List cachedCatalogs = cachedStore.getCatalogs(); - Assert.assertEquals(1, cachedCatalogs.size()); - Assert.assertEquals(Warehouse.DEFAULT_CATALOG_NAME, cachedCatalogs.get(0)); - } - - @Test - public void cacheAll() throws Exception { - // Set the config value to empty string, which should result in all catalogs being cached. 
- Configuration newConf = new Configuration(conf); - MetastoreConf.setVar(newConf, MetastoreConf.ConfVars.CATALOGS_TO_CACHE, ""); - cachedStore = new CachedStore(); - cachedStore.setConf(newConf); - CachedStore.stopCacheUpdateService(1); - objectStore.setConf(newConf); // have to override it with the new conf since this is where - // prewarm gets the conf object - cachedStore.resetCatalogCache(); - - CachedStore.prewarm(objectStore); - - // All the catalogs should be cached - List cachedCatalogs = cachedStore.getCatalogs(); - Assert.assertEquals(3, cachedCatalogs.size()); - cachedCatalogs.sort(Comparator.naturalOrder()); - Assert.assertEquals(CAT1_NAME, cachedCatalogs.get(0)); - Assert.assertEquals(CAT2_NAME, cachedCatalogs.get(1)); - Assert.assertEquals(Warehouse.DEFAULT_CATALOG_NAME, cachedCatalogs.get(2)); - } - - @Test - public void cacheSome() throws Exception { - // Set the config value to 2 catalogs other than hive - Configuration newConf = new Configuration(conf); - MetastoreConf.setVar(newConf, MetastoreConf.ConfVars.CATALOGS_TO_CACHE, CAT1_NAME + "," + CAT2_NAME); - cachedStore = new CachedStore(); - cachedStore.setConf(newConf); - CachedStore.stopCacheUpdateService(1); - objectStore.setConf(newConf); // have to override it with the new conf since this is where - // prewarm gets the conf object - cachedStore.resetCatalogCache(); - - CachedStore.prewarm(objectStore); - - // All the catalogs should be cached - List cachedCatalogs = cachedStore.getCatalogs(); - Assert.assertEquals(2, cachedCatalogs.size()); - cachedCatalogs.sort(Comparator.naturalOrder()); - Assert.assertEquals(CAT1_NAME, cachedCatalogs.get(0)); - Assert.assertEquals(CAT2_NAME, cachedCatalogs.get(1)); - } -} diff --git a/standalone-metastore/metastore-tools/metastore-benchmarks/src/main/java/org/apache/hadoop/hive/metastore/tools/BenchmarkTool.java b/standalone-metastore/metastore-tools/metastore-benchmarks/src/main/java/org/apache/hadoop/hive/metastore/tools/BenchmarkTool.java index 041cd76234..a89e49c503 100644 --- a/standalone-metastore/metastore-tools/metastore-benchmarks/src/main/java/org/apache/hadoop/hive/metastore/tools/BenchmarkTool.java +++ b/standalone-metastore/metastore-tools/metastore-benchmarks/src/main/java/org/apache/hadoop/hive/metastore/tools/BenchmarkTool.java @@ -214,7 +214,7 @@ public void run() { client.createDatabase(dbName); } - if (client.tableExists(dbName, tableName)) { + if (client.tableExists(dbName, tableName, null)) { client.dropTable(dbName, tableName); } diff --git a/standalone-metastore/metastore-tools/metastore-benchmarks/src/main/java/org/apache/hadoop/hive/metastore/tools/HMSBenchmarks.java b/standalone-metastore/metastore-tools/metastore-benchmarks/src/main/java/org/apache/hadoop/hive/metastore/tools/HMSBenchmarks.java index f53f2ef43b..6c636bb29b 100644 --- a/standalone-metastore/metastore-tools/metastore-benchmarks/src/main/java/org/apache/hadoop/hive/metastore/tools/HMSBenchmarks.java +++ b/standalone-metastore/metastore-tools/metastore-benchmarks/src/main/java/org/apache/hadoop/hive/metastore/tools/HMSBenchmarks.java @@ -69,7 +69,7 @@ static DescriptiveStatistics benchmarkListAllTables(@NotNull MicroBenchmark benc String dbName = data.dbName; return benchmark.measure(() -> - throwingSupplierWrapper(() -> client.getAllTables(dbName, null))); + throwingSupplierWrapper(() -> client.getAllTables(dbName, null, null))); } static DescriptiveStatistics benchmarkTableCreate(@NotNull MicroBenchmark bench, @@ -131,7 +131,7 @@ static DescriptiveStatistics benchmarkGetTable(@NotNull 
MicroBenchmark bench, createPartitionedTable(client, dbName, tableName); try { return bench.measure(() -> - throwingSupplierWrapper(() -> client.getTable(dbName, tableName))); + throwingSupplierWrapper(() -> client.getTable(dbName, tableName, null))); } finally { throwingSupplierWrapper(() -> client.dropTable(dbName, tableName)); } @@ -148,7 +148,7 @@ static DescriptiveStatistics benchmarkListTables(@NotNull MicroBenchmark bench, try { createManyTables(client, count, dbName, format); return bench.measure(() -> - throwingSupplierWrapper(() -> client.getAllTables(dbName, null))); + throwingSupplierWrapper(() -> client.getAllTables(dbName, null, null))); } finally { dropManyTables(client, count, dbName, format); } @@ -163,7 +163,7 @@ static DescriptiveStatistics benchmarkCreatePartition(@NotNull MicroBenchmark be createPartitionedTable(client, dbName, tableName); final List values = Collections.singletonList("d1"); try { - Table t = client.getTable(dbName, tableName); + Table t = client.getTable(dbName, tableName, null); Partition partition = new Util.PartitionBuilder(t) .withValues(values) .build(); @@ -191,7 +191,7 @@ static DescriptiveStatistics benchmarkListPartition(@NotNull MicroBenchmark benc Collections.singletonList("d"), 1); return bench.measure(() -> - throwingSupplierWrapper(() -> client.listPartitions(dbName, tableName))); + throwingSupplierWrapper(() -> client.listPartitions(dbName, tableName, null))); } catch (TException e) { e.printStackTrace(); return new DescriptiveStatistics(); @@ -213,7 +213,7 @@ static DescriptiveStatistics benchmarkListManyPartitions(@NotNull MicroBenchmark LOG.debug("Created {} partitions", howMany); LOG.debug("started benchmark... "); return bench.measure(() -> - throwingSupplierWrapper(() -> client.listPartitions(dbName, tableName))); + throwingSupplierWrapper(() -> client.listPartitions(dbName, tableName, null))); } catch (TException e) { e.printStackTrace(); return new DescriptiveStatistics(); @@ -235,7 +235,7 @@ static DescriptiveStatistics benchmarkGetPartitions(@NotNull MicroBenchmark benc LOG.debug("Created {} partitions", howMany); LOG.debug("started benchmark... 
"); return bench.measure(() -> - throwingSupplierWrapper(() -> client.getPartitions(dbName, tableName))); + throwingSupplierWrapper(() -> client.getPartitions(dbName, tableName, null))); } catch (TException e) { e.printStackTrace(); return new DescriptiveStatistics(); @@ -253,7 +253,7 @@ static DescriptiveStatistics benchmarkDropPartition(@NotNull MicroBenchmark benc createPartitionedTable(client, dbName, tableName); final List values = Collections.singletonList("d1"); try { - Table t = client.getTable(dbName, tableName); + Table t = client.getTable(dbName, tableName, null); Partition partition = new Util.PartitionBuilder(t) .withValues(values) .build(); @@ -324,7 +324,7 @@ static DescriptiveStatistics benchmarkGetPartitionNames(@NotNull MicroBenchmark addManyPartitionsNoException(client, dbName, tableName, null, Collections.singletonList("d"), count); return bench.measure( - () -> throwingSupplierWrapper(() -> client.getPartitionNames(dbName, tableName)) + () -> throwingSupplierWrapper(() -> client.getPartitionNames(dbName, tableName, null)) ); } finally { throwingSupplierWrapper(() -> client.dropTable(dbName, tableName)); @@ -343,11 +343,11 @@ static DescriptiveStatistics benchmarkGetPartitionsByName(@NotNull MicroBenchmar addManyPartitionsNoException(client, dbName, tableName, null, Collections.singletonList("d"), count); List partitionNames = throwingSupplierWrapper(() -> - client.getPartitionNames(dbName, tableName)); + client.getPartitionNames(dbName, tableName, null)); return bench.measure( () -> throwingSupplierWrapper(() -> - client.getPartitionsByNames(dbName, tableName, partitionNames)) + client.getPartitionsByNames(dbName, tableName, partitionNames, null)) ); } finally { throwingSupplierWrapper(() -> client.dropTable(dbName, tableName)); @@ -365,7 +365,7 @@ static DescriptiveStatistics benchmarkRenameTable(@NotNull MicroBenchmark bench, try { addManyPartitionsNoException(client, dbName, tableName, null, Collections.singletonList("d"), count); - Table oldTable = client.getTable(dbName, tableName); + Table oldTable = client.getTable(dbName, tableName, null); oldTable.getSd().setLocation(""); Table newTable = oldTable.deepCopy(); newTable.setTableName(tableName + "_renamed"); diff --git a/standalone-metastore/metastore-tools/tools-common/src/main/java/org/apache/hadoop/hive/metastore/tools/HMSClient.java b/standalone-metastore/metastore-tools/tools-common/src/main/java/org/apache/hadoop/hive/metastore/tools/HMSClient.java index 7cc1e42a8b..46b69884e8 100644 --- a/standalone-metastore/metastore-tools/tools-common/src/main/java/org/apache/hadoop/hive/metastore/tools/HMSClient.java +++ b/standalone-metastore/metastore-tools/tools-common/src/main/java/org/apache/hadoop/hive/metastore/tools/HMSClient.java @@ -153,8 +153,8 @@ boolean dbExists(@NotNull String dbName) throws TException { return getAllDatabases(dbName).contains(dbName); } - boolean tableExists(@NotNull String dbName, @NotNull String tableName) throws TException { - return getAllTables(dbName, tableName).contains(tableName); + boolean tableExists(@NotNull String dbName, @NotNull String tableName, @Nullable String validWriteIdList) throws TException { + return getAllTables(dbName, tableName, validWriteIdList).contains(tableName); } Database getDatabase(@NotNull String dbName) throws TException { @@ -178,7 +178,7 @@ Database getDatabase(@NotNull String dbName) throws TException { .collect(Collectors.toSet()); } - Set getAllTables(@NotNull String dbName, @Nullable String filter) throws TException { + Set getAllTables(@NotNull 
String dbName, @Nullable String filter, @Nullable String validWriteIdList) throws TException { if (filter == null || filter.isEmpty()) { return new HashSet<>(client.get_all_tables(dbName)); } @@ -236,8 +236,8 @@ boolean dropTable(@NotNull String dbName, @NotNull String tableName) throws TExc return true; } - Table getTable(@NotNull String dbName, @NotNull String tableName) throws TException { - return client.get_table(dbName, tableName); + Table getTable(@NotNull String dbName, @NotNull String tableName, @Nullable String validWriteIdList) throws TException { + return client.get_table(dbName, tableName, validWriteIdList); } Partition createPartition(@NotNull Table table, @NotNull List values) throws TException { @@ -254,8 +254,8 @@ void addPartitions(List partitions) throws TException { List listPartitions(@NotNull String dbName, - @NotNull String tableName) throws TException { - return client.get_partitions(dbName, tableName, (short) -1); + @NotNull String tableName, @Nullable String validWriteIdList) throws TException { + return client.get_partitions(dbName, tableName, (short) -1, validWriteIdList); } Long getCurrentNotificationId() throws TException { @@ -263,8 +263,8 @@ Long getCurrentNotificationId() throws TException { } List getPartitionNames(@NotNull String dbName, - @NotNull String tableName) throws TException { - return client.get_partition_names(dbName, tableName, (short) -1); + @NotNull String tableName, @Nullable String validWriteIdList) throws TException { + return client.get_partition_names(dbName, tableName, (short) -1, validWriteIdList); } public boolean dropPartition(@NotNull String dbName, @NotNull String tableName, @@ -273,14 +273,14 @@ public boolean dropPartition(@NotNull String dbName, @NotNull String tableName, return client.drop_partition(dbName, tableName, arguments, true); } - List getPartitions(@NotNull String dbName, @NotNull String tableName) throws TException { - return client.get_partitions(dbName, tableName, (short) -1); + List getPartitions(@NotNull String dbName, @NotNull String tableName, @Nullable String validWriteIdList) throws TException { + return client.get_partitions(dbName, tableName, (short) -1, validWriteIdList); } DropPartitionsResult dropPartitions(@NotNull String dbName, @NotNull String tableName, @Nullable List partNames) throws TException { if (partNames == null) { - return dropPartitions(dbName, tableName, getPartitionNames(dbName, tableName)); + return dropPartitions(dbName, tableName, getPartitionNames(dbName, tableName, null)); } if (partNames.isEmpty()) { return null; @@ -290,12 +290,12 @@ DropPartitionsResult dropPartitions(@NotNull String dbName, @NotNull String tabl } List getPartitionsByNames(@NotNull String dbName, @NotNull String tableName, - @Nullable List names) throws TException { + @Nullable List names, @Nullable String validWriteIdList) throws TException { if (names == null) { return client.get_partitions_by_names(dbName, tableName, - getPartitionNames(dbName, tableName)); + getPartitionNames(dbName, tableName, validWriteIdList), validWriteIdList); } - return client.get_partitions_by_names(dbName, tableName, names); + return client.get_partitions_by_names(dbName, tableName, names, validWriteIdList); } boolean alterTable(@NotNull String dbName, @NotNull String tableName, @NotNull Table newTable) diff --git a/standalone-metastore/metastore-tools/tools-common/src/main/java/org/apache/hadoop/hive/metastore/tools/Util.java 
b/standalone-metastore/metastore-tools/tools-common/src/main/java/org/apache/hadoop/hive/metastore/tools/Util.java index 101d6759c5..55ff673253 100644 --- a/standalone-metastore/metastore-tools/tools-common/src/main/java/org/apache/hadoop/hive/metastore/tools/Util.java +++ b/standalone-metastore/metastore-tools/tools-common/src/main/java/org/apache/hadoop/hive/metastore/tools/Util.java @@ -508,7 +508,7 @@ static Object addManyPartitions(@NotNull HMSClient client, @Nullable Map parameters, @NotNull List arguments, int npartitions) throws TException { - Table table = client.getTable(dbName, tableName); + Table table = client.getTable(dbName, tableName, null); client.addPartitions(createManyPartitions(table, parameters, arguments, npartitions)); return null; } diff --git a/standalone-metastore/metastore-tools/tools-common/src/test/java/org/apache/hadoop/hive/metastore/tools/HMSClientTest.java b/standalone-metastore/metastore-tools/tools-common/src/test/java/org/apache/hadoop/hive/metastore/tools/HMSClientTest.java index ab4b62543f..7bd8d56846 100644 --- a/standalone-metastore/metastore-tools/tools-common/src/test/java/org/apache/hadoop/hive/metastore/tools/HMSClientTest.java +++ b/standalone-metastore/metastore-tools/tools-common/src/test/java/org/apache/hadoop/hive/metastore/tools/HMSClientTest.java @@ -194,7 +194,7 @@ public void dropNonExistingDb() { public void getAllTables() throws TException { try { client.createTable(TEST_TABLE); - assertThat(client.getAllTables(TEST_DATABASE, null), Matchers.contains(TEST_TABLE_NAME)); + assertThat(client.getAllTables(TEST_DATABASE, null, null), Matchers.contains(TEST_TABLE_NAME)); } catch (Exception e) { System.out.println(e.getMessage()); e.printStackTrace(); diff --git a/storage-api/src/java/org/apache/hadoop/hive/common/ValidReaderWriteIdList.java b/storage-api/src/java/org/apache/hadoop/hive/common/ValidReaderWriteIdList.java index bc8ac0d61b..81a24341cf 100644 --- a/storage-api/src/java/org/apache/hadoop/hive/common/ValidReaderWriteIdList.java +++ b/storage-api/src/java/org/apache/hadoop/hive/common/ValidReaderWriteIdList.java @@ -260,5 +260,29 @@ public RangeResponse isWriteIdRangeAborted(long minWriteId, long maxWriteId) { public ValidReaderWriteIdList updateHighWatermark(long value) { return new ValidReaderWriteIdList(tableName, exceptions, abortedBits, value, minOpenWriteId); } + + public void commitWriteId(long writeId) { + if (writeId > highWatermark) { + // Raising the high watermark: ids strictly between the old watermark and the newly + // committed id are still open, so record them as exceptions before moving the mark. + long[] newExceptions = new long[exceptions.length + (int) (writeId - highWatermark - 1)]; + System.arraycopy(exceptions, 0, newExceptions, 0, exceptions.length); + for (long i = highWatermark + 1; i < writeId; i++) { + newExceptions[exceptions.length + (int) (i - highWatermark - 1)] = i; + } + exceptions = newExceptions; + highWatermark = writeId; + } else { + // Committing an id at or below the watermark just removes it from the exception list. + // abortedBits is not re-indexed here, so only open (non-aborted) ids should be committed. + int pos = Arrays.binarySearch(exceptions, writeId); + if (pos >= 0) { + long[] newExceptions = new long[exceptions.length - 1]; + System.arraycopy(exceptions, 0, newExceptions, 0, pos); + System.arraycopy(exceptions, pos + 1, newExceptions, pos, exceptions.length - pos - 1); + exceptions = newExceptions; + } + } + } } diff --git a/storage-api/src/java/org/apache/hadoop/hive/common/ValidWriteIdList.java b/storage-api/src/java/org/apache/hadoop/hive/common/ValidWriteIdList.java index b3d64021e6..dcfc0e7595 100644 --- a/storage-api/src/java/org/apache/hadoop/hive/common/ValidWriteIdList.java +++ b/storage-api/src/java/org/apache/hadoop/hive/common/ValidWriteIdList.java @@ -118,4 +118,10 @@ * @return smallest Open write Id in this set, {@code null} if 
there is none. */ Long getMinOpenWriteId(); + + /** + * Marks the given write id as committed in this write id list. + * @param writeId the write id to mark as committed + */ + void commitWriteId(long writeId); }
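
Reviewer note: a minimal sketch of the semantics the new commitWriteId is meant to have, assuming the corrected arithmetic above; the table name and write id values are illustrative only. The four-argument ValidReaderWriteIdList constructor and isWriteIdValid are existing storage-api APIs (the same constructor the fromCache test helper uses).

import java.util.BitSet;

import org.apache.hadoop.hive.common.ValidReaderWriteIdList;

public class CommitWriteIdExample {
  public static void main(String[] args) {
    // High watermark 5, with write ids 3 and 4 still open (exceptions, none aborted).
    ValidReaderWriteIdList ids =
        new ValidReaderWriteIdList("db1.tbl1", new long[] {3, 4}, new BitSet(), 5);

    ids.commitWriteId(3);                        // removes 3 from the exception list
    System.out.println(ids.isWriteIdValid(3));   // true: 3 is now committed

    ids.commitWriteId(8);                        // raises the watermark; 6 and 7 stay open
    System.out.println(ids.isWriteIdValid(8));   // true: 8 is committed
    System.out.println(ids.isWriteIdValid(6));   // false: 6 became an exception
  }
}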
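The fromCache helper introduced in the test changes above stands in for the removed updateCache: instead of waiting for a cache refresh cycle, it fabricates a "read everything" write id list (no exceptions, high watermark Long.MAX_VALUE) whose toString() serialization is what the new @Nullable validWriteIdList string parameters accept. A hypothetical caller of the extended HMSClient.getTable overload could build the string the same way; readAllWriteIds and the db/table names below are made up for illustration.

import java.util.BitSet;

import org.apache.hadoop.hive.common.TableName;
import org.apache.hadoop.hive.common.ValidReaderWriteIdList;

public class WriteIdPlumbingExample {
  // Mirrors the fromCache test helper: every write id up to Long.MAX_VALUE is readable.
  static String readAllWriteIds(String dbName, String tableName) {
    String fullTableName = TableName.getDbTable(dbName, tableName);
    return new ValidReaderWriteIdList(fullTableName, new long[0], new BitSet(), Long.MAX_VALUE)
        .toString();
  }

  public static void main(String[] args) {
    // The serialized form below is what would be passed as the validWriteIdList argument,
    // e.g. client.getTable("db1", "tbl1", readAllWriteIds("db1", "tbl1")).
    System.out.println(readAllWriteIds("db1", "tbl1"));
  }
}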