diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java index 4a76010..e77d8ce 100644 --- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java +++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java @@ -35,6 +35,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.common.StatsSetupConst; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.io.HdfsUtils; import org.apache.hadoop.hive.metastore.IMetaStoreClient; @@ -45,6 +46,7 @@ import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.shims.ShimLoader; @@ -450,7 +452,7 @@ private Path constructPartialPartPath(Path partialPath, String partKey, Map newColumns = HCatUtil.validatePartitionSchema(table, partitionSchema); @@ -461,7 +463,7 @@ private void updateTableSchema(IMetaStoreClient client, Table table, //Update table schema to add the newly added columns table.getTTable().getSd().setCols(tableColumns); - client.alter_table(table.getDbName(), table.getTableName(), table.getTTable()); + client.alter_table(table.getDbName(), table.getTableName(), table.getTTable(), writeIds.toString()); } } @@ -753,6 +755,10 @@ private void registerPartitions(JobContext context) throws IOException{ try { HiveConf hiveConf = HCatUtil.getHiveConf(conf); client = HCatUtil.getHiveMetastoreClient(hiveConf); + + // Advance writeId for ddl + ValidWriteIdList writeIds = AcidUtils.advanceWriteId(context.getConfiguration(), table); + if (table.getPartitionKeys().size() == 0) { // Move data from temp directory the actual table directory // No metastore operation required. @@ -764,7 +770,7 @@ private void registerPartitions(JobContext context) throws IOException{ if (table.getParameters() != null && table.getParameters().containsKey(StatsSetupConst.COLUMN_STATS_ACCURATE)) { table.getParameters().remove(StatsSetupConst.COLUMN_STATS_ACCURATE); - client.alter_table(table.getDbName(), table.getTableName(), table.getTTable()); + client.alter_table(table.getDbName(), table.getTableName(), table.getTTable(), writeIds.toString()); } return; } @@ -829,9 +835,9 @@ private void registerPartitions(JobContext context) throws IOException{ moveCustomLocationTaskOutputs(fs, table, hiveConf); } try { - updateTableSchema(client, table, jobInfo.getOutputSchema()); + updateTableSchema(client, table, jobInfo.getOutputSchema(), writeIds); LOG.info("HAR is being used. 
The table {} has new partitions {}.", table.getTableName(), ptnInfos); - client.add_partitions(partitionsToAdd); + client.add_partitions(partitionsToAdd, writeIds.toString()); partitionsAdded = partitionsToAdd; } catch (Exception e){ // There was an error adding partitions : rollback fs copy and rethrow @@ -847,7 +853,7 @@ private void registerPartitions(JobContext context) throws IOException{ }else{ // no harProcessor, regular operation - updateTableSchema(client, table, jobInfo.getOutputSchema()); + updateTableSchema(client, table, jobInfo.getOutputSchema(), writeIds); LOG.info("HAR not is not being used. The table {} has new partitions {}.", table.getTableName(), ptnInfos); if (partitionsToAdd.size() > 0){ if (!dynamicPartitioningUsed ) { @@ -902,7 +908,7 @@ private void registerPartitions(JobContext context) throws IOException{ publishRequired = true; } if (publishRequired){ - client.add_partitions(partitionsToAdd); + client.add_partitions(partitionsToAdd, writeIds.toString()); partitionsAdded = partitionsToAdd; } @@ -918,7 +924,7 @@ private void registerPartitions(JobContext context) throws IOException{ } else { moveCustomLocationTaskOutputs(fs, table, hiveConf); } - client.add_partitions(partitionsToAdd); + client.add_partitions(partitionsToAdd, writeIds.toString()); partitionsAdded = partitionsToAdd; } } diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java index 3cf172b..b4be219 100644 --- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java +++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java @@ -114,7 +114,7 @@ public void testCustomPerms() throws Exception { // Lets first test for default permissions, this is the case when user specified nothing. 
Table tbl = getTable(dbName, tblName, typeName); - msc.createTable(tbl); + msc.createTable(tbl, null); Database db = Hive.get(hcatConf).getDatabase(dbName); Path dfsPath = clientWH.getDefaultTablePath(db, tblName); cleanupTbl(dbName, tblName, typeName); diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/common/TestHiveClientCache.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/common/TestHiveClientCache.java index fe1d8af..e340699 100644 --- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/common/TestHiveClientCache.java +++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/common/TestHiveClientCache.java @@ -208,7 +208,7 @@ public void testHMSCBreakability() throws IOException, MetaException, LoginExcep // Break the client try { - client.createTable(tbl); + client.createTable(tbl, null); fail("Exception was expected while creating table with long name"); } catch (Exception e) { } diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/HCatMapReduceTest.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/HCatMapReduceTest.java index 983a66a..ebca35a 100644 --- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/HCatMapReduceTest.java +++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/HCatMapReduceTest.java @@ -220,7 +220,7 @@ public void createTable() throws Exception { tableParams.put(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, "false"); tbl.setParameters(tableParams); - client.createTable(tbl); + client.createTable(tbl, null); } /* diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMultiOutputFormat.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMultiOutputFormat.java index 8a8a326..d706ab8 100644 --- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMultiOutputFormat.java +++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMultiOutputFormat.java @@ -243,7 +243,7 @@ private static void createTable(String tableName, String tablePerm) throws Excep org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe.class.getName()); tbl.setPartitionKeys(ColumnHolder.partitionCols); - hmsc.createTable(tbl); + hmsc.createTable(tbl, null); Path path = new Path(warehousedir, tableName); FileSystem fs = path.getFileSystem(hiveConf); fs.setPermission(path, new FsPermission(tablePerm)); diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatOutputFormat.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatOutputFormat.java index 4ac01df..4aafedf 100644 --- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatOutputFormat.java +++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatOutputFormat.java @@ -130,7 +130,7 @@ private void initTable() throws Exception { tbl.setParameters(tableParams); - client.createTable(tbl); + client.createTable(tbl, null); Path tblPath = new Path(client.getTable(dbName, tblName).getSd().getLocation()); assertTrue(tblPath.getFileSystem(hiveConf).mkdirs(new Path(tblPath, "colname=p1"))); diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java index 22a0d3f..28edff4 100644 --- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java +++ 
b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java @@ -261,7 +261,7 @@ private void createTable(String dbName, String tableName) throws Exception { Map tableParams = new HashMap(); tbl.setParameters(tableParams); - msc.createTable(tbl); + msc.createTable(tbl, null); } protected List getPartitionKeys() { diff --git a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java index e611394..68212ff 100644 --- a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java +++ b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java @@ -206,7 +206,7 @@ public void onCreateTable(CreateTableEvent tableEvent) throws MetaException { FileIterator fileIter = MetaStoreUtils.isExternalTable(t) ? null : new FileIterator(t.getSd().getLocation()); CreateTableMessage msg = - MessageBuilder.getInstance().buildCreateTableMessage(t, fileIter); + MessageBuilder.getInstance().buildCreateTableMessage(t, fileIter, tableEvent.getValidWriteIdList()); NotificationEvent event = new NotificationEvent(0, now(), EventType.CREATE_TABLE.toString(), msgEncoder.getSerializer().serialize(msg)); @@ -243,7 +243,7 @@ public void onAlterTable(AlterTableEvent tableEvent) throws MetaException { Table after = tableEvent.getNewTable(); AlterTableMessage msg = MessageBuilder.getInstance() .buildAlterTableMessage(before, after, tableEvent.getIsTruncateOp(), - tableEvent.getWriteId()); + tableEvent.getWriteId(), tableEvent.getValidWriteIdList()); NotificationEvent event = new NotificationEvent(0, now(), EventType.ALTER_TABLE.toString(), msgEncoder.getSerializer().serialize(msg) @@ -361,7 +361,7 @@ public void onAddPartition(AddPartitionEvent partitionEvent) throws MetaExceptio PartitionFilesIterator fileIter = MetaStoreUtils.isExternalTable(t) ? 
null : new PartitionFilesIterator(partitionEvent.getPartitionIterator(), t); EventMessage msg = MessageBuilder.getInstance() - .buildAddPartitionMessage(t, partitionEvent.getPartitionIterator(), fileIter); + .buildAddPartitionMessage(t, partitionEvent.getPartitionIterator(), fileIter, partitionEvent.getValidWriteIdList()); MessageSerializer serializer = msgEncoder.getSerializer(); NotificationEvent event = new NotificationEvent(0, now(), @@ -401,7 +401,7 @@ public void onAlterPartition(AlterPartitionEvent partitionEvent) throws MetaExce AlterPartitionMessage msg = MessageBuilder.getInstance() .buildAlterPartitionMessage(partitionEvent.getTable(), before, after, partitionEvent.getIsTruncateOp(), - partitionEvent.getWriteId()); + partitionEvent.getWriteId(), partitionEvent.getValidWriteIdList()); NotificationEvent event = new NotificationEvent(0, now(), EventType.ALTER_PARTITION.toString(), msgEncoder.getSerializer().serialize(msg)); @@ -571,7 +571,7 @@ public void onOpenTxn(OpenTxnEvent openTxnEvent, Connection dbConn, SQLGenerator public void onCommitTxn(CommitTxnEvent commitTxnEvent, Connection dbConn, SQLGenerator sqlGenerator) throws MetaException { CommitTxnMessage msg = - MessageBuilder.getInstance().buildCommitTxnMessage(commitTxnEvent.getTxnId()); + MessageBuilder.getInstance().buildCommitTxnMessage(commitTxnEvent.getTxnId(), commitTxnEvent.getTxnWriteIds()); NotificationEvent event = new NotificationEvent(0, now(), EventType.COMMIT_TXN.toString(), @@ -759,7 +759,8 @@ public void onUpdateTableColumnStat(UpdateTableColumnStatEvent updateTableColumn .buildUpdateTableColumnStatMessage(updateTableColumnStatEvent.getColStats(), updateTableColumnStatEvent.getTableObj(), updateTableColumnStatEvent.getTableParameters(), - updateTableColumnStatEvent.getWriteId()); + updateTableColumnStatEvent.getWriteId(), + updateTableColumnStatEvent.getValidWriteIdList()); NotificationEvent event = new NotificationEvent(0, now(), EventType.UPDATE_TABLE_COLUMN_STAT.toString(), msgEncoder.getSerializer().serialize(msg)); ColumnStatisticsDesc statDesc = updateTableColumnStatEvent.getColStats().getStatsDesc(); @@ -789,7 +790,8 @@ public void onUpdatePartitionColumnStat(UpdatePartitionColumnStatEvent updatePar updatePartColStatEvent.getPartVals(), updatePartColStatEvent.getPartParameters(), updatePartColStatEvent.getTableObj(), - updatePartColStatEvent.getWriteId()); + updatePartColStatEvent.getWriteId(), + updatePartColStatEvent.getValidWriteIdList()); NotificationEvent event = new NotificationEvent(0, now(), EventType.UPDATE_PARTITION_COLUMN_STAT.toString(), msgEncoder.getSerializer().serialize(msg)); ColumnStatisticsDesc statDesc = updatePartColStatEvent.getPartColStats().getStatsDesc(); diff --git a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java index efafe0c..c1b958f 100644 --- a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java +++ b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java @@ -214,7 +214,7 @@ public void onCreateTable(CreateTableEvent tableEvent) throws MetaException { HCatConstants.HCAT_MSGBUS_TOPIC_NAME, getTopicPrefix(conf) + "." + newTbl.getDbName().toLowerCase() + "." 
+ newTbl.getTableName().toLowerCase()); - handler.alter_table(newTbl.getDbName(), newTbl.getTableName(), newTbl); + handler.alter_table(newTbl.getDbName(), newTbl.getTableName(), newTbl, tableEvent.getValidWriteIdList()); } catch (TException e) { MetaException me = new MetaException(e.toString()); me.initCause(e); diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java index 66a1737..cd1a6af 100644 --- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java +++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java @@ -473,7 +473,8 @@ private static void createPartitionIfNotExists(HiveEndPoint ep, addPartitionDesc.addPartition(partSpec, partLocation); Partition partition = Hive.convertAddSpecToMetaPartition(tableObject, addPartitionDesc.getPartition(0), conf); - msClient.add_partition(partition); + // TODO =====to be reworked in HIVE-21637====== + msClient.add_partition(partition, null); } catch (AlreadyExistsException e) { //ignore this - multiple clients may be trying to create the same partition @@ -898,7 +899,8 @@ public Void run() throws StreamingException { private void commitImpl() throws TransactionError, StreamingException { try { recordWriter.flush(); - msClient.commitTxn(txnToWriteIds.get(currentTxnIndex).getTxnId()); + // TODO =====to be reworked in HIVE-21637====== + msClient.commitTxn(txnToWriteIds.get(currentTxnIndex).getTxnId(), null); state = TxnState.COMMITTED; txnStatus[currentTxnIndex] = TxnState.COMMITTED; } catch (NoSuchTxnException e) { diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/Transaction.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/Transaction.java index e1c6735..07d9b85 100644 --- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/Transaction.java +++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/Transaction.java @@ -83,7 +83,8 @@ public void commit() throws TransactionException { throw new TransactionException("Unable to release lock: " + lock + " for transaction: " + transactionId, e); } try { - metaStoreClient.commitTxn(transactionId); + // TODO =====to be reworked in HIVE-21637====== + metaStoreClient.commitTxn(transactionId, null); state = TxnState.COMMITTED; } catch (NoSuchTxnException e) { throw new TransactionException("Invalid transaction id: " + transactionId, e); diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/MetaStorePartitionHelper.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/MetaStorePartitionHelper.java index fb88f2d..e99f977 100644 --- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/MetaStorePartitionHelper.java +++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/MetaStorePartitionHelper.java @@ -98,7 +98,8 @@ public void createPartitionIfNotExists(List newPartitionValues) throws W partition.setSd(partitionSd); partition.setValues(newPartitionValues); - metaStoreClient.add_partition(partition); + // TODO =====to be reworked in HIVE-21637====== + metaStoreClient.add_partition(partition, null); } catch (AlreadyExistsException e) { LOG.debug("Partition already exisits: {}.{}:{}", databaseName, tableName, newPartitionValues); } catch (NoSuchObjectException e) { diff --git 
a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/StreamingTestUtils.java b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/StreamingTestUtils.java index afda7d5..7a51552 100644 --- a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/StreamingTestUtils.java +++ b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/StreamingTestUtils.java @@ -264,7 +264,7 @@ private Table internalCreate(IMetaStoreClient metaStoreClient) throws Exception table.setPartitionKeys(partitionFields); } if (metaStoreClient != null) { - metaStoreClient.createTable(table); + metaStoreClient.createTable(table, null); } for (List partitionValues : partitions) { @@ -278,7 +278,7 @@ private Table internalCreate(IMetaStoreClient metaStoreClient) throws Exception partition.setValues(partitionValues); if (metaStoreClient != null) { - metaStoreClient.add_partition(partition); + metaStoreClient.add_partition(partition, null); } } return table; diff --git a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/client/TestTransaction.java b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/client/TestTransaction.java index c47cf4d..51f94ea 100644 --- a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/client/TestTransaction.java +++ b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/client/TestTransaction.java @@ -84,7 +84,7 @@ public void testCommit() throws Exception { transaction.commit(); verify(mockLock).release(); - verify(mockMetaStoreClient).commitTxn(TRANSACTION_ID); + verify(mockMetaStoreClient).commitTxn(TRANSACTION_ID, null); assertThat(transaction.getState(), is(TransactionBatch.TxnState.COMMITTED)); } diff --git a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/worker/TestMetaStorePartitionHelper.java b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/worker/TestMetaStorePartitionHelper.java index 335ecd2..f52c00d 100644 --- a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/worker/TestMetaStorePartitionHelper.java +++ b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/worker/TestMetaStorePartitionHelper.java @@ -114,7 +114,7 @@ public void createOnUnpartitionTableDoesNothing() throws Exception { public void createOnPartitionTable() throws Exception { helper.createPartitionIfNotExists(PARTITIONED_VALUES); - verify(mockClient).add_partition(partitionCaptor.capture()); + verify(mockClient).add_partition(partitionCaptor.capture(), null); Partition actual = partitionCaptor.getValue(); assertThat(actual.getSd().getLocation(), is(PARTITION_LOCATION)); assertThat(actual.getValues(), is(PARTITIONED_VALUES)); diff --git a/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java b/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java index a06191d..f9cb1ce 100644 --- a/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java +++ b/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java @@ -209,7 +209,8 @@ public HCatTable getTable(String dbName, String tableName) public void createTable(HCatCreateTableDesc createTableDesc) throws HCatException { try { - hmsClient.createTable(createTableDesc.getHCatTable().toHiveTable()); + // TODO =====to be reworked in HIVE-21637====== + 
hmsClient.createTable(createTableDesc.getHCatTable().toHiveTable(), null);
     } catch (AlreadyExistsException e) {
       if (!createTableDesc.getIfNotExists()) {
         throw new HCatException(
@@ -238,7 +239,8 @@ public void updateTableSchema(String dbName, String tableName, List partInfoList)
         ptnList.add(hCatPartition.toHivePartition());
       }
-      numPartitions = hmsClient.add_partitions(ptnList);
+      // TODO =====to be reworked in HIVE-21637======
+      numPartitions = hmsClient.add_partitions(ptnList, null);
     } catch (InvalidObjectException e) {
       throw new HCatException(
           "InvalidObjectException while adding partition.", e);
@@ -953,7 +960,8 @@ public int addPartitions(List partInfoList)
   public int addPartitionSpec(HCatPartitionSpec partitionSpec) throws HCatException {
     try {
-      return hmsClient.add_partitions_pspec(partitionSpec.toPartitionSpecProxy());
+      // TODO =====to be reworked in HIVE-21637======
+      return hmsClient.add_partitions_pspec(partitionSpec.toPartitionSpecProxy(), null);
     } catch (InvalidObjectException e) {
       throw new HCatException(
           "InvalidObjectException while adding partition.", e);
diff --git a/ql/src/java/org/apache/hadoop/hive/metastore/SynchronizedMetaStoreClient.java b/ql/src/java/org/apache/hadoop/hive/metastore/SynchronizedMetaStoreClient.java
index 93e9d48..54bc591 100644
--- a/ql/src/java/org/apache/hadoop/hive/metastore/SynchronizedMetaStoreClient.java
+++ b/ql/src/java/org/apache/hadoop/hive/metastore/SynchronizedMetaStoreClient.java
@@ -56,8 +56,8 @@ public synchronized long openTxn(String user) throws TException {
     return client.openTxn(user);
   }
-  public synchronized void commitTxn(long txnid) throws TException {
-    client.commitTxn(txnid);
+  public synchronized void commitTxn(long txnid, String writeIds) throws TException {
+    client.commitTxn(txnid, writeIds);
   }
   public synchronized void rollbackTxn(long txnid) throws TException {
@@ -76,12 +76,12 @@ public synchronized LockResponse lock(LockRequest request) throws TException {
     return client.lock(request);
   }
-  public synchronized Partition add_partition(Partition partition) throws TException {
-    return client.add_partition(partition);
+  public synchronized Partition add_partition(Partition partition, String validWriteIdList) throws TException {
+    return client.add_partition(partition, validWriteIdList);
   }
-  public synchronized int add_partitions(List partitions) throws TException {
-    return client.add_partitions(partitions);
+  public synchronized int add_partitions(List partitions, String validWriteIdList) throws TException {
+    return client.add_partitions(partitions, validWriteIdList);
   }
   public synchronized void alter_partition(String catName, String dbName, String tblName,
@@ -114,9 +114,9 @@ public synchronized Partition getPartitionWithAuthInfo(String dbName, String tab
     return client.getPartitionWithAuthInfo(dbName, tableName, pvals, userName, groupNames);
   }
-  public synchronized Partition appendPartition(String db_name, String table_name, List part_vals)
+  public synchronized Partition appendPartition(String db_name, String table_name, List part_vals, String validWriteIdList)
       throws InvalidObjectException, AlreadyExistsException, MetaException, TException {
-    return client.appendPartition(db_name, table_name, part_vals);
+    return client.appendPartition(db_name, table_name, part_vals, validWriteIdList);
   }
   public synchronized FireEventResponse fireListenerEvent(FireEventRequest rqst) throws TException {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
index
3a20130..6cee6c9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
@@ -1729,10 +1729,6 @@ public void releaseLocksAndCommitOrRollback(boolean commit, HiveTxnManager txnMa
     } else {
       txnMgr = txnManager;
     }
-    // If we've opened a transaction we need to commit or rollback rather than explicitly
-    // releasing the locks.
-    conf.unset(ValidTxnList.VALID_TXNS_KEY);
-    conf.unset(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY);
     if(!checkConcurrency()) {
       return;
     }
@@ -1759,6 +1755,11 @@ public void releaseLocksAndCommitOrRollback(boolean commit, HiveTxnManager txnMa
       ctx.setHiveLocks(null);
     }
+    // If we've opened a transaction we need to commit or rollback rather than explicitly
+    // releasing the locks.
+    conf.unset(ValidTxnList.VALID_TXNS_KEY);
+    conf.unset(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY);
+
     perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.RELEASE_LOCKS);
   }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableOperation.java
index 2987cab..49011d9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableOperation.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableOperation.java
@@ -21,6 +21,7 @@
 import org.apache.commons.collections.CollectionUtils;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.StatsSetupConst;
+import org.apache.hadoop.hive.common.ValidWriteIdList;
 import org.apache.hadoop.hive.conf.Constants;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.TableType;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DescTableOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DescTableOperation.java
index 1d94ff3..ec37ce1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DescTableOperation.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DescTableOperation.java
@@ -85,7 +85,8 @@ public int execute() throws Exception {
     String tableName = desc.getTableName();
     // describe the table - populate the output stream
-    Table tbl = context.getDb().getTable(tableName, false);
+    String[] names = Utilities.getDbTableName(tableName);
+    Table tbl = context.getDb().getTable(names[0], names[1], true, true, false);
     if (tbl == null) {
       throw new HiveException(ErrorMsg.INVALID_TABLE, tableName);
     }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index 7c9d910..150f5c3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -2771,7 +2771,8 @@ private int remFirstIncPendFlag(Hive hive, ReplRemoveFirstIncLoadPendFlagDesc de
         String incPendPara = parameters != null ? parameters.get(ReplUtils.REPL_FIRST_INC_PENDING_FLAG) : null;
         if (incPendPara != null) {
           parameters.remove(ReplUtils.REPL_FIRST_INC_PENDING_FLAG);
-          hive.getMSC().alter_table(dbNameOrPattern, tableName, tbl);
+          // TODO =====to be reworked in HIVE-21637======
+          hive.getMSC().alter_table(dbNameOrPattern, tableName, tbl, null);
         }
       }
     } else {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
index 24fc0d5..6b5b4be 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
@@ -1960,6 +1960,33 @@ public static TableSnapshot getTableSnapshot(Configuration conf,
         validWriteIdList != null ? validWriteIdList.toString() : null);
   }
+  public static ValidWriteIdList advanceWriteId(Configuration conf, Table tbl) throws LockException {
+    if (!isTransactionalTable(tbl)) {
+      return null;
+    }
+    HiveTxnManager txnMgr = SessionState.get().getTxnMgr();
+    long writeId = txnMgr.getTableWriteId(tbl.getDbName(), tbl.getTableName());
+    List<String> txnTables = new ArrayList<>();
+    String fullTableName = TableName.getDbTable(tbl.getDbName(), tbl.getTableName());
+    txnTables.add(fullTableName);
+    ValidTxnWriteIdList txnWriteIds;
+    if (conf.get(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY) != null) {
+      txnWriteIds = new ValidTxnWriteIdList(conf.get(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY));
+    } else {
+      String txnString;
+      if (conf.get(ValidTxnList.VALID_TXNS_KEY) != null) {
+        txnString = conf.get(ValidTxnList.VALID_TXNS_KEY);
+      } else {
+        ValidTxnList txnIds = txnMgr.getValidTxns();
+        txnString = txnIds.toString();
+      }
+      txnWriteIds = txnMgr.getValidWriteIds(txnTables, txnString);
+    }
+    ValidWriteIdList writeIds = txnWriteIds.getTableValidWriteIdList(fullTableName);
+    writeIds.commitWriteId(writeId);
+    conf.set(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY, txnWriteIds.toString());
+    return writeIds;
+  }
   /**
    * Returns ValidWriteIdList for the table with the given "dbName" and "tableName".
    * This is called when HiveConf has no list for the table.
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
index 800d80a..acc7f2b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
@@ -492,7 +492,8 @@ public void commitTxn() throws LockException {
       // do all new clear in clearLocksAndHB method to make sure that same code is there for replCommitTxn flow.
       clearLocksAndHB();
       LOG.debug("Committing txn " + JavaUtils.txnIdToString(txnId));
-      getMS().commitTxn(txnId);
+      String txnWriteIdString = conf.get(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY);
+      getMS().commitTxn(txnId, txnWriteIdString);
     } catch (NoSuchTxnException e) {
       LOG.error("Metastore could not find " + JavaUtils.txnIdToString(txnId));
       throw new LockException(e, ErrorMsg.TXN_NO_SUCH_TRANSACTION, JavaUtils.txnIdToString(txnId));
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 111cd1d..a06e708 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -680,7 +680,7 @@ public void alterTable(String catName, String dbName, String tblName, Table newT
   }
   public void alterTable(String catName, String dbName, String tblName, Table newTbl, boolean cascade,
-      EnvironmentContext environmentContext, boolean transactional, long replWriteId)
+      EnvironmentContext environmentContext, boolean transactional, long writeId)
       throws HiveException {
     if (catName == null) {
@@ -699,10 +699,13 @@ public void alterTable(String catName, String dbName, String tblName, Table newT
       environmentContext.putToProperties(StatsSetupConst.CASCADE, StatsSetupConst.TRUE);
     }
+    // Advance writeId for ddl
+    AcidUtils.advanceWriteId(conf, newTbl);
+
     // Take a table snapshot and set it to newTbl.
     AcidUtils.TableSnapshot tableSnapshot = null;
     if (transactional) {
-      if (replWriteId > 0) {
+      if (writeId > 0) {
         // We need a valid writeId list for a transactional table modification. During
         // replication we do not have a valid writeId list which was used to modify the table
         // on the source. But we know for sure that the writeId associated with it was valid
@@ -710,8 +713,8 @@ public void alterTable(String catName, String dbName, String tblName, Table newT
         // transaction list with only that writeId.
         ValidWriteIdList writeIds = new ValidReaderWriteIdList(TableName.getDbTable(dbName, tblName), new long[0], new BitSet(),
-            replWriteId);
-        tableSnapshot = new TableSnapshot(replWriteId, writeIds.writeToString());
+            writeId);
+        tableSnapshot = new TableSnapshot(writeId, writeIds.writeToString());
       } else {
         // Make sure we pass in the names, so we can get the correct snapshot for rename table.
         tableSnapshot = AcidUtils.getTableSnapshot(conf, newTbl, dbName, tblName, true);
@@ -1017,6 +1020,10 @@ public void createTable(Table tbl, boolean ifNotExists,
         tTbl.setPrivileges(principalPrivs);
       }
     }
+
+    // Advance writeId for ddl
+    ValidWriteIdList writeIds = AcidUtils.advanceWriteId(conf, tbl);
+
     // Set table snapshot to api.Table to make it persistent. A transactional table being
     // replicated may have a valid write Id copied from the source. Use that instead of
     // crafting one on the replica.
@@ -1030,10 +1037,10 @@ public void createTable(Table tbl, boolean ifNotExists,
       if (primaryKeys == null && foreignKeys == null && uniqueConstraints == null
           && notNullConstraints == null && defaultConstraints == null && checkConstraints == null) {
-        getMSC().createTable(tTbl);
+        getMSC().createTable(tTbl, writeIds.toString());
       } else {
         getMSC().createTableWithConstraints(tTbl, primaryKeys, foreignKeys,
-            uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints);
+            uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints, writeIds.toString());
       }
     } catch (AlreadyExistsException e) {
@@ -2231,8 +2238,11 @@ private Partition loadPartitionInternal(Path loadPath, Table tbl, Map partitions, boolean resetStatistics, Table tbl,
-      List tableSnapshots)
+      List tableSnapshots, ValidWriteIdList writeIds)
       throws HiveException {
     try {
       if (partitions.isEmpty() || tableSnapshots.isEmpty()) {
@@ -2280,7 +2290,7 @@ private void addPartitionsToMetastore(List partitions,
         LOG.debug(debugMsg.toString());
       }
       getSynchronizedMSC().add_partitions(partitions.stream().map(Partition::getTPartition)
-          .collect(Collectors.toList()));
+          .collect(Collectors.toList()), writeIds.toString());
     } catch(AlreadyExistsException aee) {
       // With multiple users concurrently issuing insert statements on the same partition has
       // a side effect that some queries may not see a partition at the time when they're issued,
@@ -2743,6 +2753,8 @@ private void constructOneLBLocationMap(FileStatus fSta,
     List> futures = Lists.newLinkedList();
     Map, Partition> result = Maps.newLinkedHashMap();
     try {
+      // Advance writeId for ddl
+      ValidWriteIdList writeIds = AcidUtils.advanceWriteId(conf, tbl);
       futures = executor.invokeAll(tasks);
       LOG.debug("Number of partitionsToAdd to be added is " + futures.size());
       for (Future future : futures) {
@@ -2763,7 +2775,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
           .stream()
          .filter(entry -> !entry.getValue().hasOldPartition)
           .map(entry -> entry.getValue().tableSnapshot)
-          .collect(Collectors.toList()));
+          .collect(Collectors.toList()), writeIds);
       // For acid table, add the acid_write event with file list at the time of load itself. But
       // it should be done after partition is created.
@@ -2981,9 +2993,12 @@ public Partition createPartition(Table tbl, Map partSpec) throws
     try {
       org.apache.hadoop.hive.metastore.api.Partition part =
          Partition.createMetaPartitionObject(tbl, partSpec, null);
+      // Advance writeId for ddl
+      ValidWriteIdList writeIds = AcidUtils.advanceWriteId(conf, tbl);
+
       AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, tbl);
       part.setWriteId(tableSnapshot != null ? tableSnapshot.getWriteId() : 0);
-      return new Partition(tbl, getMSC().add_partition(part));
+      return new Partition(tbl, getMSC().add_partition(part, writeIds.toString()));
     } catch (Exception e) {
       LOG.error(StringUtils.stringifyException(e));
       throw new HiveException(e);
@@ -2999,6 +3014,8 @@ public Partition createPartition(Table tbl, Map partSpec) throws
     long writeId;
     String validWriteIdList;
+    // Advance writeId for ddl
+    AcidUtils.advanceWriteId(conf, tbl);
     // In case of replication, get the writeId from the source and use valid write Id list
     // for replication.
if (addPartitionDesc.getReplicationSpec().isInReplicationScope() && @@ -3034,7 +3051,7 @@ public Partition createPartition(Table tbl, Map partSpec) throws if (!addPartitionDesc.getReplicationSpec().isInReplicationScope()){ // TODO: normally, the result is not necessary; might make sense to pass false for (org.apache.hadoop.hive.metastore.api.Partition outPart - : getMSC().add_partitions(in, addPartitionDesc.isIfNotExists(), true)) { + : getMSC().add_partitions(in, addPartitionDesc.isIfNotExists(), true, validWriteIdList)) { out.add(new Partition(tbl, outPart)); } } else { @@ -3061,7 +3078,7 @@ public Partition createPartition(Table tbl, Map partSpec) throws } } for (org.apache.hadoop.hive.metastore.api.Partition outPart - : getMSC().add_partitions(partsToAdd, addPartitionDesc.isIfNotExists(), true)) { + : getMSC().add_partitions(partsToAdd, addPartitionDesc.isIfNotExists(), true, validWriteIdList)) { out.add(new Partition(tbl, outPart)); } EnvironmentContext ec = new EnvironmentContext(); @@ -3183,7 +3200,10 @@ public Partition getPartition(Table tbl, Map partSpec, LOG.debug("creating partition for table " + tbl.getTableName() + " with partition spec : " + partSpec); try { - tpart = getSynchronizedMSC().appendPartition(tbl.getDbName(), tbl.getTableName(), pvals); + // Advance writeId for ddl + ValidWriteIdList writeIds = AcidUtils.advanceWriteId(conf, tbl); + + tpart = getSynchronizedMSC().appendPartition(tbl.getDbName(), tbl.getTableName(), pvals, writeIds.toString()); } catch (AlreadyExistsException aee) { LOG.debug("Caught already exists exception, trying to alter partition instead"); tpart = getSynchronizedMSC().getPartitionWithAuthInfo(tbl.getDbName(), @@ -4973,11 +4993,16 @@ public static boolean isHadoop1() { String sourceDb, String sourceTable, String destDb, String destinationTableName) throws HiveException { try { + Table descTable = getTable(destDb, destinationTableName); + + // Advance writeId for ddl + ValidWriteIdList writeIds = AcidUtils.advanceWriteId(conf, descTable); + List partitions = getMSC().exchange_partitions(partitionSpecs, sourceDb, sourceTable, destDb, - destinationTableName); + destinationTableName, writeIds.toString()); - return convertFromMetastore(getTable(destDb, destinationTableName), partitions); + return convertFromMetastore(descTable, partitions); } catch (Exception ex) { LOG.error(StringUtils.stringifyException(ex)); throw new HiveException(ex); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java index 410868c..cc101aa 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java @@ -111,7 +111,7 @@ private Warehouse getWh() throws MetaException { @Override protected void create_table_with_environment_context( - org.apache.hadoop.hive.metastore.api.Table tbl, EnvironmentContext envContext) + org.apache.hadoop.hive.metastore.api.Table tbl, EnvironmentContext envContext, String validWriteIdList) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, TException { @@ -120,7 +120,7 @@ protected void create_table_with_environment_context( return; } // non-temp tables should use underlying client. 
- super.create_table_with_environment_context(tbl, envContext); + super.create_table_with_environment_context(tbl, envContext, validWriteIdList); } @Override @@ -407,14 +407,14 @@ public boolean tableExists(String databaseName, String tableName) throws MetaExc @Deprecated @Override public void alter_table(String dbname, String tbl_name, org.apache.hadoop.hive.metastore.api.Table new_tbl, - boolean cascade) throws InvalidOperationException, MetaException, TException { + boolean cascade, String validWriteIdList) throws InvalidOperationException, MetaException, TException { org.apache.hadoop.hive.metastore.api.Table old_tbl = getTempTable(dbname, tbl_name); if (old_tbl != null) { //actually temp table does not support partitions, cascade is not applicable here alterTempTable(dbname, tbl_name, old_tbl, new_tbl, null); return; } - super.alter_table(dbname, tbl_name, new_tbl, cascade); + super.alter_table(dbname, tbl_name, new_tbl, cascade, validWriteIdList); } @Override @@ -433,7 +433,7 @@ public void alter_table(String catName, String dbName, String tbl_name, @Override public void alter_table(String dbname, String tbl_name, - org.apache.hadoop.hive.metastore.api.Table new_tbl) throws InvalidOperationException, + org.apache.hadoop.hive.metastore.api.Table new_tbl, String validWriteIdList) throws InvalidOperationException, MetaException, TException { org.apache.hadoop.hive.metastore.api.Table old_tbl = getTempTable(dbname, tbl_name); if (old_tbl != null) { @@ -442,12 +442,12 @@ public void alter_table(String dbname, String tbl_name, alterTempTable(dbname, tbl_name, old_tbl, new_tbl, null); return; } - super.alter_table(dbname, tbl_name, new_tbl); + super.alter_table(dbname, tbl_name, new_tbl, validWriteIdList); } @Override public void alter_table_with_environmentContext(String dbname, String tbl_name, - org.apache.hadoop.hive.metastore.api.Table new_tbl, EnvironmentContext envContext) + org.apache.hadoop.hive.metastore.api.Table new_tbl, EnvironmentContext envContext, String validWriteIdList) throws InvalidOperationException, MetaException, TException { // First try temp table org.apache.hadoop.hive.metastore.api.Table old_tbl = getTempTable(dbname, tbl_name); @@ -457,7 +457,7 @@ public void alter_table_with_environmentContext(String dbname, String tbl_name, } // Try underlying client - super.alter_table_with_environmentContext(dbname, tbl_name, new_tbl, envContext); + super.alter_table_with_environmentContext(dbname, tbl_name, new_tbl, envContext, validWriteIdList); } @Override @@ -722,7 +722,8 @@ private void truncateTempTable(org.apache.hadoop.hive.metastore.api.Table table) EnvironmentContext environmentContext = new EnvironmentContext(); if (needToUpdateStats(table.getParameters(), environmentContext)) { - alter_table_with_environmentContext(table.getDbName(), table.getTableName(), table, environmentContext); + // TODO =====to be reworked in HIVE-21637====== + alter_table_with_environmentContext(table.getDbName(), table.getTableName(), table, environmentContext, null); } } catch (Exception e) { throw new MetaException(e.getMessage()); @@ -1003,13 +1004,13 @@ private int addPartitions(List partitions) */ @Override public org.apache.hadoop.hive.metastore.api.Partition add_partition( - org.apache.hadoop.hive.metastore.api.Partition partition) throws TException { + org.apache.hadoop.hive.metastore.api.Partition partition, String validWriteIdList) throws TException { // First try temp table org.apache.hadoop.hive.metastore.api.Table table = getTempTable(partition.getDbName(), 
partition.getTableName()); if (table == null) { //(assume) not a temp table - Try underlying client - return super.add_partition(partition); + return super.add_partition(partition, validWriteIdList); } TempTable tt = getTempTable(table); if(tt == null) { @@ -1028,7 +1029,7 @@ private int addPartitions(List partitions) * @throws TException */ @Override - public int add_partitions(List partitions) throws TException { + public int add_partitions(List partitions, String validWriteIdList) throws TException { if (partitions.isEmpty()) { return 0; } @@ -1037,7 +1038,7 @@ public int add_partitions(List partitions) throws TException { getTempTable(partition.getDbName(), partition.getTableName()); if (table == null) { // not a temp table - Try underlying client - return super.add_partitions(partitions); + return super.add_partitions(partitions, validWriteIdList); } TempTable tt = getTempTable(table); if (tt == null) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java index 4fccfff..4723f0d 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java @@ -213,7 +213,8 @@ public Object run() throws Exception { } heartbeater.cancel(); msc.markCompacted(CompactionInfo.compactionInfoToStruct(ci)); - msc.commitTxn(compactorTxnId); + // TODO =====to be reworked in HIVE-21637====== + msc.commitTxn(compactorTxnId, null); if (conf.getBoolVar(HiveConf.ConfVars.HIVE_IN_TEST)) { mrJob = mr.getMrJob(); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/util/HiveStrictManagedMigration.java b/ql/src/java/org/apache/hadoop/hive/ql/util/HiveStrictManagedMigration.java index 80025b7..7787166 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/util/HiveStrictManagedMigration.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/util/HiveStrictManagedMigration.java @@ -1058,7 +1058,8 @@ private void closeTxnCtx(TxnCtx txnCtx, IMetaStoreClient msc, boolean isOk) if (txnCtx == null) return; try { if (isOk) { - msc.commitTxn(txnCtx.txnId); + // TODO =====to be reworked in HIVE-21637====== + msc.commitTxn(txnCtx.txnId, null); } else { msc.abortTxns(Lists.newArrayList(txnCtx.txnId)); } diff --git a/ql/src/test/org/apache/hadoop/hive/metastore/TestMetastoreExpr.java b/ql/src/test/org/apache/hadoop/hive/metastore/TestMetastoreExpr.java index 1becbb8..e419de3 100644 --- a/ql/src/test/org/apache/hadoop/hive/metastore/TestMetastoreExpr.java +++ b/ql/src/test/org/apache/hadoop/hive/metastore/TestMetastoreExpr.java @@ -120,7 +120,7 @@ public void testPartitionExpr() throws Exception { addSd(cols, tbl); tbl.setPartitionKeys(partCols); - client.createTable(tbl); + client.createTable(tbl, null); tbl = client.getTable(dbName, tblName); addPartition(client, tbl, Lists.newArrayList("p11", "32"), "part1"); @@ -263,6 +263,6 @@ private void addPartition(HiveMetaStoreClient client, Table table, part.getSd().setSerdeInfo(table.getSd().getSerdeInfo()); part.getSd().setLocation(table.getSd().getLocation() + location); - client.add_partition(part); + client.add_partition(part, null); } } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestMsckCreatePartitionsInBatches.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestMsckCreatePartitionsInBatches.java index 3e45016..760d5e7 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestMsckCreatePartitionsInBatches.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestMsckCreatePartitionsInBatches.java @@ -109,7 
+109,7 @@ private Table createPartitionedTable(String catName, String dbName, String table sd.setCols(Arrays.asList(col1, col2)); table.setPartitionKeys(Arrays.asList(col3)); table.setSd(sd); - db.createTable(table); + db.createTable(table, null); return db.getTable(catName, dbName, tableName); } catch (Exception exception) { fail("Unable to drop and create table " + StatsUtils.getFullyQualifiedTableName(dbName, tableName) + " because " @@ -153,7 +153,7 @@ public void testNumberOfCreatePartitionCalls() throws Exception { ArgumentCaptor ifNotExistsArg = ArgumentCaptor.forClass(Boolean.class); ArgumentCaptor needResultsArg = ArgumentCaptor.forClass(Boolean.class); ArgumentCaptor> argParts = ArgumentCaptor.forClass((Class) List.class); - Mockito.verify(spyDb, Mockito.times(2)).add_partitions(argParts.capture(), ifNotExistsArg.capture(), needResultsArg.capture()); + Mockito.verify(spyDb, Mockito.times(2)).add_partitions(argParts.capture(), ifNotExistsArg.capture(), needResultsArg.capture(), null); // confirm the batch sizes were 5, 5 in the two calls to create partitions List> apds = argParts.getAllValues(); int retryAttempt = 1; @@ -182,7 +182,7 @@ public void testUnevenNumberOfCreatePartitionCalls() throws Exception { ArgumentCaptor ifNotExistsArg = ArgumentCaptor.forClass(Boolean.class); ArgumentCaptor needResultsArg = ArgumentCaptor.forClass(Boolean.class); ArgumentCaptor> argParts = ArgumentCaptor.forClass((Class) List.class); - Mockito.verify(spyDb, Mockito.times(2)).add_partitions(argParts.capture(), ifNotExistsArg.capture(), needResultsArg.capture()); + Mockito.verify(spyDb, Mockito.times(2)).add_partitions(argParts.capture(), ifNotExistsArg.capture(), needResultsArg.capture(), null); // confirm the batch sizes were 5, 4 in the two calls to create partitions List> apds = argParts.getAllValues(); int retryAttempt = 1; @@ -213,7 +213,7 @@ public void testEqualNumberOfPartitions() throws Exception { ArgumentCaptor> argParts = ArgumentCaptor.forClass((Class) List.class); // there should be 1 call to create partitions with batch sizes of 13 Mockito.verify(spyDb, Mockito.times(1)).add_partitions(argParts.capture(), ifNotExistsArg.capture(), - needResultsArg.capture()); + needResultsArg.capture(), null); Assert.assertEquals("Unexpected number of batch size", 13, argParts.getValue().size()); assertTrue(ifNotExistsArg.getValue()); @@ -235,13 +235,13 @@ public void testSmallNumberOfPartitions() throws Exception { msck.createPartitionsInBatches(spyDb, repairOutput, partsNotInMs, table, 20, 2, 0); // there should be 1 call to create partitions with batch sizes of 10 Mockito.verify(spyDb, Mockito.times(1)).add_partitions(Mockito.anyObject(), Mockito.anyBoolean(), - Mockito.anyBoolean()); + Mockito.anyBoolean(), null); ArgumentCaptor ifNotExistsArg = ArgumentCaptor.forClass(Boolean.class); ArgumentCaptor needResultsArg = ArgumentCaptor.forClass(Boolean.class); ArgumentCaptor> argParts = ArgumentCaptor.forClass((Class) List.class); // there should be 1 call to create partitions with batch sizes of 10 Mockito.verify(spyDb, Mockito.times(1)).add_partitions(argParts.capture(), ifNotExistsArg.capture(), - needResultsArg.capture()); + needResultsArg.capture(), null); Assert.assertEquals("Unexpected number of batch size", 10, argParts.getValue().size()); assertTrue(ifNotExistsArg.getValue()); @@ -262,7 +262,7 @@ public void testBatchingWhenException() throws Exception { // first call to createPartitions should throw exception 
Mockito.doThrow(HiveException.class).doCallRealMethod().doCallRealMethod().when(spyDb) .add_partitions(Mockito.anyObject(), Mockito.anyBoolean(), - Mockito.anyBoolean()); + Mockito.anyBoolean(), null); // test with a batch size of 30 and decaying factor of 2 msck.createPartitionsInBatches(spyDb, repairOutput, partsNotInMs, table, 30, 2, 0); @@ -272,7 +272,7 @@ public void testBatchingWhenException() throws Exception { ArgumentCaptor> argParts = ArgumentCaptor.forClass((Class) List.class); // there should be 3 calls to create partitions with batch sizes of 23, 15, 8 Mockito.verify(spyDb, Mockito.times(3)).add_partitions(argParts.capture(), ifNotExistsArg.capture(), - needResultsArg.capture()); + needResultsArg.capture(), null); List> apds = argParts.getAllValues(); int retryAttempt = 1; Assert.assertEquals( @@ -300,7 +300,7 @@ public void testRetriesExhaustedBatchSize() throws Exception { Set partsNotInMs = createPartsNotInMs(17); IMetaStoreClient spyDb = Mockito.spy(db); Mockito.doThrow(HiveException.class).when(spyDb) - .add_partitions(Mockito.anyObject(), Mockito.anyBoolean(), Mockito.anyBoolean()); + .add_partitions(Mockito.anyObject(), Mockito.anyBoolean(), Mockito.anyBoolean(), null); // batch size of 5 and decaying factor of 2 Exception ex = null; try { @@ -316,7 +316,7 @@ public void testRetriesExhaustedBatchSize() throws Exception { ArgumentCaptor> argParts = ArgumentCaptor.forClass((Class) List.class); // there should be 5 calls to create partitions with batch sizes of 17, 15, 7, 3, 1 Mockito.verify(spyDb, Mockito.times(5)).add_partitions(argParts.capture(), ifNotExistsArg.capture(), - needResultsArg.capture()); + needResultsArg.capture(), null); List> apds = argParts.getAllValues(); int retryAttempt = 1; Assert.assertEquals( @@ -347,7 +347,7 @@ public void testMaxRetriesReached() throws Exception { Set partsNotInMs = createPartsNotInMs(17); IMetaStoreClient spyDb = Mockito.spy(db); Mockito.doThrow(HiveException.class).when(spyDb) - .add_partitions(Mockito.anyObject(), Mockito.anyBoolean(), Mockito.anyBoolean()); + .add_partitions(Mockito.anyObject(), Mockito.anyBoolean(), Mockito.anyBoolean(), null); // batch size of 5 and decaying factor of 2 Exception ex = null; try { @@ -360,7 +360,7 @@ public void testMaxRetriesReached() throws Exception { ArgumentCaptor ifNotExistsArg = ArgumentCaptor.forClass(Boolean.class); ArgumentCaptor needResultsArg = ArgumentCaptor.forClass(Boolean.class); ArgumentCaptor> argParts = ArgumentCaptor.forClass((Class) List.class); - Mockito.verify(spyDb, Mockito.times(2)).add_partitions(argParts.capture(), ifNotExistsArg.capture(), needResultsArg.capture()); + Mockito.verify(spyDb, Mockito.times(2)).add_partitions(argParts.capture(), ifNotExistsArg.capture(), needResultsArg.capture(), null); List> apds = argParts.getAllValues(); int retryAttempt = 1; Assert.assertEquals( @@ -383,7 +383,7 @@ public void testOneMaxRetries() throws Exception { Set partsNotInMs = createPartsNotInMs(17); IMetaStoreClient spyDb = Mockito.spy(db); Mockito.doThrow(HiveException.class).when(spyDb) - .add_partitions(Mockito.anyObject(), Mockito.anyBoolean(), Mockito.anyBoolean()); + .add_partitions(Mockito.anyObject(), Mockito.anyBoolean(), Mockito.anyBoolean(), null); // batch size of 5 and decaying factor of 2 Exception ex = null; try { @@ -399,7 +399,7 @@ public void testOneMaxRetries() throws Exception { ArgumentCaptor> argParts = ArgumentCaptor.forClass((Class) List.class); // there should be 5 calls to create partitions with batch sizes of 17, 15, 7, 3, 1 
Mockito.verify(spyDb, Mockito.times(1)).add_partitions(argParts.capture(), ifNotExistsArg.capture(), - needResultsArg.capture()); + needResultsArg.capture(), null); List> apds = argParts.getAllValues(); int retryAttempt = 1; Assert.assertEquals( diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestMsckDropPartitionsInBatches.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestMsckDropPartitionsInBatches.java index 1ec4636..ffcf264 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestMsckDropPartitionsInBatches.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestMsckDropPartitionsInBatches.java @@ -106,7 +106,7 @@ private Table createPartitionedTable(String catName, String dbName, String table sd.setCols(Arrays.asList(col1, col2)); table.setPartitionKeys(Arrays.asList(col3)); table.setSd(sd); - db.createTable(table); + db.createTable(table, null); return db.getTable(catName, dbName, tableName); } catch (Exception exception) { fail("Unable to drop and create table " + StatsUtils diff --git a/ql/src/test/org/apache/hadoop/hive/ql/stats/TestStatsUpdaterThread.java b/ql/src/test/org/apache/hadoop/hive/ql/stats/TestStatsUpdaterThread.java index a2f8bab..690f462 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/stats/TestStatsUpdaterThread.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/stats/TestStatsUpdaterThread.java @@ -211,7 +211,7 @@ public void testTxnTable() throws Exception { currentWriteIds = msClient.getValidWriteIds(fqName).toString(); verifyStatsUpToDate(tblName, cols, msClient, currentWriteIds, false); - msClient.commitTxn(badTxnId); + msClient.commitTxn(badTxnId, null); // Analyze should be able to override stats of an committed txn. assertTrue(su.runOneIteration()); @@ -563,7 +563,7 @@ private void setTableSkipProperty( IMetaStoreClient msClient, String tbl, String val) throws Exception { Table table = msClient.getTable(ss.getCurrentDatabase(), tbl); table.getParameters().put(StatsUpdaterThread.SKIP_STATS_AUTOUPDATE_PROPERTY, val); - msClient.alter_table(table.getDbName(), table.getTableName(), table); + msClient.alter_table(table.getDbName(), table.getTableName(), table, null); } private void setPartitionSkipProperty( @@ -580,7 +580,7 @@ private void verifyAndUnsetColStats( EnvironmentContext ec = new EnvironmentContext(); // Make sure metastore doesn't mess with our bogus stats updates. ec.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE); - msClient.alter_table_with_environmentContext(tbl.getDbName(), tbl.getTableName(), tbl, ec); + msClient.alter_table_with_environmentContext(tbl.getDbName(), tbl.getTableName(), tbl, ec, null); // Double-check. 
tbl = msClient.getTable(ss.getCurrentDatabase(), tblName); for (String col : cols) { diff --git a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java index cfd7290..f4f64f0 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java @@ -174,7 +174,7 @@ protected Table newTable(String dbName, String tableName, boolean partitioned, // drop the table first, in case some previous test created it ms.dropTable(dbName, tableName); - ms.createTable(table); + ms.createTable(table, null); return table; } @@ -189,7 +189,7 @@ protected Partition newPartition(Table t, String value, List sortCols) th part.setTableName(t.getTableName()); part.setSd(newStorageDescriptor(getLocation(t.getTableName(), value), sortCols)); part.setParameters(new HashMap()); - ms.add_partition(part); + ms.add_partition(part, null); return part; } diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CommitTxnRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CommitTxnRequest.java index bbefc3d..7fd96ff 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CommitTxnRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CommitTxnRequest.java @@ -43,6 +43,7 @@ private static final org.apache.thrift.protocol.TField WRITE_EVENT_INFOS_FIELD_DESC = new org.apache.thrift.protocol.TField("writeEventInfos", org.apache.thrift.protocol.TType.LIST, (short)3); private static final org.apache.thrift.protocol.TField KEY_VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("keyValue", org.apache.thrift.protocol.TType.STRUCT, (short)4); private static final org.apache.thrift.protocol.TField REPL_LAST_ID_INFO_FIELD_DESC = new org.apache.thrift.protocol.TField("replLastIdInfo", org.apache.thrift.protocol.TType.STRUCT, (short)5); + private static final org.apache.thrift.protocol.TField TXN_WRITE_IDS_FIELD_DESC = new org.apache.thrift.protocol.TField("txnWriteIds", org.apache.thrift.protocol.TType.STRING, (short)6); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -55,6 +56,7 @@ private List writeEventInfos; // optional private CommitTxnKeyValue keyValue; // optional private ReplLastIdInfo replLastIdInfo; // optional + private String txnWriteIds; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -62,7 +64,8 @@ REPL_POLICY((short)2, "replPolicy"), WRITE_EVENT_INFOS((short)3, "writeEventInfos"), KEY_VALUE((short)4, "keyValue"), - REPL_LAST_ID_INFO((short)5, "replLastIdInfo"); + REPL_LAST_ID_INFO((short)5, "replLastIdInfo"), + TXN_WRITE_IDS((short)6, "txnWriteIds"); private static final Map byName = new HashMap(); @@ -87,6 +90,8 @@ public static _Fields findByThriftId(int fieldId) { return KEY_VALUE; case 5: // REPL_LAST_ID_INFO return REPL_LAST_ID_INFO; + case 6: // TXN_WRITE_IDS + return TXN_WRITE_IDS; default: return null; } @@ -129,7 +134,7 @@ public String getFieldName() { // isset id assignments private static final int __TXNID_ISSET_ID = 0; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.REPL_POLICY,_Fields.WRITE_EVENT_INFOS,_Fields.KEY_VALUE,_Fields.REPL_LAST_ID_INFO}; + private static final _Fields optionals[] = {_Fields.REPL_POLICY,_Fields.WRITE_EVENT_INFOS,_Fields.KEY_VALUE,_Fields.REPL_LAST_ID_INFO,_Fields.TXN_WRITE_IDS}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -144,6 +149,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, CommitTxnKeyValue.class))); tmpMap.put(_Fields.REPL_LAST_ID_INFO, new org.apache.thrift.meta_data.FieldMetaData("replLastIdInfo", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ReplLastIdInfo.class))); + tmpMap.put(_Fields.TXN_WRITE_IDS, new org.apache.thrift.meta_data.FieldMetaData("txnWriteIds", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CommitTxnRequest.class, metaDataMap); } @@ -181,6 +188,9 @@ public CommitTxnRequest(CommitTxnRequest other) { if (other.isSetReplLastIdInfo()) { this.replLastIdInfo = new ReplLastIdInfo(other.replLastIdInfo); } + if (other.isSetTxnWriteIds()) { + this.txnWriteIds = other.txnWriteIds; + } } public CommitTxnRequest deepCopy() { @@ -195,6 +205,7 @@ public void clear() { this.writeEventInfos = null; this.keyValue = null; this.replLastIdInfo = null; + this.txnWriteIds = null; } public long getTxnid() { @@ -326,6 +337,29 @@ public void setReplLastIdInfoIsSet(boolean value) { } } + public String getTxnWriteIds() { + return this.txnWriteIds; + } + + public void setTxnWriteIds(String txnWriteIds) { + this.txnWriteIds = txnWriteIds; + } + + public void unsetTxnWriteIds() { + this.txnWriteIds = null; + } + + /** Returns true if field txnWriteIds is set (has been assigned a value) and false otherwise */ + public boolean isSetTxnWriteIds() { + return this.txnWriteIds != null; + } + + public void setTxnWriteIdsIsSet(boolean value) { + if (!value) { + this.txnWriteIds = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case TXNID: @@ -368,6 +402,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case TXN_WRITE_IDS: + if (value == null) { + unsetTxnWriteIds(); + } else { + setTxnWriteIds((String)value); + } + break; + } } @@ -388,6 +430,9 @@ public Object getFieldValue(_Fields field) { 
case REPL_LAST_ID_INFO: return getReplLastIdInfo(); + case TXN_WRITE_IDS: + return getTxnWriteIds(); + } throw new IllegalStateException(); } @@ -409,6 +454,8 @@ public boolean isSet(_Fields field) { return isSetKeyValue(); case REPL_LAST_ID_INFO: return isSetReplLastIdInfo(); + case TXN_WRITE_IDS: + return isSetTxnWriteIds(); } throw new IllegalStateException(); } @@ -471,6 +518,15 @@ public boolean equals(CommitTxnRequest that) { return false; } + boolean this_present_txnWriteIds = true && this.isSetTxnWriteIds(); + boolean that_present_txnWriteIds = true && that.isSetTxnWriteIds(); + if (this_present_txnWriteIds || that_present_txnWriteIds) { + if (!(this_present_txnWriteIds && that_present_txnWriteIds)) + return false; + if (!this.txnWriteIds.equals(that.txnWriteIds)) + return false; + } + return true; } @@ -503,6 +559,11 @@ public int hashCode() { if (present_replLastIdInfo) list.add(replLastIdInfo); + boolean present_txnWriteIds = true && (isSetTxnWriteIds()); + list.add(present_txnWriteIds); + if (present_txnWriteIds) + list.add(txnWriteIds); + return list.hashCode(); } @@ -564,6 +625,16 @@ public int compareTo(CommitTxnRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetTxnWriteIds()).compareTo(other.isSetTxnWriteIds()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTxnWriteIds()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnWriteIds, other.txnWriteIds); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -627,6 +698,16 @@ public String toString() { } first = false; } + if (isSetTxnWriteIds()) { + if (!first) sb.append(", "); + sb.append("txnWriteIds:"); + if (this.txnWriteIds == null) { + sb.append("null"); + } else { + sb.append(this.txnWriteIds); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -735,6 +816,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, CommitTxnRequest st org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 6: // TXN_WRITE_IDS + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.txnWriteIds = iprot.readString(); + struct.setTxnWriteIdsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -786,6 +875,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, CommitTxnRequest s oprot.writeFieldEnd(); } } + if (struct.txnWriteIds != null) { + if (struct.isSetTxnWriteIds()) { + oprot.writeFieldBegin(TXN_WRITE_IDS_FIELD_DESC); + oprot.writeString(struct.txnWriteIds); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -817,7 +913,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, CommitTxnRequest st if (struct.isSetReplLastIdInfo()) { optionals.set(3); } - oprot.writeBitSet(optionals, 4); + if (struct.isSetTxnWriteIds()) { + optionals.set(4); + } + oprot.writeBitSet(optionals, 5); if (struct.isSetReplPolicy()) { oprot.writeString(struct.replPolicy); } @@ -836,6 +935,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, CommitTxnRequest st if (struct.isSetReplLastIdInfo()) { struct.replLastIdInfo.write(oprot); } + if (struct.isSetTxnWriteIds()) { + oprot.writeString(struct.txnWriteIds); + } } @Override @@ -843,7 +945,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, CommitTxnRequest str TTupleProtocol iprot = (TTupleProtocol) prot; struct.txnid 
= iprot.readI64(); struct.setTxnidIsSet(true); - BitSet incoming = iprot.readBitSet(4); + BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { struct.replPolicy = iprot.readString(); struct.setReplPolicyIsSet(true); @@ -872,6 +974,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, CommitTxnRequest str struct.replLastIdInfo.read(iprot); struct.setReplLastIdInfoIsSet(true); } + if (incoming.get(4)) { + struct.txnWriteIds = iprot.readString(); + struct.setTxnWriteIdsIsSet(true); + } } } diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java index f2781ce..a6c65aa 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java @@ -84,11 +84,11 @@ public List get_schema_with_environment_context(String db_name, String table_name, EnvironmentContext environment_context) throws MetaException, UnknownTableException, UnknownDBException, org.apache.thrift.TException; - public void create_table(Table tbl) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, org.apache.thrift.TException; + public void create_table(Table tbl, String validWriteIdList) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, org.apache.thrift.TException; - public void create_table_with_environment_context(Table tbl, EnvironmentContext environment_context) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, org.apache.thrift.TException; + public void create_table_with_environment_context(Table tbl, EnvironmentContext environment_context, String validWriteIdList) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, org.apache.thrift.TException; - public void create_table_with_constraints(Table tbl, List primaryKeys, List foreignKeys, List uniqueConstraints, List notNullConstraints, List defaultConstraints, List checkConstraints) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, org.apache.thrift.TException; + public void create_table_with_constraints(Table tbl, List primaryKeys, List foreignKeys, List uniqueConstraints, List notNullConstraints, List defaultConstraints, List checkConstraints, String validWriteIdList) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, org.apache.thrift.TException; public void drop_constraint(DropConstraintRequest req) throws NoSuchObjectException, MetaException, org.apache.thrift.TException; @@ -138,31 +138,31 @@ public List get_table_names_by_filter(String dbname, String filter, short max_tables) throws MetaException, InvalidOperationException, UnknownDBException, org.apache.thrift.TException; - public void alter_table(String dbname, String tbl_name, Table new_tbl) throws InvalidOperationException, MetaException, org.apache.thrift.TException; + public void alter_table(String dbname, String tbl_name, Table new_tbl, String validWriteIdList) throws InvalidOperationException, MetaException, org.apache.thrift.TException; - public void alter_table_with_environment_context(String dbname, String tbl_name, Table new_tbl, 
EnvironmentContext environment_context) throws InvalidOperationException, MetaException, org.apache.thrift.TException; + public void alter_table_with_environment_context(String dbname, String tbl_name, Table new_tbl, EnvironmentContext environment_context, String validWriteIdList) throws InvalidOperationException, MetaException, org.apache.thrift.TException; - public void alter_table_with_cascade(String dbname, String tbl_name, Table new_tbl, boolean cascade) throws InvalidOperationException, MetaException, org.apache.thrift.TException; + public void alter_table_with_cascade(String dbname, String tbl_name, Table new_tbl, boolean cascade, String validWriteIdList) throws InvalidOperationException, MetaException, org.apache.thrift.TException; public AlterTableResponse alter_table_req(AlterTableRequest req) throws InvalidOperationException, MetaException, org.apache.thrift.TException; - public Partition add_partition(Partition new_part) throws InvalidObjectException, AlreadyExistsException, MetaException, org.apache.thrift.TException; + public Partition add_partition(Partition new_part, String validWriteIdList) throws InvalidObjectException, AlreadyExistsException, MetaException, org.apache.thrift.TException; - public Partition add_partition_with_environment_context(Partition new_part, EnvironmentContext environment_context) throws InvalidObjectException, AlreadyExistsException, MetaException, org.apache.thrift.TException; + public Partition add_partition_with_environment_context(Partition new_part, EnvironmentContext environment_context, String validWriteIdList) throws InvalidObjectException, AlreadyExistsException, MetaException, org.apache.thrift.TException; - public int add_partitions(List new_parts) throws InvalidObjectException, AlreadyExistsException, MetaException, org.apache.thrift.TException; + public int add_partitions(List new_parts, String validWriteIdList) throws InvalidObjectException, AlreadyExistsException, MetaException, org.apache.thrift.TException; - public int add_partitions_pspec(List new_parts) throws InvalidObjectException, AlreadyExistsException, MetaException, org.apache.thrift.TException; + public int add_partitions_pspec(List new_parts, String validWriteIdList) throws InvalidObjectException, AlreadyExistsException, MetaException, org.apache.thrift.TException; - public Partition append_partition(String db_name, String tbl_name, List part_vals) throws InvalidObjectException, AlreadyExistsException, MetaException, org.apache.thrift.TException; + public Partition append_partition(String db_name, String tbl_name, List part_vals, String validWriteIdList) throws InvalidObjectException, AlreadyExistsException, MetaException, org.apache.thrift.TException; public AddPartitionsResult add_partitions_req(AddPartitionsRequest request) throws InvalidObjectException, AlreadyExistsException, MetaException, org.apache.thrift.TException; - public Partition append_partition_with_environment_context(String db_name, String tbl_name, List part_vals, EnvironmentContext environment_context) throws InvalidObjectException, AlreadyExistsException, MetaException, org.apache.thrift.TException; + public Partition append_partition_with_environment_context(String db_name, String tbl_name, List part_vals, EnvironmentContext environment_context, String validWriteIdList) throws InvalidObjectException, AlreadyExistsException, MetaException, org.apache.thrift.TException; - public Partition append_partition_by_name(String db_name, String tbl_name, String part_name) throws InvalidObjectException, 
AlreadyExistsException, MetaException, org.apache.thrift.TException; + public Partition append_partition_by_name(String db_name, String tbl_name, String part_name, String validWriteIdList) throws InvalidObjectException, AlreadyExistsException, MetaException, org.apache.thrift.TException; - public Partition append_partition_by_name_with_environment_context(String db_name, String tbl_name, String part_name, EnvironmentContext environment_context) throws InvalidObjectException, AlreadyExistsException, MetaException, org.apache.thrift.TException; + public Partition append_partition_by_name_with_environment_context(String db_name, String tbl_name, String part_name, EnvironmentContext environment_context, String validWriteIdList) throws InvalidObjectException, AlreadyExistsException, MetaException, org.apache.thrift.TException; public boolean drop_partition(String db_name, String tbl_name, List part_vals, boolean deleteData) throws NoSuchObjectException, MetaException, org.apache.thrift.TException; @@ -176,9 +176,9 @@ public Partition get_partition(String db_name, String tbl_name, List part_vals) throws MetaException, NoSuchObjectException, org.apache.thrift.TException; - public Partition exchange_partition(Map partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException, org.apache.thrift.TException; + public Partition exchange_partition(Map partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name, String validWriteIdList) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException, org.apache.thrift.TException; - public List exchange_partitions(Map partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException, org.apache.thrift.TException; + public List exchange_partitions(Map partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name, String validWriteIdList) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException, org.apache.thrift.TException; public Partition get_partition_with_auth(String db_name, String tbl_name, List part_vals, String user_name, List group_names) throws MetaException, NoSuchObjectException, org.apache.thrift.TException; @@ -250,9 +250,9 @@ public CheckConstraintsResponse get_check_constraints(CheckConstraintsRequest request) throws MetaException, NoSuchObjectException, org.apache.thrift.TException; - public boolean update_table_column_statistics(ColumnStatistics stats_obj) throws NoSuchObjectException, InvalidObjectException, MetaException, InvalidInputException, org.apache.thrift.TException; + public boolean update_table_column_statistics(ColumnStatistics stats_obj, String validWriteIdList) throws NoSuchObjectException, InvalidObjectException, MetaException, InvalidInputException, org.apache.thrift.TException; - public boolean update_partition_column_statistics(ColumnStatistics stats_obj) throws NoSuchObjectException, InvalidObjectException, MetaException, InvalidInputException, org.apache.thrift.TException; + public boolean update_partition_column_statistics(ColumnStatistics stats_obj, String validWriteIdList) throws NoSuchObjectException, InvalidObjectException, MetaException, InvalidInputException, org.apache.thrift.TException; public SetPartitionsStatsResponse 
update_table_column_statistics_req(SetPartitionsStatsRequest req) throws NoSuchObjectException, InvalidObjectException, MetaException, InvalidInputException, org.apache.thrift.TException; @@ -536,11 +536,11 @@ public void get_schema_with_environment_context(String db_name, String table_name, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - public void create_table(Table tbl, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void create_table(Table tbl, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - public void create_table_with_environment_context(Table tbl, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void create_table_with_environment_context(Table tbl, EnvironmentContext environment_context, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - public void create_table_with_constraints(Table tbl, List primaryKeys, List foreignKeys, List uniqueConstraints, List notNullConstraints, List defaultConstraints, List checkConstraints, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void create_table_with_constraints(Table tbl, List primaryKeys, List foreignKeys, List uniqueConstraints, List notNullConstraints, List defaultConstraints, List checkConstraints, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; public void drop_constraint(DropConstraintRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; @@ -590,31 +590,31 @@ public void get_table_names_by_filter(String dbname, String filter, short max_tables, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - public void alter_table(String dbname, String tbl_name, Table new_tbl, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void alter_table(String dbname, String tbl_name, Table new_tbl, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - public void alter_table_with_environment_context(String dbname, String tbl_name, Table new_tbl, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void alter_table_with_environment_context(String dbname, String tbl_name, Table new_tbl, EnvironmentContext environment_context, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - public void alter_table_with_cascade(String dbname, String tbl_name, Table new_tbl, boolean cascade, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void alter_table_with_cascade(String dbname, String tbl_name, Table new_tbl, boolean cascade, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; public void alter_table_req(AlterTableRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - public void 
add_partition(Partition new_part, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void add_partition(Partition new_part, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - public void add_partition_with_environment_context(Partition new_part, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void add_partition_with_environment_context(Partition new_part, EnvironmentContext environment_context, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - public void add_partitions(List new_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void add_partitions(List new_parts, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - public void add_partitions_pspec(List new_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void add_partitions_pspec(List new_parts, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - public void append_partition(String db_name, String tbl_name, List part_vals, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void append_partition(String db_name, String tbl_name, List part_vals, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; public void add_partitions_req(AddPartitionsRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - public void append_partition_with_environment_context(String db_name, String tbl_name, List part_vals, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void append_partition_with_environment_context(String db_name, String tbl_name, List part_vals, EnvironmentContext environment_context, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - public void append_partition_by_name(String db_name, String tbl_name, String part_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void append_partition_by_name(String db_name, String tbl_name, String part_name, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - public void append_partition_by_name_with_environment_context(String db_name, String tbl_name, String part_name, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void append_partition_by_name_with_environment_context(String db_name, String tbl_name, String part_name, EnvironmentContext environment_context, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; public void drop_partition(String db_name, String tbl_name, List part_vals, boolean deleteData, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; @@ -628,9 +628,9 @@ public 
void get_partition(String db_name, String tbl_name, List part_vals, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - public void exchange_partition(Map partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void exchange_partition(Map partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - public void exchange_partitions(Map partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void exchange_partitions(Map partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; public void get_partition_with_auth(String db_name, String tbl_name, List part_vals, String user_name, List group_names, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; @@ -702,9 +702,9 @@ public void get_check_constraints(CheckConstraintsRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - public void update_table_column_statistics(ColumnStatistics stats_obj, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void update_table_column_statistics(ColumnStatistics stats_obj, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - public void update_partition_column_statistics(ColumnStatistics stats_obj, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void update_partition_column_statistics(ColumnStatistics stats_obj, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; public void update_table_column_statistics_req(SetPartitionsStatsRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; @@ -1572,16 +1572,17 @@ public void send_get_schema_with_environment_context(String db_name, String tabl throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_schema_with_environment_context failed: unknown result"); } - public void create_table(Table tbl) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, org.apache.thrift.TException + public void create_table(Table tbl, String validWriteIdList) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, org.apache.thrift.TException { - send_create_table(tbl); + send_create_table(tbl, validWriteIdList); recv_create_table(); } - public void send_create_table(Table tbl) throws org.apache.thrift.TException + public void send_create_table(Table tbl, String validWriteIdList) throws org.apache.thrift.TException { create_table_args args = new create_table_args(); args.setTbl(tbl); + args.setValidWriteIdList(validWriteIdList); sendBase("create_table", args); } @@ -1604,17 +1605,18 @@ public void 
recv_create_table() throws AlreadyExistsException, InvalidObjectExce return; } - public void create_table_with_environment_context(Table tbl, EnvironmentContext environment_context) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, org.apache.thrift.TException + public void create_table_with_environment_context(Table tbl, EnvironmentContext environment_context, String validWriteIdList) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, org.apache.thrift.TException { - send_create_table_with_environment_context(tbl, environment_context); + send_create_table_with_environment_context(tbl, environment_context, validWriteIdList); recv_create_table_with_environment_context(); } - public void send_create_table_with_environment_context(Table tbl, EnvironmentContext environment_context) throws org.apache.thrift.TException + public void send_create_table_with_environment_context(Table tbl, EnvironmentContext environment_context, String validWriteIdList) throws org.apache.thrift.TException { create_table_with_environment_context_args args = new create_table_with_environment_context_args(); args.setTbl(tbl); args.setEnvironment_context(environment_context); + args.setValidWriteIdList(validWriteIdList); sendBase("create_table_with_environment_context", args); } @@ -1637,13 +1639,13 @@ public void recv_create_table_with_environment_context() throws AlreadyExistsExc return; } - public void create_table_with_constraints(Table tbl, List primaryKeys, List foreignKeys, List uniqueConstraints, List notNullConstraints, List defaultConstraints, List checkConstraints) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, org.apache.thrift.TException + public void create_table_with_constraints(Table tbl, List primaryKeys, List foreignKeys, List uniqueConstraints, List notNullConstraints, List defaultConstraints, List checkConstraints, String validWriteIdList) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, org.apache.thrift.TException { - send_create_table_with_constraints(tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints); + send_create_table_with_constraints(tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints, validWriteIdList); recv_create_table_with_constraints(); } - public void send_create_table_with_constraints(Table tbl, List primaryKeys, List foreignKeys, List uniqueConstraints, List notNullConstraints, List defaultConstraints, List checkConstraints) throws org.apache.thrift.TException + public void send_create_table_with_constraints(Table tbl, List primaryKeys, List foreignKeys, List uniqueConstraints, List notNullConstraints, List defaultConstraints, List checkConstraints, String validWriteIdList) throws org.apache.thrift.TException { create_table_with_constraints_args args = new create_table_with_constraints_args(); args.setTbl(tbl); @@ -1653,6 +1655,7 @@ public void send_create_table_with_constraints(Table tbl, List pr args.setNotNullConstraints(notNullConstraints); args.setDefaultConstraints(defaultConstraints); args.setCheckConstraints(checkConstraints); + args.setValidWriteIdList(validWriteIdList); sendBase("create_table_with_constraints", args); } @@ -2339,18 +2342,19 @@ public void send_get_table_names_by_filter(String dbname, String filter, short m throw new 
org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_table_names_by_filter failed: unknown result"); } - public void alter_table(String dbname, String tbl_name, Table new_tbl) throws InvalidOperationException, MetaException, org.apache.thrift.TException + public void alter_table(String dbname, String tbl_name, Table new_tbl, String validWriteIdList) throws InvalidOperationException, MetaException, org.apache.thrift.TException { - send_alter_table(dbname, tbl_name, new_tbl); + send_alter_table(dbname, tbl_name, new_tbl, validWriteIdList); recv_alter_table(); } - public void send_alter_table(String dbname, String tbl_name, Table new_tbl) throws org.apache.thrift.TException + public void send_alter_table(String dbname, String tbl_name, Table new_tbl, String validWriteIdList) throws org.apache.thrift.TException { alter_table_args args = new alter_table_args(); args.setDbname(dbname); args.setTbl_name(tbl_name); args.setNew_tbl(new_tbl); + args.setValidWriteIdList(validWriteIdList); sendBase("alter_table", args); } @@ -2367,19 +2371,20 @@ public void recv_alter_table() throws InvalidOperationException, MetaException, return; } - public void alter_table_with_environment_context(String dbname, String tbl_name, Table new_tbl, EnvironmentContext environment_context) throws InvalidOperationException, MetaException, org.apache.thrift.TException + public void alter_table_with_environment_context(String dbname, String tbl_name, Table new_tbl, EnvironmentContext environment_context, String validWriteIdList) throws InvalidOperationException, MetaException, org.apache.thrift.TException { - send_alter_table_with_environment_context(dbname, tbl_name, new_tbl, environment_context); + send_alter_table_with_environment_context(dbname, tbl_name, new_tbl, environment_context, validWriteIdList); recv_alter_table_with_environment_context(); } - public void send_alter_table_with_environment_context(String dbname, String tbl_name, Table new_tbl, EnvironmentContext environment_context) throws org.apache.thrift.TException + public void send_alter_table_with_environment_context(String dbname, String tbl_name, Table new_tbl, EnvironmentContext environment_context, String validWriteIdList) throws org.apache.thrift.TException { alter_table_with_environment_context_args args = new alter_table_with_environment_context_args(); args.setDbname(dbname); args.setTbl_name(tbl_name); args.setNew_tbl(new_tbl); args.setEnvironment_context(environment_context); + args.setValidWriteIdList(validWriteIdList); sendBase("alter_table_with_environment_context", args); } @@ -2396,19 +2401,20 @@ public void recv_alter_table_with_environment_context() throws InvalidOperationE return; } - public void alter_table_with_cascade(String dbname, String tbl_name, Table new_tbl, boolean cascade) throws InvalidOperationException, MetaException, org.apache.thrift.TException + public void alter_table_with_cascade(String dbname, String tbl_name, Table new_tbl, boolean cascade, String validWriteIdList) throws InvalidOperationException, MetaException, org.apache.thrift.TException { - send_alter_table_with_cascade(dbname, tbl_name, new_tbl, cascade); + send_alter_table_with_cascade(dbname, tbl_name, new_tbl, cascade, validWriteIdList); recv_alter_table_with_cascade(); } - public void send_alter_table_with_cascade(String dbname, String tbl_name, Table new_tbl, boolean cascade) throws org.apache.thrift.TException + public void send_alter_table_with_cascade(String dbname, String tbl_name, Table new_tbl, boolean 
cascade, String validWriteIdList) throws org.apache.thrift.TException { alter_table_with_cascade_args args = new alter_table_with_cascade_args(); args.setDbname(dbname); args.setTbl_name(tbl_name); args.setNew_tbl(new_tbl); args.setCascade(cascade); + args.setValidWriteIdList(validWriteIdList); sendBase("alter_table_with_cascade", args); } @@ -2454,16 +2460,17 @@ public AlterTableResponse recv_alter_table_req() throws InvalidOperationExceptio throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "alter_table_req failed: unknown result"); } - public Partition add_partition(Partition new_part) throws InvalidObjectException, AlreadyExistsException, MetaException, org.apache.thrift.TException + public Partition add_partition(Partition new_part, String validWriteIdList) throws InvalidObjectException, AlreadyExistsException, MetaException, org.apache.thrift.TException { - send_add_partition(new_part); + send_add_partition(new_part, validWriteIdList); return recv_add_partition(); } - public void send_add_partition(Partition new_part) throws org.apache.thrift.TException + public void send_add_partition(Partition new_part, String validWriteIdList) throws org.apache.thrift.TException { add_partition_args args = new add_partition_args(); args.setNew_part(new_part); + args.setValidWriteIdList(validWriteIdList); sendBase("add_partition", args); } @@ -2486,17 +2493,18 @@ public Partition recv_add_partition() throws InvalidObjectException, AlreadyExis throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "add_partition failed: unknown result"); } - public Partition add_partition_with_environment_context(Partition new_part, EnvironmentContext environment_context) throws InvalidObjectException, AlreadyExistsException, MetaException, org.apache.thrift.TException + public Partition add_partition_with_environment_context(Partition new_part, EnvironmentContext environment_context, String validWriteIdList) throws InvalidObjectException, AlreadyExistsException, MetaException, org.apache.thrift.TException { - send_add_partition_with_environment_context(new_part, environment_context); + send_add_partition_with_environment_context(new_part, environment_context, validWriteIdList); return recv_add_partition_with_environment_context(); } - public void send_add_partition_with_environment_context(Partition new_part, EnvironmentContext environment_context) throws org.apache.thrift.TException + public void send_add_partition_with_environment_context(Partition new_part, EnvironmentContext environment_context, String validWriteIdList) throws org.apache.thrift.TException { add_partition_with_environment_context_args args = new add_partition_with_environment_context_args(); args.setNew_part(new_part); args.setEnvironment_context(environment_context); + args.setValidWriteIdList(validWriteIdList); sendBase("add_partition_with_environment_context", args); } @@ -2519,16 +2527,17 @@ public Partition recv_add_partition_with_environment_context() throws InvalidObj throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "add_partition_with_environment_context failed: unknown result"); } - public int add_partitions(List new_parts) throws InvalidObjectException, AlreadyExistsException, MetaException, org.apache.thrift.TException + public int add_partitions(List new_parts, String validWriteIdList) throws InvalidObjectException, AlreadyExistsException, MetaException, 
org.apache.thrift.TException { - send_add_partitions(new_parts); + send_add_partitions(new_parts, validWriteIdList); return recv_add_partitions(); } - public void send_add_partitions(List new_parts) throws org.apache.thrift.TException + public void send_add_partitions(List new_parts, String validWriteIdList) throws org.apache.thrift.TException { add_partitions_args args = new add_partitions_args(); args.setNew_parts(new_parts); + args.setValidWriteIdList(validWriteIdList); sendBase("add_partitions", args); } @@ -2551,16 +2560,17 @@ public int recv_add_partitions() throws InvalidObjectException, AlreadyExistsExc throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "add_partitions failed: unknown result"); } - public int add_partitions_pspec(List new_parts) throws InvalidObjectException, AlreadyExistsException, MetaException, org.apache.thrift.TException + public int add_partitions_pspec(List new_parts, String validWriteIdList) throws InvalidObjectException, AlreadyExistsException, MetaException, org.apache.thrift.TException { - send_add_partitions_pspec(new_parts); + send_add_partitions_pspec(new_parts, validWriteIdList); return recv_add_partitions_pspec(); } - public void send_add_partitions_pspec(List new_parts) throws org.apache.thrift.TException + public void send_add_partitions_pspec(List new_parts, String validWriteIdList) throws org.apache.thrift.TException { add_partitions_pspec_args args = new add_partitions_pspec_args(); args.setNew_parts(new_parts); + args.setValidWriteIdList(validWriteIdList); sendBase("add_partitions_pspec", args); } @@ -2583,18 +2593,19 @@ public int recv_add_partitions_pspec() throws InvalidObjectException, AlreadyExi throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "add_partitions_pspec failed: unknown result"); } - public Partition append_partition(String db_name, String tbl_name, List part_vals) throws InvalidObjectException, AlreadyExistsException, MetaException, org.apache.thrift.TException + public Partition append_partition(String db_name, String tbl_name, List part_vals, String validWriteIdList) throws InvalidObjectException, AlreadyExistsException, MetaException, org.apache.thrift.TException { - send_append_partition(db_name, tbl_name, part_vals); + send_append_partition(db_name, tbl_name, part_vals, validWriteIdList); return recv_append_partition(); } - public void send_append_partition(String db_name, String tbl_name, List part_vals) throws org.apache.thrift.TException + public void send_append_partition(String db_name, String tbl_name, List part_vals, String validWriteIdList) throws org.apache.thrift.TException { append_partition_args args = new append_partition_args(); args.setDb_name(db_name); args.setTbl_name(tbl_name); args.setPart_vals(part_vals); + args.setValidWriteIdList(validWriteIdList); sendBase("append_partition", args); } @@ -2649,19 +2660,20 @@ public AddPartitionsResult recv_add_partitions_req() throws InvalidObjectExcepti throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "add_partitions_req failed: unknown result"); } - public Partition append_partition_with_environment_context(String db_name, String tbl_name, List part_vals, EnvironmentContext environment_context) throws InvalidObjectException, AlreadyExistsException, MetaException, org.apache.thrift.TException + public Partition append_partition_with_environment_context(String db_name, String tbl_name, List 
part_vals, EnvironmentContext environment_context, String validWriteIdList) throws InvalidObjectException, AlreadyExistsException, MetaException, org.apache.thrift.TException { - send_append_partition_with_environment_context(db_name, tbl_name, part_vals, environment_context); + send_append_partition_with_environment_context(db_name, tbl_name, part_vals, environment_context, validWriteIdList); return recv_append_partition_with_environment_context(); } - public void send_append_partition_with_environment_context(String db_name, String tbl_name, List part_vals, EnvironmentContext environment_context) throws org.apache.thrift.TException + public void send_append_partition_with_environment_context(String db_name, String tbl_name, List part_vals, EnvironmentContext environment_context, String validWriteIdList) throws org.apache.thrift.TException { append_partition_with_environment_context_args args = new append_partition_with_environment_context_args(); args.setDb_name(db_name); args.setTbl_name(tbl_name); args.setPart_vals(part_vals); args.setEnvironment_context(environment_context); + args.setValidWriteIdList(validWriteIdList); sendBase("append_partition_with_environment_context", args); } @@ -2684,18 +2696,19 @@ public Partition recv_append_partition_with_environment_context() throws Invalid throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "append_partition_with_environment_context failed: unknown result"); } - public Partition append_partition_by_name(String db_name, String tbl_name, String part_name) throws InvalidObjectException, AlreadyExistsException, MetaException, org.apache.thrift.TException + public Partition append_partition_by_name(String db_name, String tbl_name, String part_name, String validWriteIdList) throws InvalidObjectException, AlreadyExistsException, MetaException, org.apache.thrift.TException { - send_append_partition_by_name(db_name, tbl_name, part_name); + send_append_partition_by_name(db_name, tbl_name, part_name, validWriteIdList); return recv_append_partition_by_name(); } - public void send_append_partition_by_name(String db_name, String tbl_name, String part_name) throws org.apache.thrift.TException + public void send_append_partition_by_name(String db_name, String tbl_name, String part_name, String validWriteIdList) throws org.apache.thrift.TException { append_partition_by_name_args args = new append_partition_by_name_args(); args.setDb_name(db_name); args.setTbl_name(tbl_name); args.setPart_name(part_name); + args.setValidWriteIdList(validWriteIdList); sendBase("append_partition_by_name", args); } @@ -2718,19 +2731,20 @@ public Partition recv_append_partition_by_name() throws InvalidObjectException, throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "append_partition_by_name failed: unknown result"); } - public Partition append_partition_by_name_with_environment_context(String db_name, String tbl_name, String part_name, EnvironmentContext environment_context) throws InvalidObjectException, AlreadyExistsException, MetaException, org.apache.thrift.TException + public Partition append_partition_by_name_with_environment_context(String db_name, String tbl_name, String part_name, EnvironmentContext environment_context, String validWriteIdList) throws InvalidObjectException, AlreadyExistsException, MetaException, org.apache.thrift.TException { - send_append_partition_by_name_with_environment_context(db_name, tbl_name, part_name, environment_context); + 
send_append_partition_by_name_with_environment_context(db_name, tbl_name, part_name, environment_context, validWriteIdList); return recv_append_partition_by_name_with_environment_context(); } - public void send_append_partition_by_name_with_environment_context(String db_name, String tbl_name, String part_name, EnvironmentContext environment_context) throws org.apache.thrift.TException + public void send_append_partition_by_name_with_environment_context(String db_name, String tbl_name, String part_name, EnvironmentContext environment_context, String validWriteIdList) throws org.apache.thrift.TException { append_partition_by_name_with_environment_context_args args = new append_partition_by_name_with_environment_context_args(); args.setDb_name(db_name); args.setTbl_name(tbl_name); args.setPart_name(part_name); args.setEnvironment_context(environment_context); + args.setValidWriteIdList(validWriteIdList); sendBase("append_partition_by_name_with_environment_context", args); } @@ -2943,13 +2957,13 @@ public Partition recv_get_partition() throws MetaException, NoSuchObjectExceptio throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_partition failed: unknown result"); } - public Partition exchange_partition(Map partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException, org.apache.thrift.TException + public Partition exchange_partition(Map partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name, String validWriteIdList) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException, org.apache.thrift.TException { - send_exchange_partition(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name); + send_exchange_partition(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name, validWriteIdList); return recv_exchange_partition(); } - public void send_exchange_partition(Map partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name) throws org.apache.thrift.TException + public void send_exchange_partition(Map partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name, String validWriteIdList) throws org.apache.thrift.TException { exchange_partition_args args = new exchange_partition_args(); args.setPartitionSpecs(partitionSpecs); @@ -2957,6 +2971,7 @@ public void send_exchange_partition(Map partitionSpecs, String so args.setSource_table_name(source_table_name); args.setDest_db(dest_db); args.setDest_table_name(dest_table_name); + args.setValidWriteIdList(validWriteIdList); sendBase("exchange_partition", args); } @@ -2982,13 +2997,13 @@ public Partition recv_exchange_partition() throws MetaException, NoSuchObjectExc throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "exchange_partition failed: unknown result"); } - public List exchange_partitions(Map partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException, org.apache.thrift.TException + public List exchange_partitions(Map partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name, String validWriteIdList) throws MetaException, NoSuchObjectException, 
InvalidObjectException, InvalidInputException, org.apache.thrift.TException { - send_exchange_partitions(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name); + send_exchange_partitions(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name, validWriteIdList); return recv_exchange_partitions(); } - public void send_exchange_partitions(Map partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name) throws org.apache.thrift.TException + public void send_exchange_partitions(Map partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name, String validWriteIdList) throws org.apache.thrift.TException { exchange_partitions_args args = new exchange_partitions_args(); args.setPartitionSpecs(partitionSpecs); @@ -2996,6 +3011,7 @@ public void send_exchange_partitions(Map partitionSpecs, String s args.setSource_table_name(source_table_name); args.setDest_db(dest_db); args.setDest_table_name(dest_table_name); + args.setValidWriteIdList(validWriteIdList); sendBase("exchange_partitions", args); } @@ -4088,16 +4104,17 @@ public CheckConstraintsResponse recv_get_check_constraints() throws MetaExceptio throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_check_constraints failed: unknown result"); } - public boolean update_table_column_statistics(ColumnStatistics stats_obj) throws NoSuchObjectException, InvalidObjectException, MetaException, InvalidInputException, org.apache.thrift.TException + public boolean update_table_column_statistics(ColumnStatistics stats_obj, String validWriteIdList) throws NoSuchObjectException, InvalidObjectException, MetaException, InvalidInputException, org.apache.thrift.TException { - send_update_table_column_statistics(stats_obj); + send_update_table_column_statistics(stats_obj, validWriteIdList); return recv_update_table_column_statistics(); } - public void send_update_table_column_statistics(ColumnStatistics stats_obj) throws org.apache.thrift.TException + public void send_update_table_column_statistics(ColumnStatistics stats_obj, String validWriteIdList) throws org.apache.thrift.TException { update_table_column_statistics_args args = new update_table_column_statistics_args(); args.setStats_obj(stats_obj); + args.setValidWriteIdList(validWriteIdList); sendBase("update_table_column_statistics", args); } @@ -4123,16 +4140,17 @@ public boolean recv_update_table_column_statistics() throws NoSuchObjectExceptio throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "update_table_column_statistics failed: unknown result"); } - public boolean update_partition_column_statistics(ColumnStatistics stats_obj) throws NoSuchObjectException, InvalidObjectException, MetaException, InvalidInputException, org.apache.thrift.TException + public boolean update_partition_column_statistics(ColumnStatistics stats_obj, String validWriteIdList) throws NoSuchObjectException, InvalidObjectException, MetaException, InvalidInputException, org.apache.thrift.TException { - send_update_partition_column_statistics(stats_obj); + send_update_partition_column_statistics(stats_obj, validWriteIdList); return recv_update_partition_column_statistics(); } - public void send_update_partition_column_statistics(ColumnStatistics stats_obj) throws org.apache.thrift.TException + public void send_update_partition_column_statistics(ColumnStatistics stats_obj, String validWriteIdList) throws 
org.apache.thrift.TException { update_partition_column_statistics_args args = new update_partition_column_statistics_args(); args.setStats_obj(stats_obj); + args.setValidWriteIdList(validWriteIdList); sendBase("update_partition_column_statistics", args); } @@ -8066,24 +8084,27 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa } } - public void create_table(Table tbl, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void create_table(Table tbl, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - create_table_call method_call = new create_table_call(tbl, resultHandler, this, ___protocolFactory, ___transport); + create_table_call method_call = new create_table_call(tbl, validWriteIdList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class create_table_call extends org.apache.thrift.async.TAsyncMethodCall { private Table tbl; - public create_table_call(Table tbl, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String validWriteIdList; + public create_table_call(Table tbl, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.tbl = tbl; + this.validWriteIdList = validWriteIdList; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("create_table", org.apache.thrift.protocol.TMessageType.CALL, 0)); create_table_args args = new create_table_args(); args.setTbl(tbl); + args.setValidWriteIdList(validWriteIdList); args.write(prot); prot.writeMessageEnd(); } @@ -8098,9 +8119,9 @@ public void getResult() throws AlreadyExistsException, InvalidObjectException, M } } - public void create_table_with_environment_context(Table tbl, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void create_table_with_environment_context(Table tbl, EnvironmentContext environment_context, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - create_table_with_environment_context_call method_call = new create_table_with_environment_context_call(tbl, environment_context, resultHandler, this, ___protocolFactory, ___transport); + create_table_with_environment_context_call method_call = new create_table_with_environment_context_call(tbl, environment_context, validWriteIdList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -8108,10 +8129,12 @@ public void create_table_with_environment_context(Table tbl, EnvironmentContext 
@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class create_table_with_environment_context_call extends org.apache.thrift.async.TAsyncMethodCall { private Table tbl; private EnvironmentContext environment_context; - public create_table_with_environment_context_call(Table tbl, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String validWriteIdList; + public create_table_with_environment_context_call(Table tbl, EnvironmentContext environment_context, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.tbl = tbl; this.environment_context = environment_context; + this.validWriteIdList = validWriteIdList; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { @@ -8119,6 +8142,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa create_table_with_environment_context_args args = new create_table_with_environment_context_args(); args.setTbl(tbl); args.setEnvironment_context(environment_context); + args.setValidWriteIdList(validWriteIdList); args.write(prot); prot.writeMessageEnd(); } @@ -8133,9 +8157,9 @@ public void getResult() throws AlreadyExistsException, InvalidObjectException, M } } - public void create_table_with_constraints(Table tbl, List primaryKeys, List foreignKeys, List uniqueConstraints, List notNullConstraints, List defaultConstraints, List checkConstraints, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void create_table_with_constraints(Table tbl, List primaryKeys, List foreignKeys, List uniqueConstraints, List notNullConstraints, List defaultConstraints, List checkConstraints, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - create_table_with_constraints_call method_call = new create_table_with_constraints_call(tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints, resultHandler, this, ___protocolFactory, ___transport); + create_table_with_constraints_call method_call = new create_table_with_constraints_call(tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints, validWriteIdList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -8148,7 +8172,8 @@ public void create_table_with_constraints(Table tbl, List primary private List notNullConstraints; private List defaultConstraints; private List checkConstraints; - public create_table_with_constraints_call(Table tbl, List primaryKeys, List foreignKeys, List uniqueConstraints, List notNullConstraints, List defaultConstraints, List checkConstraints, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, 
org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String validWriteIdList; + public create_table_with_constraints_call(Table tbl, List primaryKeys, List foreignKeys, List uniqueConstraints, List notNullConstraints, List defaultConstraints, List checkConstraints, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.tbl = tbl; this.primaryKeys = primaryKeys; @@ -8157,6 +8182,7 @@ public create_table_with_constraints_call(Table tbl, List primary this.notNullConstraints = notNullConstraints; this.defaultConstraints = defaultConstraints; this.checkConstraints = checkConstraints; + this.validWriteIdList = validWriteIdList; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { @@ -8169,6 +8195,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa args.setNotNullConstraints(notNullConstraints); args.setDefaultConstraints(defaultConstraints); args.setCheckConstraints(checkConstraints); + args.setValidWriteIdList(validWriteIdList); args.write(prot); prot.writeMessageEnd(); } @@ -9008,9 +9035,9 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa } } - public void alter_table(String dbname, String tbl_name, Table new_tbl, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void alter_table(String dbname, String tbl_name, Table new_tbl, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - alter_table_call method_call = new alter_table_call(dbname, tbl_name, new_tbl, resultHandler, this, ___protocolFactory, ___transport); + alter_table_call method_call = new alter_table_call(dbname, tbl_name, new_tbl, validWriteIdList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -9019,11 +9046,13 @@ public void alter_table(String dbname, String tbl_name, Table new_tbl, org.apach private String dbname; private String tbl_name; private Table new_tbl; - public alter_table_call(String dbname, String tbl_name, Table new_tbl, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String validWriteIdList; + public alter_table_call(String dbname, String tbl_name, Table new_tbl, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.dbname = dbname; this.tbl_name = tbl_name; this.new_tbl = new_tbl; + this.validWriteIdList = validWriteIdList; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { @@ -9032,6 +9061,7 
@@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa args.setDbname(dbname); args.setTbl_name(tbl_name); args.setNew_tbl(new_tbl); + args.setValidWriteIdList(validWriteIdList); args.write(prot); prot.writeMessageEnd(); } @@ -9046,9 +9076,9 @@ public void getResult() throws InvalidOperationException, MetaException, org.apa } } - public void alter_table_with_environment_context(String dbname, String tbl_name, Table new_tbl, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void alter_table_with_environment_context(String dbname, String tbl_name, Table new_tbl, EnvironmentContext environment_context, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - alter_table_with_environment_context_call method_call = new alter_table_with_environment_context_call(dbname, tbl_name, new_tbl, environment_context, resultHandler, this, ___protocolFactory, ___transport); + alter_table_with_environment_context_call method_call = new alter_table_with_environment_context_call(dbname, tbl_name, new_tbl, environment_context, validWriteIdList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -9058,12 +9088,14 @@ public void alter_table_with_environment_context(String dbname, String tbl_name, private String tbl_name; private Table new_tbl; private EnvironmentContext environment_context; - public alter_table_with_environment_context_call(String dbname, String tbl_name, Table new_tbl, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String validWriteIdList; + public alter_table_with_environment_context_call(String dbname, String tbl_name, Table new_tbl, EnvironmentContext environment_context, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.dbname = dbname; this.tbl_name = tbl_name; this.new_tbl = new_tbl; this.environment_context = environment_context; + this.validWriteIdList = validWriteIdList; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { @@ -9073,6 +9105,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa args.setTbl_name(tbl_name); args.setNew_tbl(new_tbl); args.setEnvironment_context(environment_context); + args.setValidWriteIdList(validWriteIdList); args.write(prot); prot.writeMessageEnd(); } @@ -9087,9 +9120,9 @@ public void getResult() throws InvalidOperationException, MetaException, org.apa } } - public void alter_table_with_cascade(String dbname, String tbl_name, Table new_tbl, boolean cascade, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void alter_table_with_cascade(String dbname, String tbl_name, Table new_tbl, boolean cascade, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback 
resultHandler) throws org.apache.thrift.TException { checkReady(); - alter_table_with_cascade_call method_call = new alter_table_with_cascade_call(dbname, tbl_name, new_tbl, cascade, resultHandler, this, ___protocolFactory, ___transport); + alter_table_with_cascade_call method_call = new alter_table_with_cascade_call(dbname, tbl_name, new_tbl, cascade, validWriteIdList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -9099,12 +9132,14 @@ public void alter_table_with_cascade(String dbname, String tbl_name, Table new_t private String tbl_name; private Table new_tbl; private boolean cascade; - public alter_table_with_cascade_call(String dbname, String tbl_name, Table new_tbl, boolean cascade, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String validWriteIdList; + public alter_table_with_cascade_call(String dbname, String tbl_name, Table new_tbl, boolean cascade, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.dbname = dbname; this.tbl_name = tbl_name; this.new_tbl = new_tbl; this.cascade = cascade; + this.validWriteIdList = validWriteIdList; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { @@ -9114,6 +9149,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa args.setTbl_name(tbl_name); args.setNew_tbl(new_tbl); args.setCascade(cascade); + args.setValidWriteIdList(validWriteIdList); args.write(prot); prot.writeMessageEnd(); } @@ -9160,24 +9196,27 @@ public AlterTableResponse getResult() throws InvalidOperationException, MetaExce } } - public void add_partition(Partition new_part, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void add_partition(Partition new_part, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - add_partition_call method_call = new add_partition_call(new_part, resultHandler, this, ___protocolFactory, ___transport); + add_partition_call method_call = new add_partition_call(new_part, validWriteIdList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_partition_call extends org.apache.thrift.async.TAsyncMethodCall { private Partition new_part; - public add_partition_call(Partition new_part, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String validWriteIdList; + public add_partition_call(Partition new_part, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler, 
org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.new_part = new_part; + this.validWriteIdList = validWriteIdList; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("add_partition", org.apache.thrift.protocol.TMessageType.CALL, 0)); add_partition_args args = new add_partition_args(); args.setNew_part(new_part); + args.setValidWriteIdList(validWriteIdList); args.write(prot); prot.writeMessageEnd(); } @@ -9192,9 +9231,9 @@ public Partition getResult() throws InvalidObjectException, AlreadyExistsExcepti } } - public void add_partition_with_environment_context(Partition new_part, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void add_partition_with_environment_context(Partition new_part, EnvironmentContext environment_context, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - add_partition_with_environment_context_call method_call = new add_partition_with_environment_context_call(new_part, environment_context, resultHandler, this, ___protocolFactory, ___transport); + add_partition_with_environment_context_call method_call = new add_partition_with_environment_context_call(new_part, environment_context, validWriteIdList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -9202,10 +9241,12 @@ public void add_partition_with_environment_context(Partition new_part, Environme @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_partition_with_environment_context_call extends org.apache.thrift.async.TAsyncMethodCall { private Partition new_part; private EnvironmentContext environment_context; - public add_partition_with_environment_context_call(Partition new_part, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String validWriteIdList; + public add_partition_with_environment_context_call(Partition new_part, EnvironmentContext environment_context, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.new_part = new_part; this.environment_context = environment_context; + this.validWriteIdList = validWriteIdList; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { @@ -9213,6 +9254,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa add_partition_with_environment_context_args args = new add_partition_with_environment_context_args(); args.setNew_part(new_part); 
args.setEnvironment_context(environment_context); + args.setValidWriteIdList(validWriteIdList); args.write(prot); prot.writeMessageEnd(); } @@ -9227,24 +9269,27 @@ public Partition getResult() throws InvalidObjectException, AlreadyExistsExcepti } } - public void add_partitions(List new_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void add_partitions(List new_parts, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - add_partitions_call method_call = new add_partitions_call(new_parts, resultHandler, this, ___protocolFactory, ___transport); + add_partitions_call method_call = new add_partitions_call(new_parts, validWriteIdList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_partitions_call extends org.apache.thrift.async.TAsyncMethodCall { private List new_parts; - public add_partitions_call(List new_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String validWriteIdList; + public add_partitions_call(List new_parts, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.new_parts = new_parts; + this.validWriteIdList = validWriteIdList; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("add_partitions", org.apache.thrift.protocol.TMessageType.CALL, 0)); add_partitions_args args = new add_partitions_args(); args.setNew_parts(new_parts); + args.setValidWriteIdList(validWriteIdList); args.write(prot); prot.writeMessageEnd(); } @@ -9259,24 +9304,27 @@ public int getResult() throws InvalidObjectException, AlreadyExistsException, Me } } - public void add_partitions_pspec(List new_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void add_partitions_pspec(List new_parts, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - add_partitions_pspec_call method_call = new add_partitions_pspec_call(new_parts, resultHandler, this, ___protocolFactory, ___transport); + add_partitions_pspec_call method_call = new add_partitions_pspec_call(new_parts, validWriteIdList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_partitions_pspec_call extends org.apache.thrift.async.TAsyncMethodCall { private List new_parts; - public add_partitions_pspec_call(List new_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler, 
org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String validWriteIdList; + public add_partitions_pspec_call(List new_parts, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.new_parts = new_parts; + this.validWriteIdList = validWriteIdList; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("add_partitions_pspec", org.apache.thrift.protocol.TMessageType.CALL, 0)); add_partitions_pspec_args args = new add_partitions_pspec_args(); args.setNew_parts(new_parts); + args.setValidWriteIdList(validWriteIdList); args.write(prot); prot.writeMessageEnd(); } @@ -9291,9 +9339,9 @@ public int getResult() throws InvalidObjectException, AlreadyExistsException, Me } } - public void append_partition(String db_name, String tbl_name, List part_vals, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void append_partition(String db_name, String tbl_name, List part_vals, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - append_partition_call method_call = new append_partition_call(db_name, tbl_name, part_vals, resultHandler, this, ___protocolFactory, ___transport); + append_partition_call method_call = new append_partition_call(db_name, tbl_name, part_vals, validWriteIdList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -9302,11 +9350,13 @@ public void append_partition(String db_name, String tbl_name, List part_ private String db_name; private String tbl_name; private List part_vals; - public append_partition_call(String db_name, String tbl_name, List part_vals, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String validWriteIdList; + public append_partition_call(String db_name, String tbl_name, List part_vals, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.db_name = db_name; this.tbl_name = tbl_name; this.part_vals = part_vals; + this.validWriteIdList = validWriteIdList; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { @@ -9315,6 +9365,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa args.setDb_name(db_name); args.setTbl_name(tbl_name); args.setPart_vals(part_vals); + args.setValidWriteIdList(validWriteIdList); args.write(prot); prot.writeMessageEnd(); } @@ -9361,9 +9412,9 @@ public 
AddPartitionsResult getResult() throws InvalidObjectException, AlreadyExi } } - public void append_partition_with_environment_context(String db_name, String tbl_name, List part_vals, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void append_partition_with_environment_context(String db_name, String tbl_name, List part_vals, EnvironmentContext environment_context, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - append_partition_with_environment_context_call method_call = new append_partition_with_environment_context_call(db_name, tbl_name, part_vals, environment_context, resultHandler, this, ___protocolFactory, ___transport); + append_partition_with_environment_context_call method_call = new append_partition_with_environment_context_call(db_name, tbl_name, part_vals, environment_context, validWriteIdList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -9373,12 +9424,14 @@ public void append_partition_with_environment_context(String db_name, String tbl private String tbl_name; private List part_vals; private EnvironmentContext environment_context; - public append_partition_with_environment_context_call(String db_name, String tbl_name, List part_vals, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String validWriteIdList; + public append_partition_with_environment_context_call(String db_name, String tbl_name, List part_vals, EnvironmentContext environment_context, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.db_name = db_name; this.tbl_name = tbl_name; this.part_vals = part_vals; this.environment_context = environment_context; + this.validWriteIdList = validWriteIdList; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { @@ -9388,6 +9441,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa args.setTbl_name(tbl_name); args.setPart_vals(part_vals); args.setEnvironment_context(environment_context); + args.setValidWriteIdList(validWriteIdList); args.write(prot); prot.writeMessageEnd(); } @@ -9402,9 +9456,9 @@ public Partition getResult() throws InvalidObjectException, AlreadyExistsExcepti } } - public void append_partition_by_name(String db_name, String tbl_name, String part_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void append_partition_by_name(String db_name, String tbl_name, String part_name, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - append_partition_by_name_call method_call = new append_partition_by_name_call(db_name, tbl_name, part_name, resultHandler, this, ___protocolFactory, ___transport); + 
append_partition_by_name_call method_call = new append_partition_by_name_call(db_name, tbl_name, part_name, validWriteIdList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -9413,11 +9467,13 @@ public void append_partition_by_name(String db_name, String tbl_name, String par private String db_name; private String tbl_name; private String part_name; - public append_partition_by_name_call(String db_name, String tbl_name, String part_name, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String validWriteIdList; + public append_partition_by_name_call(String db_name, String tbl_name, String part_name, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.db_name = db_name; this.tbl_name = tbl_name; this.part_name = part_name; + this.validWriteIdList = validWriteIdList; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { @@ -9426,6 +9482,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa args.setDb_name(db_name); args.setTbl_name(tbl_name); args.setPart_name(part_name); + args.setValidWriteIdList(validWriteIdList); args.write(prot); prot.writeMessageEnd(); } @@ -9440,9 +9497,9 @@ public Partition getResult() throws InvalidObjectException, AlreadyExistsExcepti } } - public void append_partition_by_name_with_environment_context(String db_name, String tbl_name, String part_name, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void append_partition_by_name_with_environment_context(String db_name, String tbl_name, String part_name, EnvironmentContext environment_context, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - append_partition_by_name_with_environment_context_call method_call = new append_partition_by_name_with_environment_context_call(db_name, tbl_name, part_name, environment_context, resultHandler, this, ___protocolFactory, ___transport); + append_partition_by_name_with_environment_context_call method_call = new append_partition_by_name_with_environment_context_call(db_name, tbl_name, part_name, environment_context, validWriteIdList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -9452,12 +9509,14 @@ public void append_partition_by_name_with_environment_context(String db_name, St private String tbl_name; private String part_name; private EnvironmentContext environment_context; - public append_partition_by_name_with_environment_context_call(String db_name, String tbl_name, String part_name, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport 
transport) throws org.apache.thrift.TException { + private String validWriteIdList; + public append_partition_by_name_with_environment_context_call(String db_name, String tbl_name, String part_name, EnvironmentContext environment_context, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.db_name = db_name; this.tbl_name = tbl_name; this.part_name = part_name; this.environment_context = environment_context; + this.validWriteIdList = validWriteIdList; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { @@ -9467,6 +9526,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa args.setTbl_name(tbl_name); args.setPart_name(part_name); args.setEnvironment_context(environment_context); + args.setValidWriteIdList(validWriteIdList); args.write(prot); prot.writeMessageEnd(); } @@ -9721,9 +9781,9 @@ public Partition getResult() throws MetaException, NoSuchObjectException, org.ap } } - public void exchange_partition(Map partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void exchange_partition(Map partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - exchange_partition_call method_call = new exchange_partition_call(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name, resultHandler, this, ___protocolFactory, ___transport); + exchange_partition_call method_call = new exchange_partition_call(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name, validWriteIdList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -9734,13 +9794,15 @@ public void exchange_partition(Map partitionSpecs, String source_ private String source_table_name; private String dest_db; private String dest_table_name; - public exchange_partition_call(Map partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String validWriteIdList; + public exchange_partition_call(Map partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.partitionSpecs = partitionSpecs; this.source_db = source_db; this.source_table_name = source_table_name; this.dest_db = dest_db; this.dest_table_name = dest_table_name; + 
this.validWriteIdList = validWriteIdList; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { @@ -9751,6 +9813,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa args.setSource_table_name(source_table_name); args.setDest_db(dest_db); args.setDest_table_name(dest_table_name); + args.setValidWriteIdList(validWriteIdList); args.write(prot); prot.writeMessageEnd(); } @@ -9765,9 +9828,9 @@ public Partition getResult() throws MetaException, NoSuchObjectException, Invali } } - public void exchange_partitions(Map partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void exchange_partitions(Map partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - exchange_partitions_call method_call = new exchange_partitions_call(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name, resultHandler, this, ___protocolFactory, ___transport); + exchange_partitions_call method_call = new exchange_partitions_call(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name, validWriteIdList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -9778,13 +9841,15 @@ public void exchange_partitions(Map partitionSpecs, String source private String source_table_name; private String dest_db; private String dest_table_name; - public exchange_partitions_call(Map partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String validWriteIdList; + public exchange_partitions_call(Map partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.partitionSpecs = partitionSpecs; this.source_db = source_db; this.source_table_name = source_table_name; this.dest_db = dest_db; this.dest_table_name = dest_table_name; + this.validWriteIdList = validWriteIdList; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { @@ -9795,6 +9860,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa args.setSource_table_name(source_table_name); args.setDest_db(dest_db); args.setDest_table_name(dest_table_name); + args.setValidWriteIdList(validWriteIdList); args.write(prot); prot.writeMessageEnd(); } @@ -11103,24 +11169,27 @@ public CheckConstraintsResponse getResult() throws MetaException, NoSuchObjectEx } } - public void update_table_column_statistics(ColumnStatistics stats_obj, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws 
org.apache.thrift.TException { + public void update_table_column_statistics(ColumnStatistics stats_obj, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - update_table_column_statistics_call method_call = new update_table_column_statistics_call(stats_obj, resultHandler, this, ___protocolFactory, ___transport); + update_table_column_statistics_call method_call = new update_table_column_statistics_call(stats_obj, validWriteIdList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class update_table_column_statistics_call extends org.apache.thrift.async.TAsyncMethodCall { private ColumnStatistics stats_obj; - public update_table_column_statistics_call(ColumnStatistics stats_obj, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String validWriteIdList; + public update_table_column_statistics_call(ColumnStatistics stats_obj, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.stats_obj = stats_obj; + this.validWriteIdList = validWriteIdList; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("update_table_column_statistics", org.apache.thrift.protocol.TMessageType.CALL, 0)); update_table_column_statistics_args args = new update_table_column_statistics_args(); args.setStats_obj(stats_obj); + args.setValidWriteIdList(validWriteIdList); args.write(prot); prot.writeMessageEnd(); } @@ -11135,24 +11204,27 @@ public boolean getResult() throws NoSuchObjectException, InvalidObjectException, } } - public void update_partition_column_statistics(ColumnStatistics stats_obj, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void update_partition_column_statistics(ColumnStatistics stats_obj, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - update_partition_column_statistics_call method_call = new update_partition_column_statistics_call(stats_obj, resultHandler, this, ___protocolFactory, ___transport); + update_partition_column_statistics_call method_call = new update_partition_column_statistics_call(stats_obj, validWriteIdList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class update_partition_column_statistics_call extends org.apache.thrift.async.TAsyncMethodCall { private ColumnStatistics stats_obj; - public update_partition_column_statistics_call(ColumnStatistics stats_obj, org.apache.thrift.async.AsyncMethodCallback 
resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String validWriteIdList; + public update_partition_column_statistics_call(ColumnStatistics stats_obj, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.stats_obj = stats_obj; + this.validWriteIdList = validWriteIdList; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("update_partition_column_statistics", org.apache.thrift.protocol.TMessageType.CALL, 0)); update_partition_column_statistics_args args = new update_partition_column_statistics_args(); args.setStats_obj(stats_obj); + args.setValidWriteIdList(validWriteIdList); args.write(prot); prot.writeMessageEnd(); } @@ -15845,7 +15917,7 @@ protected boolean isOneway() { public create_table_result getResult(I iface, create_table_args args) throws org.apache.thrift.TException { create_table_result result = new create_table_result(); try { - iface.create_table(args.tbl); + iface.create_table(args.tbl, args.validWriteIdList); } catch (AlreadyExistsException o1) { result.o1 = o1; } catch (InvalidObjectException o2) { @@ -15875,7 +15947,7 @@ protected boolean isOneway() { public create_table_with_environment_context_result getResult(I iface, create_table_with_environment_context_args args) throws org.apache.thrift.TException { create_table_with_environment_context_result result = new create_table_with_environment_context_result(); try { - iface.create_table_with_environment_context(args.tbl, args.environment_context); + iface.create_table_with_environment_context(args.tbl, args.environment_context, args.validWriteIdList); } catch (AlreadyExistsException o1) { result.o1 = o1; } catch (InvalidObjectException o2) { @@ -15905,7 +15977,7 @@ protected boolean isOneway() { public create_table_with_constraints_result getResult(I iface, create_table_with_constraints_args args) throws org.apache.thrift.TException { create_table_with_constraints_result result = new create_table_with_constraints_result(); try { - iface.create_table_with_constraints(args.tbl, args.primaryKeys, args.foreignKeys, args.uniqueConstraints, args.notNullConstraints, args.defaultConstraints, args.checkConstraints); + iface.create_table_with_constraints(args.tbl, args.primaryKeys, args.foreignKeys, args.uniqueConstraints, args.notNullConstraints, args.defaultConstraints, args.checkConstraints, args.validWriteIdList); } catch (AlreadyExistsException o1) { result.o1 = o1; } catch (InvalidObjectException o2) { @@ -16545,7 +16617,7 @@ protected boolean isOneway() { public alter_table_result getResult(I iface, alter_table_args args) throws org.apache.thrift.TException { alter_table_result result = new alter_table_result(); try { - iface.alter_table(args.dbname, args.tbl_name, args.new_tbl); + iface.alter_table(args.dbname, args.tbl_name, args.new_tbl, args.validWriteIdList); } catch (InvalidOperationException o1) { result.o1 = o1; } catch (MetaException o2) { @@ -16571,7 +16643,7 @@ protected boolean isOneway() { public 
alter_table_with_environment_context_result getResult(I iface, alter_table_with_environment_context_args args) throws org.apache.thrift.TException { alter_table_with_environment_context_result result = new alter_table_with_environment_context_result(); try { - iface.alter_table_with_environment_context(args.dbname, args.tbl_name, args.new_tbl, args.environment_context); + iface.alter_table_with_environment_context(args.dbname, args.tbl_name, args.new_tbl, args.environment_context, args.validWriteIdList); } catch (InvalidOperationException o1) { result.o1 = o1; } catch (MetaException o2) { @@ -16597,7 +16669,7 @@ protected boolean isOneway() { public alter_table_with_cascade_result getResult(I iface, alter_table_with_cascade_args args) throws org.apache.thrift.TException { alter_table_with_cascade_result result = new alter_table_with_cascade_result(); try { - iface.alter_table_with_cascade(args.dbname, args.tbl_name, args.new_tbl, args.cascade); + iface.alter_table_with_cascade(args.dbname, args.tbl_name, args.new_tbl, args.cascade, args.validWriteIdList); } catch (InvalidOperationException o1) { result.o1 = o1; } catch (MetaException o2) { @@ -16649,7 +16721,7 @@ protected boolean isOneway() { public add_partition_result getResult(I iface, add_partition_args args) throws org.apache.thrift.TException { add_partition_result result = new add_partition_result(); try { - result.success = iface.add_partition(args.new_part); + result.success = iface.add_partition(args.new_part, args.validWriteIdList); } catch (InvalidObjectException o1) { result.o1 = o1; } catch (AlreadyExistsException o2) { @@ -16677,7 +16749,7 @@ protected boolean isOneway() { public add_partition_with_environment_context_result getResult(I iface, add_partition_with_environment_context_args args) throws org.apache.thrift.TException { add_partition_with_environment_context_result result = new add_partition_with_environment_context_result(); try { - result.success = iface.add_partition_with_environment_context(args.new_part, args.environment_context); + result.success = iface.add_partition_with_environment_context(args.new_part, args.environment_context, args.validWriteIdList); } catch (InvalidObjectException o1) { result.o1 = o1; } catch (AlreadyExistsException o2) { @@ -16705,7 +16777,7 @@ protected boolean isOneway() { public add_partitions_result getResult(I iface, add_partitions_args args) throws org.apache.thrift.TException { add_partitions_result result = new add_partitions_result(); try { - result.success = iface.add_partitions(args.new_parts); + result.success = iface.add_partitions(args.new_parts, args.validWriteIdList); result.setSuccessIsSet(true); } catch (InvalidObjectException o1) { result.o1 = o1; @@ -16734,7 +16806,7 @@ protected boolean isOneway() { public add_partitions_pspec_result getResult(I iface, add_partitions_pspec_args args) throws org.apache.thrift.TException { add_partitions_pspec_result result = new add_partitions_pspec_result(); try { - result.success = iface.add_partitions_pspec(args.new_parts); + result.success = iface.add_partitions_pspec(args.new_parts, args.validWriteIdList); result.setSuccessIsSet(true); } catch (InvalidObjectException o1) { result.o1 = o1; @@ -16763,7 +16835,7 @@ protected boolean isOneway() { public append_partition_result getResult(I iface, append_partition_args args) throws org.apache.thrift.TException { append_partition_result result = new append_partition_result(); try { - result.success = iface.append_partition(args.db_name, args.tbl_name, args.part_vals); + 
result.success = iface.append_partition(args.db_name, args.tbl_name, args.part_vals, args.validWriteIdList); } catch (InvalidObjectException o1) { result.o1 = o1; } catch (AlreadyExistsException o2) { @@ -16819,7 +16891,7 @@ protected boolean isOneway() { public append_partition_with_environment_context_result getResult(I iface, append_partition_with_environment_context_args args) throws org.apache.thrift.TException { append_partition_with_environment_context_result result = new append_partition_with_environment_context_result(); try { - result.success = iface.append_partition_with_environment_context(args.db_name, args.tbl_name, args.part_vals, args.environment_context); + result.success = iface.append_partition_with_environment_context(args.db_name, args.tbl_name, args.part_vals, args.environment_context, args.validWriteIdList); } catch (InvalidObjectException o1) { result.o1 = o1; } catch (AlreadyExistsException o2) { @@ -16847,7 +16919,7 @@ protected boolean isOneway() { public append_partition_by_name_result getResult(I iface, append_partition_by_name_args args) throws org.apache.thrift.TException { append_partition_by_name_result result = new append_partition_by_name_result(); try { - result.success = iface.append_partition_by_name(args.db_name, args.tbl_name, args.part_name); + result.success = iface.append_partition_by_name(args.db_name, args.tbl_name, args.part_name, args.validWriteIdList); } catch (InvalidObjectException o1) { result.o1 = o1; } catch (AlreadyExistsException o2) { @@ -16875,7 +16947,7 @@ protected boolean isOneway() { public append_partition_by_name_with_environment_context_result getResult(I iface, append_partition_by_name_with_environment_context_args args) throws org.apache.thrift.TException { append_partition_by_name_with_environment_context_result result = new append_partition_by_name_with_environment_context_result(); try { - result.success = iface.append_partition_by_name_with_environment_context(args.db_name, args.tbl_name, args.part_name, args.environment_context); + result.success = iface.append_partition_by_name_with_environment_context(args.db_name, args.tbl_name, args.part_name, args.environment_context, args.validWriteIdList); } catch (InvalidObjectException o1) { result.o1 = o1; } catch (AlreadyExistsException o2) { @@ -17063,7 +17135,7 @@ protected boolean isOneway() { public exchange_partition_result getResult(I iface, exchange_partition_args args) throws org.apache.thrift.TException { exchange_partition_result result = new exchange_partition_result(); try { - result.success = iface.exchange_partition(args.partitionSpecs, args.source_db, args.source_table_name, args.dest_db, args.dest_table_name); + result.success = iface.exchange_partition(args.partitionSpecs, args.source_db, args.source_table_name, args.dest_db, args.dest_table_name, args.validWriteIdList); } catch (MetaException o1) { result.o1 = o1; } catch (NoSuchObjectException o2) { @@ -17093,7 +17165,7 @@ protected boolean isOneway() { public exchange_partitions_result getResult(I iface, exchange_partitions_args args) throws org.apache.thrift.TException { exchange_partitions_result result = new exchange_partitions_result(); try { - result.success = iface.exchange_partitions(args.partitionSpecs, args.source_db, args.source_table_name, args.dest_db, args.dest_table_name); + result.success = iface.exchange_partitions(args.partitionSpecs, args.source_db, args.source_table_name, args.dest_db, args.dest_table_name, args.validWriteIdList); } catch (MetaException o1) { result.o1 = o1; } catch 
(NoSuchObjectException o2) { @@ -18044,7 +18116,7 @@ protected boolean isOneway() { public update_table_column_statistics_result getResult(I iface, update_table_column_statistics_args args) throws org.apache.thrift.TException { update_table_column_statistics_result result = new update_table_column_statistics_result(); try { - result.success = iface.update_table_column_statistics(args.stats_obj); + result.success = iface.update_table_column_statistics(args.stats_obj, args.validWriteIdList); result.setSuccessIsSet(true); } catch (NoSuchObjectException o1) { result.o1 = o1; @@ -18075,7 +18147,7 @@ protected boolean isOneway() { public update_partition_column_statistics_result getResult(I iface, update_partition_column_statistics_args args) throws org.apache.thrift.TException { update_partition_column_statistics_result result = new update_partition_column_statistics_result(); try { - result.success = iface.update_partition_column_statistics(args.stats_obj); + result.success = iface.update_partition_column_statistics(args.stats_obj, args.validWriteIdList); result.setSuccessIsSet(true); } catch (NoSuchObjectException o1) { result.o1 = o1; @@ -22619,7 +22691,7 @@ protected boolean isOneway() { } public void start(I iface, create_table_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.create_table(args.tbl,resultHandler); + iface.create_table(args.tbl, args.validWriteIdList,resultHandler); } } @@ -22690,7 +22762,7 @@ protected boolean isOneway() { } public void start(I iface, create_table_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.create_table_with_environment_context(args.tbl, args.environment_context,resultHandler); + iface.create_table_with_environment_context(args.tbl, args.environment_context, args.validWriteIdList,resultHandler); } } @@ -22761,7 +22833,7 @@ protected boolean isOneway() { } public void start(I iface, create_table_with_constraints_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.create_table_with_constraints(args.tbl, args.primaryKeys, args.foreignKeys, args.uniqueConstraints, args.notNullConstraints, args.defaultConstraints, args.checkConstraints,resultHandler); + iface.create_table_with_constraints(args.tbl, args.primaryKeys, args.foreignKeys, args.uniqueConstraints, args.notNullConstraints, args.defaultConstraints, args.checkConstraints, args.validWriteIdList,resultHandler); } } @@ -24268,7 +24340,7 @@ protected boolean isOneway() { } public void start(I iface, alter_table_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.alter_table(args.dbname, args.tbl_name, args.new_tbl,resultHandler); + iface.alter_table(args.dbname, args.tbl_name, args.new_tbl, args.validWriteIdList,resultHandler); } } @@ -24329,7 +24401,7 @@ protected boolean isOneway() { } public void start(I iface, alter_table_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.alter_table_with_environment_context(args.dbname, args.tbl_name, args.new_tbl, args.environment_context,resultHandler); + iface.alter_table_with_environment_context(args.dbname, args.tbl_name, args.new_tbl, args.environment_context, args.validWriteIdList,resultHandler); } } @@ -24390,7 +24462,7 @@ protected boolean isOneway() { } public void start(I iface, alter_table_with_cascade_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) 
throws TException { - iface.alter_table_with_cascade(args.dbname, args.tbl_name, args.new_tbl, args.cascade,resultHandler); + iface.alter_table_with_cascade(args.dbname, args.tbl_name, args.new_tbl, args.cascade, args.validWriteIdList,resultHandler); } } @@ -24519,7 +24591,7 @@ protected boolean isOneway() { } public void start(I iface, add_partition_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.add_partition(args.new_part,resultHandler); + iface.add_partition(args.new_part, args.validWriteIdList,resultHandler); } } @@ -24586,7 +24658,7 @@ protected boolean isOneway() { } public void start(I iface, add_partition_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.add_partition_with_environment_context(args.new_part, args.environment_context,resultHandler); + iface.add_partition_with_environment_context(args.new_part, args.environment_context, args.validWriteIdList,resultHandler); } } @@ -24654,7 +24726,7 @@ protected boolean isOneway() { } public void start(I iface, add_partitions_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.add_partitions(args.new_parts,resultHandler); + iface.add_partitions(args.new_parts, args.validWriteIdList,resultHandler); } } @@ -24722,7 +24794,7 @@ protected boolean isOneway() { } public void start(I iface, add_partitions_pspec_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.add_partitions_pspec(args.new_parts,resultHandler); + iface.add_partitions_pspec(args.new_parts, args.validWriteIdList,resultHandler); } } @@ -24789,7 +24861,7 @@ protected boolean isOneway() { } public void start(I iface, append_partition_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.append_partition(args.db_name, args.tbl_name, args.part_vals,resultHandler); + iface.append_partition(args.db_name, args.tbl_name, args.part_vals, args.validWriteIdList,resultHandler); } } @@ -24923,7 +24995,7 @@ protected boolean isOneway() { } public void start(I iface, append_partition_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.append_partition_with_environment_context(args.db_name, args.tbl_name, args.part_vals, args.environment_context,resultHandler); + iface.append_partition_with_environment_context(args.db_name, args.tbl_name, args.part_vals, args.environment_context, args.validWriteIdList,resultHandler); } } @@ -24990,7 +25062,7 @@ protected boolean isOneway() { } public void start(I iface, append_partition_by_name_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.append_partition_by_name(args.db_name, args.tbl_name, args.part_name,resultHandler); + iface.append_partition_by_name(args.db_name, args.tbl_name, args.part_name, args.validWriteIdList,resultHandler); } } @@ -25057,7 +25129,7 @@ protected boolean isOneway() { } public void start(I iface, append_partition_by_name_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.append_partition_by_name_with_environment_context(args.db_name, args.tbl_name, args.part_name, args.environment_context,resultHandler); + iface.append_partition_by_name_with_environment_context(args.db_name, args.tbl_name, args.part_name, args.environment_context, args.validWriteIdList,resultHandler); } } @@ -25505,7 
+25577,7 @@ protected boolean isOneway() {
}
public void start(I iface, exchange_partition_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException {
- iface.exchange_partition(args.partitionSpecs, args.source_db, args.source_table_name, args.dest_db, args.dest_table_name,resultHandler);
+ iface.exchange_partition(args.partitionSpecs, args.source_db, args.source_table_name, args.dest_db, args.dest_table_name, args.validWriteIdList,resultHandler);
}
}
@@ -25577,7 +25649,7 @@ protected boolean isOneway() {
}
public void start(I iface, exchange_partitions_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException {
- iface.exchange_partitions(args.partitionSpecs, args.source_db, args.source_table_name, args.dest_db, args.dest_table_name,resultHandler);
+ iface.exchange_partitions(args.partitionSpecs, args.source_db, args.source_table_name, args.dest_db, args.dest_table_name, args.validWriteIdList,resultHandler);
}
}
@@ -27837,7 +27909,7 @@ protected boolean isOneway() {
}
public void start(I iface, update_table_column_statistics_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException {
- iface.update_table_column_statistics(args.stats_obj,resultHandler);
+ iface.update_table_column_statistics(args.stats_obj, args.validWriteIdList,resultHandler);
}
}
@@ -27910,7 +27982,7 @@ protected boolean isOneway() {
}
public void start(I iface, update_partition_column_statistics_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException {
- iface.update_partition_column_statistics(args.stats_obj,resultHandler);
+ iface.update_partition_column_statistics(args.stats_obj, args.validWriteIdList,resultHandler);
}
}
@@ -55338,6 +55410,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_with_envi
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("create_table_args");
private static final org.apache.thrift.protocol.TField TBL_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+ private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)2);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
@@ -55346,10 +55419,12 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_with_envi
}
private Table tbl; // required
+ private String validWriteIdList; // required
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them.
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - TBL((short)1, "tbl"); + TBL((short)1, "tbl"), + VALID_WRITE_ID_LIST((short)2, "validWriteIdList"); private static final Map byName = new HashMap(); @@ -55366,6 +55441,8 @@ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { case 1: // TBL return TBL; + case 2: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; default: return null; } @@ -55411,6 +55488,8 @@ public String getFieldName() { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.TBL, new org.apache.thrift.meta_data.FieldMetaData("tbl", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Table.class))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(create_table_args.class, metaDataMap); } @@ -55419,10 +55498,12 @@ public create_table_args() { } public create_table_args( - Table tbl) + Table tbl, + String validWriteIdList) { this(); this.tbl = tbl; + this.validWriteIdList = validWriteIdList; } /** @@ -55432,6 +55513,9 @@ public create_table_args(create_table_args other) { if (other.isSetTbl()) { this.tbl = new Table(other.tbl); } + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } } public create_table_args deepCopy() { @@ -55441,6 +55525,7 @@ public create_table_args deepCopy() { @Override public void clear() { this.tbl = null; + this.validWriteIdList = null; } public Table getTbl() { @@ -55466,6 +55551,29 @@ public void setTblIsSet(boolean value) { } } + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case TBL: @@ -55476,6 +55584,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + } } @@ -55484,6 +55600,9 @@ public Object getFieldValue(_Fields field) { case TBL: return getTbl(); + case VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + } throw new IllegalStateException(); } @@ -55497,6 +55616,8 @@ public boolean isSet(_Fields field) { switch (field) { case TBL: return isSetTbl(); + case VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); } throw new IllegalStateException(); } @@ -55523,6 +55644,15 @@ public boolean equals(create_table_args that) { return false; } + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if 
(this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if (!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + return true; } @@ -55535,6 +55665,11 @@ public int hashCode() { if (present_tbl) list.add(tbl); + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + return list.hashCode(); } @@ -55556,6 +55691,16 @@ public int compareTo(create_table_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -55583,6 +55728,14 @@ public String toString() { sb.append(this.tbl); } first = false; + if (!first) sb.append(", "); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -55638,6 +55791,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_args s org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 2: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -55656,6 +55817,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_args struct.tbl.write(oprot); oprot.writeFieldEnd(); } + if (struct.validWriteIdList != null) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -55677,21 +55843,31 @@ public void write(org.apache.thrift.protocol.TProtocol prot, create_table_args s if (struct.isSetTbl()) { optionals.set(0); } - oprot.writeBitSet(optionals, 1); + if (struct.isSetValidWriteIdList()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); if (struct.isSetTbl()) { struct.tbl.write(oprot); } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, create_table_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); + BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { struct.tbl = new Table(); struct.tbl.read(iprot); struct.setTblIsSet(true); } + if (incoming.get(1)) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } } } @@ -56380,6 +56556,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, create_table_result private static final org.apache.thrift.protocol.TField TBL_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final org.apache.thrift.protocol.TField ENVIRONMENT_CONTEXT_FIELD_DESC = new 
org.apache.thrift.protocol.TField("environment_context", org.apache.thrift.protocol.TType.STRUCT, (short)2); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)3); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -56389,11 +56566,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, create_table_result private Table tbl; // required private EnvironmentContext environment_context; // required + private String validWriteIdList; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { TBL((short)1, "tbl"), - ENVIRONMENT_CONTEXT((short)2, "environment_context"); + ENVIRONMENT_CONTEXT((short)2, "environment_context"), + VALID_WRITE_ID_LIST((short)3, "validWriteIdList"); private static final Map byName = new HashMap(); @@ -56412,6 +56591,8 @@ public static _Fields findByThriftId(int fieldId) { return TBL; case 2: // ENVIRONMENT_CONTEXT return ENVIRONMENT_CONTEXT; + case 3: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; default: return null; } @@ -56459,6 +56640,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Table.class))); tmpMap.put(_Fields.ENVIRONMENT_CONTEXT, new org.apache.thrift.meta_data.FieldMetaData("environment_context", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, EnvironmentContext.class))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(create_table_with_environment_context_args.class, metaDataMap); } @@ -56468,11 +56651,13 @@ public create_table_with_environment_context_args() { public create_table_with_environment_context_args( Table tbl, - EnvironmentContext environment_context) + EnvironmentContext environment_context, + String validWriteIdList) { this(); this.tbl = tbl; this.environment_context = environment_context; + this.validWriteIdList = validWriteIdList; } /** @@ -56485,6 +56670,9 @@ public create_table_with_environment_context_args(create_table_with_environment_ if (other.isSetEnvironment_context()) { this.environment_context = new EnvironmentContext(other.environment_context); } + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } } public create_table_with_environment_context_args deepCopy() { @@ -56495,6 +56683,7 @@ public create_table_with_environment_context_args deepCopy() { public void clear() { this.tbl = null; this.environment_context = null; + this.validWriteIdList = null; } public Table getTbl() { @@ -56543,6 +56732,29 @@ public void setEnvironment_contextIsSet(boolean value) { } } + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been 
assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case TBL: @@ -56561,6 +56773,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + } } @@ -56572,6 +56792,9 @@ public Object getFieldValue(_Fields field) { case ENVIRONMENT_CONTEXT: return getEnvironment_context(); + case VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + } throw new IllegalStateException(); } @@ -56587,6 +56810,8 @@ public boolean isSet(_Fields field) { return isSetTbl(); case ENVIRONMENT_CONTEXT: return isSetEnvironment_context(); + case VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); } throw new IllegalStateException(); } @@ -56622,6 +56847,15 @@ public boolean equals(create_table_with_environment_context_args that) { return false; } + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if (!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + return true; } @@ -56639,6 +56873,11 @@ public int hashCode() { if (present_environment_context) list.add(environment_context); + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + return list.hashCode(); } @@ -56670,6 +56909,16 @@ public int compareTo(create_table_with_environment_context_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -56705,6 +56954,14 @@ public String toString() { sb.append(this.environment_context); } first = false; + if (!first) sb.append(", "); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -56772,6 +57029,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_e org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 3: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -56795,6 +57060,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ struct.environment_context.write(oprot); oprot.writeFieldEnd(); } + if (struct.validWriteIdList != null) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + 
oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -56819,19 +57089,25 @@ public void write(org.apache.thrift.protocol.TProtocol prot, create_table_with_e if (struct.isSetEnvironment_context()) { optionals.set(1); } - oprot.writeBitSet(optionals, 2); + if (struct.isSetValidWriteIdList()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); if (struct.isSetTbl()) { struct.tbl.write(oprot); } if (struct.isSetEnvironment_context()) { struct.environment_context.write(oprot); } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, create_table_with_environment_context_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(2); + BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { struct.tbl = new Table(); struct.tbl.read(iprot); @@ -56842,6 +57118,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, create_table_with_en struct.environment_context.read(iprot); struct.setEnvironment_contextIsSet(true); } + if (incoming.get(2)) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } } } @@ -57535,6 +57815,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, create_table_with_en private static final org.apache.thrift.protocol.TField NOT_NULL_CONSTRAINTS_FIELD_DESC = new org.apache.thrift.protocol.TField("notNullConstraints", org.apache.thrift.protocol.TType.LIST, (short)5); private static final org.apache.thrift.protocol.TField DEFAULT_CONSTRAINTS_FIELD_DESC = new org.apache.thrift.protocol.TField("defaultConstraints", org.apache.thrift.protocol.TType.LIST, (short)6); private static final org.apache.thrift.protocol.TField CHECK_CONSTRAINTS_FIELD_DESC = new org.apache.thrift.protocol.TField("checkConstraints", org.apache.thrift.protocol.TType.LIST, (short)7); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)8); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -57549,6 +57830,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, create_table_with_en private List notNullConstraints; // required private List defaultConstraints; // required private List checkConstraints; // required + private String validWriteIdList; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -57558,7 +57840,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, create_table_with_en UNIQUE_CONSTRAINTS((short)4, "uniqueConstraints"), NOT_NULL_CONSTRAINTS((short)5, "notNullConstraints"), DEFAULT_CONSTRAINTS((short)6, "defaultConstraints"), - CHECK_CONSTRAINTS((short)7, "checkConstraints"); + CHECK_CONSTRAINTS((short)7, "checkConstraints"), + VALID_WRITE_ID_LIST((short)8, "validWriteIdList"); private static final Map byName = new HashMap(); @@ -57587,6 +57870,8 @@ public static _Fields findByThriftId(int fieldId) { return DEFAULT_CONSTRAINTS; case 7: // CHECK_CONSTRAINTS return CHECK_CONSTRAINTS; + case 8: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; default: return null; } @@ -57650,6 +57935,8 @@ public String getFieldName() { tmpMap.put(_Fields.CHECK_CONSTRAINTS, new org.apache.thrift.meta_data.FieldMetaData("checkConstraints", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SQLCheckConstraint.class)))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(create_table_with_constraints_args.class, metaDataMap); } @@ -57664,7 +57951,8 @@ public create_table_with_constraints_args( List uniqueConstraints, List notNullConstraints, List defaultConstraints, - List checkConstraints) + List checkConstraints, + String validWriteIdList) { this(); this.tbl = tbl; @@ -57674,6 +57962,7 @@ public create_table_with_constraints_args( this.notNullConstraints = notNullConstraints; this.defaultConstraints = defaultConstraints; this.checkConstraints = checkConstraints; + this.validWriteIdList = validWriteIdList; } /** @@ -57725,6 +58014,9 @@ public create_table_with_constraints_args(create_table_with_constraints_args oth } this.checkConstraints = __this__checkConstraints; } + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } } public create_table_with_constraints_args deepCopy() { @@ -57740,6 +58032,7 @@ public void clear() { this.notNullConstraints = null; this.defaultConstraints = null; this.checkConstraints = null; + this.validWriteIdList = null; } public Table getTbl() { @@ -57993,6 +58286,29 @@ public void setCheckConstraintsIsSet(boolean value) { } } + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case TBL: @@ -58051,6 +58367,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + 
setValidWriteIdList((String)value); + } + break; + } } @@ -58077,6 +58401,9 @@ public Object getFieldValue(_Fields field) { case CHECK_CONSTRAINTS: return getCheckConstraints(); + case VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + } throw new IllegalStateException(); } @@ -58102,6 +58429,8 @@ public boolean isSet(_Fields field) { return isSetDefaultConstraints(); case CHECK_CONSTRAINTS: return isSetCheckConstraints(); + case VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); } throw new IllegalStateException(); } @@ -58182,6 +58511,15 @@ public boolean equals(create_table_with_constraints_args that) { return false; } + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if (!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + return true; } @@ -58224,6 +58562,11 @@ public int hashCode() { if (present_checkConstraints) list.add(checkConstraints); + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + return list.hashCode(); } @@ -58305,6 +58648,16 @@ public int compareTo(create_table_with_constraints_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -58380,6 +58733,14 @@ public String toString() { sb.append(this.checkConstraints); } first = false; + if (!first) sb.append(", "); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -58549,6 +58910,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 8: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -58639,6 +59008,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ } oprot.writeFieldEnd(); } + if (struct.validWriteIdList != null) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -58678,7 +59052,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, create_table_with_c if (struct.isSetCheckConstraints()) { optionals.set(6); } - oprot.writeBitSet(optionals, 7); + if (struct.isSetValidWriteIdList()) { + optionals.set(7); + } + oprot.writeBitSet(optionals, 8); if (struct.isSetTbl()) { struct.tbl.write(oprot); } @@ -58736,12 +59113,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, 
create_table_with_c } } } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, create_table_with_constraints_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(7); + BitSet incoming = iprot.readBitSet(8); if (incoming.get(0)) { struct.tbl = new Table(); struct.tbl.read(iprot); @@ -58831,6 +59211,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, create_table_with_co } struct.setCheckConstraintsIsSet(true); } + if (incoming.get(7)) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } } } @@ -82620,6 +83004,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_names_by_f private static final org.apache.thrift.protocol.TField DBNAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbname", org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField NEW_TBL_FIELD_DESC = new org.apache.thrift.protocol.TField("new_tbl", org.apache.thrift.protocol.TType.STRUCT, (short)3); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)4); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -82630,12 +83015,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_names_by_f private String dbname; // required private String tbl_name; // required private Table new_tbl; // required + private String validWriteIdList; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DBNAME((short)1, "dbname"), TBL_NAME((short)2, "tbl_name"), - NEW_TBL((short)3, "new_tbl"); + NEW_TBL((short)3, "new_tbl"), + VALID_WRITE_ID_LIST((short)4, "validWriteIdList"); private static final Map byName = new HashMap(); @@ -82656,6 +83043,8 @@ public static _Fields findByThriftId(int fieldId) { return TBL_NAME; case 3: // NEW_TBL return NEW_TBL; + case 4: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; default: return null; } @@ -82705,6 +83094,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.NEW_TBL, new org.apache.thrift.meta_data.FieldMetaData("new_tbl", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Table.class))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(alter_table_args.class, metaDataMap); } @@ -82715,12 +83106,14 @@ public alter_table_args() { public alter_table_args( String dbname, String tbl_name, - Table new_tbl) + Table new_tbl, + String validWriteIdList) { this(); this.dbname = dbname; this.tbl_name = tbl_name; this.new_tbl = new_tbl; + this.validWriteIdList = validWriteIdList; } /** @@ -82736,6 +83129,9 @@ public alter_table_args(alter_table_args other) { if (other.isSetNew_tbl()) { this.new_tbl = new Table(other.new_tbl); } + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } } public alter_table_args deepCopy() { @@ -82747,6 +83143,7 @@ public void clear() { this.dbname = null; this.tbl_name = null; this.new_tbl = null; + this.validWriteIdList = null; } public String getDbname() { @@ -82818,6 +83215,29 @@ public void setNew_tblIsSet(boolean value) { } } + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DBNAME: @@ -82844,6 +83264,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + } } @@ -82858,6 +83286,9 @@ public Object getFieldValue(_Fields field) { case NEW_TBL: return getNew_tbl(); + case VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + } throw new IllegalStateException(); } @@ -82875,6 +83306,8 @@ public boolean isSet(_Fields field) { return isSetTbl_name(); case NEW_TBL: return isSetNew_tbl(); + case VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); } throw new IllegalStateException(); } @@ -82919,6 +83352,15 @@ public boolean equals(alter_table_args that) { return false; } + boolean 
this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if (!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + return true; } @@ -82941,6 +83383,11 @@ public int hashCode() { if (present_new_tbl) list.add(new_tbl); + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + return list.hashCode(); } @@ -82982,6 +83429,16 @@ public int compareTo(alter_table_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -83025,6 +83482,14 @@ public String toString() { sb.append(this.new_tbl); } first = false; + if (!first) sb.append(", "); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -83096,6 +83561,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_table_args st org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 4: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -83124,6 +83597,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_table_args s struct.new_tbl.write(oprot); oprot.writeFieldEnd(); } + if (struct.validWriteIdList != null) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -83151,7 +83629,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_table_args st if (struct.isSetNew_tbl()) { optionals.set(2); } - oprot.writeBitSet(optionals, 3); + if (struct.isSetValidWriteIdList()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); if (struct.isSetDbname()) { oprot.writeString(struct.dbname); } @@ -83161,12 +83642,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_table_args st if (struct.isSetNew_tbl()) { struct.new_tbl.write(oprot); } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, alter_table_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(3); + BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { struct.dbname = iprot.readString(); struct.setDbnameIsSet(true); @@ -83180,6 +83664,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_table_args str struct.new_tbl.read(iprot); 
struct.setNew_tblIsSet(true); } + if (incoming.get(3)) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } } } @@ -83658,6 +84146,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_table_result s private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField NEW_TBL_FIELD_DESC = new org.apache.thrift.protocol.TField("new_tbl", org.apache.thrift.protocol.TType.STRUCT, (short)3); private static final org.apache.thrift.protocol.TField ENVIRONMENT_CONTEXT_FIELD_DESC = new org.apache.thrift.protocol.TField("environment_context", org.apache.thrift.protocol.TType.STRUCT, (short)4); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)5); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -83669,13 +84158,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_table_result s private String tbl_name; // required private Table new_tbl; // required private EnvironmentContext environment_context; // required + private String validWriteIdList; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DBNAME((short)1, "dbname"), TBL_NAME((short)2, "tbl_name"), NEW_TBL((short)3, "new_tbl"), - ENVIRONMENT_CONTEXT((short)4, "environment_context"); + ENVIRONMENT_CONTEXT((short)4, "environment_context"), + VALID_WRITE_ID_LIST((short)5, "validWriteIdList"); private static final Map byName = new HashMap(); @@ -83698,6 +84189,8 @@ public static _Fields findByThriftId(int fieldId) { return NEW_TBL; case 4: // ENVIRONMENT_CONTEXT return ENVIRONMENT_CONTEXT; + case 5: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; default: return null; } @@ -83749,6 +84242,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Table.class))); tmpMap.put(_Fields.ENVIRONMENT_CONTEXT, new org.apache.thrift.meta_data.FieldMetaData("environment_context", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, EnvironmentContext.class))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(alter_table_with_environment_context_args.class, metaDataMap); } @@ -83760,13 +84255,15 @@ public alter_table_with_environment_context_args( String dbname, String tbl_name, Table new_tbl, - EnvironmentContext environment_context) + EnvironmentContext environment_context, + String validWriteIdList) { this(); this.dbname = dbname; this.tbl_name = tbl_name; this.new_tbl = new_tbl; this.environment_context = environment_context; + this.validWriteIdList = validWriteIdList; } /** @@ -83785,6 +84282,9 @@ public alter_table_with_environment_context_args(alter_table_with_environment_co if (other.isSetEnvironment_context()) { 
this.environment_context = new EnvironmentContext(other.environment_context); } + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } } public alter_table_with_environment_context_args deepCopy() { @@ -83797,6 +84297,7 @@ public void clear() { this.tbl_name = null; this.new_tbl = null; this.environment_context = null; + this.validWriteIdList = null; } public String getDbname() { @@ -83891,6 +84392,29 @@ public void setEnvironment_contextIsSet(boolean value) { } } + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DBNAME: @@ -83925,6 +84449,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + } } @@ -83942,6 +84474,9 @@ public Object getFieldValue(_Fields field) { case ENVIRONMENT_CONTEXT: return getEnvironment_context(); + case VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + } throw new IllegalStateException(); } @@ -83961,6 +84496,8 @@ public boolean isSet(_Fields field) { return isSetNew_tbl(); case ENVIRONMENT_CONTEXT: return isSetEnvironment_context(); + case VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); } throw new IllegalStateException(); } @@ -84014,6 +84551,15 @@ public boolean equals(alter_table_with_environment_context_args that) { return false; } + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if (!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + return true; } @@ -84041,6 +84587,11 @@ public int hashCode() { if (present_environment_context) list.add(environment_context); + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + return list.hashCode(); } @@ -84092,6 +84643,16 @@ public int compareTo(alter_table_with_environment_context_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -84143,6 +84704,14 @@ public String toString() { sb.append(this.environment_context); } first = false; + if (!first) sb.append(", "); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; sb.append(")"); return 
sb.toString(); } @@ -84226,6 +84795,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_table_with_en org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 5: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -84259,6 +84836,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_table_with_e struct.environment_context.write(oprot); oprot.writeFieldEnd(); } + if (struct.validWriteIdList != null) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -84289,7 +84871,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_table_with_en if (struct.isSetEnvironment_context()) { optionals.set(3); } - oprot.writeBitSet(optionals, 4); + if (struct.isSetValidWriteIdList()) { + optionals.set(4); + } + oprot.writeBitSet(optionals, 5); if (struct.isSetDbname()) { oprot.writeString(struct.dbname); } @@ -84302,12 +84887,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_table_with_en if (struct.isSetEnvironment_context()) { struct.environment_context.write(oprot); } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, alter_table_with_environment_context_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(4); + BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { struct.dbname = iprot.readString(); struct.setDbnameIsSet(true); @@ -84326,6 +84914,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_table_with_env struct.environment_context.read(iprot); struct.setEnvironment_contextIsSet(true); } + if (incoming.get(4)) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } } } @@ -84804,6 +85396,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_table_with_env private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField NEW_TBL_FIELD_DESC = new org.apache.thrift.protocol.TField("new_tbl", org.apache.thrift.protocol.TType.STRUCT, (short)3); private static final org.apache.thrift.protocol.TField CASCADE_FIELD_DESC = new org.apache.thrift.protocol.TField("cascade", org.apache.thrift.protocol.TType.BOOL, (short)4); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)5); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -84815,13 +85408,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_table_with_env private String tbl_name; // required private Table new_tbl; // required private boolean cascade; // required + private String validWriteIdList; // required /** The set of fields this struct contains, along with 
convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DBNAME((short)1, "dbname"), TBL_NAME((short)2, "tbl_name"), NEW_TBL((short)3, "new_tbl"), - CASCADE((short)4, "cascade"); + CASCADE((short)4, "cascade"), + VALID_WRITE_ID_LIST((short)5, "validWriteIdList"); private static final Map byName = new HashMap(); @@ -84844,6 +85439,8 @@ public static _Fields findByThriftId(int fieldId) { return NEW_TBL; case 4: // CASCADE return CASCADE; + case 5: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; default: return null; } @@ -84897,6 +85494,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Table.class))); tmpMap.put(_Fields.CASCADE, new org.apache.thrift.meta_data.FieldMetaData("cascade", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(alter_table_with_cascade_args.class, metaDataMap); } @@ -84908,7 +85507,8 @@ public alter_table_with_cascade_args( String dbname, String tbl_name, Table new_tbl, - boolean cascade) + boolean cascade, + String validWriteIdList) { this(); this.dbname = dbname; @@ -84916,6 +85516,7 @@ public alter_table_with_cascade_args( this.new_tbl = new_tbl; this.cascade = cascade; setCascadeIsSet(true); + this.validWriteIdList = validWriteIdList; } /** @@ -84933,6 +85534,9 @@ public alter_table_with_cascade_args(alter_table_with_cascade_args other) { this.new_tbl = new Table(other.new_tbl); } this.cascade = other.cascade; + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } } public alter_table_with_cascade_args deepCopy() { @@ -84946,6 +85550,7 @@ public void clear() { this.new_tbl = null; setCascadeIsSet(false); this.cascade = false; + this.validWriteIdList = null; } public String getDbname() { @@ -85039,6 +85644,29 @@ public void setCascadeIsSet(boolean value) { __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __CASCADE_ISSET_ID, value); } + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DBNAME: @@ -85073,6 +85701,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + } } @@ -85090,6 +85726,9 @@ public Object getFieldValue(_Fields field) { case CASCADE: return isCascade(); + case VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + } throw new IllegalStateException(); } @@ -85109,6 
+85748,8 @@ public boolean isSet(_Fields field) { return isSetNew_tbl(); case CASCADE: return isSetCascade(); + case VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); } throw new IllegalStateException(); } @@ -85162,6 +85803,15 @@ public boolean equals(alter_table_with_cascade_args that) { return false; } + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if (!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + return true; } @@ -85189,6 +85839,11 @@ public int hashCode() { if (present_cascade) list.add(cascade); + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + return list.hashCode(); } @@ -85240,6 +85895,16 @@ public int compareTo(alter_table_with_cascade_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -85287,6 +85952,14 @@ public String toString() { sb.append("cascade:"); sb.append(this.cascade); first = false; + if (!first) sb.append(", "); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -85368,6 +86041,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_table_with_ca org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 5: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -85399,6 +86080,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_table_with_c oprot.writeFieldBegin(CASCADE_FIELD_DESC); oprot.writeBool(struct.cascade); oprot.writeFieldEnd(); + if (struct.validWriteIdList != null) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -85429,7 +86115,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_table_with_ca if (struct.isSetCascade()) { optionals.set(3); } - oprot.writeBitSet(optionals, 4); + if (struct.isSetValidWriteIdList()) { + optionals.set(4); + } + oprot.writeBitSet(optionals, 5); if (struct.isSetDbname()) { oprot.writeString(struct.dbname); } @@ -85442,12 +86131,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_table_with_ca if (struct.isSetCascade()) { oprot.writeBool(struct.cascade); } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, 
alter_table_with_cascade_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(4); + BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { struct.dbname = iprot.readString(); struct.setDbnameIsSet(true); @@ -85465,6 +86157,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_table_with_cas struct.cascade = iprot.readBool(); struct.setCascadeIsSet(true); } + if (incoming.get(4)) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } } } @@ -86878,6 +87574,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_table_req_resu private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("add_partition_args"); private static final org.apache.thrift.protocol.TField NEW_PART_FIELD_DESC = new org.apache.thrift.protocol.TField("new_part", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)2); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -86886,10 +87583,12 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_table_req_resu } private Partition new_part; // required + private String validWriteIdList; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - NEW_PART((short)1, "new_part"); + NEW_PART((short)1, "new_part"), + VALID_WRITE_ID_LIST((short)2, "validWriteIdList"); private static final Map byName = new HashMap(); @@ -86906,6 +87605,8 @@ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { case 1: // NEW_PART return NEW_PART; + case 2: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; default: return null; } @@ -86951,6 +87652,8 @@ public String getFieldName() { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.NEW_PART, new org.apache.thrift.meta_data.FieldMetaData("new_part", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Partition.class))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(add_partition_args.class, metaDataMap); } @@ -86959,10 +87662,12 @@ public add_partition_args() { } public add_partition_args( - Partition new_part) + Partition new_part, + String validWriteIdList) { this(); this.new_part = new_part; + this.validWriteIdList = validWriteIdList; } /** @@ -86972,6 +87677,9 @@ public add_partition_args(add_partition_args other) { if (other.isSetNew_part()) { this.new_part = new Partition(other.new_part); } + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } } public add_partition_args deepCopy() { @@ -86981,6 +87689,7 @@ public add_partition_args deepCopy() { @Override public void clear() { this.new_part = 
null; + this.validWriteIdList = null; } public Partition getNew_part() { @@ -87006,6 +87715,29 @@ public void setNew_partIsSet(boolean value) { } } + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case NEW_PART: @@ -87016,6 +87748,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + } } @@ -87024,6 +87764,9 @@ public Object getFieldValue(_Fields field) { case NEW_PART: return getNew_part(); + case VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + } throw new IllegalStateException(); } @@ -87037,6 +87780,8 @@ public boolean isSet(_Fields field) { switch (field) { case NEW_PART: return isSetNew_part(); + case VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); } throw new IllegalStateException(); } @@ -87063,6 +87808,15 @@ public boolean equals(add_partition_args that) { return false; } + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if (!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + return true; } @@ -87075,6 +87829,11 @@ public int hashCode() { if (present_new_part) list.add(new_part); + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + return list.hashCode(); } @@ -87096,6 +87855,16 @@ public int compareTo(add_partition_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -87123,6 +87892,14 @@ public String toString() { sb.append(this.new_part); } first = false; + if (!first) sb.append(", "); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -87178,6 +87955,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_partition_args org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 2: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; 
default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -87196,6 +87981,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, add_partition_args struct.new_part.write(oprot); oprot.writeFieldEnd(); } + if (struct.validWriteIdList != null) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -87217,21 +88007,31 @@ public void write(org.apache.thrift.protocol.TProtocol prot, add_partition_args if (struct.isSetNew_part()) { optionals.set(0); } - oprot.writeBitSet(optionals, 1); + if (struct.isSetValidWriteIdList()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); if (struct.isSetNew_part()) { struct.new_part.write(oprot); } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, add_partition_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); + BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { struct.new_part = new Partition(); struct.new_part.read(iprot); struct.setNew_partIsSet(true); } + if (incoming.get(1)) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } } } @@ -87923,6 +88723,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_partition_result private static final org.apache.thrift.protocol.TField NEW_PART_FIELD_DESC = new org.apache.thrift.protocol.TField("new_part", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final org.apache.thrift.protocol.TField ENVIRONMENT_CONTEXT_FIELD_DESC = new org.apache.thrift.protocol.TField("environment_context", org.apache.thrift.protocol.TType.STRUCT, (short)2); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)3); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -87932,11 +88733,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_partition_result private Partition new_part; // required private EnvironmentContext environment_context; // required + private String validWriteIdList; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { NEW_PART((short)1, "new_part"), - ENVIRONMENT_CONTEXT((short)2, "environment_context"); + ENVIRONMENT_CONTEXT((short)2, "environment_context"), + VALID_WRITE_ID_LIST((short)3, "validWriteIdList"); private static final Map byName = new HashMap(); @@ -87955,6 +88758,8 @@ public static _Fields findByThriftId(int fieldId) { return NEW_PART; case 2: // ENVIRONMENT_CONTEXT return ENVIRONMENT_CONTEXT; + case 3: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; default: return null; } @@ -88002,6 +88807,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Partition.class))); tmpMap.put(_Fields.ENVIRONMENT_CONTEXT, new org.apache.thrift.meta_data.FieldMetaData("environment_context", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, EnvironmentContext.class))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(add_partition_with_environment_context_args.class, metaDataMap); } @@ -88011,11 +88818,13 @@ public add_partition_with_environment_context_args() { public add_partition_with_environment_context_args( Partition new_part, - EnvironmentContext environment_context) + EnvironmentContext environment_context, + String validWriteIdList) { this(); this.new_part = new_part; this.environment_context = environment_context; + this.validWriteIdList = validWriteIdList; } /** @@ -88028,6 +88837,9 @@ public add_partition_with_environment_context_args(add_partition_with_environmen if (other.isSetEnvironment_context()) { this.environment_context = new EnvironmentContext(other.environment_context); } + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } } public add_partition_with_environment_context_args deepCopy() { @@ -88038,6 +88850,7 @@ public add_partition_with_environment_context_args deepCopy() { public void clear() { this.new_part = null; this.environment_context = null; + this.validWriteIdList = null; } public Partition getNew_part() { @@ -88086,6 +88899,29 @@ public void setEnvironment_contextIsSet(boolean value) { } } + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case NEW_PART: @@ -88104,6 +88940,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + } } @@ -88115,6 +88959,9 @@ public Object getFieldValue(_Fields field) { case ENVIRONMENT_CONTEXT: return getEnvironment_context(); + 
case VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + } throw new IllegalStateException(); } @@ -88130,6 +88977,8 @@ public boolean isSet(_Fields field) { return isSetNew_part(); case ENVIRONMENT_CONTEXT: return isSetEnvironment_context(); + case VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); } throw new IllegalStateException(); } @@ -88165,6 +89014,15 @@ public boolean equals(add_partition_with_environment_context_args that) { return false; } + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if (!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + return true; } @@ -88182,6 +89040,11 @@ public int hashCode() { if (present_environment_context) list.add(environment_context); + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + return list.hashCode(); } @@ -88213,6 +89076,16 @@ public int compareTo(add_partition_with_environment_context_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -88248,6 +89121,14 @@ public String toString() { sb.append(this.environment_context); } first = false; + if (!first) sb.append(", "); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -88315,6 +89196,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_partition_with_ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 3: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -88338,6 +89227,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, add_partition_with struct.environment_context.write(oprot); oprot.writeFieldEnd(); } + if (struct.validWriteIdList != null) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -88362,19 +89256,25 @@ public void write(org.apache.thrift.protocol.TProtocol prot, add_partition_with_ if (struct.isSetEnvironment_context()) { optionals.set(1); } - oprot.writeBitSet(optionals, 2); + if (struct.isSetValidWriteIdList()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); if (struct.isSetNew_part()) { struct.new_part.write(oprot); } if (struct.isSetEnvironment_context()) { struct.environment_context.write(oprot); } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); + } } @Override public void 
read(org.apache.thrift.protocol.TProtocol prot, add_partition_with_environment_context_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(2); + BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { struct.new_part = new Partition(); struct.new_part.read(iprot); @@ -88385,6 +89285,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_partition_with_e struct.environment_context.read(iprot); struct.setEnvironment_contextIsSet(true); } + if (incoming.get(2)) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } } } @@ -89075,6 +89979,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_partition_with_e private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("add_partitions_args"); private static final org.apache.thrift.protocol.TField NEW_PARTS_FIELD_DESC = new org.apache.thrift.protocol.TField("new_parts", org.apache.thrift.protocol.TType.LIST, (short)1); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)2); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -89083,10 +89988,12 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_partition_with_e } private List new_parts; // required + private String validWriteIdList; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - NEW_PARTS((short)1, "new_parts"); + NEW_PARTS((short)1, "new_parts"), + VALID_WRITE_ID_LIST((short)2, "validWriteIdList"); private static final Map byName = new HashMap(); @@ -89103,6 +90010,8 @@ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { case 1: // NEW_PARTS return NEW_PARTS; + case 2: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; default: return null; } @@ -89149,6 +90058,8 @@ public String getFieldName() { tmpMap.put(_Fields.NEW_PARTS, new org.apache.thrift.meta_data.FieldMetaData("new_parts", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Partition.class)))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(add_partitions_args.class, metaDataMap); } @@ -89157,10 +90068,12 @@ public add_partitions_args() { } public add_partitions_args( - List new_parts) + List new_parts, + String validWriteIdList) { this(); this.new_parts = new_parts; + this.validWriteIdList = validWriteIdList; } /** @@ -89174,6 +90087,9 @@ public add_partitions_args(add_partitions_args other) { } this.new_parts = __this__new_parts; } + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } } public add_partitions_args deepCopy() { @@ -89183,6 +90099,7 @@ public add_partitions_args deepCopy() { @Override public void clear() { this.new_parts = null; + 
this.validWriteIdList = null; } public int getNew_partsSize() { @@ -89223,6 +90140,29 @@ public void setNew_partsIsSet(boolean value) { } } + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case NEW_PARTS: @@ -89233,6 +90173,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + } } @@ -89241,6 +90189,9 @@ public Object getFieldValue(_Fields field) { case NEW_PARTS: return getNew_parts(); + case VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + } throw new IllegalStateException(); } @@ -89254,6 +90205,8 @@ public boolean isSet(_Fields field) { switch (field) { case NEW_PARTS: return isSetNew_parts(); + case VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); } throw new IllegalStateException(); } @@ -89280,6 +90233,15 @@ public boolean equals(add_partitions_args that) { return false; } + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if (!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + return true; } @@ -89292,6 +90254,11 @@ public int hashCode() { if (present_new_parts) list.add(new_parts); + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + return list.hashCode(); } @@ -89313,6 +90280,16 @@ public int compareTo(add_partitions_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -89340,6 +90317,14 @@ public String toString() { sb.append(this.new_parts); } first = false; + if (!first) sb.append(", "); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -89402,6 +90387,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_partitions_args org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 2: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; 
default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -89427,6 +90420,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, add_partitions_arg } oprot.writeFieldEnd(); } + if (struct.validWriteIdList != null) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -89448,7 +90446,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, add_partitions_args if (struct.isSetNew_parts()) { optionals.set(0); } - oprot.writeBitSet(optionals, 1); + if (struct.isSetValidWriteIdList()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); @@ -89458,12 +90459,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, add_partitions_args } } } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, add_partitions_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); + BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { org.apache.thrift.protocol.TList _list1231 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); @@ -89478,6 +90482,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_partitions_args } struct.setNew_partsIsSet(true); } + if (incoming.get(1)) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } } } @@ -90163,6 +91171,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_partitions_resul private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("add_partitions_pspec_args"); private static final org.apache.thrift.protocol.TField NEW_PARTS_FIELD_DESC = new org.apache.thrift.protocol.TField("new_parts", org.apache.thrift.protocol.TType.LIST, (short)1); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)2); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -90171,10 +91180,12 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_partitions_resul } private List new_parts; // required + private String validWriteIdList; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - NEW_PARTS((short)1, "new_parts"); + NEW_PARTS((short)1, "new_parts"), + VALID_WRITE_ID_LIST((short)2, "validWriteIdList"); private static final Map byName = new HashMap(); @@ -90191,6 +91202,8 @@ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { case 1: // NEW_PARTS return NEW_PARTS; + case 2: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; default: return null; } @@ -90237,6 +91250,8 @@ public String getFieldName() { tmpMap.put(_Fields.NEW_PARTS, new org.apache.thrift.meta_data.FieldMetaData("new_parts", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PartitionSpec.class)))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(add_partitions_pspec_args.class, metaDataMap); } @@ -90245,10 +91260,12 @@ public add_partitions_pspec_args() { } public add_partitions_pspec_args( - List new_parts) + List new_parts, + String validWriteIdList) { this(); this.new_parts = new_parts; + this.validWriteIdList = validWriteIdList; } /** @@ -90262,6 +91279,9 @@ public add_partitions_pspec_args(add_partitions_pspec_args other) { } this.new_parts = __this__new_parts; } + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } } public add_partitions_pspec_args deepCopy() { @@ -90271,6 +91291,7 @@ public add_partitions_pspec_args deepCopy() { @Override public void clear() { this.new_parts = null; + this.validWriteIdList = null; } public int getNew_partsSize() { @@ -90311,6 +91332,29 @@ public void setNew_partsIsSet(boolean value) { } } + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case NEW_PARTS: @@ -90321,6 +91365,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + } } @@ -90329,6 +91381,9 @@ public Object getFieldValue(_Fields field) { case NEW_PARTS: return getNew_parts(); + case VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + } throw new IllegalStateException(); } @@ -90342,6 +91397,8 @@ public boolean isSet(_Fields field) { switch (field) { case NEW_PARTS: return isSetNew_parts(); + case VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); } throw new IllegalStateException(); } @@ -90368,6 +91425,15 @@ public boolean equals(add_partitions_pspec_args that) { return false; } + boolean this_present_validWriteIdList = true && 
this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if (!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + return true; } @@ -90380,6 +91446,11 @@ public int hashCode() { if (present_new_parts) list.add(new_parts); + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + return list.hashCode(); } @@ -90401,6 +91472,16 @@ public int compareTo(add_partitions_pspec_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -90428,6 +91509,14 @@ public String toString() { sb.append(this.new_parts); } first = false; + if (!first) sb.append(", "); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -90490,6 +91579,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_partitions_pspe org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 2: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -90515,6 +91612,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, add_partitions_psp } oprot.writeFieldEnd(); } + if (struct.validWriteIdList != null) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -90536,7 +91638,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, add_partitions_pspe if (struct.isSetNew_parts()) { optionals.set(0); } - oprot.writeBitSet(optionals, 1); + if (struct.isSetValidWriteIdList()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); @@ -90546,12 +91651,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, add_partitions_pspe } } } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, add_partitions_pspec_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); + BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { org.apache.thrift.protocol.TList _list1239 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); @@ -90566,6 +91674,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_partitions_pspec } struct.setNew_partsIsSet(true); } + if 
(incoming.get(1)) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } } } @@ -91253,6 +92365,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_partitions_pspec private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField PART_VALS_FIELD_DESC = new org.apache.thrift.protocol.TField("part_vals", org.apache.thrift.protocol.TType.LIST, (short)3); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)4); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -91263,12 +92376,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_partitions_pspec private String db_name; // required private String tbl_name; // required private List part_vals; // required + private String validWriteIdList; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "db_name"), TBL_NAME((short)2, "tbl_name"), - PART_VALS((short)3, "part_vals"); + PART_VALS((short)3, "part_vals"), + VALID_WRITE_ID_LIST((short)4, "validWriteIdList"); private static final Map byName = new HashMap(); @@ -91289,6 +92404,8 @@ public static _Fields findByThriftId(int fieldId) { return TBL_NAME; case 3: // PART_VALS return PART_VALS; + case 4: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; default: return null; } @@ -91339,6 +92456,8 @@ public String getFieldName() { tmpMap.put(_Fields.PART_VALS, new org.apache.thrift.meta_data.FieldMetaData("part_vals", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(append_partition_args.class, metaDataMap); } @@ -91349,12 +92468,14 @@ public append_partition_args() { public append_partition_args( String db_name, String tbl_name, - List part_vals) + List part_vals, + String validWriteIdList) { this(); this.db_name = db_name; this.tbl_name = tbl_name; this.part_vals = part_vals; + this.validWriteIdList = validWriteIdList; } /** @@ -91371,6 +92492,9 @@ public append_partition_args(append_partition_args other) { List __this__part_vals = new ArrayList(other.part_vals); this.part_vals = __this__part_vals; } + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } } public append_partition_args deepCopy() { @@ -91382,6 +92506,7 @@ public void clear() { this.db_name = null; this.tbl_name = null; this.part_vals = null; + this.validWriteIdList = null; } public String getDb_name() { @@ -91468,6 
+92593,29 @@ public void setPart_valsIsSet(boolean value) { } } + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -91494,6 +92642,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + } } @@ -91508,6 +92664,9 @@ public Object getFieldValue(_Fields field) { case PART_VALS: return getPart_vals(); + case VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + } throw new IllegalStateException(); } @@ -91525,6 +92684,8 @@ public boolean isSet(_Fields field) { return isSetTbl_name(); case PART_VALS: return isSetPart_vals(); + case VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); } throw new IllegalStateException(); } @@ -91569,6 +92730,15 @@ public boolean equals(append_partition_args that) { return false; } + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if (!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + return true; } @@ -91591,6 +92761,11 @@ public int hashCode() { if (present_part_vals) list.add(part_vals); + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + return list.hashCode(); } @@ -91632,6 +92807,16 @@ public int compareTo(append_partition_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -91675,6 +92860,14 @@ public String toString() { sb.append(this.part_vals); } first = false; + if (!first) sb.append(", "); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -91752,6 +92945,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, append_partition_ar org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 4: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, 
schemeField.type); } @@ -91787,6 +92988,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, append_partition_a } oprot.writeFieldEnd(); } + if (struct.validWriteIdList != null) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -91814,7 +93020,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, append_partition_ar if (struct.isSetPart_vals()) { optionals.set(2); } - oprot.writeBitSet(optionals, 3); + if (struct.isSetValidWriteIdList()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); if (struct.isSetDb_name()) { oprot.writeString(struct.db_name); } @@ -91830,12 +93039,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, append_partition_ar } } } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, append_partition_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(3); + BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); @@ -91857,6 +93069,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, append_partition_arg } struct.setPart_valsIsSet(true); } + if (incoming.get(3)) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } } } @@ -93594,6 +94810,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_partitions_req_r private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField PART_VALS_FIELD_DESC = new org.apache.thrift.protocol.TField("part_vals", org.apache.thrift.protocol.TType.LIST, (short)3); private static final org.apache.thrift.protocol.TField ENVIRONMENT_CONTEXT_FIELD_DESC = new org.apache.thrift.protocol.TField("environment_context", org.apache.thrift.protocol.TType.STRUCT, (short)4); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)5); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -93605,13 +94822,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_partitions_req_r private String tbl_name; // required private List part_vals; // required private EnvironmentContext environment_context; // required + private String validWriteIdList; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "db_name"), TBL_NAME((short)2, "tbl_name"), PART_VALS((short)3, "part_vals"), - ENVIRONMENT_CONTEXT((short)4, "environment_context"); + ENVIRONMENT_CONTEXT((short)4, "environment_context"), + VALID_WRITE_ID_LIST((short)5, "validWriteIdList"); private static final Map byName = new HashMap(); @@ -93634,6 +94853,8 @@ public static _Fields findByThriftId(int fieldId) { return PART_VALS; case 4: // ENVIRONMENT_CONTEXT return ENVIRONMENT_CONTEXT; + case 5: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; default: return null; } @@ -93686,6 +94907,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); tmpMap.put(_Fields.ENVIRONMENT_CONTEXT, new org.apache.thrift.meta_data.FieldMetaData("environment_context", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, EnvironmentContext.class))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(append_partition_with_environment_context_args.class, metaDataMap); } @@ -93697,13 +94920,15 @@ public append_partition_with_environment_context_args( String db_name, String tbl_name, List part_vals, - EnvironmentContext environment_context) + EnvironmentContext environment_context, + String validWriteIdList) { this(); this.db_name = db_name; this.tbl_name = tbl_name; this.part_vals = part_vals; this.environment_context = environment_context; + this.validWriteIdList = validWriteIdList; } /** @@ -93723,6 +94948,9 @@ public append_partition_with_environment_context_args(append_partition_with_envi if (other.isSetEnvironment_context()) { this.environment_context = new EnvironmentContext(other.environment_context); } + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } } public append_partition_with_environment_context_args deepCopy() { @@ -93735,6 +94963,7 @@ public void clear() { this.tbl_name = null; this.part_vals = null; this.environment_context = null; + this.validWriteIdList = null; } public String getDb_name() { @@ -93844,6 +95073,29 @@ public void setEnvironment_contextIsSet(boolean value) { } } + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -93878,6 +95130,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + } } @@ -93895,6 +95155,9 @@ public Object getFieldValue(_Fields field) { case 
ENVIRONMENT_CONTEXT: return getEnvironment_context(); + case VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + } throw new IllegalStateException(); } @@ -93914,6 +95177,8 @@ public boolean isSet(_Fields field) { return isSetPart_vals(); case ENVIRONMENT_CONTEXT: return isSetEnvironment_context(); + case VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); } throw new IllegalStateException(); } @@ -93967,6 +95232,15 @@ public boolean equals(append_partition_with_environment_context_args that) { return false; } + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if (!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + return true; } @@ -93994,6 +95268,11 @@ public int hashCode() { if (present_environment_context) list.add(environment_context); + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + return list.hashCode(); } @@ -94045,6 +95324,16 @@ public int compareTo(append_partition_with_environment_context_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -94096,6 +95385,14 @@ public String toString() { sb.append(this.environment_context); } first = false; + if (!first) sb.append(", "); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -94185,6 +95482,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, append_partition_wi org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 5: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -94225,6 +95530,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, append_partition_w struct.environment_context.write(oprot); oprot.writeFieldEnd(); } + if (struct.validWriteIdList != null) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -94255,7 +95565,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, append_partition_wi if (struct.isSetEnvironment_context()) { optionals.set(3); } - oprot.writeBitSet(optionals, 4); + if (struct.isSetValidWriteIdList()) { + optionals.set(4); + } + oprot.writeBitSet(optionals, 5); if (struct.isSetDb_name()) { oprot.writeString(struct.db_name); } @@ -94274,12 +95587,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, append_partition_wi if 
(struct.isSetEnvironment_context()) { struct.environment_context.write(oprot); } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, append_partition_with_environment_context_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(4); + BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); @@ -94306,6 +95622,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, append_partition_wit struct.environment_context.read(iprot); struct.setEnvironment_contextIsSet(true); } + if (incoming.get(4)) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } } } @@ -94998,6 +96318,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, append_partition_wit private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField PART_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("part_name", org.apache.thrift.protocol.TType.STRING, (short)3); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)4); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -95008,12 +96329,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, append_partition_wit private String db_name; // required private String tbl_name; // required private String part_name; // required + private String validWriteIdList; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "db_name"), TBL_NAME((short)2, "tbl_name"), - PART_NAME((short)3, "part_name"); + PART_NAME((short)3, "part_name"), + VALID_WRITE_ID_LIST((short)4, "validWriteIdList"); private static final Map byName = new HashMap(); @@ -95034,6 +96357,8 @@ public static _Fields findByThriftId(int fieldId) { return TBL_NAME; case 3: // PART_NAME return PART_NAME; + case 4: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; default: return null; } @@ -95083,6 +96408,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.PART_NAME, new org.apache.thrift.meta_data.FieldMetaData("part_name", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(append_partition_by_name_args.class, metaDataMap); } @@ -95093,12 +96420,14 @@ public append_partition_by_name_args() { public append_partition_by_name_args( String db_name, String tbl_name, - String part_name) + String part_name, + String validWriteIdList) { this(); this.db_name = db_name; this.tbl_name = tbl_name; this.part_name = part_name; + this.validWriteIdList = validWriteIdList; } /** @@ -95114,6 +96443,9 @@ public append_partition_by_name_args(append_partition_by_name_args other) { if (other.isSetPart_name()) { this.part_name = other.part_name; } + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } } public append_partition_by_name_args deepCopy() { @@ -95125,6 +96457,7 @@ public void clear() { this.db_name = null; this.tbl_name = null; this.part_name = null; + this.validWriteIdList = null; } public String getDb_name() { @@ -95196,6 +96529,29 @@ public void setPart_nameIsSet(boolean value) { } } + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -95222,6 +96578,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + } } @@ -95236,6 +96600,9 @@ public Object getFieldValue(_Fields field) { case PART_NAME: return getPart_name(); + case VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + } throw new IllegalStateException(); } @@ -95253,6 +96620,8 @@ public boolean isSet(_Fields field) { return isSetTbl_name(); case PART_NAME: return isSetPart_name(); + case VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); } throw new 
IllegalStateException(); } @@ -95297,6 +96666,15 @@ public boolean equals(append_partition_by_name_args that) { return false; } + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if (!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + return true; } @@ -95319,6 +96697,11 @@ public int hashCode() { if (present_part_name) list.add(part_name); + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + return list.hashCode(); } @@ -95360,6 +96743,16 @@ public int compareTo(append_partition_by_name_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -95403,6 +96796,14 @@ public String toString() { sb.append(this.part_name); } first = false; + if (!first) sb.append(", "); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -95470,6 +96871,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, append_partition_by org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 4: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -95498,6 +96907,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, append_partition_b oprot.writeString(struct.part_name); oprot.writeFieldEnd(); } + if (struct.validWriteIdList != null) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -95525,7 +96939,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, append_partition_by if (struct.isSetPart_name()) { optionals.set(2); } - oprot.writeBitSet(optionals, 3); + if (struct.isSetValidWriteIdList()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); if (struct.isSetDb_name()) { oprot.writeString(struct.db_name); } @@ -95535,12 +96952,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, append_partition_by if (struct.isSetPart_name()) { oprot.writeString(struct.part_name); } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, append_partition_by_name_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(3); + BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { struct.db_name = 
iprot.readString(); struct.setDb_nameIsSet(true); @@ -95553,6 +96973,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, append_partition_by_ struct.part_name = iprot.readString(); struct.setPart_nameIsSet(true); } + if (incoming.get(3)) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } } } @@ -96246,6 +97670,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, append_partition_by_ private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField PART_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("part_name", org.apache.thrift.protocol.TType.STRING, (short)3); private static final org.apache.thrift.protocol.TField ENVIRONMENT_CONTEXT_FIELD_DESC = new org.apache.thrift.protocol.TField("environment_context", org.apache.thrift.protocol.TType.STRUCT, (short)4); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)5); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -96257,13 +97682,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, append_partition_by_ private String tbl_name; // required private String part_name; // required private EnvironmentContext environment_context; // required + private String validWriteIdList; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "db_name"), TBL_NAME((short)2, "tbl_name"), PART_NAME((short)3, "part_name"), - ENVIRONMENT_CONTEXT((short)4, "environment_context"); + ENVIRONMENT_CONTEXT((short)4, "environment_context"), + VALID_WRITE_ID_LIST((short)5, "validWriteIdList"); private static final Map byName = new HashMap(); @@ -96286,6 +97713,8 @@ public static _Fields findByThriftId(int fieldId) { return PART_NAME; case 4: // ENVIRONMENT_CONTEXT return ENVIRONMENT_CONTEXT; + case 5: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; default: return null; } @@ -96337,6 +97766,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.ENVIRONMENT_CONTEXT, new org.apache.thrift.meta_data.FieldMetaData("environment_context", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, EnvironmentContext.class))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(append_partition_by_name_with_environment_context_args.class, metaDataMap); } @@ -96348,13 +97779,15 @@ public append_partition_by_name_with_environment_context_args( String db_name, String tbl_name, String part_name, - EnvironmentContext environment_context) + EnvironmentContext environment_context, + String validWriteIdList) { this(); this.db_name = db_name; this.tbl_name = tbl_name; this.part_name = part_name; 
this.environment_context = environment_context; + this.validWriteIdList = validWriteIdList; } /** @@ -96373,6 +97806,9 @@ public append_partition_by_name_with_environment_context_args(append_partition_b if (other.isSetEnvironment_context()) { this.environment_context = new EnvironmentContext(other.environment_context); } + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } } public append_partition_by_name_with_environment_context_args deepCopy() { @@ -96385,6 +97821,7 @@ public void clear() { this.tbl_name = null; this.part_name = null; this.environment_context = null; + this.validWriteIdList = null; } public String getDb_name() { @@ -96479,6 +97916,29 @@ public void setEnvironment_contextIsSet(boolean value) { } } + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -96513,6 +97973,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + } } @@ -96530,6 +97998,9 @@ public Object getFieldValue(_Fields field) { case ENVIRONMENT_CONTEXT: return getEnvironment_context(); + case VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + } throw new IllegalStateException(); } @@ -96549,6 +98020,8 @@ public boolean isSet(_Fields field) { return isSetPart_name(); case ENVIRONMENT_CONTEXT: return isSetEnvironment_context(); + case VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); } throw new IllegalStateException(); } @@ -96602,6 +98075,15 @@ public boolean equals(append_partition_by_name_with_environment_context_args tha return false; } + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if (!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + return true; } @@ -96629,6 +98111,11 @@ public int hashCode() { if (present_environment_context) list.add(environment_context); + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + return list.hashCode(); } @@ -96680,6 +98167,16 @@ public int compareTo(append_partition_by_name_with_environment_context_args othe return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -96731,6 +98228,14 @@ public String toString() { 
sb.append(this.environment_context); } first = false; + if (!first) sb.append(", "); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -96810,6 +98315,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, append_partition_by org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 5: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -96843,6 +98356,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, append_partition_b struct.environment_context.write(oprot); oprot.writeFieldEnd(); } + if (struct.validWriteIdList != null) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -96873,7 +98391,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, append_partition_by if (struct.isSetEnvironment_context()) { optionals.set(3); } - oprot.writeBitSet(optionals, 4); + if (struct.isSetValidWriteIdList()) { + optionals.set(4); + } + oprot.writeBitSet(optionals, 5); if (struct.isSetDb_name()) { oprot.writeString(struct.db_name); } @@ -96886,12 +98407,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, append_partition_by if (struct.isSetEnvironment_context()) { struct.environment_context.write(oprot); } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, append_partition_by_name_with_environment_context_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(4); + BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); @@ -96909,6 +98433,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, append_partition_by_ struct.environment_context.read(iprot); struct.setEnvironment_contextIsSet(true); } + if (incoming.get(4)) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } } } @@ -104999,6 +106527,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_result private static final org.apache.thrift.protocol.TField SOURCE_TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("source_table_name", org.apache.thrift.protocol.TType.STRING, (short)3); private static final org.apache.thrift.protocol.TField DEST_DB_FIELD_DESC = new org.apache.thrift.protocol.TField("dest_db", org.apache.thrift.protocol.TType.STRING, (short)4); private static final org.apache.thrift.protocol.TField DEST_TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dest_table_name", org.apache.thrift.protocol.TType.STRING, (short)5); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)6); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static 
{ @@ -105011,6 +106540,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_result private String source_table_name; // required private String dest_db; // required private String dest_table_name; // required + private String validWriteIdList; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -105018,7 +106548,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_result SOURCE_DB((short)2, "source_db"), SOURCE_TABLE_NAME((short)3, "source_table_name"), DEST_DB((short)4, "dest_db"), - DEST_TABLE_NAME((short)5, "dest_table_name"); + DEST_TABLE_NAME((short)5, "dest_table_name"), + VALID_WRITE_ID_LIST((short)6, "validWriteIdList"); private static final Map byName = new HashMap(); @@ -105043,6 +106574,8 @@ public static _Fields findByThriftId(int fieldId) { return DEST_DB; case 5: // DEST_TABLE_NAME return DEST_TABLE_NAME; + case 6: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; default: return null; } @@ -105098,6 +106631,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.DEST_TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("dest_table_name", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(exchange_partition_args.class, metaDataMap); } @@ -105110,7 +106645,8 @@ public exchange_partition_args( String source_db, String source_table_name, String dest_db, - String dest_table_name) + String dest_table_name, + String validWriteIdList) { this(); this.partitionSpecs = partitionSpecs; @@ -105118,6 +106654,7 @@ public exchange_partition_args( this.source_table_name = source_table_name; this.dest_db = dest_db; this.dest_table_name = dest_table_name; + this.validWriteIdList = validWriteIdList; } /** @@ -105140,6 +106677,9 @@ public exchange_partition_args(exchange_partition_args other) { if (other.isSetDest_table_name()) { this.dest_table_name = other.dest_table_name; } + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } } public exchange_partition_args deepCopy() { @@ -105153,6 +106693,7 @@ public void clear() { this.source_table_name = null; this.dest_db = null; this.dest_table_name = null; + this.validWriteIdList = null; } public int getPartitionSpecsSize() { @@ -105281,6 +106822,29 @@ public void setDest_table_nameIsSet(boolean value) { } } + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + 
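The generated accessors above follow the standard Thrift pattern: validWriteIdList is a nullable String that is only serialized when it has been set. A minimal sketch of that contract, assuming the generated args classes are public static inner classes of ThriftHiveMetastore in org.apache.hadoop.hive.metastore.api (their usual location for this generated file) and using a made-up write-id string:

// Illustrative sketch only, not part of the generated file. The class location is an
// assumption about where the Thrift compiler places these args structs, and the
// write-id string is a hypothetical placeholder, not a value produced by Hive.
import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.exchange_partition_args;

public class ValidWriteIdListFieldSketch {
  public static void main(String[] unused) {
    exchange_partition_args args = new exchange_partition_args();

    // Unset by default: the generated write() only emits field 6 when non-null,
    // so callers that never set it produce the same bytes as before this change.
    assert !args.isSetValidWriteIdList();

    // Setting the field makes it travel as an ordinary optional STRING field (id 6).
    args.setValidWriteIdList("default.tbl:42:42::"); // hypothetical write-id list string
    assert args.isSetValidWriteIdList();

    // unsetValidWriteIdList() nulls the field again, so it is skipped on write once more.
    args.unsetValidWriteIdList();
    assert args.getValidWriteIdList() == null;
  }
}

Because the new field is appended with the next free field id and left at DEFAULT requirement, peers that do not know about it simply skip it on read, and requests that never set it serialize exactly as before, which keeps old clients and servers interoperable.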
public void setFieldValue(_Fields field, Object value) { switch (field) { case PARTITION_SPECS: @@ -105323,6 +106887,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + } } @@ -105343,6 +106915,9 @@ public Object getFieldValue(_Fields field) { case DEST_TABLE_NAME: return getDest_table_name(); + case VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + } throw new IllegalStateException(); } @@ -105364,6 +106939,8 @@ public boolean isSet(_Fields field) { return isSetDest_db(); case DEST_TABLE_NAME: return isSetDest_table_name(); + case VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); } throw new IllegalStateException(); } @@ -105426,6 +107003,15 @@ public boolean equals(exchange_partition_args that) { return false; } + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if (!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + return true; } @@ -105458,6 +107044,11 @@ public int hashCode() { if (present_dest_table_name) list.add(dest_table_name); + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + return list.hashCode(); } @@ -105519,6 +107110,16 @@ public int compareTo(exchange_partition_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -105578,6 +107179,14 @@ public String toString() { sb.append(this.dest_table_name); } first = false; + if (!first) sb.append(", "); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -105673,6 +107282,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partition_ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 6: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -105719,6 +107336,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition oprot.writeString(struct.dest_table_name); oprot.writeFieldEnd(); } + if (struct.validWriteIdList != null) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -105752,7 +107374,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partition_ if (struct.isSetDest_table_name()) { 
optionals.set(4); } - oprot.writeBitSet(optionals, 5); + if (struct.isSetValidWriteIdList()) { + optionals.set(5); + } + oprot.writeBitSet(optionals, 6); if (struct.isSetPartitionSpecs()) { { oprot.writeI32(struct.partitionSpecs.size()); @@ -105775,12 +107400,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partition_ if (struct.isSetDest_table_name()) { oprot.writeString(struct.dest_table_name); } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partition_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(5); + BitSet incoming = iprot.readBitSet(6); if (incoming.get(0)) { { org.apache.thrift.protocol.TMap _map1288 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); @@ -105812,6 +107440,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partition_a struct.dest_table_name = iprot.readString(); struct.setDest_table_nameIsSet(true); } + if (incoming.get(5)) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } } } @@ -106612,6 +108244,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partition_r private static final org.apache.thrift.protocol.TField SOURCE_TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("source_table_name", org.apache.thrift.protocol.TType.STRING, (short)3); private static final org.apache.thrift.protocol.TField DEST_DB_FIELD_DESC = new org.apache.thrift.protocol.TField("dest_db", org.apache.thrift.protocol.TType.STRING, (short)4); private static final org.apache.thrift.protocol.TField DEST_TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dest_table_name", org.apache.thrift.protocol.TType.STRING, (short)5); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)6); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -106624,6 +108257,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partition_r private String source_table_name; // required private String dest_db; // required private String dest_table_name; // required + private String validWriteIdList; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -106631,7 +108265,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partition_r SOURCE_DB((short)2, "source_db"), SOURCE_TABLE_NAME((short)3, "source_table_name"), DEST_DB((short)4, "dest_db"), - DEST_TABLE_NAME((short)5, "dest_table_name"); + DEST_TABLE_NAME((short)5, "dest_table_name"), + VALID_WRITE_ID_LIST((short)6, "validWriteIdList"); private static final Map byName = new HashMap(); @@ -106656,6 +108291,8 @@ public static _Fields findByThriftId(int fieldId) { return DEST_DB; case 5: // DEST_TABLE_NAME return DEST_TABLE_NAME; + case 6: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; default: return null; } @@ -106711,6 +108348,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.DEST_TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("dest_table_name", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(exchange_partitions_args.class, metaDataMap); } @@ -106723,7 +108362,8 @@ public exchange_partitions_args( String source_db, String source_table_name, String dest_db, - String dest_table_name) + String dest_table_name, + String validWriteIdList) { this(); this.partitionSpecs = partitionSpecs; @@ -106731,6 +108371,7 @@ public exchange_partitions_args( this.source_table_name = source_table_name; this.dest_db = dest_db; this.dest_table_name = dest_table_name; + this.validWriteIdList = validWriteIdList; } /** @@ -106753,6 +108394,9 @@ public exchange_partitions_args(exchange_partitions_args other) { if (other.isSetDest_table_name()) { this.dest_table_name = other.dest_table_name; } + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } } public exchange_partitions_args deepCopy() { @@ -106766,6 +108410,7 @@ public void clear() { this.source_table_name = null; this.dest_db = null; this.dest_table_name = null; + this.validWriteIdList = null; } public int getPartitionSpecsSize() { @@ -106894,6 +108539,29 @@ public void setDest_table_nameIsSet(boolean value) { } } + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case PARTITION_SPECS: @@ -106936,6 +108604,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + } } @@ -106956,6 +108632,9 @@ public Object 
getFieldValue(_Fields field) { case DEST_TABLE_NAME: return getDest_table_name(); + case VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + } throw new IllegalStateException(); } @@ -106977,6 +108656,8 @@ public boolean isSet(_Fields field) { return isSetDest_db(); case DEST_TABLE_NAME: return isSetDest_table_name(); + case VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); } throw new IllegalStateException(); } @@ -107039,6 +108720,15 @@ public boolean equals(exchange_partitions_args that) { return false; } + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if (!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + return true; } @@ -107071,6 +108761,11 @@ public int hashCode() { if (present_dest_table_name) list.add(dest_table_name); + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + return list.hashCode(); } @@ -107132,6 +108827,16 @@ public int compareTo(exchange_partitions_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -107191,6 +108896,14 @@ public String toString() { sb.append(this.dest_table_name); } first = false; + if (!first) sb.append(", "); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -107286,6 +108999,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partitions org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 6: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -107332,6 +109053,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition oprot.writeString(struct.dest_table_name); oprot.writeFieldEnd(); } + if (struct.validWriteIdList != null) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -107365,7 +109091,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partitions if (struct.isSetDest_table_name()) { optionals.set(4); } - oprot.writeBitSet(optionals, 5); + if (struct.isSetValidWriteIdList()) { + optionals.set(5); + } + oprot.writeBitSet(optionals, 6); if (struct.isSetPartitionSpecs()) { { oprot.writeI32(struct.partitionSpecs.size()); @@ -107388,12 +109117,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partitions if (struct.isSetDest_table_name()) 
{ oprot.writeString(struct.dest_table_name); } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partitions_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(5); + BitSet incoming = iprot.readBitSet(6); if (incoming.get(0)) { { org.apache.thrift.protocol.TMap _map1298 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); @@ -107425,6 +109157,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partitions_ struct.dest_table_name = iprot.readString(); struct.setDest_table_nameIsSet(true); } + if (incoming.get(5)) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } } } @@ -148082,6 +149818,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_check_constraint private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("update_table_column_statistics_args"); private static final org.apache.thrift.protocol.TField STATS_OBJ_FIELD_DESC = new org.apache.thrift.protocol.TField("stats_obj", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)2); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -148090,10 +149827,12 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_check_constraint } private ColumnStatistics stats_obj; // required + private String validWriteIdList; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - STATS_OBJ((short)1, "stats_obj"); + STATS_OBJ((short)1, "stats_obj"), + VALID_WRITE_ID_LIST((short)2, "validWriteIdList"); private static final Map byName = new HashMap(); @@ -148110,6 +149849,8 @@ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { case 1: // STATS_OBJ return STATS_OBJ; + case 2: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; default: return null; } @@ -148155,6 +149896,8 @@ public String getFieldName() { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.STATS_OBJ, new org.apache.thrift.meta_data.FieldMetaData("stats_obj", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnStatistics.class))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(update_table_column_statistics_args.class, metaDataMap); } @@ -148163,10 +149906,12 @@ public update_table_column_statistics_args() { } public update_table_column_statistics_args( - ColumnStatistics stats_obj) + ColumnStatistics stats_obj, + String validWriteIdList) { this(); this.stats_obj = stats_obj; + this.validWriteIdList = validWriteIdList; } /** @@ -148176,6 +149921,9 @@ public update_table_column_statistics_args(update_table_column_statistics_args o if (other.isSetStats_obj()) { this.stats_obj = new ColumnStatistics(other.stats_obj); } + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } } public update_table_column_statistics_args deepCopy() { @@ -148185,6 +149933,7 @@ public update_table_column_statistics_args deepCopy() { @Override public void clear() { this.stats_obj = null; + this.validWriteIdList = null; } public ColumnStatistics getStats_obj() { @@ -148210,6 +149959,29 @@ public void setStats_objIsSet(boolean value) { } } + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case STATS_OBJ: @@ -148220,6 +149992,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + } } @@ -148228,6 +150008,9 @@ public Object getFieldValue(_Fields field) { case STATS_OBJ: return getStats_obj(); + case VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + } throw new IllegalStateException(); } @@ -148241,6 +150024,8 @@ public boolean isSet(_Fields field) { switch (field) { case STATS_OBJ: return isSetStats_obj(); + case VALID_WRITE_ID_LIST: + return 
isSetValidWriteIdList(); } throw new IllegalStateException(); } @@ -148267,6 +150052,15 @@ public boolean equals(update_table_column_statistics_args that) { return false; } + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if (!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + return true; } @@ -148279,6 +150073,11 @@ public int hashCode() { if (present_stats_obj) list.add(stats_obj); + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + return list.hashCode(); } @@ -148300,6 +150099,16 @@ public int compareTo(update_table_column_statistics_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -148327,6 +150136,14 @@ public String toString() { sb.append(this.stats_obj); } first = false; + if (!first) sb.append(", "); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -148382,6 +150199,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, update_table_column org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 2: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -148400,6 +150225,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, update_table_colum struct.stats_obj.write(oprot); oprot.writeFieldEnd(); } + if (struct.validWriteIdList != null) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -148421,21 +150251,31 @@ public void write(org.apache.thrift.protocol.TProtocol prot, update_table_column if (struct.isSetStats_obj()) { optionals.set(0); } - oprot.writeBitSet(optionals, 1); + if (struct.isSetValidWriteIdList()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); if (struct.isSetStats_obj()) { struct.stats_obj.write(oprot); } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, update_table_column_statistics_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); + BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { struct.stats_obj = new ColumnStatistics(); struct.stats_obj.read(iprot); struct.setStats_objIsSet(true); } + if (incoming.get(1)) { + 
struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } } } @@ -149227,6 +151067,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, update_table_column_ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("update_partition_column_statistics_args"); private static final org.apache.thrift.protocol.TField STATS_OBJ_FIELD_DESC = new org.apache.thrift.protocol.TField("stats_obj", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)2); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -149235,10 +151076,12 @@ public void read(org.apache.thrift.protocol.TProtocol prot, update_table_column_ } private ColumnStatistics stats_obj; // required + private String validWriteIdList; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - STATS_OBJ((short)1, "stats_obj"); + STATS_OBJ((short)1, "stats_obj"), + VALID_WRITE_ID_LIST((short)2, "validWriteIdList"); private static final Map byName = new HashMap(); @@ -149255,6 +151098,8 @@ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { case 1: // STATS_OBJ return STATS_OBJ; + case 2: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; default: return null; } @@ -149300,6 +151145,8 @@ public String getFieldName() { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.STATS_OBJ, new org.apache.thrift.meta_data.FieldMetaData("stats_obj", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnStatistics.class))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(update_partition_column_statistics_args.class, metaDataMap); } @@ -149308,10 +151155,12 @@ public update_partition_column_statistics_args() { } public update_partition_column_statistics_args( - ColumnStatistics stats_obj) + ColumnStatistics stats_obj, + String validWriteIdList) { this(); this.stats_obj = stats_obj; + this.validWriteIdList = validWriteIdList; } /** @@ -149321,6 +151170,9 @@ public update_partition_column_statistics_args(update_partition_column_statistic if (other.isSetStats_obj()) { this.stats_obj = new ColumnStatistics(other.stats_obj); } + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } } public update_partition_column_statistics_args deepCopy() { @@ -149330,6 +151182,7 @@ public update_partition_column_statistics_args deepCopy() { @Override public void clear() { this.stats_obj = null; + this.validWriteIdList = null; } public ColumnStatistics getStats_obj() { @@ -149355,6 +151208,29 @@ public void setStats_objIsSet(boolean value) { } } + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void 
setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case STATS_OBJ: @@ -149365,6 +151241,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + } } @@ -149373,6 +151257,9 @@ public Object getFieldValue(_Fields field) { case STATS_OBJ: return getStats_obj(); + case VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + } throw new IllegalStateException(); } @@ -149386,6 +151273,8 @@ public boolean isSet(_Fields field) { switch (field) { case STATS_OBJ: return isSetStats_obj(); + case VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); } throw new IllegalStateException(); } @@ -149412,6 +151301,15 @@ public boolean equals(update_partition_column_statistics_args that) { return false; } + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if (!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + return true; } @@ -149424,6 +151322,11 @@ public int hashCode() { if (present_stats_obj) list.add(stats_obj); + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + return list.hashCode(); } @@ -149445,6 +151348,16 @@ public int compareTo(update_partition_column_statistics_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -149472,6 +151385,14 @@ public String toString() { sb.append(this.stats_obj); } first = false; + if (!first) sb.append(", "); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -149527,6 +151448,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, update_partition_co org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 2: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -149545,6 +151474,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, 
update_partition_c struct.stats_obj.write(oprot); oprot.writeFieldEnd(); } + if (struct.validWriteIdList != null) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -149566,21 +151500,31 @@ public void write(org.apache.thrift.protocol.TProtocol prot, update_partition_co if (struct.isSetStats_obj()) { optionals.set(0); } - oprot.writeBitSet(optionals, 1); + if (struct.isSetValidWriteIdList()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); if (struct.isSetStats_obj()) { struct.stats_obj.write(oprot); } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, update_partition_column_statistics_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); + BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { struct.stats_obj = new ColumnStatistics(); struct.stats_obj.read(iprot); struct.setStats_objIsSet(true); } + if (incoming.get(1)) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } } } diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php index f41a02b..03e74d3 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php @@ -174,21 +174,23 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf { public function get_schema_with_environment_context($db_name, $table_name, \metastore\EnvironmentContext $environment_context); /** * @param \metastore\Table $tbl + * @param string $validWriteIdList * @throws \metastore\AlreadyExistsException * @throws \metastore\InvalidObjectException * @throws \metastore\MetaException * @throws \metastore\NoSuchObjectException */ - public function create_table(\metastore\Table $tbl); + public function create_table(\metastore\Table $tbl, $validWriteIdList); /** * @param \metastore\Table $tbl * @param \metastore\EnvironmentContext $environment_context + * @param string $validWriteIdList * @throws \metastore\AlreadyExistsException * @throws \metastore\InvalidObjectException * @throws \metastore\MetaException * @throws \metastore\NoSuchObjectException */ - public function create_table_with_environment_context(\metastore\Table $tbl, \metastore\EnvironmentContext $environment_context); + public function create_table_with_environment_context(\metastore\Table $tbl, \metastore\EnvironmentContext $environment_context, $validWriteIdList); /** * @param \metastore\Table $tbl * @param \metastore\SQLPrimaryKey[] $primaryKeys @@ -197,12 +199,13 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf { * @param \metastore\SQLNotNullConstraint[] $notNullConstraints * @param \metastore\SQLDefaultConstraint[] $defaultConstraints * @param \metastore\SQLCheckConstraint[] $checkConstraints + * @param string $validWriteIdList * @throws \metastore\AlreadyExistsException * @throws \metastore\InvalidObjectException * @throws \metastore\MetaException * @throws \metastore\NoSuchObjectException */ - public function create_table_with_constraints(\metastore\Table $tbl, array $primaryKeys, array $foreignKeys, 
array $uniqueConstraints, array $notNullConstraints, array $defaultConstraints, array $checkConstraints); + public function create_table_with_constraints(\metastore\Table $tbl, array $primaryKeys, array $foreignKeys, array $uniqueConstraints, array $notNullConstraints, array $defaultConstraints, array $checkConstraints, $validWriteIdList); /** * @param \metastore\DropConstraintRequest $req * @throws \metastore\NoSuchObjectException @@ -377,28 +380,31 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf { * @param string $dbname * @param string $tbl_name * @param \metastore\Table $new_tbl + * @param string $validWriteIdList * @throws \metastore\InvalidOperationException * @throws \metastore\MetaException */ - public function alter_table($dbname, $tbl_name, \metastore\Table $new_tbl); + public function alter_table($dbname, $tbl_name, \metastore\Table $new_tbl, $validWriteIdList); /** * @param string $dbname * @param string $tbl_name * @param \metastore\Table $new_tbl * @param \metastore\EnvironmentContext $environment_context + * @param string $validWriteIdList * @throws \metastore\InvalidOperationException * @throws \metastore\MetaException */ - public function alter_table_with_environment_context($dbname, $tbl_name, \metastore\Table $new_tbl, \metastore\EnvironmentContext $environment_context); + public function alter_table_with_environment_context($dbname, $tbl_name, \metastore\Table $new_tbl, \metastore\EnvironmentContext $environment_context, $validWriteIdList); /** * @param string $dbname * @param string $tbl_name * @param \metastore\Table $new_tbl * @param bool $cascade + * @param string $validWriteIdList * @throws \metastore\InvalidOperationException * @throws \metastore\MetaException */ - public function alter_table_with_cascade($dbname, $tbl_name, \metastore\Table $new_tbl, $cascade); + public function alter_table_with_cascade($dbname, $tbl_name, \metastore\Table $new_tbl, $cascade, $validWriteIdList); /** * @param \metastore\AlterTableRequest $req * @return \metastore\AlterTableResponse @@ -408,47 +414,52 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf { public function alter_table_req(\metastore\AlterTableRequest $req); /** * @param \metastore\Partition $new_part + * @param string $validWriteIdList * @return \metastore\Partition * @throws \metastore\InvalidObjectException * @throws \metastore\AlreadyExistsException * @throws \metastore\MetaException */ - public function add_partition(\metastore\Partition $new_part); + public function add_partition(\metastore\Partition $new_part, $validWriteIdList); /** * @param \metastore\Partition $new_part * @param \metastore\EnvironmentContext $environment_context + * @param string $validWriteIdList * @return \metastore\Partition * @throws \metastore\InvalidObjectException * @throws \metastore\AlreadyExistsException * @throws \metastore\MetaException */ - public function add_partition_with_environment_context(\metastore\Partition $new_part, \metastore\EnvironmentContext $environment_context); + public function add_partition_with_environment_context(\metastore\Partition $new_part, \metastore\EnvironmentContext $environment_context, $validWriteIdList); /** * @param \metastore\Partition[] $new_parts + * @param string $validWriteIdList * @return int * @throws \metastore\InvalidObjectException * @throws \metastore\AlreadyExistsException * @throws \metastore\MetaException */ - public function add_partitions(array $new_parts); + public function add_partitions(array $new_parts, $validWriteIdList); /** * @param 
\metastore\PartitionSpec[] $new_parts + * @param string $validWriteIdList * @return int * @throws \metastore\InvalidObjectException * @throws \metastore\AlreadyExistsException * @throws \metastore\MetaException */ - public function add_partitions_pspec(array $new_parts); + public function add_partitions_pspec(array $new_parts, $validWriteIdList); /** * @param string $db_name * @param string $tbl_name * @param string[] $part_vals + * @param string $validWriteIdList * @return \metastore\Partition * @throws \metastore\InvalidObjectException * @throws \metastore\AlreadyExistsException * @throws \metastore\MetaException */ - public function append_partition($db_name, $tbl_name, array $part_vals); + public function append_partition($db_name, $tbl_name, array $part_vals, $validWriteIdList); /** * @param \metastore\AddPartitionsRequest $request * @return \metastore\AddPartitionsResult @@ -462,33 +473,36 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf { * @param string $tbl_name * @param string[] $part_vals * @param \metastore\EnvironmentContext $environment_context + * @param string $validWriteIdList * @return \metastore\Partition * @throws \metastore\InvalidObjectException * @throws \metastore\AlreadyExistsException * @throws \metastore\MetaException */ - public function append_partition_with_environment_context($db_name, $tbl_name, array $part_vals, \metastore\EnvironmentContext $environment_context); + public function append_partition_with_environment_context($db_name, $tbl_name, array $part_vals, \metastore\EnvironmentContext $environment_context, $validWriteIdList); /** * @param string $db_name * @param string $tbl_name * @param string $part_name + * @param string $validWriteIdList * @return \metastore\Partition * @throws \metastore\InvalidObjectException * @throws \metastore\AlreadyExistsException * @throws \metastore\MetaException */ - public function append_partition_by_name($db_name, $tbl_name, $part_name); + public function append_partition_by_name($db_name, $tbl_name, $part_name, $validWriteIdList); /** * @param string $db_name * @param string $tbl_name * @param string $part_name * @param \metastore\EnvironmentContext $environment_context + * @param string $validWriteIdList * @return \metastore\Partition * @throws \metastore\InvalidObjectException * @throws \metastore\AlreadyExistsException * @throws \metastore\MetaException */ - public function append_partition_by_name_with_environment_context($db_name, $tbl_name, $part_name, \metastore\EnvironmentContext $environment_context); + public function append_partition_by_name_with_environment_context($db_name, $tbl_name, $part_name, \metastore\EnvironmentContext $environment_context, $validWriteIdList); /** * @param string $db_name * @param string $tbl_name @@ -553,26 +567,28 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf { * @param string $source_table_name * @param string $dest_db * @param string $dest_table_name + * @param string $validWriteIdList * @return \metastore\Partition * @throws \metastore\MetaException * @throws \metastore\NoSuchObjectException * @throws \metastore\InvalidObjectException * @throws \metastore\InvalidInputException */ - public function exchange_partition(array $partitionSpecs, $source_db, $source_table_name, $dest_db, $dest_table_name); + public function exchange_partition(array $partitionSpecs, $source_db, $source_table_name, $dest_db, $dest_table_name, $validWriteIdList); /** * @param array $partitionSpecs * @param string $source_db * @param string $source_table_name * @param string 
$dest_db * @param string $dest_table_name + * @param string $validWriteIdList * @return \metastore\Partition[] * @throws \metastore\MetaException * @throws \metastore\NoSuchObjectException * @throws \metastore\InvalidObjectException * @throws \metastore\InvalidInputException */ - public function exchange_partitions(array $partitionSpecs, $source_db, $source_table_name, $dest_db, $dest_table_name); + public function exchange_partitions(array $partitionSpecs, $source_db, $source_table_name, $dest_db, $dest_table_name, $validWriteIdList); /** * @param string $db_name * @param string $tbl_name @@ -876,22 +892,24 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf { public function get_check_constraints(\metastore\CheckConstraintsRequest $request); /** * @param \metastore\ColumnStatistics $stats_obj + * @param string $validWriteIdList * @return bool * @throws \metastore\NoSuchObjectException * @throws \metastore\InvalidObjectException * @throws \metastore\MetaException * @throws \metastore\InvalidInputException */ - public function update_table_column_statistics(\metastore\ColumnStatistics $stats_obj); + public function update_table_column_statistics(\metastore\ColumnStatistics $stats_obj, $validWriteIdList); /** * @param \metastore\ColumnStatistics $stats_obj + * @param string $validWriteIdList * @return bool * @throws \metastore\NoSuchObjectException * @throws \metastore\InvalidObjectException * @throws \metastore\MetaException * @throws \metastore\InvalidInputException */ - public function update_partition_column_statistics(\metastore\ColumnStatistics $stats_obj); + public function update_partition_column_statistics(\metastore\ColumnStatistics $stats_obj, $validWriteIdList); /** * @param \metastore\SetPartitionsStatsRequest $req * @return \metastore\SetPartitionsStatsResponse @@ -2865,16 +2883,17 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("get_schema_with_environment_context failed: unknown result"); } - public function create_table(\metastore\Table $tbl) + public function create_table(\metastore\Table $tbl, $validWriteIdList) { - $this->send_create_table($tbl); + $this->send_create_table($tbl, $validWriteIdList); $this->recv_create_table(); } - public function send_create_table(\metastore\Table $tbl) + public function send_create_table(\metastore\Table $tbl, $validWriteIdList) { $args = new \metastore\ThriftHiveMetastore_create_table_args(); $args->tbl = $tbl; + $args->validWriteIdList = $validWriteIdList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -2925,17 +2944,18 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas return; } - public function create_table_with_environment_context(\metastore\Table $tbl, \metastore\EnvironmentContext $environment_context) + public function create_table_with_environment_context(\metastore\Table $tbl, \metastore\EnvironmentContext $environment_context, $validWriteIdList) { - $this->send_create_table_with_environment_context($tbl, $environment_context); + $this->send_create_table_with_environment_context($tbl, $environment_context, $validWriteIdList); $this->recv_create_table_with_environment_context(); } - public function send_create_table_with_environment_context(\metastore\Table $tbl, \metastore\EnvironmentContext $environment_context) + public function send_create_table_with_environment_context(\metastore\Table $tbl, \metastore\EnvironmentContext 
$environment_context, $validWriteIdList) { $args = new \metastore\ThriftHiveMetastore_create_table_with_environment_context_args(); $args->tbl = $tbl; $args->environment_context = $environment_context; + $args->validWriteIdList = $validWriteIdList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -2986,13 +3006,13 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas return; } - public function create_table_with_constraints(\metastore\Table $tbl, array $primaryKeys, array $foreignKeys, array $uniqueConstraints, array $notNullConstraints, array $defaultConstraints, array $checkConstraints) + public function create_table_with_constraints(\metastore\Table $tbl, array $primaryKeys, array $foreignKeys, array $uniqueConstraints, array $notNullConstraints, array $defaultConstraints, array $checkConstraints, $validWriteIdList) { - $this->send_create_table_with_constraints($tbl, $primaryKeys, $foreignKeys, $uniqueConstraints, $notNullConstraints, $defaultConstraints, $checkConstraints); + $this->send_create_table_with_constraints($tbl, $primaryKeys, $foreignKeys, $uniqueConstraints, $notNullConstraints, $defaultConstraints, $checkConstraints, $validWriteIdList); $this->recv_create_table_with_constraints(); } - public function send_create_table_with_constraints(\metastore\Table $tbl, array $primaryKeys, array $foreignKeys, array $uniqueConstraints, array $notNullConstraints, array $defaultConstraints, array $checkConstraints) + public function send_create_table_with_constraints(\metastore\Table $tbl, array $primaryKeys, array $foreignKeys, array $uniqueConstraints, array $notNullConstraints, array $defaultConstraints, array $checkConstraints, $validWriteIdList) { $args = new \metastore\ThriftHiveMetastore_create_table_with_constraints_args(); $args->tbl = $tbl; @@ -3002,6 +3022,7 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas $args->notNullConstraints = $notNullConstraints; $args->defaultConstraints = $defaultConstraints; $args->checkConstraints = $checkConstraints; + $args->validWriteIdList = $validWriteIdList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -4388,18 +4409,19 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("get_table_names_by_filter failed: unknown result"); } - public function alter_table($dbname, $tbl_name, \metastore\Table $new_tbl) + public function alter_table($dbname, $tbl_name, \metastore\Table $new_tbl, $validWriteIdList) { - $this->send_alter_table($dbname, $tbl_name, $new_tbl); + $this->send_alter_table($dbname, $tbl_name, $new_tbl, $validWriteIdList); $this->recv_alter_table(); } - public function send_alter_table($dbname, $tbl_name, \metastore\Table $new_tbl) + public function send_alter_table($dbname, $tbl_name, \metastore\Table $new_tbl, $validWriteIdList) { $args = new \metastore\ThriftHiveMetastore_alter_table_args(); $args->dbname = $dbname; $args->tbl_name = $tbl_name; $args->new_tbl = $new_tbl; + $args->validWriteIdList = $validWriteIdList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -4444,19 +4466,20 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas return; } - public function alter_table_with_environment_context($dbname, 
$tbl_name, \metastore\Table $new_tbl, \metastore\EnvironmentContext $environment_context) + public function alter_table_with_environment_context($dbname, $tbl_name, \metastore\Table $new_tbl, \metastore\EnvironmentContext $environment_context, $validWriteIdList) { - $this->send_alter_table_with_environment_context($dbname, $tbl_name, $new_tbl, $environment_context); + $this->send_alter_table_with_environment_context($dbname, $tbl_name, $new_tbl, $environment_context, $validWriteIdList); $this->recv_alter_table_with_environment_context(); } - public function send_alter_table_with_environment_context($dbname, $tbl_name, \metastore\Table $new_tbl, \metastore\EnvironmentContext $environment_context) + public function send_alter_table_with_environment_context($dbname, $tbl_name, \metastore\Table $new_tbl, \metastore\EnvironmentContext $environment_context, $validWriteIdList) { $args = new \metastore\ThriftHiveMetastore_alter_table_with_environment_context_args(); $args->dbname = $dbname; $args->tbl_name = $tbl_name; $args->new_tbl = $new_tbl; $args->environment_context = $environment_context; + $args->validWriteIdList = $validWriteIdList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -4501,19 +4524,20 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas return; } - public function alter_table_with_cascade($dbname, $tbl_name, \metastore\Table $new_tbl, $cascade) + public function alter_table_with_cascade($dbname, $tbl_name, \metastore\Table $new_tbl, $cascade, $validWriteIdList) { - $this->send_alter_table_with_cascade($dbname, $tbl_name, $new_tbl, $cascade); + $this->send_alter_table_with_cascade($dbname, $tbl_name, $new_tbl, $cascade, $validWriteIdList); $this->recv_alter_table_with_cascade(); } - public function send_alter_table_with_cascade($dbname, $tbl_name, \metastore\Table $new_tbl, $cascade) + public function send_alter_table_with_cascade($dbname, $tbl_name, \metastore\Table $new_tbl, $cascade, $validWriteIdList) { $args = new \metastore\ThriftHiveMetastore_alter_table_with_cascade_args(); $args->dbname = $dbname; $args->tbl_name = $tbl_name; $args->new_tbl = $new_tbl; $args->cascade = $cascade; + $args->validWriteIdList = $validWriteIdList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -4615,16 +4639,17 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("alter_table_req failed: unknown result"); } - public function add_partition(\metastore\Partition $new_part) + public function add_partition(\metastore\Partition $new_part, $validWriteIdList) { - $this->send_add_partition($new_part); + $this->send_add_partition($new_part, $validWriteIdList); return $this->recv_add_partition(); } - public function send_add_partition(\metastore\Partition $new_part) + public function send_add_partition(\metastore\Partition $new_part, $validWriteIdList) { $args = new \metastore\ThriftHiveMetastore_add_partition_args(); $args->new_part = $new_part; + $args->validWriteIdList = $validWriteIdList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -4675,17 +4700,18 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("add_partition failed: unknown result"); } - public function 
add_partition_with_environment_context(\metastore\Partition $new_part, \metastore\EnvironmentContext $environment_context) + public function add_partition_with_environment_context(\metastore\Partition $new_part, \metastore\EnvironmentContext $environment_context, $validWriteIdList) { - $this->send_add_partition_with_environment_context($new_part, $environment_context); + $this->send_add_partition_with_environment_context($new_part, $environment_context, $validWriteIdList); return $this->recv_add_partition_with_environment_context(); } - public function send_add_partition_with_environment_context(\metastore\Partition $new_part, \metastore\EnvironmentContext $environment_context) + public function send_add_partition_with_environment_context(\metastore\Partition $new_part, \metastore\EnvironmentContext $environment_context, $validWriteIdList) { $args = new \metastore\ThriftHiveMetastore_add_partition_with_environment_context_args(); $args->new_part = $new_part; $args->environment_context = $environment_context; + $args->validWriteIdList = $validWriteIdList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -4736,16 +4762,17 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("add_partition_with_environment_context failed: unknown result"); } - public function add_partitions(array $new_parts) + public function add_partitions(array $new_parts, $validWriteIdList) { - $this->send_add_partitions($new_parts); + $this->send_add_partitions($new_parts, $validWriteIdList); return $this->recv_add_partitions(); } - public function send_add_partitions(array $new_parts) + public function send_add_partitions(array $new_parts, $validWriteIdList) { $args = new \metastore\ThriftHiveMetastore_add_partitions_args(); $args->new_parts = $new_parts; + $args->validWriteIdList = $validWriteIdList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -4796,16 +4823,17 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("add_partitions failed: unknown result"); } - public function add_partitions_pspec(array $new_parts) + public function add_partitions_pspec(array $new_parts, $validWriteIdList) { - $this->send_add_partitions_pspec($new_parts); + $this->send_add_partitions_pspec($new_parts, $validWriteIdList); return $this->recv_add_partitions_pspec(); } - public function send_add_partitions_pspec(array $new_parts) + public function send_add_partitions_pspec(array $new_parts, $validWriteIdList) { $args = new \metastore\ThriftHiveMetastore_add_partitions_pspec_args(); $args->new_parts = $new_parts; + $args->validWriteIdList = $validWriteIdList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -4856,18 +4884,19 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("add_partitions_pspec failed: unknown result"); } - public function append_partition($db_name, $tbl_name, array $part_vals) + public function append_partition($db_name, $tbl_name, array $part_vals, $validWriteIdList) { - $this->send_append_partition($db_name, $tbl_name, $part_vals); + $this->send_append_partition($db_name, $tbl_name, $part_vals, $validWriteIdList); return $this->recv_append_partition(); } - public function 
send_append_partition($db_name, $tbl_name, array $part_vals) + public function send_append_partition($db_name, $tbl_name, array $part_vals, $validWriteIdList) { $args = new \metastore\ThriftHiveMetastore_append_partition_args(); $args->db_name = $db_name; $args->tbl_name = $tbl_name; $args->part_vals = $part_vals; + $args->validWriteIdList = $validWriteIdList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -4978,19 +5007,20 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("add_partitions_req failed: unknown result"); } - public function append_partition_with_environment_context($db_name, $tbl_name, array $part_vals, \metastore\EnvironmentContext $environment_context) + public function append_partition_with_environment_context($db_name, $tbl_name, array $part_vals, \metastore\EnvironmentContext $environment_context, $validWriteIdList) { - $this->send_append_partition_with_environment_context($db_name, $tbl_name, $part_vals, $environment_context); + $this->send_append_partition_with_environment_context($db_name, $tbl_name, $part_vals, $environment_context, $validWriteIdList); return $this->recv_append_partition_with_environment_context(); } - public function send_append_partition_with_environment_context($db_name, $tbl_name, array $part_vals, \metastore\EnvironmentContext $environment_context) + public function send_append_partition_with_environment_context($db_name, $tbl_name, array $part_vals, \metastore\EnvironmentContext $environment_context, $validWriteIdList) { $args = new \metastore\ThriftHiveMetastore_append_partition_with_environment_context_args(); $args->db_name = $db_name; $args->tbl_name = $tbl_name; $args->part_vals = $part_vals; $args->environment_context = $environment_context; + $args->validWriteIdList = $validWriteIdList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -5041,18 +5071,19 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("append_partition_with_environment_context failed: unknown result"); } - public function append_partition_by_name($db_name, $tbl_name, $part_name) + public function append_partition_by_name($db_name, $tbl_name, $part_name, $validWriteIdList) { - $this->send_append_partition_by_name($db_name, $tbl_name, $part_name); + $this->send_append_partition_by_name($db_name, $tbl_name, $part_name, $validWriteIdList); return $this->recv_append_partition_by_name(); } - public function send_append_partition_by_name($db_name, $tbl_name, $part_name) + public function send_append_partition_by_name($db_name, $tbl_name, $part_name, $validWriteIdList) { $args = new \metastore\ThriftHiveMetastore_append_partition_by_name_args(); $args->db_name = $db_name; $args->tbl_name = $tbl_name; $args->part_name = $part_name; + $args->validWriteIdList = $validWriteIdList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -5103,19 +5134,20 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("append_partition_by_name failed: unknown result"); } - public function append_partition_by_name_with_environment_context($db_name, $tbl_name, $part_name, \metastore\EnvironmentContext $environment_context) + public function 
append_partition_by_name_with_environment_context($db_name, $tbl_name, $part_name, \metastore\EnvironmentContext $environment_context, $validWriteIdList) { - $this->send_append_partition_by_name_with_environment_context($db_name, $tbl_name, $part_name, $environment_context); + $this->send_append_partition_by_name_with_environment_context($db_name, $tbl_name, $part_name, $environment_context, $validWriteIdList); return $this->recv_append_partition_by_name_with_environment_context(); } - public function send_append_partition_by_name_with_environment_context($db_name, $tbl_name, $part_name, \metastore\EnvironmentContext $environment_context) + public function send_append_partition_by_name_with_environment_context($db_name, $tbl_name, $part_name, \metastore\EnvironmentContext $environment_context, $validWriteIdList) { $args = new \metastore\ThriftHiveMetastore_append_partition_by_name_with_environment_context_args(); $args->db_name = $db_name; $args->tbl_name = $tbl_name; $args->part_name = $part_name; $args->environment_context = $environment_context; + $args->validWriteIdList = $validWriteIdList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -5524,13 +5556,13 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("get_partition failed: unknown result"); } - public function exchange_partition(array $partitionSpecs, $source_db, $source_table_name, $dest_db, $dest_table_name) + public function exchange_partition(array $partitionSpecs, $source_db, $source_table_name, $dest_db, $dest_table_name, $validWriteIdList) { - $this->send_exchange_partition($partitionSpecs, $source_db, $source_table_name, $dest_db, $dest_table_name); + $this->send_exchange_partition($partitionSpecs, $source_db, $source_table_name, $dest_db, $dest_table_name, $validWriteIdList); return $this->recv_exchange_partition(); } - public function send_exchange_partition(array $partitionSpecs, $source_db, $source_table_name, $dest_db, $dest_table_name) + public function send_exchange_partition(array $partitionSpecs, $source_db, $source_table_name, $dest_db, $dest_table_name, $validWriteIdList) { $args = new \metastore\ThriftHiveMetastore_exchange_partition_args(); $args->partitionSpecs = $partitionSpecs; @@ -5538,6 +5570,7 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas $args->source_table_name = $source_table_name; $args->dest_db = $dest_db; $args->dest_table_name = $dest_table_name; + $args->validWriteIdList = $validWriteIdList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -5591,13 +5624,13 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("exchange_partition failed: unknown result"); } - public function exchange_partitions(array $partitionSpecs, $source_db, $source_table_name, $dest_db, $dest_table_name) + public function exchange_partitions(array $partitionSpecs, $source_db, $source_table_name, $dest_db, $dest_table_name, $validWriteIdList) { - $this->send_exchange_partitions($partitionSpecs, $source_db, $source_table_name, $dest_db, $dest_table_name); + $this->send_exchange_partitions($partitionSpecs, $source_db, $source_table_name, $dest_db, $dest_table_name, $validWriteIdList); return $this->recv_exchange_partitions(); } - public function send_exchange_partitions(array $partitionSpecs, $source_db, 
$source_table_name, $dest_db, $dest_table_name) + public function send_exchange_partitions(array $partitionSpecs, $source_db, $source_table_name, $dest_db, $dest_table_name, $validWriteIdList) { $args = new \metastore\ThriftHiveMetastore_exchange_partitions_args(); $args->partitionSpecs = $partitionSpecs; @@ -5605,6 +5638,7 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas $args->source_table_name = $source_table_name; $args->dest_db = $dest_db; $args->dest_table_name = $dest_table_name; + $args->validWriteIdList = $validWriteIdList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -7705,16 +7739,17 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("get_check_constraints failed: unknown result"); } - public function update_table_column_statistics(\metastore\ColumnStatistics $stats_obj) + public function update_table_column_statistics(\metastore\ColumnStatistics $stats_obj, $validWriteIdList) { - $this->send_update_table_column_statistics($stats_obj); + $this->send_update_table_column_statistics($stats_obj, $validWriteIdList); return $this->recv_update_table_column_statistics(); } - public function send_update_table_column_statistics(\metastore\ColumnStatistics $stats_obj) + public function send_update_table_column_statistics(\metastore\ColumnStatistics $stats_obj, $validWriteIdList) { $args = new \metastore\ThriftHiveMetastore_update_table_column_statistics_args(); $args->stats_obj = $stats_obj; + $args->validWriteIdList = $validWriteIdList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -7768,16 +7803,17 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("update_table_column_statistics failed: unknown result"); } - public function update_partition_column_statistics(\metastore\ColumnStatistics $stats_obj) + public function update_partition_column_statistics(\metastore\ColumnStatistics $stats_obj, $validWriteIdList) { - $this->send_update_partition_column_statistics($stats_obj); + $this->send_update_partition_column_statistics($stats_obj, $validWriteIdList); return $this->recv_update_partition_column_statistics(); } - public function send_update_partition_column_statistics(\metastore\ColumnStatistics $stats_obj) + public function send_update_partition_column_statistics(\metastore\ColumnStatistics $stats_obj, $validWriteIdList) { $args = new \metastore\ThriftHiveMetastore_update_partition_column_statistics_args(); $args->stats_obj = $stats_obj; + $args->validWriteIdList = $validWriteIdList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -18924,6 +18960,10 @@ class ThriftHiveMetastore_create_table_args { * @var \metastore\Table */ public $tbl = null; + /** + * @var string + */ + public $validWriteIdList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -18933,12 +18973,19 @@ class ThriftHiveMetastore_create_table_args { 'type' => TType::STRUCT, 'class' => '\metastore\Table', ), + 2 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { if (isset($vals['tbl'])) { $this->tbl = $vals['tbl']; } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } } } @@ 
-18969,6 +19016,13 @@ class ThriftHiveMetastore_create_table_args { $xfer += $input->skip($ftype); } break; + case 2: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -18990,6 +19044,11 @@ class ThriftHiveMetastore_create_table_args { $xfer += $this->tbl->write($output); $xfer += $output->writeFieldEnd(); } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 2); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -19160,6 +19219,10 @@ class ThriftHiveMetastore_create_table_with_environment_context_args { * @var \metastore\EnvironmentContext */ public $environment_context = null; + /** + * @var string + */ + public $validWriteIdList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -19174,6 +19237,10 @@ class ThriftHiveMetastore_create_table_with_environment_context_args { 'type' => TType::STRUCT, 'class' => '\metastore\EnvironmentContext', ), + 3 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -19183,6 +19250,9 @@ class ThriftHiveMetastore_create_table_with_environment_context_args { if (isset($vals['environment_context'])) { $this->environment_context = $vals['environment_context']; } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } } } @@ -19221,6 +19291,13 @@ class ThriftHiveMetastore_create_table_with_environment_context_args { $xfer += $input->skip($ftype); } break; + case 3: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -19250,6 +19327,11 @@ class ThriftHiveMetastore_create_table_with_environment_context_args { $xfer += $this->environment_context->write($output); $xfer += $output->writeFieldEnd(); } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 3); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -19440,6 +19522,10 @@ class ThriftHiveMetastore_create_table_with_constraints_args { * @var \metastore\SQLCheckConstraint[] */ public $checkConstraints = null; + /** + * @var string + */ + public $validWriteIdList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -19503,6 +19589,10 @@ class ThriftHiveMetastore_create_table_with_constraints_args { 'class' => '\metastore\SQLCheckConstraint', ), ), + 8 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -19527,6 +19617,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { if (isset($vals['checkConstraints'])) { $this->checkConstraints = $vals['checkConstraints']; } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } } } @@ -19665,6 +19758,13 @@ class ThriftHiveMetastore_create_table_with_constraints_args { $xfer += $input->skip($ftype); } break; + case 8: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += 
$input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -19788,6 +19888,11 @@ class ThriftHiveMetastore_create_table_with_constraints_args { } $xfer += $output->writeFieldEnd(); } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 8); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -25176,6 +25281,10 @@ class ThriftHiveMetastore_alter_table_args { * @var \metastore\Table */ public $new_tbl = null; + /** + * @var string + */ + public $validWriteIdList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -25193,6 +25302,10 @@ class ThriftHiveMetastore_alter_table_args { 'type' => TType::STRUCT, 'class' => '\metastore\Table', ), + 4 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -25205,6 +25318,9 @@ class ThriftHiveMetastore_alter_table_args { if (isset($vals['new_tbl'])) { $this->new_tbl = $vals['new_tbl']; } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } } } @@ -25249,6 +25365,13 @@ class ThriftHiveMetastore_alter_table_args { $xfer += $input->skip($ftype); } break; + case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -25280,6 +25403,11 @@ class ThriftHiveMetastore_alter_table_args { $xfer += $this->new_tbl->write($output); $xfer += $output->writeFieldEnd(); } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 4); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -25408,6 +25536,10 @@ class ThriftHiveMetastore_alter_table_with_environment_context_args { * @var \metastore\EnvironmentContext */ public $environment_context = null; + /** + * @var string + */ + public $validWriteIdList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -25430,6 +25562,10 @@ class ThriftHiveMetastore_alter_table_with_environment_context_args { 'type' => TType::STRUCT, 'class' => '\metastore\EnvironmentContext', ), + 5 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -25445,6 +25581,9 @@ class ThriftHiveMetastore_alter_table_with_environment_context_args { if (isset($vals['environment_context'])) { $this->environment_context = $vals['environment_context']; } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } } } @@ -25497,6 +25636,13 @@ class ThriftHiveMetastore_alter_table_with_environment_context_args { $xfer += $input->skip($ftype); } break; + case 5: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -25536,6 +25682,11 @@ class ThriftHiveMetastore_alter_table_with_environment_context_args { $xfer += $this->environment_context->write($output); $xfer += $output->writeFieldEnd(); } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 5); + $xfer += 
$output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -25664,6 +25815,10 @@ class ThriftHiveMetastore_alter_table_with_cascade_args { * @var bool */ public $cascade = null; + /** + * @var string + */ + public $validWriteIdList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -25685,6 +25840,10 @@ class ThriftHiveMetastore_alter_table_with_cascade_args { 'var' => 'cascade', 'type' => TType::BOOL, ), + 5 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -25700,6 +25859,9 @@ class ThriftHiveMetastore_alter_table_with_cascade_args { if (isset($vals['cascade'])) { $this->cascade = $vals['cascade']; } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } } } @@ -25751,6 +25913,13 @@ class ThriftHiveMetastore_alter_table_with_cascade_args { $xfer += $input->skip($ftype); } break; + case 5: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -25787,6 +25956,11 @@ class ThriftHiveMetastore_alter_table_with_cascade_args { $xfer += $output->writeBool($this->cascade); $xfer += $output->writeFieldEnd(); } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 5); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -26113,6 +26287,10 @@ class ThriftHiveMetastore_add_partition_args { * @var \metastore\Partition */ public $new_part = null; + /** + * @var string + */ + public $validWriteIdList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -26122,12 +26300,19 @@ class ThriftHiveMetastore_add_partition_args { 'type' => TType::STRUCT, 'class' => '\metastore\Partition', ), + 2 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { if (isset($vals['new_part'])) { $this->new_part = $vals['new_part']; } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } } } @@ -26158,6 +26343,13 @@ class ThriftHiveMetastore_add_partition_args { $xfer += $input->skip($ftype); } break; + case 2: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -26179,6 +26371,11 @@ class ThriftHiveMetastore_add_partition_args { $xfer += $this->new_part->write($output); $xfer += $output->writeFieldEnd(); } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 2); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -26352,6 +26549,10 @@ class ThriftHiveMetastore_add_partition_with_environment_context_args { * @var \metastore\EnvironmentContext */ public $environment_context = null; + /** + * @var string + */ + public $validWriteIdList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -26366,6 +26567,10 @@ class 
ThriftHiveMetastore_add_partition_with_environment_context_args { 'type' => TType::STRUCT, 'class' => '\metastore\EnvironmentContext', ), + 3 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -26375,6 +26580,9 @@ class ThriftHiveMetastore_add_partition_with_environment_context_args { if (isset($vals['environment_context'])) { $this->environment_context = $vals['environment_context']; } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } } } @@ -26413,6 +26621,13 @@ class ThriftHiveMetastore_add_partition_with_environment_context_args { $xfer += $input->skip($ftype); } break; + case 3: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -26442,6 +26657,11 @@ class ThriftHiveMetastore_add_partition_with_environment_context_args { $xfer += $this->environment_context->write($output); $xfer += $output->writeFieldEnd(); } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 3); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -26611,6 +26831,10 @@ class ThriftHiveMetastore_add_partitions_args { * @var \metastore\Partition[] */ public $new_parts = null; + /** + * @var string + */ + public $validWriteIdList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -26624,12 +26848,19 @@ class ThriftHiveMetastore_add_partitions_args { 'class' => '\metastore\Partition', ), ), + 2 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { if (isset($vals['new_parts'])) { $this->new_parts = $vals['new_parts']; } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } } } @@ -26670,6 +26901,13 @@ class ThriftHiveMetastore_add_partitions_args { $xfer += $input->skip($ftype); } break; + case 2: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -26700,6 +26938,11 @@ class ThriftHiveMetastore_add_partitions_args { } $xfer += $output->writeFieldEnd(); } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 2); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -26864,6 +27107,10 @@ class ThriftHiveMetastore_add_partitions_pspec_args { * @var \metastore\PartitionSpec[] */ public $new_parts = null; + /** + * @var string + */ + public $validWriteIdList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -26877,12 +27124,19 @@ class ThriftHiveMetastore_add_partitions_pspec_args { 'class' => '\metastore\PartitionSpec', ), ), + 2 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { if (isset($vals['new_parts'])) { $this->new_parts = $vals['new_parts']; } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } } } @@ -26923,6 +27177,13 @@ class ThriftHiveMetastore_add_partitions_pspec_args { $xfer += 
$input->skip($ftype); } break; + case 2: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -26953,6 +27214,11 @@ class ThriftHiveMetastore_add_partitions_pspec_args { } $xfer += $output->writeFieldEnd(); } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 2); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -27125,6 +27391,10 @@ class ThriftHiveMetastore_append_partition_args { * @var string[] */ public $part_vals = null; + /** + * @var string + */ + public $validWriteIdList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -27145,6 +27415,10 @@ class ThriftHiveMetastore_append_partition_args { 'type' => TType::STRING, ), ), + 4 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -27157,6 +27431,9 @@ class ThriftHiveMetastore_append_partition_args { if (isset($vals['part_vals'])) { $this->part_vals = $vals['part_vals']; } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } } } @@ -27210,6 +27487,13 @@ class ThriftHiveMetastore_append_partition_args { $xfer += $input->skip($ftype); } break; + case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -27250,6 +27534,11 @@ class ThriftHiveMetastore_append_partition_args { } $xfer += $output->writeFieldEnd(); } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 4); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -27666,6 +27955,10 @@ class ThriftHiveMetastore_append_partition_with_environment_context_args { * @var \metastore\EnvironmentContext */ public $environment_context = null; + /** + * @var string + */ + public $validWriteIdList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -27691,6 +27984,10 @@ class ThriftHiveMetastore_append_partition_with_environment_context_args { 'type' => TType::STRUCT, 'class' => '\metastore\EnvironmentContext', ), + 5 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -27706,6 +28003,9 @@ class ThriftHiveMetastore_append_partition_with_environment_context_args { if (isset($vals['environment_context'])) { $this->environment_context = $vals['environment_context']; } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } } } @@ -27767,6 +28067,13 @@ class ThriftHiveMetastore_append_partition_with_environment_context_args { $xfer += $input->skip($ftype); } break; + case 5: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -27815,6 +28122,11 @@ class ThriftHiveMetastore_append_partition_with_environment_context_args { $xfer += $this->environment_context->write($output); $xfer += $output->writeFieldEnd(); } + if 
($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 5); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -27992,6 +28304,10 @@ class ThriftHiveMetastore_append_partition_by_name_args { * @var string */ public $part_name = null; + /** + * @var string + */ + public $validWriteIdList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -28008,6 +28324,10 @@ class ThriftHiveMetastore_append_partition_by_name_args { 'var' => 'part_name', 'type' => TType::STRING, ), + 4 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -28020,6 +28340,9 @@ class ThriftHiveMetastore_append_partition_by_name_args { if (isset($vals['part_name'])) { $this->part_name = $vals['part_name']; } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } } } @@ -28063,6 +28386,13 @@ class ThriftHiveMetastore_append_partition_by_name_args { $xfer += $input->skip($ftype); } break; + case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -28091,6 +28421,11 @@ class ThriftHiveMetastore_append_partition_by_name_args { $xfer += $output->writeString($this->part_name); $xfer += $output->writeFieldEnd(); } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 4); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -28272,6 +28607,10 @@ class ThriftHiveMetastore_append_partition_by_name_with_environment_context_args * @var \metastore\EnvironmentContext */ public $environment_context = null; + /** + * @var string + */ + public $validWriteIdList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -28293,6 +28632,10 @@ class ThriftHiveMetastore_append_partition_by_name_with_environment_context_args 'type' => TType::STRUCT, 'class' => '\metastore\EnvironmentContext', ), + 5 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -28308,6 +28651,9 @@ class ThriftHiveMetastore_append_partition_by_name_with_environment_context_args if (isset($vals['environment_context'])) { $this->environment_context = $vals['environment_context']; } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } } } @@ -28359,6 +28705,13 @@ class ThriftHiveMetastore_append_partition_by_name_with_environment_context_args $xfer += $input->skip($ftype); } break; + case 5: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -28395,6 +28748,11 @@ class ThriftHiveMetastore_append_partition_by_name_with_environment_context_args $xfer += $this->environment_context->write($output); $xfer += $output->writeFieldEnd(); } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 5); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } $xfer += 
$output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -30251,6 +30609,10 @@ class ThriftHiveMetastore_exchange_partition_args { * @var string */ public $dest_table_name = null; + /** + * @var string + */ + public $validWriteIdList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -30283,6 +30645,10 @@ class ThriftHiveMetastore_exchange_partition_args { 'var' => 'dest_table_name', 'type' => TType::STRING, ), + 6 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -30301,6 +30667,9 @@ class ThriftHiveMetastore_exchange_partition_args { if (isset($vals['dest_table_name'])) { $this->dest_table_name = $vals['dest_table_name']; } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } } } @@ -30371,6 +30740,13 @@ class ThriftHiveMetastore_exchange_partition_args { $xfer += $input->skip($ftype); } break; + case 6: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -30422,6 +30798,11 @@ class ThriftHiveMetastore_exchange_partition_args { $xfer += $output->writeString($this->dest_table_name); $xfer += $output->writeFieldEnd(); } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 6); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -30632,6 +31013,10 @@ class ThriftHiveMetastore_exchange_partitions_args { * @var string */ public $dest_table_name = null; + /** + * @var string + */ + public $validWriteIdList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -30664,6 +31049,10 @@ class ThriftHiveMetastore_exchange_partitions_args { 'var' => 'dest_table_name', 'type' => TType::STRING, ), + 6 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -30682,6 +31071,9 @@ class ThriftHiveMetastore_exchange_partitions_args { if (isset($vals['dest_table_name'])) { $this->dest_table_name = $vals['dest_table_name']; } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } } } @@ -30752,6 +31144,13 @@ class ThriftHiveMetastore_exchange_partitions_args { $xfer += $input->skip($ftype); } break; + case 6: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -30803,6 +31202,11 @@ class ThriftHiveMetastore_exchange_partitions_args { $xfer += $output->writeString($this->dest_table_name); $xfer += $output->writeFieldEnd(); } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 6); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -40181,6 +40585,10 @@ class ThriftHiveMetastore_update_table_column_statistics_args { * @var \metastore\ColumnStatistics */ public $stats_obj = null; + /** + * @var string + */ + public $validWriteIdList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -40190,12 +40598,19 @@ class 
ThriftHiveMetastore_update_table_column_statistics_args { 'type' => TType::STRUCT, 'class' => '\metastore\ColumnStatistics', ), + 2 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { if (isset($vals['stats_obj'])) { $this->stats_obj = $vals['stats_obj']; } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } } } @@ -40226,6 +40641,13 @@ class ThriftHiveMetastore_update_table_column_statistics_args { $xfer += $input->skip($ftype); } break; + case 2: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -40247,6 +40669,11 @@ class ThriftHiveMetastore_update_table_column_statistics_args { $xfer += $this->stats_obj->write($output); $xfer += $output->writeFieldEnd(); } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 2); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -40436,6 +40863,10 @@ class ThriftHiveMetastore_update_partition_column_statistics_args { * @var \metastore\ColumnStatistics */ public $stats_obj = null; + /** + * @var string + */ + public $validWriteIdList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -40445,12 +40876,19 @@ class ThriftHiveMetastore_update_partition_column_statistics_args { 'type' => TType::STRUCT, 'class' => '\metastore\ColumnStatistics', ), + 2 => array( + 'var' => 'validWriteIdList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { if (isset($vals['stats_obj'])) { $this->stats_obj = $vals['stats_obj']; } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } } } @@ -40481,6 +40919,13 @@ class ThriftHiveMetastore_update_partition_column_statistics_args { $xfer += $input->skip($ftype); } break; + case 2: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -40502,6 +40947,11 @@ class ThriftHiveMetastore_update_partition_column_statistics_args { $xfer += $this->stats_obj->write($output); $xfer += $output->writeFieldEnd(); } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 2); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php index ec1ed6c..30482cb 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php @@ -18475,6 +18475,10 @@ class CommitTxnRequest { * @var \metastore\ReplLastIdInfo */ public $replLastIdInfo = null; + /** + * @var string + */ + public $txnWriteIds = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -18506,6 +18510,10 @@ class CommitTxnRequest { 'type' => TType::STRUCT, 'class' => '\metastore\ReplLastIdInfo', ), + 6 => array( + 'var' => 'txnWriteIds', + 
'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -18524,6 +18532,9 @@ class CommitTxnRequest { if (isset($vals['replLastIdInfo'])) { $this->replLastIdInfo = $vals['replLastIdInfo']; } + if (isset($vals['txnWriteIds'])) { + $this->txnWriteIds = $vals['txnWriteIds']; + } } } @@ -18594,6 +18605,13 @@ class CommitTxnRequest { $xfer += $input->skip($ftype); } break; + case 6: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->txnWriteIds); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -18650,6 +18668,11 @@ class CommitTxnRequest { $xfer += $this->replLastIdInfo->write($output); $xfer += $output->writeFieldEnd(); } + if ($this->txnWriteIds !== null) { + $xfer += $output->writeFieldBegin('txnWriteIds', TType::STRING, 6); + $xfer += $output->writeString($this->txnWriteIds); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote index fb2747c..a00b401 100755 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote @@ -45,9 +45,9 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help': print(' get_fields_with_environment_context(string db_name, string table_name, EnvironmentContext environment_context)') print(' get_schema(string db_name, string table_name)') print(' get_schema_with_environment_context(string db_name, string table_name, EnvironmentContext environment_context)') - print(' void create_table(Table tbl)') - print(' void create_table_with_environment_context(Table tbl, EnvironmentContext environment_context)') - print(' void create_table_with_constraints(Table tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints)') + print(' void create_table(Table tbl, string validWriteIdList)') + print(' void create_table_with_environment_context(Table tbl, EnvironmentContext environment_context, string validWriteIdList)') + print(' void create_table_with_constraints(Table tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints, string validWriteIdList)') print(' void drop_constraint(DropConstraintRequest req)') print(' void add_primary_key(AddPrimaryKeyRequest req)') print(' void add_foreign_key(AddForeignKeyRequest req)') @@ -72,27 +72,27 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help': print(' Materialization get_materialization_invalidation_info(CreationMetadata creation_metadata, string validTxnList)') print(' void update_creation_metadata(string catName, string dbname, string tbl_name, CreationMetadata creation_metadata)') print(' get_table_names_by_filter(string dbname, string filter, i16 max_tables)') - print(' void alter_table(string dbname, string tbl_name, Table new_tbl)') - print(' void alter_table_with_environment_context(string dbname, string tbl_name, Table new_tbl, EnvironmentContext environment_context)') - print(' void alter_table_with_cascade(string dbname, string tbl_name, Table new_tbl, bool cascade)') + print(' void alter_table(string dbname, string tbl_name, Table new_tbl, string validWriteIdList)') + print(' void 
alter_table_with_environment_context(string dbname, string tbl_name, Table new_tbl, EnvironmentContext environment_context, string validWriteIdList)') + print(' void alter_table_with_cascade(string dbname, string tbl_name, Table new_tbl, bool cascade, string validWriteIdList)') print(' AlterTableResponse alter_table_req(AlterTableRequest req)') - print(' Partition add_partition(Partition new_part)') - print(' Partition add_partition_with_environment_context(Partition new_part, EnvironmentContext environment_context)') - print(' i32 add_partitions( new_parts)') - print(' i32 add_partitions_pspec( new_parts)') - print(' Partition append_partition(string db_name, string tbl_name, part_vals)') + print(' Partition add_partition(Partition new_part, string validWriteIdList)') + print(' Partition add_partition_with_environment_context(Partition new_part, EnvironmentContext environment_context, string validWriteIdList)') + print(' i32 add_partitions( new_parts, string validWriteIdList)') + print(' i32 add_partitions_pspec( new_parts, string validWriteIdList)') + print(' Partition append_partition(string db_name, string tbl_name, part_vals, string validWriteIdList)') print(' AddPartitionsResult add_partitions_req(AddPartitionsRequest request)') - print(' Partition append_partition_with_environment_context(string db_name, string tbl_name, part_vals, EnvironmentContext environment_context)') - print(' Partition append_partition_by_name(string db_name, string tbl_name, string part_name)') - print(' Partition append_partition_by_name_with_environment_context(string db_name, string tbl_name, string part_name, EnvironmentContext environment_context)') + print(' Partition append_partition_with_environment_context(string db_name, string tbl_name, part_vals, EnvironmentContext environment_context, string validWriteIdList)') + print(' Partition append_partition_by_name(string db_name, string tbl_name, string part_name, string validWriteIdList)') + print(' Partition append_partition_by_name_with_environment_context(string db_name, string tbl_name, string part_name, EnvironmentContext environment_context, string validWriteIdList)') print(' bool drop_partition(string db_name, string tbl_name, part_vals, bool deleteData)') print(' bool drop_partition_with_environment_context(string db_name, string tbl_name, part_vals, bool deleteData, EnvironmentContext environment_context)') print(' bool drop_partition_by_name(string db_name, string tbl_name, string part_name, bool deleteData)') print(' bool drop_partition_by_name_with_environment_context(string db_name, string tbl_name, string part_name, bool deleteData, EnvironmentContext environment_context)') print(' DropPartitionsResult drop_partitions_req(DropPartitionsRequest req)') print(' Partition get_partition(string db_name, string tbl_name, part_vals)') - print(' Partition exchange_partition( partitionSpecs, string source_db, string source_table_name, string dest_db, string dest_table_name)') - print(' exchange_partitions( partitionSpecs, string source_db, string source_table_name, string dest_db, string dest_table_name)') + print(' Partition exchange_partition( partitionSpecs, string source_db, string source_table_name, string dest_db, string dest_table_name, string validWriteIdList)') + print(' exchange_partitions( partitionSpecs, string source_db, string source_table_name, string dest_db, string dest_table_name, string validWriteIdList)') print(' Partition get_partition_with_auth(string db_name, string tbl_name, part_vals, string user_name, group_names)') print(' 
Partition get_partition_by_name(string db_name, string tbl_name, string part_name)') print(' get_partitions(string db_name, string tbl_name, i16 max_parts)') @@ -128,8 +128,8 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help': print(' NotNullConstraintsResponse get_not_null_constraints(NotNullConstraintsRequest request)') print(' DefaultConstraintsResponse get_default_constraints(DefaultConstraintsRequest request)') print(' CheckConstraintsResponse get_check_constraints(CheckConstraintsRequest request)') - print(' bool update_table_column_statistics(ColumnStatistics stats_obj)') - print(' bool update_partition_column_statistics(ColumnStatistics stats_obj)') + print(' bool update_table_column_statistics(ColumnStatistics stats_obj, string validWriteIdList)') + print(' bool update_partition_column_statistics(ColumnStatistics stats_obj, string validWriteIdList)') print(' SetPartitionsStatsResponse update_table_column_statistics_req(SetPartitionsStatsRequest req)') print(' SetPartitionsStatsResponse update_partition_column_statistics_req(SetPartitionsStatsRequest req)') print(' ColumnStatistics get_table_column_statistics(string db_name, string tbl_name, string col_name)') @@ -444,22 +444,22 @@ elif cmd == 'get_schema_with_environment_context': pp.pprint(client.get_schema_with_environment_context(args[0],args[1],eval(args[2]),)) elif cmd == 'create_table': - if len(args) != 1: - print('create_table requires 1 args') + if len(args) != 2: + print('create_table requires 2 args') sys.exit(1) - pp.pprint(client.create_table(eval(args[0]),)) + pp.pprint(client.create_table(eval(args[0]),args[1],)) elif cmd == 'create_table_with_environment_context': - if len(args) != 2: - print('create_table_with_environment_context requires 2 args') + if len(args) != 3: + print('create_table_with_environment_context requires 3 args') sys.exit(1) - pp.pprint(client.create_table_with_environment_context(eval(args[0]),eval(args[1]),)) + pp.pprint(client.create_table_with_environment_context(eval(args[0]),eval(args[1]),args[2],)) elif cmd == 'create_table_with_constraints': - if len(args) != 7: - print('create_table_with_constraints requires 7 args') + if len(args) != 8: + print('create_table_with_constraints requires 8 args') sys.exit(1) - pp.pprint(client.create_table_with_constraints(eval(args[0]),eval(args[1]),eval(args[2]),eval(args[3]),eval(args[4]),eval(args[5]),eval(args[6]),)) + pp.pprint(client.create_table_with_constraints(eval(args[0]),eval(args[1]),eval(args[2]),eval(args[3]),eval(args[4]),eval(args[5]),eval(args[6]),args[7],)) elif cmd == 'drop_constraint': if len(args) != 1: @@ -606,22 +606,22 @@ elif cmd == 'get_table_names_by_filter': pp.pprint(client.get_table_names_by_filter(args[0],args[1],eval(args[2]),)) elif cmd == 'alter_table': - if len(args) != 3: - print('alter_table requires 3 args') + if len(args) != 4: + print('alter_table requires 4 args') sys.exit(1) - pp.pprint(client.alter_table(args[0],args[1],eval(args[2]),)) + pp.pprint(client.alter_table(args[0],args[1],eval(args[2]),args[3],)) elif cmd == 'alter_table_with_environment_context': - if len(args) != 4: - print('alter_table_with_environment_context requires 4 args') + if len(args) != 5: + print('alter_table_with_environment_context requires 5 args') sys.exit(1) - pp.pprint(client.alter_table_with_environment_context(args[0],args[1],eval(args[2]),eval(args[3]),)) + pp.pprint(client.alter_table_with_environment_context(args[0],args[1],eval(args[2]),eval(args[3]),args[4],)) elif cmd == 'alter_table_with_cascade': - if len(args) != 4: - 
print('alter_table_with_cascade requires 4 args') + if len(args) != 5: + print('alter_table_with_cascade requires 5 args') sys.exit(1) - pp.pprint(client.alter_table_with_cascade(args[0],args[1],eval(args[2]),eval(args[3]),)) + pp.pprint(client.alter_table_with_cascade(args[0],args[1],eval(args[2]),eval(args[3]),args[4],)) elif cmd == 'alter_table_req': if len(args) != 1: @@ -630,34 +630,34 @@ elif cmd == 'alter_table_req': pp.pprint(client.alter_table_req(eval(args[0]),)) elif cmd == 'add_partition': - if len(args) != 1: - print('add_partition requires 1 args') + if len(args) != 2: + print('add_partition requires 2 args') sys.exit(1) - pp.pprint(client.add_partition(eval(args[0]),)) + pp.pprint(client.add_partition(eval(args[0]),args[1],)) elif cmd == 'add_partition_with_environment_context': - if len(args) != 2: - print('add_partition_with_environment_context requires 2 args') + if len(args) != 3: + print('add_partition_with_environment_context requires 3 args') sys.exit(1) - pp.pprint(client.add_partition_with_environment_context(eval(args[0]),eval(args[1]),)) + pp.pprint(client.add_partition_with_environment_context(eval(args[0]),eval(args[1]),args[2],)) elif cmd == 'add_partitions': - if len(args) != 1: - print('add_partitions requires 1 args') + if len(args) != 2: + print('add_partitions requires 2 args') sys.exit(1) - pp.pprint(client.add_partitions(eval(args[0]),)) + pp.pprint(client.add_partitions(eval(args[0]),args[1],)) elif cmd == 'add_partitions_pspec': - if len(args) != 1: - print('add_partitions_pspec requires 1 args') + if len(args) != 2: + print('add_partitions_pspec requires 2 args') sys.exit(1) - pp.pprint(client.add_partitions_pspec(eval(args[0]),)) + pp.pprint(client.add_partitions_pspec(eval(args[0]),args[1],)) elif cmd == 'append_partition': - if len(args) != 3: - print('append_partition requires 3 args') + if len(args) != 4: + print('append_partition requires 4 args') sys.exit(1) - pp.pprint(client.append_partition(args[0],args[1],eval(args[2]),)) + pp.pprint(client.append_partition(args[0],args[1],eval(args[2]),args[3],)) elif cmd == 'add_partitions_req': if len(args) != 1: @@ -666,22 +666,22 @@ elif cmd == 'add_partitions_req': pp.pprint(client.add_partitions_req(eval(args[0]),)) elif cmd == 'append_partition_with_environment_context': - if len(args) != 4: - print('append_partition_with_environment_context requires 4 args') + if len(args) != 5: + print('append_partition_with_environment_context requires 5 args') sys.exit(1) - pp.pprint(client.append_partition_with_environment_context(args[0],args[1],eval(args[2]),eval(args[3]),)) + pp.pprint(client.append_partition_with_environment_context(args[0],args[1],eval(args[2]),eval(args[3]),args[4],)) elif cmd == 'append_partition_by_name': - if len(args) != 3: - print('append_partition_by_name requires 3 args') + if len(args) != 4: + print('append_partition_by_name requires 4 args') sys.exit(1) - pp.pprint(client.append_partition_by_name(args[0],args[1],args[2],)) + pp.pprint(client.append_partition_by_name(args[0],args[1],args[2],args[3],)) elif cmd == 'append_partition_by_name_with_environment_context': - if len(args) != 4: - print('append_partition_by_name_with_environment_context requires 4 args') + if len(args) != 5: + print('append_partition_by_name_with_environment_context requires 5 args') sys.exit(1) - pp.pprint(client.append_partition_by_name_with_environment_context(args[0],args[1],args[2],eval(args[3]),)) + 
pp.pprint(client.append_partition_by_name_with_environment_context(args[0],args[1],args[2],eval(args[3]),args[4],)) elif cmd == 'drop_partition': if len(args) != 4: @@ -720,16 +720,16 @@ elif cmd == 'get_partition': pp.pprint(client.get_partition(args[0],args[1],eval(args[2]),)) elif cmd == 'exchange_partition': - if len(args) != 5: - print('exchange_partition requires 5 args') + if len(args) != 6: + print('exchange_partition requires 6 args') sys.exit(1) - pp.pprint(client.exchange_partition(eval(args[0]),args[1],args[2],args[3],args[4],)) + pp.pprint(client.exchange_partition(eval(args[0]),args[1],args[2],args[3],args[4],args[5],)) elif cmd == 'exchange_partitions': - if len(args) != 5: - print('exchange_partitions requires 5 args') + if len(args) != 6: + print('exchange_partitions requires 6 args') sys.exit(1) - pp.pprint(client.exchange_partitions(eval(args[0]),args[1],args[2],args[3],args[4],)) + pp.pprint(client.exchange_partitions(eval(args[0]),args[1],args[2],args[3],args[4],args[5],)) elif cmd == 'get_partition_with_auth': if len(args) != 5: @@ -942,16 +942,16 @@ elif cmd == 'get_check_constraints': pp.pprint(client.get_check_constraints(eval(args[0]),)) elif cmd == 'update_table_column_statistics': - if len(args) != 1: - print('update_table_column_statistics requires 1 args') + if len(args) != 2: + print('update_table_column_statistics requires 2 args') sys.exit(1) - pp.pprint(client.update_table_column_statistics(eval(args[0]),)) + pp.pprint(client.update_table_column_statistics(eval(args[0]),args[1],)) elif cmd == 'update_partition_column_statistics': - if len(args) != 1: - print('update_partition_column_statistics requires 1 args') + if len(args) != 2: + print('update_partition_column_statistics requires 2 args') sys.exit(1) - pp.pprint(client.update_partition_column_statistics(eval(args[0]),)) + pp.pprint(client.update_partition_column_statistics(eval(args[0]),args[1],)) elif cmd == 'update_table_column_statistics_req': if len(args) != 1: diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py index 2be349f..37a8c9b 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py @@ -172,22 +172,24 @@ def get_schema_with_environment_context(self, db_name, table_name, environment_c """ pass - def create_table(self, tbl): + def create_table(self, tbl, validWriteIdList): """ Parameters: - tbl + - validWriteIdList """ pass - def create_table_with_environment_context(self, tbl, environment_context): + def create_table_with_environment_context(self, tbl, environment_context, validWriteIdList): """ Parameters: - tbl - environment_context + - validWriteIdList """ pass - def create_table_with_constraints(self, tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints): + def create_table_with_constraints(self, tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints, validWriteIdList): """ Parameters: - tbl @@ -197,6 +199,7 @@ def create_table_with_constraints(self, tbl, primaryKeys, foreignKeys, uniqueCon - notNullConstraints - defaultConstraints - checkConstraints + - validWriteIdList """ pass @@ -384,32 +387,35 @@ def get_table_names_by_filter(self, dbname, filter, max_tables): """ 
pass - def alter_table(self, dbname, tbl_name, new_tbl): + def alter_table(self, dbname, tbl_name, new_tbl, validWriteIdList): """ Parameters: - dbname - tbl_name - new_tbl + - validWriteIdList """ pass - def alter_table_with_environment_context(self, dbname, tbl_name, new_tbl, environment_context): + def alter_table_with_environment_context(self, dbname, tbl_name, new_tbl, environment_context, validWriteIdList): """ Parameters: - dbname - tbl_name - new_tbl - environment_context + - validWriteIdList """ pass - def alter_table_with_cascade(self, dbname, tbl_name, new_tbl, cascade): + def alter_table_with_cascade(self, dbname, tbl_name, new_tbl, cascade, validWriteIdList): """ Parameters: - dbname - tbl_name - new_tbl - cascade + - validWriteIdList """ pass @@ -420,41 +426,46 @@ def alter_table_req(self, req): """ pass - def add_partition(self, new_part): + def add_partition(self, new_part, validWriteIdList): """ Parameters: - new_part + - validWriteIdList """ pass - def add_partition_with_environment_context(self, new_part, environment_context): + def add_partition_with_environment_context(self, new_part, environment_context, validWriteIdList): """ Parameters: - new_part - environment_context + - validWriteIdList """ pass - def add_partitions(self, new_parts): + def add_partitions(self, new_parts, validWriteIdList): """ Parameters: - new_parts + - validWriteIdList """ pass - def add_partitions_pspec(self, new_parts): + def add_partitions_pspec(self, new_parts, validWriteIdList): """ Parameters: - new_parts + - validWriteIdList """ pass - def append_partition(self, db_name, tbl_name, part_vals): + def append_partition(self, db_name, tbl_name, part_vals, validWriteIdList): """ Parameters: - db_name - tbl_name - part_vals + - validWriteIdList """ pass @@ -465,32 +476,35 @@ def add_partitions_req(self, request): """ pass - def append_partition_with_environment_context(self, db_name, tbl_name, part_vals, environment_context): + def append_partition_with_environment_context(self, db_name, tbl_name, part_vals, environment_context, validWriteIdList): """ Parameters: - db_name - tbl_name - part_vals - environment_context + - validWriteIdList """ pass - def append_partition_by_name(self, db_name, tbl_name, part_name): + def append_partition_by_name(self, db_name, tbl_name, part_name, validWriteIdList): """ Parameters: - db_name - tbl_name - part_name + - validWriteIdList """ pass - def append_partition_by_name_with_environment_context(self, db_name, tbl_name, part_name, environment_context): + def append_partition_by_name_with_environment_context(self, db_name, tbl_name, part_name, environment_context, validWriteIdList): """ Parameters: - db_name - tbl_name - part_name - environment_context + - validWriteIdList """ pass @@ -552,7 +566,7 @@ def get_partition(self, db_name, tbl_name, part_vals): """ pass - def exchange_partition(self, partitionSpecs, source_db, source_table_name, dest_db, dest_table_name): + def exchange_partition(self, partitionSpecs, source_db, source_table_name, dest_db, dest_table_name, validWriteIdList): """ Parameters: - partitionSpecs @@ -560,10 +574,11 @@ def exchange_partition(self, partitionSpecs, source_db, source_table_name, dest_ - source_table_name - dest_db - dest_table_name + - validWriteIdList """ pass - def exchange_partitions(self, partitionSpecs, source_db, source_table_name, dest_db, dest_table_name): + def exchange_partitions(self, partitionSpecs, source_db, source_table_name, dest_db, dest_table_name, validWriteIdList): """ Parameters: - partitionSpecs @@ -571,6 
+586,7 @@ def exchange_partitions(self, partitionSpecs, source_db, source_table_name, dest - source_table_name - dest_db - dest_table_name + - validWriteIdList """ pass @@ -877,17 +893,19 @@ def get_check_constraints(self, request): """ pass - def update_table_column_statistics(self, stats_obj): + def update_table_column_statistics(self, stats_obj, validWriteIdList): """ Parameters: - stats_obj + - validWriteIdList """ pass - def update_partition_column_statistics(self, stats_obj): + def update_partition_column_statistics(self, stats_obj, validWriteIdList): """ Parameters: - stats_obj + - validWriteIdList """ pass @@ -2467,18 +2485,20 @@ def recv_get_schema_with_environment_context(self): raise result.o3 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_schema_with_environment_context failed: unknown result") - def create_table(self, tbl): + def create_table(self, tbl, validWriteIdList): """ Parameters: - tbl + - validWriteIdList """ - self.send_create_table(tbl) + self.send_create_table(tbl, validWriteIdList) self.recv_create_table() - def send_create_table(self, tbl): + def send_create_table(self, tbl, validWriteIdList): self._oprot.writeMessageBegin('create_table', TMessageType.CALL, self._seqid) args = create_table_args() args.tbl = tbl + args.validWriteIdList = validWriteIdList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -2504,20 +2524,22 @@ def recv_create_table(self): raise result.o4 return - def create_table_with_environment_context(self, tbl, environment_context): + def create_table_with_environment_context(self, tbl, environment_context, validWriteIdList): """ Parameters: - tbl - environment_context + - validWriteIdList """ - self.send_create_table_with_environment_context(tbl, environment_context) + self.send_create_table_with_environment_context(tbl, environment_context, validWriteIdList) self.recv_create_table_with_environment_context() - def send_create_table_with_environment_context(self, tbl, environment_context): + def send_create_table_with_environment_context(self, tbl, environment_context, validWriteIdList): self._oprot.writeMessageBegin('create_table_with_environment_context', TMessageType.CALL, self._seqid) args = create_table_with_environment_context_args() args.tbl = tbl args.environment_context = environment_context + args.validWriteIdList = validWriteIdList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -2543,7 +2565,7 @@ def recv_create_table_with_environment_context(self): raise result.o4 return - def create_table_with_constraints(self, tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints): + def create_table_with_constraints(self, tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints, validWriteIdList): """ Parameters: - tbl @@ -2553,11 +2575,12 @@ def create_table_with_constraints(self, tbl, primaryKeys, foreignKeys, uniqueCon - notNullConstraints - defaultConstraints - checkConstraints + - validWriteIdList """ - self.send_create_table_with_constraints(tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints) + self.send_create_table_with_constraints(tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints, validWriteIdList) self.recv_create_table_with_constraints() - def send_create_table_with_constraints(self, tbl, primaryKeys, foreignKeys, uniqueConstraints, 
notNullConstraints, defaultConstraints, checkConstraints): + def send_create_table_with_constraints(self, tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints, validWriteIdList): self._oprot.writeMessageBegin('create_table_with_constraints', TMessageType.CALL, self._seqid) args = create_table_with_constraints_args() args.tbl = tbl @@ -2567,6 +2590,7 @@ def send_create_table_with_constraints(self, tbl, primaryKeys, foreignKeys, uniq args.notNullConstraints = notNullConstraints args.defaultConstraints = defaultConstraints args.checkConstraints = checkConstraints + args.validWriteIdList = validWriteIdList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -3433,22 +3457,24 @@ def recv_get_table_names_by_filter(self): raise result.o3 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_table_names_by_filter failed: unknown result") - def alter_table(self, dbname, tbl_name, new_tbl): + def alter_table(self, dbname, tbl_name, new_tbl, validWriteIdList): """ Parameters: - dbname - tbl_name - new_tbl + - validWriteIdList """ - self.send_alter_table(dbname, tbl_name, new_tbl) + self.send_alter_table(dbname, tbl_name, new_tbl, validWriteIdList) self.recv_alter_table() - def send_alter_table(self, dbname, tbl_name, new_tbl): + def send_alter_table(self, dbname, tbl_name, new_tbl, validWriteIdList): self._oprot.writeMessageBegin('alter_table', TMessageType.CALL, self._seqid) args = alter_table_args() args.dbname = dbname args.tbl_name = tbl_name args.new_tbl = new_tbl + args.validWriteIdList = validWriteIdList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -3470,24 +3496,26 @@ def recv_alter_table(self): raise result.o2 return - def alter_table_with_environment_context(self, dbname, tbl_name, new_tbl, environment_context): + def alter_table_with_environment_context(self, dbname, tbl_name, new_tbl, environment_context, validWriteIdList): """ Parameters: - dbname - tbl_name - new_tbl - environment_context + - validWriteIdList """ - self.send_alter_table_with_environment_context(dbname, tbl_name, new_tbl, environment_context) + self.send_alter_table_with_environment_context(dbname, tbl_name, new_tbl, environment_context, validWriteIdList) self.recv_alter_table_with_environment_context() - def send_alter_table_with_environment_context(self, dbname, tbl_name, new_tbl, environment_context): + def send_alter_table_with_environment_context(self, dbname, tbl_name, new_tbl, environment_context, validWriteIdList): self._oprot.writeMessageBegin('alter_table_with_environment_context', TMessageType.CALL, self._seqid) args = alter_table_with_environment_context_args() args.dbname = dbname args.tbl_name = tbl_name args.new_tbl = new_tbl args.environment_context = environment_context + args.validWriteIdList = validWriteIdList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -3509,24 +3537,26 @@ def recv_alter_table_with_environment_context(self): raise result.o2 return - def alter_table_with_cascade(self, dbname, tbl_name, new_tbl, cascade): + def alter_table_with_cascade(self, dbname, tbl_name, new_tbl, cascade, validWriteIdList): """ Parameters: - dbname - tbl_name - new_tbl - cascade + - validWriteIdList """ - self.send_alter_table_with_cascade(dbname, tbl_name, new_tbl, cascade) + self.send_alter_table_with_cascade(dbname, tbl_name, new_tbl, cascade, validWriteIdList) self.recv_alter_table_with_cascade() - def 
send_alter_table_with_cascade(self, dbname, tbl_name, new_tbl, cascade): + def send_alter_table_with_cascade(self, dbname, tbl_name, new_tbl, cascade, validWriteIdList): self._oprot.writeMessageBegin('alter_table_with_cascade', TMessageType.CALL, self._seqid) args = alter_table_with_cascade_args() args.dbname = dbname args.tbl_name = tbl_name args.new_tbl = new_tbl args.cascade = cascade + args.validWriteIdList = validWriteIdList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -3583,18 +3613,20 @@ def recv_alter_table_req(self): raise result.o2 raise TApplicationException(TApplicationException.MISSING_RESULT, "alter_table_req failed: unknown result") - def add_partition(self, new_part): + def add_partition(self, new_part, validWriteIdList): """ Parameters: - new_part + - validWriteIdList """ - self.send_add_partition(new_part) + self.send_add_partition(new_part, validWriteIdList) return self.recv_add_partition() - def send_add_partition(self, new_part): + def send_add_partition(self, new_part, validWriteIdList): self._oprot.writeMessageBegin('add_partition', TMessageType.CALL, self._seqid) args = add_partition_args() args.new_part = new_part + args.validWriteIdList = validWriteIdList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -3620,20 +3652,22 @@ def recv_add_partition(self): raise result.o3 raise TApplicationException(TApplicationException.MISSING_RESULT, "add_partition failed: unknown result") - def add_partition_with_environment_context(self, new_part, environment_context): + def add_partition_with_environment_context(self, new_part, environment_context, validWriteIdList): """ Parameters: - new_part - environment_context + - validWriteIdList """ - self.send_add_partition_with_environment_context(new_part, environment_context) + self.send_add_partition_with_environment_context(new_part, environment_context, validWriteIdList) return self.recv_add_partition_with_environment_context() - def send_add_partition_with_environment_context(self, new_part, environment_context): + def send_add_partition_with_environment_context(self, new_part, environment_context, validWriteIdList): self._oprot.writeMessageBegin('add_partition_with_environment_context', TMessageType.CALL, self._seqid) args = add_partition_with_environment_context_args() args.new_part = new_part args.environment_context = environment_context + args.validWriteIdList = validWriteIdList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -3659,18 +3693,20 @@ def recv_add_partition_with_environment_context(self): raise result.o3 raise TApplicationException(TApplicationException.MISSING_RESULT, "add_partition_with_environment_context failed: unknown result") - def add_partitions(self, new_parts): + def add_partitions(self, new_parts, validWriteIdList): """ Parameters: - new_parts + - validWriteIdList """ - self.send_add_partitions(new_parts) + self.send_add_partitions(new_parts, validWriteIdList) return self.recv_add_partitions() - def send_add_partitions(self, new_parts): + def send_add_partitions(self, new_parts, validWriteIdList): self._oprot.writeMessageBegin('add_partitions', TMessageType.CALL, self._seqid) args = add_partitions_args() args.new_parts = new_parts + args.validWriteIdList = validWriteIdList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -3696,18 +3732,20 @@ def recv_add_partitions(self): raise result.o3 raise TApplicationException(TApplicationException.MISSING_RESULT, 
"add_partitions failed: unknown result") - def add_partitions_pspec(self, new_parts): + def add_partitions_pspec(self, new_parts, validWriteIdList): """ Parameters: - new_parts + - validWriteIdList """ - self.send_add_partitions_pspec(new_parts) + self.send_add_partitions_pspec(new_parts, validWriteIdList) return self.recv_add_partitions_pspec() - def send_add_partitions_pspec(self, new_parts): + def send_add_partitions_pspec(self, new_parts, validWriteIdList): self._oprot.writeMessageBegin('add_partitions_pspec', TMessageType.CALL, self._seqid) args = add_partitions_pspec_args() args.new_parts = new_parts + args.validWriteIdList = validWriteIdList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -3733,22 +3771,24 @@ def recv_add_partitions_pspec(self): raise result.o3 raise TApplicationException(TApplicationException.MISSING_RESULT, "add_partitions_pspec failed: unknown result") - def append_partition(self, db_name, tbl_name, part_vals): + def append_partition(self, db_name, tbl_name, part_vals, validWriteIdList): """ Parameters: - db_name - tbl_name - part_vals + - validWriteIdList """ - self.send_append_partition(db_name, tbl_name, part_vals) + self.send_append_partition(db_name, tbl_name, part_vals, validWriteIdList) return self.recv_append_partition() - def send_append_partition(self, db_name, tbl_name, part_vals): + def send_append_partition(self, db_name, tbl_name, part_vals, validWriteIdList): self._oprot.writeMessageBegin('append_partition', TMessageType.CALL, self._seqid) args = append_partition_args() args.db_name = db_name args.tbl_name = tbl_name args.part_vals = part_vals + args.validWriteIdList = validWriteIdList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -3811,24 +3851,26 @@ def recv_add_partitions_req(self): raise result.o3 raise TApplicationException(TApplicationException.MISSING_RESULT, "add_partitions_req failed: unknown result") - def append_partition_with_environment_context(self, db_name, tbl_name, part_vals, environment_context): + def append_partition_with_environment_context(self, db_name, tbl_name, part_vals, environment_context, validWriteIdList): """ Parameters: - db_name - tbl_name - part_vals - environment_context + - validWriteIdList """ - self.send_append_partition_with_environment_context(db_name, tbl_name, part_vals, environment_context) + self.send_append_partition_with_environment_context(db_name, tbl_name, part_vals, environment_context, validWriteIdList) return self.recv_append_partition_with_environment_context() - def send_append_partition_with_environment_context(self, db_name, tbl_name, part_vals, environment_context): + def send_append_partition_with_environment_context(self, db_name, tbl_name, part_vals, environment_context, validWriteIdList): self._oprot.writeMessageBegin('append_partition_with_environment_context', TMessageType.CALL, self._seqid) args = append_partition_with_environment_context_args() args.db_name = db_name args.tbl_name = tbl_name args.part_vals = part_vals args.environment_context = environment_context + args.validWriteIdList = validWriteIdList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -3854,22 +3896,24 @@ def recv_append_partition_with_environment_context(self): raise result.o3 raise TApplicationException(TApplicationException.MISSING_RESULT, "append_partition_with_environment_context failed: unknown result") - def append_partition_by_name(self, db_name, tbl_name, part_name): + def 
append_partition_by_name(self, db_name, tbl_name, part_name, validWriteIdList): """ Parameters: - db_name - tbl_name - part_name + - validWriteIdList """ - self.send_append_partition_by_name(db_name, tbl_name, part_name) + self.send_append_partition_by_name(db_name, tbl_name, part_name, validWriteIdList) return self.recv_append_partition_by_name() - def send_append_partition_by_name(self, db_name, tbl_name, part_name): + def send_append_partition_by_name(self, db_name, tbl_name, part_name, validWriteIdList): self._oprot.writeMessageBegin('append_partition_by_name', TMessageType.CALL, self._seqid) args = append_partition_by_name_args() args.db_name = db_name args.tbl_name = tbl_name args.part_name = part_name + args.validWriteIdList = validWriteIdList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -3895,24 +3939,26 @@ def recv_append_partition_by_name(self): raise result.o3 raise TApplicationException(TApplicationException.MISSING_RESULT, "append_partition_by_name failed: unknown result") - def append_partition_by_name_with_environment_context(self, db_name, tbl_name, part_name, environment_context): + def append_partition_by_name_with_environment_context(self, db_name, tbl_name, part_name, environment_context, validWriteIdList): """ Parameters: - db_name - tbl_name - part_name - environment_context + - validWriteIdList """ - self.send_append_partition_by_name_with_environment_context(db_name, tbl_name, part_name, environment_context) + self.send_append_partition_by_name_with_environment_context(db_name, tbl_name, part_name, environment_context, validWriteIdList) return self.recv_append_partition_by_name_with_environment_context() - def send_append_partition_by_name_with_environment_context(self, db_name, tbl_name, part_name, environment_context): + def send_append_partition_by_name_with_environment_context(self, db_name, tbl_name, part_name, environment_context, validWriteIdList): self._oprot.writeMessageBegin('append_partition_by_name_with_environment_context', TMessageType.CALL, self._seqid) args = append_partition_by_name_with_environment_context_args() args.db_name = db_name args.tbl_name = tbl_name args.part_name = part_name args.environment_context = environment_context + args.validWriteIdList = validWriteIdList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -4180,7 +4226,7 @@ def recv_get_partition(self): raise result.o2 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partition failed: unknown result") - def exchange_partition(self, partitionSpecs, source_db, source_table_name, dest_db, dest_table_name): + def exchange_partition(self, partitionSpecs, source_db, source_table_name, dest_db, dest_table_name, validWriteIdList): """ Parameters: - partitionSpecs @@ -4188,11 +4234,12 @@ def exchange_partition(self, partitionSpecs, source_db, source_table_name, dest_ - source_table_name - dest_db - dest_table_name + - validWriteIdList """ - self.send_exchange_partition(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name) + self.send_exchange_partition(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name, validWriteIdList) return self.recv_exchange_partition() - def send_exchange_partition(self, partitionSpecs, source_db, source_table_name, dest_db, dest_table_name): + def send_exchange_partition(self, partitionSpecs, source_db, source_table_name, dest_db, dest_table_name, validWriteIdList): self._oprot.writeMessageBegin('exchange_partition', TMessageType.CALL, 
self._seqid) args = exchange_partition_args() args.partitionSpecs = partitionSpecs @@ -4200,6 +4247,7 @@ def send_exchange_partition(self, partitionSpecs, source_db, source_table_name, args.source_table_name = source_table_name args.dest_db = dest_db args.dest_table_name = dest_table_name + args.validWriteIdList = validWriteIdList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -4227,7 +4275,7 @@ def recv_exchange_partition(self): raise result.o4 raise TApplicationException(TApplicationException.MISSING_RESULT, "exchange_partition failed: unknown result") - def exchange_partitions(self, partitionSpecs, source_db, source_table_name, dest_db, dest_table_name): + def exchange_partitions(self, partitionSpecs, source_db, source_table_name, dest_db, dest_table_name, validWriteIdList): """ Parameters: - partitionSpecs @@ -4235,11 +4283,12 @@ def exchange_partitions(self, partitionSpecs, source_db, source_table_name, dest - source_table_name - dest_db - dest_table_name + - validWriteIdList """ - self.send_exchange_partitions(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name) + self.send_exchange_partitions(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name, validWriteIdList) return self.recv_exchange_partitions() - def send_exchange_partitions(self, partitionSpecs, source_db, source_table_name, dest_db, dest_table_name): + def send_exchange_partitions(self, partitionSpecs, source_db, source_table_name, dest_db, dest_table_name, validWriteIdList): self._oprot.writeMessageBegin('exchange_partitions', TMessageType.CALL, self._seqid) args = exchange_partitions_args() args.partitionSpecs = partitionSpecs @@ -4247,6 +4296,7 @@ def send_exchange_partitions(self, partitionSpecs, source_db, source_table_name, args.source_table_name = source_table_name args.dest_db = dest_db args.dest_table_name = dest_table_name + args.validWriteIdList = validWriteIdList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -5611,18 +5661,20 @@ def recv_get_check_constraints(self): raise result.o2 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_check_constraints failed: unknown result") - def update_table_column_statistics(self, stats_obj): + def update_table_column_statistics(self, stats_obj, validWriteIdList): """ Parameters: - stats_obj + - validWriteIdList """ - self.send_update_table_column_statistics(stats_obj) + self.send_update_table_column_statistics(stats_obj, validWriteIdList) return self.recv_update_table_column_statistics() - def send_update_table_column_statistics(self, stats_obj): + def send_update_table_column_statistics(self, stats_obj, validWriteIdList): self._oprot.writeMessageBegin('update_table_column_statistics', TMessageType.CALL, self._seqid) args = update_table_column_statistics_args() args.stats_obj = stats_obj + args.validWriteIdList = validWriteIdList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -5650,18 +5702,20 @@ def recv_update_table_column_statistics(self): raise result.o4 raise TApplicationException(TApplicationException.MISSING_RESULT, "update_table_column_statistics failed: unknown result") - def update_partition_column_statistics(self, stats_obj): + def update_partition_column_statistics(self, stats_obj, validWriteIdList): """ Parameters: - stats_obj + - validWriteIdList """ - self.send_update_partition_column_statistics(stats_obj) + self.send_update_partition_column_statistics(stats_obj, validWriteIdList) return 
self.recv_update_partition_column_statistics() - def send_update_partition_column_statistics(self, stats_obj): + def send_update_partition_column_statistics(self, stats_obj, validWriteIdList): self._oprot.writeMessageBegin('update_partition_column_statistics', TMessageType.CALL, self._seqid) args = update_partition_column_statistics_args() args.stats_obj = stats_obj + args.validWriteIdList = validWriteIdList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -10467,7 +10521,7 @@ def process_create_table(self, seqid, iprot, oprot): iprot.readMessageEnd() result = create_table_result() try: - self._handler.create_table(args.tbl) + self._handler.create_table(args.tbl, args.validWriteIdList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -10498,7 +10552,7 @@ def process_create_table_with_environment_context(self, seqid, iprot, oprot): iprot.readMessageEnd() result = create_table_with_environment_context_result() try: - self._handler.create_table_with_environment_context(args.tbl, args.environment_context) + self._handler.create_table_with_environment_context(args.tbl, args.environment_context, args.validWriteIdList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -10529,7 +10583,7 @@ def process_create_table_with_constraints(self, seqid, iprot, oprot): iprot.readMessageEnd() result = create_table_with_constraints_result() try: - self._handler.create_table_with_constraints(args.tbl, args.primaryKeys, args.foreignKeys, args.uniqueConstraints, args.notNullConstraints, args.defaultConstraints, args.checkConstraints) + self._handler.create_table_with_constraints(args.tbl, args.primaryKeys, args.foreignKeys, args.uniqueConstraints, args.notNullConstraints, args.defaultConstraints, args.checkConstraints, args.validWriteIdList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -11142,7 +11196,7 @@ def process_alter_table(self, seqid, iprot, oprot): iprot.readMessageEnd() result = alter_table_result() try: - self._handler.alter_table(args.dbname, args.tbl_name, args.new_tbl) + self._handler.alter_table(args.dbname, args.tbl_name, args.new_tbl, args.validWriteIdList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -11167,7 +11221,7 @@ def process_alter_table_with_environment_context(self, seqid, iprot, oprot): iprot.readMessageEnd() result = alter_table_with_environment_context_result() try: - self._handler.alter_table_with_environment_context(args.dbname, args.tbl_name, args.new_tbl, args.environment_context) + self._handler.alter_table_with_environment_context(args.dbname, args.tbl_name, args.new_tbl, args.environment_context, args.validWriteIdList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -11192,7 +11246,7 @@ def process_alter_table_with_cascade(self, seqid, iprot, oprot): iprot.readMessageEnd() result = alter_table_with_cascade_result() try: - self._handler.alter_table_with_cascade(args.dbname, args.tbl_name, args.new_tbl, args.cascade) + self._handler.alter_table_with_cascade(args.dbname, args.tbl_name, args.new_tbl, args.cascade, args.validWriteIdList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -11242,7 +11296,7 @@ def process_add_partition(self, seqid, iprot, oprot): 
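The processor changes that follow share one pattern: each process_* wrapper reads the extended *_args struct and forwards args.validWriteIdList to the handler, which therefore must accept the extra parameter even if it ignores it; an old client that never sends the optional field simply leaves it as None on the server. A hedged illustration of the handler shape (this stub is only for test doubles; the production handler is the Java HMSHandler, not Python):

    # Illustrative Iface stub matching the regenerated signatures; not part of the patch.
    class LoggingHandlerStub(object):
        def add_partition(self, new_part, validWriteIdList):
            # validWriteIdList is the serialized write-id list sent by a new client,
            # or None when an older client omitted the optional field.
            print('add_partition: writeIds=%r' % (validWriteIdList,))
            return new_part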
iprot.readMessageEnd() result = add_partition_result() try: - result.success = self._handler.add_partition(args.new_part) + result.success = self._handler.add_partition(args.new_part, args.validWriteIdList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -11270,7 +11324,7 @@ def process_add_partition_with_environment_context(self, seqid, iprot, oprot): iprot.readMessageEnd() result = add_partition_with_environment_context_result() try: - result.success = self._handler.add_partition_with_environment_context(args.new_part, args.environment_context) + result.success = self._handler.add_partition_with_environment_context(args.new_part, args.environment_context, args.validWriteIdList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -11298,7 +11352,7 @@ def process_add_partitions(self, seqid, iprot, oprot): iprot.readMessageEnd() result = add_partitions_result() try: - result.success = self._handler.add_partitions(args.new_parts) + result.success = self._handler.add_partitions(args.new_parts, args.validWriteIdList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -11326,7 +11380,7 @@ def process_add_partitions_pspec(self, seqid, iprot, oprot): iprot.readMessageEnd() result = add_partitions_pspec_result() try: - result.success = self._handler.add_partitions_pspec(args.new_parts) + result.success = self._handler.add_partitions_pspec(args.new_parts, args.validWriteIdList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -11354,7 +11408,7 @@ def process_append_partition(self, seqid, iprot, oprot): iprot.readMessageEnd() result = append_partition_result() try: - result.success = self._handler.append_partition(args.db_name, args.tbl_name, args.part_vals) + result.success = self._handler.append_partition(args.db_name, args.tbl_name, args.part_vals, args.validWriteIdList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -11410,7 +11464,7 @@ def process_append_partition_with_environment_context(self, seqid, iprot, oprot) iprot.readMessageEnd() result = append_partition_with_environment_context_result() try: - result.success = self._handler.append_partition_with_environment_context(args.db_name, args.tbl_name, args.part_vals, args.environment_context) + result.success = self._handler.append_partition_with_environment_context(args.db_name, args.tbl_name, args.part_vals, args.environment_context, args.validWriteIdList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -11438,7 +11492,7 @@ def process_append_partition_by_name(self, seqid, iprot, oprot): iprot.readMessageEnd() result = append_partition_by_name_result() try: - result.success = self._handler.append_partition_by_name(args.db_name, args.tbl_name, args.part_name) + result.success = self._handler.append_partition_by_name(args.db_name, args.tbl_name, args.part_name, args.validWriteIdList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -11466,7 +11520,7 @@ def process_append_partition_by_name_with_environment_context(self, seqid, iprot iprot.readMessageEnd() result = append_partition_by_name_with_environment_context_result() try: - result.success = 
self._handler.append_partition_by_name_with_environment_context(args.db_name, args.tbl_name, args.part_name, args.environment_context) + result.success = self._handler.append_partition_by_name_with_environment_context(args.db_name, args.tbl_name, args.part_name, args.environment_context, args.validWriteIdList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -11644,7 +11698,7 @@ def process_exchange_partition(self, seqid, iprot, oprot): iprot.readMessageEnd() result = exchange_partition_result() try: - result.success = self._handler.exchange_partition(args.partitionSpecs, args.source_db, args.source_table_name, args.dest_db, args.dest_table_name) + result.success = self._handler.exchange_partition(args.partitionSpecs, args.source_db, args.source_table_name, args.dest_db, args.dest_table_name, args.validWriteIdList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -11675,7 +11729,7 @@ def process_exchange_partitions(self, seqid, iprot, oprot): iprot.readMessageEnd() result = exchange_partitions_result() try: - result.success = self._handler.exchange_partitions(args.partitionSpecs, args.source_db, args.source_table_name, args.dest_db, args.dest_table_name) + result.success = self._handler.exchange_partitions(args.partitionSpecs, args.source_db, args.source_table_name, args.dest_db, args.dest_table_name, args.validWriteIdList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -12593,7 +12647,7 @@ def process_update_table_column_statistics(self, seqid, iprot, oprot): iprot.readMessageEnd() result = update_table_column_statistics_result() try: - result.success = self._handler.update_table_column_statistics(args.stats_obj) + result.success = self._handler.update_table_column_statistics(args.stats_obj, args.validWriteIdList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -12624,7 +12678,7 @@ def process_update_partition_column_statistics(self, seqid, iprot, oprot): iprot.readMessageEnd() result = update_partition_column_statistics_result() try: - result.success = self._handler.update_partition_column_statistics(args.stats_obj) + result.success = self._handler.update_partition_column_statistics(args.stats_obj, args.validWriteIdList) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -18869,15 +18923,18 @@ class create_table_args: """ Attributes: - tbl + - validWriteIdList """ thrift_spec = ( None, # 0 (1, TType.STRUCT, 'tbl', (Table, Table.thrift_spec), None, ), # 1 + (2, TType.STRING, 'validWriteIdList', None, None, ), # 2 ) - def __init__(self, tbl=None,): + def __init__(self, tbl=None, validWriteIdList=None,): self.tbl = tbl + self.validWriteIdList = validWriteIdList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -18894,6 +18951,11 @@ def read(self, iprot): self.tbl.read(iprot) else: iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -18908,6 +18970,10 @@ def write(self, oprot): oprot.writeFieldBegin('tbl', TType.STRUCT, 1) self.tbl.write(oprot) oprot.writeFieldEnd() + if self.validWriteIdList is not 
None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 2) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -18918,6 +18984,7 @@ def validate(self): def __hash__(self): value = 17 value = (value * 31) ^ hash(self.tbl) + value = (value * 31) ^ hash(self.validWriteIdList) return value def __repr__(self): @@ -19044,17 +19111,20 @@ class create_table_with_environment_context_args: Attributes: - tbl - environment_context + - validWriteIdList """ thrift_spec = ( None, # 0 (1, TType.STRUCT, 'tbl', (Table, Table.thrift_spec), None, ), # 1 (2, TType.STRUCT, 'environment_context', (EnvironmentContext, EnvironmentContext.thrift_spec), None, ), # 2 + (3, TType.STRING, 'validWriteIdList', None, None, ), # 3 ) - def __init__(self, tbl=None, environment_context=None,): + def __init__(self, tbl=None, environment_context=None, validWriteIdList=None,): self.tbl = tbl self.environment_context = environment_context + self.validWriteIdList = validWriteIdList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -19077,6 +19147,11 @@ def read(self, iprot): self.environment_context.read(iprot) else: iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -19095,6 +19170,10 @@ def write(self, oprot): oprot.writeFieldBegin('environment_context', TType.STRUCT, 2) self.environment_context.write(oprot) oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 3) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -19106,6 +19185,7 @@ def __hash__(self): value = 17 value = (value * 31) ^ hash(self.tbl) value = (value * 31) ^ hash(self.environment_context) + value = (value * 31) ^ hash(self.validWriteIdList) return value def __repr__(self): @@ -19237,6 +19317,7 @@ class create_table_with_constraints_args: - notNullConstraints - defaultConstraints - checkConstraints + - validWriteIdList """ thrift_spec = ( @@ -19248,9 +19329,10 @@ class create_table_with_constraints_args: (5, TType.LIST, 'notNullConstraints', (TType.STRUCT,(SQLNotNullConstraint, SQLNotNullConstraint.thrift_spec)), None, ), # 5 (6, TType.LIST, 'defaultConstraints', (TType.STRUCT,(SQLDefaultConstraint, SQLDefaultConstraint.thrift_spec)), None, ), # 6 (7, TType.LIST, 'checkConstraints', (TType.STRUCT,(SQLCheckConstraint, SQLCheckConstraint.thrift_spec)), None, ), # 7 + (8, TType.STRING, 'validWriteIdList', None, None, ), # 8 ) - def __init__(self, tbl=None, primaryKeys=None, foreignKeys=None, uniqueConstraints=None, notNullConstraints=None, defaultConstraints=None, checkConstraints=None,): + def __init__(self, tbl=None, primaryKeys=None, foreignKeys=None, uniqueConstraints=None, notNullConstraints=None, defaultConstraints=None, checkConstraints=None, validWriteIdList=None,): self.tbl = tbl self.primaryKeys = primaryKeys self.foreignKeys = foreignKeys @@ -19258,6 +19340,7 @@ def __init__(self, tbl=None, primaryKeys=None, foreignKeys=None, uniqueConstrain self.notNullConstraints = notNullConstraints self.defaultConstraints = defaultConstraints self.checkConstraints = checkConstraints + self.validWriteIdList = validWriteIdList def read(self, iprot): if iprot.__class__ == 
TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -19340,6 +19423,11 @@ def read(self, iprot): iprot.readListEnd() else: iprot.skip(ftype) + elif fid == 8: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -19396,6 +19484,10 @@ def write(self, oprot): iter999.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 8) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -19412,6 +19504,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.notNullConstraints) value = (value * 31) ^ hash(self.defaultConstraints) value = (value * 31) ^ hash(self.checkConstraints) + value = (value * 31) ^ hash(self.validWriteIdList) return value def __repr__(self): @@ -23441,6 +23534,7 @@ class alter_table_args: - dbname - tbl_name - new_tbl + - validWriteIdList """ thrift_spec = ( @@ -23448,12 +23542,14 @@ class alter_table_args: (1, TType.STRING, 'dbname', None, None, ), # 1 (2, TType.STRING, 'tbl_name', None, None, ), # 2 (3, TType.STRUCT, 'new_tbl', (Table, Table.thrift_spec), None, ), # 3 + (4, TType.STRING, 'validWriteIdList', None, None, ), # 4 ) - def __init__(self, dbname=None, tbl_name=None, new_tbl=None,): + def __init__(self, dbname=None, tbl_name=None, new_tbl=None, validWriteIdList=None,): self.dbname = dbname self.tbl_name = tbl_name self.new_tbl = new_tbl + self.validWriteIdList = validWriteIdList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -23480,6 +23576,11 @@ def read(self, iprot): self.new_tbl.read(iprot) else: iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -23502,6 +23603,10 @@ def write(self, oprot): oprot.writeFieldBegin('new_tbl', TType.STRUCT, 3) self.new_tbl.write(oprot) oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 4) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -23514,6 +23619,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.dbname) value = (value * 31) ^ hash(self.tbl_name) value = (value * 31) ^ hash(self.new_tbl) + value = (value * 31) ^ hash(self.validWriteIdList) return value def __repr__(self): @@ -23614,6 +23720,7 @@ class alter_table_with_environment_context_args: - tbl_name - new_tbl - environment_context + - validWriteIdList """ thrift_spec = ( @@ -23622,13 +23729,15 @@ class alter_table_with_environment_context_args: (2, TType.STRING, 'tbl_name', None, None, ), # 2 (3, TType.STRUCT, 'new_tbl', (Table, Table.thrift_spec), None, ), # 3 (4, TType.STRUCT, 'environment_context', (EnvironmentContext, EnvironmentContext.thrift_spec), None, ), # 4 + (5, TType.STRING, 'validWriteIdList', None, None, ), # 5 ) - def __init__(self, dbname=None, tbl_name=None, new_tbl=None, environment_context=None,): + def __init__(self, dbname=None, tbl_name=None, new_tbl=None, environment_context=None, validWriteIdList=None,): self.dbname = dbname self.tbl_name = tbl_name 
self.new_tbl = new_tbl self.environment_context = environment_context + self.validWriteIdList = validWriteIdList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -23661,6 +23770,11 @@ def read(self, iprot): self.environment_context.read(iprot) else: iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -23687,6 +23801,10 @@ def write(self, oprot): oprot.writeFieldBegin('environment_context', TType.STRUCT, 4) self.environment_context.write(oprot) oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 5) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -23700,6 +23818,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.tbl_name) value = (value * 31) ^ hash(self.new_tbl) value = (value * 31) ^ hash(self.environment_context) + value = (value * 31) ^ hash(self.validWriteIdList) return value def __repr__(self): @@ -23800,6 +23919,7 @@ class alter_table_with_cascade_args: - tbl_name - new_tbl - cascade + - validWriteIdList """ thrift_spec = ( @@ -23808,13 +23928,15 @@ class alter_table_with_cascade_args: (2, TType.STRING, 'tbl_name', None, None, ), # 2 (3, TType.STRUCT, 'new_tbl', (Table, Table.thrift_spec), None, ), # 3 (4, TType.BOOL, 'cascade', None, None, ), # 4 + (5, TType.STRING, 'validWriteIdList', None, None, ), # 5 ) - def __init__(self, dbname=None, tbl_name=None, new_tbl=None, cascade=None,): + def __init__(self, dbname=None, tbl_name=None, new_tbl=None, cascade=None, validWriteIdList=None,): self.dbname = dbname self.tbl_name = tbl_name self.new_tbl = new_tbl self.cascade = cascade + self.validWriteIdList = validWriteIdList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -23846,6 +23968,11 @@ def read(self, iprot): self.cascade = iprot.readBool() else: iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -23872,6 +23999,10 @@ def write(self, oprot): oprot.writeFieldBegin('cascade', TType.BOOL, 4) oprot.writeBool(self.cascade) oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 5) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -23885,6 +24016,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.tbl_name) value = (value * 31) ^ hash(self.new_tbl) value = (value * 31) ^ hash(self.cascade) + value = (value * 31) ^ hash(self.validWriteIdList) return value def __repr__(self): @@ -24141,15 +24273,18 @@ class add_partition_args: """ Attributes: - new_part + - validWriteIdList """ thrift_spec = ( None, # 0 (1, TType.STRUCT, 'new_part', (Partition, Partition.thrift_spec), None, ), # 1 + (2, TType.STRING, 'validWriteIdList', None, None, ), # 2 ) - def __init__(self, new_part=None,): + def __init__(self, new_part=None, validWriteIdList=None,): self.new_part = new_part + self.validWriteIdList = validWriteIdList def read(self, iprot): if 
iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -24166,6 +24301,11 @@ def read(self, iprot): self.new_part.read(iprot) else: iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -24180,6 +24320,10 @@ def write(self, oprot): oprot.writeFieldBegin('new_part', TType.STRUCT, 1) self.new_part.write(oprot) oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 2) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -24190,6 +24334,7 @@ def validate(self): def __hash__(self): value = 17 value = (value * 31) ^ hash(self.new_part) + value = (value * 31) ^ hash(self.validWriteIdList) return value def __repr__(self): @@ -24315,17 +24460,20 @@ class add_partition_with_environment_context_args: Attributes: - new_part - environment_context + - validWriteIdList """ thrift_spec = ( None, # 0 (1, TType.STRUCT, 'new_part', (Partition, Partition.thrift_spec), None, ), # 1 (2, TType.STRUCT, 'environment_context', (EnvironmentContext, EnvironmentContext.thrift_spec), None, ), # 2 + (3, TType.STRING, 'validWriteIdList', None, None, ), # 3 ) - def __init__(self, new_part=None, environment_context=None,): + def __init__(self, new_part=None, environment_context=None, validWriteIdList=None,): self.new_part = new_part self.environment_context = environment_context + self.validWriteIdList = validWriteIdList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -24348,6 +24496,11 @@ def read(self, iprot): self.environment_context.read(iprot) else: iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -24366,6 +24519,10 @@ def write(self, oprot): oprot.writeFieldBegin('environment_context', TType.STRUCT, 2) self.environment_context.write(oprot) oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 3) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -24377,6 +24534,7 @@ def __hash__(self): value = 17 value = (value * 31) ^ hash(self.new_part) value = (value * 31) ^ hash(self.environment_context) + value = (value * 31) ^ hash(self.validWriteIdList) return value def __repr__(self): @@ -24501,15 +24659,18 @@ class add_partitions_args: """ Attributes: - new_parts + - validWriteIdList """ thrift_spec = ( None, # 0 (1, TType.LIST, 'new_parts', (TType.STRUCT,(Partition, Partition.thrift_spec)), None, ), # 1 + (2, TType.STRING, 'validWriteIdList', None, None, ), # 2 ) - def __init__(self, new_parts=None,): + def __init__(self, new_parts=None, validWriteIdList=None,): self.new_parts = new_parts + self.validWriteIdList = validWriteIdList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -24531,6 +24692,11 @@ def read(self, iprot): iprot.readListEnd() else: 
iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -24548,6 +24714,10 @@ def write(self, oprot): iter1083.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 2) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -24558,6 +24728,7 @@ def validate(self): def __hash__(self): value = 17 value = (value * 31) ^ hash(self.new_parts) + value = (value * 31) ^ hash(self.validWriteIdList) return value def __repr__(self): @@ -24681,15 +24852,18 @@ class add_partitions_pspec_args: """ Attributes: - new_parts + - validWriteIdList """ thrift_spec = ( None, # 0 (1, TType.LIST, 'new_parts', (TType.STRUCT,(PartitionSpec, PartitionSpec.thrift_spec)), None, ), # 1 + (2, TType.STRING, 'validWriteIdList', None, None, ), # 2 ) - def __init__(self, new_parts=None,): + def __init__(self, new_parts=None, validWriteIdList=None,): self.new_parts = new_parts + self.validWriteIdList = validWriteIdList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -24711,6 +24885,11 @@ def read(self, iprot): iprot.readListEnd() else: iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -24728,6 +24907,10 @@ def write(self, oprot): iter1090.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 2) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -24738,6 +24921,7 @@ def validate(self): def __hash__(self): value = 17 value = (value * 31) ^ hash(self.new_parts) + value = (value * 31) ^ hash(self.validWriteIdList) return value def __repr__(self): @@ -24863,6 +25047,7 @@ class append_partition_args: - db_name - tbl_name - part_vals + - validWriteIdList """ thrift_spec = ( @@ -24870,12 +25055,14 @@ class append_partition_args: (1, TType.STRING, 'db_name', None, None, ), # 1 (2, TType.STRING, 'tbl_name', None, None, ), # 2 (3, TType.LIST, 'part_vals', (TType.STRING,None), None, ), # 3 + (4, TType.STRING, 'validWriteIdList', None, None, ), # 4 ) - def __init__(self, db_name=None, tbl_name=None, part_vals=None,): + def __init__(self, db_name=None, tbl_name=None, part_vals=None, validWriteIdList=None,): self.db_name = db_name self.tbl_name = tbl_name self.part_vals = part_vals + self.validWriteIdList = validWriteIdList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -24906,6 +25093,11 @@ def read(self, iprot): iprot.readListEnd() else: iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -24931,6 +25123,10 @@ def write(self, oprot): oprot.writeString(iter1097) oprot.writeListEnd() oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 4) + 
oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -24943,6 +25139,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.db_name) value = (value * 31) ^ hash(self.tbl_name) value = (value * 31) ^ hash(self.part_vals) + value = (value * 31) ^ hash(self.validWriteIdList) return value def __repr__(self): @@ -25243,6 +25440,7 @@ class append_partition_with_environment_context_args: - tbl_name - part_vals - environment_context + - validWriteIdList """ thrift_spec = ( @@ -25251,13 +25449,15 @@ class append_partition_with_environment_context_args: (2, TType.STRING, 'tbl_name', None, None, ), # 2 (3, TType.LIST, 'part_vals', (TType.STRING,None), None, ), # 3 (4, TType.STRUCT, 'environment_context', (EnvironmentContext, EnvironmentContext.thrift_spec), None, ), # 4 + (5, TType.STRING, 'validWriteIdList', None, None, ), # 5 ) - def __init__(self, db_name=None, tbl_name=None, part_vals=None, environment_context=None,): + def __init__(self, db_name=None, tbl_name=None, part_vals=None, environment_context=None, validWriteIdList=None,): self.db_name = db_name self.tbl_name = tbl_name self.part_vals = part_vals self.environment_context = environment_context + self.validWriteIdList = validWriteIdList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -25294,6 +25494,11 @@ def read(self, iprot): self.environment_context.read(iprot) else: iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -25323,6 +25528,10 @@ def write(self, oprot): oprot.writeFieldBegin('environment_context', TType.STRUCT, 4) self.environment_context.write(oprot) oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 5) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -25336,6 +25545,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.tbl_name) value = (value * 31) ^ hash(self.part_vals) value = (value * 31) ^ hash(self.environment_context) + value = (value * 31) ^ hash(self.validWriteIdList) return value def __repr__(self): @@ -25462,6 +25672,7 @@ class append_partition_by_name_args: - db_name - tbl_name - part_name + - validWriteIdList """ thrift_spec = ( @@ -25469,12 +25680,14 @@ class append_partition_by_name_args: (1, TType.STRING, 'db_name', None, None, ), # 1 (2, TType.STRING, 'tbl_name', None, None, ), # 2 (3, TType.STRING, 'part_name', None, None, ), # 3 + (4, TType.STRING, 'validWriteIdList', None, None, ), # 4 ) - def __init__(self, db_name=None, tbl_name=None, part_name=None,): + def __init__(self, db_name=None, tbl_name=None, part_name=None, validWriteIdList=None,): self.db_name = db_name self.tbl_name = tbl_name self.part_name = part_name + self.validWriteIdList = validWriteIdList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -25500,6 +25713,11 @@ def read(self, iprot): self.part_name = iprot.readString() else: iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) 
iprot.readFieldEnd() @@ -25522,6 +25740,10 @@ def write(self, oprot): oprot.writeFieldBegin('part_name', TType.STRING, 3) oprot.writeString(self.part_name) oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 4) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -25534,6 +25756,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.db_name) value = (value * 31) ^ hash(self.tbl_name) value = (value * 31) ^ hash(self.part_name) + value = (value * 31) ^ hash(self.validWriteIdList) return value def __repr__(self): @@ -25661,6 +25884,7 @@ class append_partition_by_name_with_environment_context_args: - tbl_name - part_name - environment_context + - validWriteIdList """ thrift_spec = ( @@ -25669,13 +25893,15 @@ class append_partition_by_name_with_environment_context_args: (2, TType.STRING, 'tbl_name', None, None, ), # 2 (3, TType.STRING, 'part_name', None, None, ), # 3 (4, TType.STRUCT, 'environment_context', (EnvironmentContext, EnvironmentContext.thrift_spec), None, ), # 4 + (5, TType.STRING, 'validWriteIdList', None, None, ), # 5 ) - def __init__(self, db_name=None, tbl_name=None, part_name=None, environment_context=None,): + def __init__(self, db_name=None, tbl_name=None, part_name=None, environment_context=None, validWriteIdList=None,): self.db_name = db_name self.tbl_name = tbl_name self.part_name = part_name self.environment_context = environment_context + self.validWriteIdList = validWriteIdList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -25707,6 +25933,11 @@ def read(self, iprot): self.environment_context.read(iprot) else: iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -25733,6 +25964,10 @@ def write(self, oprot): oprot.writeFieldBegin('environment_context', TType.STRUCT, 4) self.environment_context.write(oprot) oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 5) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -25746,6 +25981,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.tbl_name) value = (value * 31) ^ hash(self.part_name) value = (value * 31) ^ hash(self.environment_context) + value = (value * 31) ^ hash(self.validWriteIdList) return value def __repr__(self): @@ -27053,6 +27289,7 @@ class exchange_partition_args: - source_table_name - dest_db - dest_table_name + - validWriteIdList """ thrift_spec = ( @@ -27062,14 +27299,16 @@ class exchange_partition_args: (3, TType.STRING, 'source_table_name', None, None, ), # 3 (4, TType.STRING, 'dest_db', None, None, ), # 4 (5, TType.STRING, 'dest_table_name', None, None, ), # 5 + (6, TType.STRING, 'validWriteIdList', None, None, ), # 6 ) - def __init__(self, partitionSpecs=None, source_db=None, source_table_name=None, dest_db=None, dest_table_name=None,): + def __init__(self, partitionSpecs=None, source_db=None, source_table_name=None, dest_db=None, dest_table_name=None, validWriteIdList=None,): self.partitionSpecs = partitionSpecs self.source_db = source_db self.source_table_name = source_table_name self.dest_db = dest_db self.dest_table_name = dest_table_name + 
self.validWriteIdList = validWriteIdList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -27111,6 +27350,11 @@ def read(self, iprot): self.dest_table_name = iprot.readString() else: iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -27145,6 +27389,10 @@ def write(self, oprot): oprot.writeFieldBegin('dest_table_name', TType.STRING, 5) oprot.writeString(self.dest_table_name) oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 6) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -27159,6 +27407,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.source_table_name) value = (value * 31) ^ hash(self.dest_db) value = (value * 31) ^ hash(self.dest_table_name) + value = (value * 31) ^ hash(self.validWriteIdList) return value def __repr__(self): @@ -27301,6 +27550,7 @@ class exchange_partitions_args: - source_table_name - dest_db - dest_table_name + - validWriteIdList """ thrift_spec = ( @@ -27310,14 +27560,16 @@ class exchange_partitions_args: (3, TType.STRING, 'source_table_name', None, None, ), # 3 (4, TType.STRING, 'dest_db', None, None, ), # 4 (5, TType.STRING, 'dest_table_name', None, None, ), # 5 + (6, TType.STRING, 'validWriteIdList', None, None, ), # 6 ) - def __init__(self, partitionSpecs=None, source_db=None, source_table_name=None, dest_db=None, dest_table_name=None,): + def __init__(self, partitionSpecs=None, source_db=None, source_table_name=None, dest_db=None, dest_table_name=None, validWriteIdList=None,): self.partitionSpecs = partitionSpecs self.source_db = source_db self.source_table_name = source_table_name self.dest_db = dest_db self.dest_table_name = dest_table_name + self.validWriteIdList = validWriteIdList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -27359,6 +27611,11 @@ def read(self, iprot): self.dest_table_name = iprot.readString() else: iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -27393,6 +27650,10 @@ def write(self, oprot): oprot.writeFieldBegin('dest_table_name', TType.STRING, 5) oprot.writeString(self.dest_table_name) oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 6) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -27407,6 +27668,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.source_table_name) value = (value * 31) ^ hash(self.dest_db) value = (value * 31) ^ hash(self.dest_table_name) + value = (value * 31) ^ hash(self.validWriteIdList) return value def __repr__(self): @@ -34039,15 +34301,18 @@ class update_table_column_statistics_args: """ Attributes: - stats_obj + - validWriteIdList """ thrift_spec = ( None, # 0 (1, TType.STRUCT, 'stats_obj', (ColumnStatistics, ColumnStatistics.thrift_spec), None, ), # 1 + (2, TType.STRING, 'validWriteIdList', None, None, ), # 2 ) - def __init__(self, 
stats_obj=None,): + def __init__(self, stats_obj=None, validWriteIdList=None,): self.stats_obj = stats_obj + self.validWriteIdList = validWriteIdList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -34064,6 +34329,11 @@ def read(self, iprot): self.stats_obj.read(iprot) else: iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -34078,6 +34348,10 @@ def write(self, oprot): oprot.writeFieldBegin('stats_obj', TType.STRUCT, 1) self.stats_obj.write(oprot) oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 2) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -34088,6 +34362,7 @@ def validate(self): def __hash__(self): value = 17 value = (value * 31) ^ hash(self.stats_obj) + value = (value * 31) ^ hash(self.validWriteIdList) return value def __repr__(self): @@ -34225,15 +34500,18 @@ class update_partition_column_statistics_args: """ Attributes: - stats_obj + - validWriteIdList """ thrift_spec = ( None, # 0 (1, TType.STRUCT, 'stats_obj', (ColumnStatistics, ColumnStatistics.thrift_spec), None, ), # 1 + (2, TType.STRING, 'validWriteIdList', None, None, ), # 2 ) - def __init__(self, stats_obj=None,): + def __init__(self, stats_obj=None, validWriteIdList=None,): self.stats_obj = stats_obj + self.validWriteIdList = validWriteIdList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -34250,6 +34528,11 @@ def read(self, iprot): self.stats_obj.read(iprot) else: iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -34264,6 +34547,10 @@ def write(self, oprot): oprot.writeFieldBegin('stats_obj', TType.STRUCT, 1) self.stats_obj.write(oprot) oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 2) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -34274,6 +34561,7 @@ def validate(self): def __hash__(self): value = 17 value = (value * 31) ^ hash(self.stats_obj) + value = (value * 31) ^ hash(self.validWriteIdList) return value def __repr__(self): diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py index 617a3a2..18f0e33 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py @@ -12860,6 +12860,7 @@ class CommitTxnRequest: - writeEventInfos - keyValue - replLastIdInfo + - txnWriteIds """ thrift_spec = ( @@ -12869,14 +12870,16 @@ class CommitTxnRequest: (3, TType.LIST, 'writeEventInfos', (TType.STRUCT,(WriteEventInfo, WriteEventInfo.thrift_spec)), None, ), # 3 (4, TType.STRUCT, 'keyValue', (CommitTxnKeyValue, CommitTxnKeyValue.thrift_spec), None, ), # 4 (5, TType.STRUCT, 'replLastIdInfo', (ReplLastIdInfo, 
ReplLastIdInfo.thrift_spec), None, ), # 5 + (6, TType.STRING, 'txnWriteIds', None, None, ), # 6 ) - def __init__(self, txnid=None, replPolicy=None, writeEventInfos=None, keyValue=None, replLastIdInfo=None,): + def __init__(self, txnid=None, replPolicy=None, writeEventInfos=None, keyValue=None, replLastIdInfo=None, txnWriteIds=None,): self.txnid = txnid self.replPolicy = replPolicy self.writeEventInfos = writeEventInfos self.keyValue = keyValue self.replLastIdInfo = replLastIdInfo + self.txnWriteIds = txnWriteIds def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -12920,6 +12923,11 @@ def read(self, iprot): self.replLastIdInfo.read(iprot) else: iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.txnWriteIds = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -12953,6 +12961,10 @@ def write(self, oprot): oprot.writeFieldBegin('replLastIdInfo', TType.STRUCT, 5) self.replLastIdInfo.write(oprot) oprot.writeFieldEnd() + if self.txnWriteIds is not None: + oprot.writeFieldBegin('txnWriteIds', TType.STRING, 6) + oprot.writeString(self.txnWriteIds) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -12969,6 +12981,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.writeEventInfos) value = (value * 31) ^ hash(self.keyValue) value = (value * 31) ^ hash(self.replLastIdInfo) + value = (value * 31) ^ hash(self.txnWriteIds) return value def __repr__(self): diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb index f6da7eb..a08c3ab 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb @@ -2861,13 +2861,15 @@ class CommitTxnRequest WRITEEVENTINFOS = 3 KEYVALUE = 4 REPLLASTIDINFO = 5 + TXNWRITEIDS = 6 FIELDS = { TXNID => {:type => ::Thrift::Types::I64, :name => 'txnid'}, REPLPOLICY => {:type => ::Thrift::Types::STRING, :name => 'replPolicy', :optional => true}, WRITEEVENTINFOS => {:type => ::Thrift::Types::LIST, :name => 'writeEventInfos', :element => {:type => ::Thrift::Types::STRUCT, :class => ::WriteEventInfo}, :optional => true}, KEYVALUE => {:type => ::Thrift::Types::STRUCT, :name => 'keyValue', :class => ::CommitTxnKeyValue, :optional => true}, - REPLLASTIDINFO => {:type => ::Thrift::Types::STRUCT, :name => 'replLastIdInfo', :class => ::ReplLastIdInfo, :optional => true} + REPLLASTIDINFO => {:type => ::Thrift::Types::STRUCT, :name => 'replLastIdInfo', :class => ::ReplLastIdInfo, :optional => true}, + TXNWRITEIDS => {:type => ::Thrift::Types::STRING, :name => 'txnWriteIds', :optional => true} } def struct_fields; FIELDS; end diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb index 75d4de2..3279108 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb @@ -366,13 +366,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_schema_with_environment_context failed: 
unknown result') end - def create_table(tbl) - send_create_table(tbl) + def create_table(tbl, validWriteIdList) + send_create_table(tbl, validWriteIdList) recv_create_table() end - def send_create_table(tbl) - send_message('create_table', Create_table_args, :tbl => tbl) + def send_create_table(tbl, validWriteIdList) + send_message('create_table', Create_table_args, :tbl => tbl, :validWriteIdList => validWriteIdList) end def recv_create_table() @@ -384,13 +384,13 @@ module ThriftHiveMetastore return end - def create_table_with_environment_context(tbl, environment_context) - send_create_table_with_environment_context(tbl, environment_context) + def create_table_with_environment_context(tbl, environment_context, validWriteIdList) + send_create_table_with_environment_context(tbl, environment_context, validWriteIdList) recv_create_table_with_environment_context() end - def send_create_table_with_environment_context(tbl, environment_context) - send_message('create_table_with_environment_context', Create_table_with_environment_context_args, :tbl => tbl, :environment_context => environment_context) + def send_create_table_with_environment_context(tbl, environment_context, validWriteIdList) + send_message('create_table_with_environment_context', Create_table_with_environment_context_args, :tbl => tbl, :environment_context => environment_context, :validWriteIdList => validWriteIdList) end def recv_create_table_with_environment_context() @@ -402,13 +402,13 @@ module ThriftHiveMetastore return end - def create_table_with_constraints(tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints) - send_create_table_with_constraints(tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints) + def create_table_with_constraints(tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints, validWriteIdList) + send_create_table_with_constraints(tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints, validWriteIdList) recv_create_table_with_constraints() end - def send_create_table_with_constraints(tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints) - send_message('create_table_with_constraints', Create_table_with_constraints_args, :tbl => tbl, :primaryKeys => primaryKeys, :foreignKeys => foreignKeys, :uniqueConstraints => uniqueConstraints, :notNullConstraints => notNullConstraints, :defaultConstraints => defaultConstraints, :checkConstraints => checkConstraints) + def send_create_table_with_constraints(tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints, validWriteIdList) + send_message('create_table_with_constraints', Create_table_with_constraints_args, :tbl => tbl, :primaryKeys => primaryKeys, :foreignKeys => foreignKeys, :uniqueConstraints => uniqueConstraints, :notNullConstraints => notNullConstraints, :defaultConstraints => defaultConstraints, :checkConstraints => checkConstraints, :validWriteIdList => validWriteIdList) end def recv_create_table_with_constraints() @@ -811,13 +811,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_table_names_by_filter failed: unknown result') end - def alter_table(dbname, tbl_name, new_tbl) - send_alter_table(dbname, tbl_name, new_tbl) + def alter_table(dbname, tbl_name, new_tbl, 
validWriteIdList) + send_alter_table(dbname, tbl_name, new_tbl, validWriteIdList) recv_alter_table() end - def send_alter_table(dbname, tbl_name, new_tbl) - send_message('alter_table', Alter_table_args, :dbname => dbname, :tbl_name => tbl_name, :new_tbl => new_tbl) + def send_alter_table(dbname, tbl_name, new_tbl, validWriteIdList) + send_message('alter_table', Alter_table_args, :dbname => dbname, :tbl_name => tbl_name, :new_tbl => new_tbl, :validWriteIdList => validWriteIdList) end def recv_alter_table() @@ -827,13 +827,13 @@ module ThriftHiveMetastore return end - def alter_table_with_environment_context(dbname, tbl_name, new_tbl, environment_context) - send_alter_table_with_environment_context(dbname, tbl_name, new_tbl, environment_context) + def alter_table_with_environment_context(dbname, tbl_name, new_tbl, environment_context, validWriteIdList) + send_alter_table_with_environment_context(dbname, tbl_name, new_tbl, environment_context, validWriteIdList) recv_alter_table_with_environment_context() end - def send_alter_table_with_environment_context(dbname, tbl_name, new_tbl, environment_context) - send_message('alter_table_with_environment_context', Alter_table_with_environment_context_args, :dbname => dbname, :tbl_name => tbl_name, :new_tbl => new_tbl, :environment_context => environment_context) + def send_alter_table_with_environment_context(dbname, tbl_name, new_tbl, environment_context, validWriteIdList) + send_message('alter_table_with_environment_context', Alter_table_with_environment_context_args, :dbname => dbname, :tbl_name => tbl_name, :new_tbl => new_tbl, :environment_context => environment_context, :validWriteIdList => validWriteIdList) end def recv_alter_table_with_environment_context() @@ -843,13 +843,13 @@ module ThriftHiveMetastore return end - def alter_table_with_cascade(dbname, tbl_name, new_tbl, cascade) - send_alter_table_with_cascade(dbname, tbl_name, new_tbl, cascade) + def alter_table_with_cascade(dbname, tbl_name, new_tbl, cascade, validWriteIdList) + send_alter_table_with_cascade(dbname, tbl_name, new_tbl, cascade, validWriteIdList) recv_alter_table_with_cascade() end - def send_alter_table_with_cascade(dbname, tbl_name, new_tbl, cascade) - send_message('alter_table_with_cascade', Alter_table_with_cascade_args, :dbname => dbname, :tbl_name => tbl_name, :new_tbl => new_tbl, :cascade => cascade) + def send_alter_table_with_cascade(dbname, tbl_name, new_tbl, cascade, validWriteIdList) + send_message('alter_table_with_cascade', Alter_table_with_cascade_args, :dbname => dbname, :tbl_name => tbl_name, :new_tbl => new_tbl, :cascade => cascade, :validWriteIdList => validWriteIdList) end def recv_alter_table_with_cascade() @@ -876,13 +876,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'alter_table_req failed: unknown result') end - def add_partition(new_part) - send_add_partition(new_part) + def add_partition(new_part, validWriteIdList) + send_add_partition(new_part, validWriteIdList) return recv_add_partition() end - def send_add_partition(new_part) - send_message('add_partition', Add_partition_args, :new_part => new_part) + def send_add_partition(new_part, validWriteIdList) + send_message('add_partition', Add_partition_args, :new_part => new_part, :validWriteIdList => validWriteIdList) end def recv_add_partition() @@ -894,13 +894,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'add_partition failed: unknown result') end 
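The regenerated stubs above all follow the same pattern: validWriteIdList is appended as a new, higher-numbered string field, write() emits it only when it is not None, and read() falls through to iprot.skip(ftype) for any field id the peer does not recognize, so old and new clients stay wire-compatible (CommitTxnRequest.txnWriteIds and the Ruby FIELDS entries use the same trailing-field approach). A minimal round-trip sketch of that behavior, assuming the regenerated gen-py package is importable as hive_metastore and using a plain TBinaryProtocol so the pure-Python read()/write() paths shown above are the ones exercised; the snapshot string below is illustrative only, the real value comes from ValidWriteIdList.writeToString() on the Java side:

from thrift.protocol import TBinaryProtocol
from thrift.transport import TTransport
from hive_metastore.ThriftHiveMetastore import Append_partition_by_name_args

def roundtrip(args):
    # Serialize with the non-accelerated binary protocol so the generated
    # write()/read() methods shown in this patch are the ones that run.
    out = TTransport.TMemoryBuffer()
    args.write(TBinaryProtocol.TBinaryProtocol(out))
    back = Append_partition_by_name_args()
    back.read(TBinaryProtocol.TBinaryProtocol(TTransport.TMemoryBuffer(out.getvalue())))
    return back

# New-style caller: field id 4 is written after db_name/tbl_name/part_name.
with_ids = roundtrip(Append_partition_by_name_args(
    db_name='default', tbl_name='tbl', part_name='ds=2020-01-01',
    validWriteIdList='default.tbl:42:9223372036854775807::'))  # illustrative snapshot string

# Old-style caller: the field stays None, so it is never written at all.
without_ids = roundtrip(Append_partition_by_name_args(
    db_name='default', tbl_name='tbl', part_name='ds=2020-01-01'))

print(with_ids.validWriteIdList)     # the snapshot string survives the round trip
print(without_ids.validWriteIdList)  # None: field id 4 was simply omitted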
- def add_partition_with_environment_context(new_part, environment_context) - send_add_partition_with_environment_context(new_part, environment_context) + def add_partition_with_environment_context(new_part, environment_context, validWriteIdList) + send_add_partition_with_environment_context(new_part, environment_context, validWriteIdList) return recv_add_partition_with_environment_context() end - def send_add_partition_with_environment_context(new_part, environment_context) - send_message('add_partition_with_environment_context', Add_partition_with_environment_context_args, :new_part => new_part, :environment_context => environment_context) + def send_add_partition_with_environment_context(new_part, environment_context, validWriteIdList) + send_message('add_partition_with_environment_context', Add_partition_with_environment_context_args, :new_part => new_part, :environment_context => environment_context, :validWriteIdList => validWriteIdList) end def recv_add_partition_with_environment_context() @@ -912,13 +912,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'add_partition_with_environment_context failed: unknown result') end - def add_partitions(new_parts) - send_add_partitions(new_parts) + def add_partitions(new_parts, validWriteIdList) + send_add_partitions(new_parts, validWriteIdList) return recv_add_partitions() end - def send_add_partitions(new_parts) - send_message('add_partitions', Add_partitions_args, :new_parts => new_parts) + def send_add_partitions(new_parts, validWriteIdList) + send_message('add_partitions', Add_partitions_args, :new_parts => new_parts, :validWriteIdList => validWriteIdList) end def recv_add_partitions() @@ -930,13 +930,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'add_partitions failed: unknown result') end - def add_partitions_pspec(new_parts) - send_add_partitions_pspec(new_parts) + def add_partitions_pspec(new_parts, validWriteIdList) + send_add_partitions_pspec(new_parts, validWriteIdList) return recv_add_partitions_pspec() end - def send_add_partitions_pspec(new_parts) - send_message('add_partitions_pspec', Add_partitions_pspec_args, :new_parts => new_parts) + def send_add_partitions_pspec(new_parts, validWriteIdList) + send_message('add_partitions_pspec', Add_partitions_pspec_args, :new_parts => new_parts, :validWriteIdList => validWriteIdList) end def recv_add_partitions_pspec() @@ -948,13 +948,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'add_partitions_pspec failed: unknown result') end - def append_partition(db_name, tbl_name, part_vals) - send_append_partition(db_name, tbl_name, part_vals) + def append_partition(db_name, tbl_name, part_vals, validWriteIdList) + send_append_partition(db_name, tbl_name, part_vals, validWriteIdList) return recv_append_partition() end - def send_append_partition(db_name, tbl_name, part_vals) - send_message('append_partition', Append_partition_args, :db_name => db_name, :tbl_name => tbl_name, :part_vals => part_vals) + def send_append_partition(db_name, tbl_name, part_vals, validWriteIdList) + send_message('append_partition', Append_partition_args, :db_name => db_name, :tbl_name => tbl_name, :part_vals => part_vals, :validWriteIdList => validWriteIdList) end def recv_append_partition() @@ -984,13 +984,13 @@ module ThriftHiveMetastore raise 
::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'add_partitions_req failed: unknown result') end - def append_partition_with_environment_context(db_name, tbl_name, part_vals, environment_context) - send_append_partition_with_environment_context(db_name, tbl_name, part_vals, environment_context) + def append_partition_with_environment_context(db_name, tbl_name, part_vals, environment_context, validWriteIdList) + send_append_partition_with_environment_context(db_name, tbl_name, part_vals, environment_context, validWriteIdList) return recv_append_partition_with_environment_context() end - def send_append_partition_with_environment_context(db_name, tbl_name, part_vals, environment_context) - send_message('append_partition_with_environment_context', Append_partition_with_environment_context_args, :db_name => db_name, :tbl_name => tbl_name, :part_vals => part_vals, :environment_context => environment_context) + def send_append_partition_with_environment_context(db_name, tbl_name, part_vals, environment_context, validWriteIdList) + send_message('append_partition_with_environment_context', Append_partition_with_environment_context_args, :db_name => db_name, :tbl_name => tbl_name, :part_vals => part_vals, :environment_context => environment_context, :validWriteIdList => validWriteIdList) end def recv_append_partition_with_environment_context() @@ -1002,13 +1002,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'append_partition_with_environment_context failed: unknown result') end - def append_partition_by_name(db_name, tbl_name, part_name) - send_append_partition_by_name(db_name, tbl_name, part_name) + def append_partition_by_name(db_name, tbl_name, part_name, validWriteIdList) + send_append_partition_by_name(db_name, tbl_name, part_name, validWriteIdList) return recv_append_partition_by_name() end - def send_append_partition_by_name(db_name, tbl_name, part_name) - send_message('append_partition_by_name', Append_partition_by_name_args, :db_name => db_name, :tbl_name => tbl_name, :part_name => part_name) + def send_append_partition_by_name(db_name, tbl_name, part_name, validWriteIdList) + send_message('append_partition_by_name', Append_partition_by_name_args, :db_name => db_name, :tbl_name => tbl_name, :part_name => part_name, :validWriteIdList => validWriteIdList) end def recv_append_partition_by_name() @@ -1020,13 +1020,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'append_partition_by_name failed: unknown result') end - def append_partition_by_name_with_environment_context(db_name, tbl_name, part_name, environment_context) - send_append_partition_by_name_with_environment_context(db_name, tbl_name, part_name, environment_context) + def append_partition_by_name_with_environment_context(db_name, tbl_name, part_name, environment_context, validWriteIdList) + send_append_partition_by_name_with_environment_context(db_name, tbl_name, part_name, environment_context, validWriteIdList) return recv_append_partition_by_name_with_environment_context() end - def send_append_partition_by_name_with_environment_context(db_name, tbl_name, part_name, environment_context) - send_message('append_partition_by_name_with_environment_context', Append_partition_by_name_with_environment_context_args, :db_name => db_name, :tbl_name => tbl_name, :part_name => part_name, :environment_context => environment_context) + def 
send_append_partition_by_name_with_environment_context(db_name, tbl_name, part_name, environment_context, validWriteIdList) + send_message('append_partition_by_name_with_environment_context', Append_partition_by_name_with_environment_context_args, :db_name => db_name, :tbl_name => tbl_name, :part_name => part_name, :environment_context => environment_context, :validWriteIdList => validWriteIdList) end def recv_append_partition_by_name_with_environment_context() @@ -1140,13 +1140,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_partition failed: unknown result') end - def exchange_partition(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name) - send_exchange_partition(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name) + def exchange_partition(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name, validWriteIdList) + send_exchange_partition(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name, validWriteIdList) return recv_exchange_partition() end - def send_exchange_partition(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name) - send_message('exchange_partition', Exchange_partition_args, :partitionSpecs => partitionSpecs, :source_db => source_db, :source_table_name => source_table_name, :dest_db => dest_db, :dest_table_name => dest_table_name) + def send_exchange_partition(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name, validWriteIdList) + send_message('exchange_partition', Exchange_partition_args, :partitionSpecs => partitionSpecs, :source_db => source_db, :source_table_name => source_table_name, :dest_db => dest_db, :dest_table_name => dest_table_name, :validWriteIdList => validWriteIdList) end def recv_exchange_partition() @@ -1159,13 +1159,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'exchange_partition failed: unknown result') end - def exchange_partitions(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name) - send_exchange_partitions(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name) + def exchange_partitions(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name, validWriteIdList) + send_exchange_partitions(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name, validWriteIdList) return recv_exchange_partitions() end - def send_exchange_partitions(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name) - send_message('exchange_partitions', Exchange_partitions_args, :partitionSpecs => partitionSpecs, :source_db => source_db, :source_table_name => source_table_name, :dest_db => dest_db, :dest_table_name => dest_table_name) + def send_exchange_partitions(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name, validWriteIdList) + send_message('exchange_partitions', Exchange_partitions_args, :partitionSpecs => partitionSpecs, :source_db => source_db, :source_table_name => source_table_name, :dest_db => dest_db, :dest_table_name => dest_table_name, :validWriteIdList => validWriteIdList) end def recv_exchange_partitions() @@ -1771,13 +1771,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_check_constraints failed: unknown result') end - def update_table_column_statistics(stats_obj) - send_update_table_column_statistics(stats_obj) + def 
update_table_column_statistics(stats_obj, validWriteIdList) + send_update_table_column_statistics(stats_obj, validWriteIdList) return recv_update_table_column_statistics() end - def send_update_table_column_statistics(stats_obj) - send_message('update_table_column_statistics', Update_table_column_statistics_args, :stats_obj => stats_obj) + def send_update_table_column_statistics(stats_obj, validWriteIdList) + send_message('update_table_column_statistics', Update_table_column_statistics_args, :stats_obj => stats_obj, :validWriteIdList => validWriteIdList) end def recv_update_table_column_statistics() @@ -1790,13 +1790,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'update_table_column_statistics failed: unknown result') end - def update_partition_column_statistics(stats_obj) - send_update_partition_column_statistics(stats_obj) + def update_partition_column_statistics(stats_obj, validWriteIdList) + send_update_partition_column_statistics(stats_obj, validWriteIdList) return recv_update_partition_column_statistics() end - def send_update_partition_column_statistics(stats_obj) - send_message('update_partition_column_statistics', Update_partition_column_statistics_args, :stats_obj => stats_obj) + def send_update_partition_column_statistics(stats_obj, validWriteIdList) + send_message('update_partition_column_statistics', Update_partition_column_statistics_args, :stats_obj => stats_obj, :validWriteIdList => validWriteIdList) end def recv_update_partition_column_statistics() @@ -4019,7 +4019,7 @@ module ThriftHiveMetastore args = read_args(iprot, Create_table_args) result = Create_table_result.new() begin - @handler.create_table(args.tbl) + @handler.create_table(args.tbl, args.validWriteIdList) rescue ::AlreadyExistsException => o1 result.o1 = o1 rescue ::InvalidObjectException => o2 @@ -4036,7 +4036,7 @@ module ThriftHiveMetastore args = read_args(iprot, Create_table_with_environment_context_args) result = Create_table_with_environment_context_result.new() begin - @handler.create_table_with_environment_context(args.tbl, args.environment_context) + @handler.create_table_with_environment_context(args.tbl, args.environment_context, args.validWriteIdList) rescue ::AlreadyExistsException => o1 result.o1 = o1 rescue ::InvalidObjectException => o2 @@ -4053,7 +4053,7 @@ module ThriftHiveMetastore args = read_args(iprot, Create_table_with_constraints_args) result = Create_table_with_constraints_result.new() begin - @handler.create_table_with_constraints(args.tbl, args.primaryKeys, args.foreignKeys, args.uniqueConstraints, args.notNullConstraints, args.defaultConstraints, args.checkConstraints) + @handler.create_table_with_constraints(args.tbl, args.primaryKeys, args.foreignKeys, args.uniqueConstraints, args.notNullConstraints, args.defaultConstraints, args.checkConstraints, args.validWriteIdList) rescue ::AlreadyExistsException => o1 result.o1 = o1 rescue ::InvalidObjectException => o2 @@ -4368,7 +4368,7 @@ module ThriftHiveMetastore args = read_args(iprot, Alter_table_args) result = Alter_table_result.new() begin - @handler.alter_table(args.dbname, args.tbl_name, args.new_tbl) + @handler.alter_table(args.dbname, args.tbl_name, args.new_tbl, args.validWriteIdList) rescue ::InvalidOperationException => o1 result.o1 = o1 rescue ::MetaException => o2 @@ -4381,7 +4381,7 @@ module ThriftHiveMetastore args = read_args(iprot, Alter_table_with_environment_context_args) result = Alter_table_with_environment_context_result.new() begin - 
@handler.alter_table_with_environment_context(args.dbname, args.tbl_name, args.new_tbl, args.environment_context) + @handler.alter_table_with_environment_context(args.dbname, args.tbl_name, args.new_tbl, args.environment_context, args.validWriteIdList) rescue ::InvalidOperationException => o1 result.o1 = o1 rescue ::MetaException => o2 @@ -4394,7 +4394,7 @@ module ThriftHiveMetastore args = read_args(iprot, Alter_table_with_cascade_args) result = Alter_table_with_cascade_result.new() begin - @handler.alter_table_with_cascade(args.dbname, args.tbl_name, args.new_tbl, args.cascade) + @handler.alter_table_with_cascade(args.dbname, args.tbl_name, args.new_tbl, args.cascade, args.validWriteIdList) rescue ::InvalidOperationException => o1 result.o1 = o1 rescue ::MetaException => o2 @@ -4420,7 +4420,7 @@ module ThriftHiveMetastore args = read_args(iprot, Add_partition_args) result = Add_partition_result.new() begin - result.success = @handler.add_partition(args.new_part) + result.success = @handler.add_partition(args.new_part, args.validWriteIdList) rescue ::InvalidObjectException => o1 result.o1 = o1 rescue ::AlreadyExistsException => o2 @@ -4435,7 +4435,7 @@ module ThriftHiveMetastore args = read_args(iprot, Add_partition_with_environment_context_args) result = Add_partition_with_environment_context_result.new() begin - result.success = @handler.add_partition_with_environment_context(args.new_part, args.environment_context) + result.success = @handler.add_partition_with_environment_context(args.new_part, args.environment_context, args.validWriteIdList) rescue ::InvalidObjectException => o1 result.o1 = o1 rescue ::AlreadyExistsException => o2 @@ -4450,7 +4450,7 @@ module ThriftHiveMetastore args = read_args(iprot, Add_partitions_args) result = Add_partitions_result.new() begin - result.success = @handler.add_partitions(args.new_parts) + result.success = @handler.add_partitions(args.new_parts, args.validWriteIdList) rescue ::InvalidObjectException => o1 result.o1 = o1 rescue ::AlreadyExistsException => o2 @@ -4465,7 +4465,7 @@ module ThriftHiveMetastore args = read_args(iprot, Add_partitions_pspec_args) result = Add_partitions_pspec_result.new() begin - result.success = @handler.add_partitions_pspec(args.new_parts) + result.success = @handler.add_partitions_pspec(args.new_parts, args.validWriteIdList) rescue ::InvalidObjectException => o1 result.o1 = o1 rescue ::AlreadyExistsException => o2 @@ -4480,7 +4480,7 @@ module ThriftHiveMetastore args = read_args(iprot, Append_partition_args) result = Append_partition_result.new() begin - result.success = @handler.append_partition(args.db_name, args.tbl_name, args.part_vals) + result.success = @handler.append_partition(args.db_name, args.tbl_name, args.part_vals, args.validWriteIdList) rescue ::InvalidObjectException => o1 result.o1 = o1 rescue ::AlreadyExistsException => o2 @@ -4510,7 +4510,7 @@ module ThriftHiveMetastore args = read_args(iprot, Append_partition_with_environment_context_args) result = Append_partition_with_environment_context_result.new() begin - result.success = @handler.append_partition_with_environment_context(args.db_name, args.tbl_name, args.part_vals, args.environment_context) + result.success = @handler.append_partition_with_environment_context(args.db_name, args.tbl_name, args.part_vals, args.environment_context, args.validWriteIdList) rescue ::InvalidObjectException => o1 result.o1 = o1 rescue ::AlreadyExistsException => o2 @@ -4525,7 +4525,7 @@ module ThriftHiveMetastore args = read_args(iprot, Append_partition_by_name_args) 
result = Append_partition_by_name_result.new() begin - result.success = @handler.append_partition_by_name(args.db_name, args.tbl_name, args.part_name) + result.success = @handler.append_partition_by_name(args.db_name, args.tbl_name, args.part_name, args.validWriteIdList) rescue ::InvalidObjectException => o1 result.o1 = o1 rescue ::AlreadyExistsException => o2 @@ -4540,7 +4540,7 @@ module ThriftHiveMetastore args = read_args(iprot, Append_partition_by_name_with_environment_context_args) result = Append_partition_by_name_with_environment_context_result.new() begin - result.success = @handler.append_partition_by_name_with_environment_context(args.db_name, args.tbl_name, args.part_name, args.environment_context) + result.success = @handler.append_partition_by_name_with_environment_context(args.db_name, args.tbl_name, args.part_name, args.environment_context, args.validWriteIdList) rescue ::InvalidObjectException => o1 result.o1 = o1 rescue ::AlreadyExistsException => o2 @@ -4633,7 +4633,7 @@ module ThriftHiveMetastore args = read_args(iprot, Exchange_partition_args) result = Exchange_partition_result.new() begin - result.success = @handler.exchange_partition(args.partitionSpecs, args.source_db, args.source_table_name, args.dest_db, args.dest_table_name) + result.success = @handler.exchange_partition(args.partitionSpecs, args.source_db, args.source_table_name, args.dest_db, args.dest_table_name, args.validWriteIdList) rescue ::MetaException => o1 result.o1 = o1 rescue ::NoSuchObjectException => o2 @@ -4650,7 +4650,7 @@ module ThriftHiveMetastore args = read_args(iprot, Exchange_partitions_args) result = Exchange_partitions_result.new() begin - result.success = @handler.exchange_partitions(args.partitionSpecs, args.source_db, args.source_table_name, args.dest_db, args.dest_table_name) + result.success = @handler.exchange_partitions(args.partitionSpecs, args.source_db, args.source_table_name, args.dest_db, args.dest_table_name, args.validWriteIdList) rescue ::MetaException => o1 result.o1 = o1 rescue ::NoSuchObjectException => o2 @@ -5130,7 +5130,7 @@ module ThriftHiveMetastore args = read_args(iprot, Update_table_column_statistics_args) result = Update_table_column_statistics_result.new() begin - result.success = @handler.update_table_column_statistics(args.stats_obj) + result.success = @handler.update_table_column_statistics(args.stats_obj, args.validWriteIdList) rescue ::NoSuchObjectException => o1 result.o1 = o1 rescue ::InvalidObjectException => o2 @@ -5147,7 +5147,7 @@ module ThriftHiveMetastore args = read_args(iprot, Update_partition_column_statistics_args) result = Update_partition_column_statistics_result.new() begin - result.success = @handler.update_partition_column_statistics(args.stats_obj) + result.success = @handler.update_partition_column_statistics(args.stats_obj, args.validWriteIdList) rescue ::NoSuchObjectException => o1 result.o1 = o1 rescue ::InvalidObjectException => o2 @@ -7291,9 +7291,11 @@ module ThriftHiveMetastore class Create_table_args include ::Thrift::Struct, ::Thrift::Struct_Union TBL = 1 + VALIDWRITEIDLIST = 2 FIELDS = { - TBL => {:type => ::Thrift::Types::STRUCT, :name => 'tbl', :class => ::Table} + TBL => {:type => ::Thrift::Types::STRUCT, :name => 'tbl', :class => ::Table}, + VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList'} } def struct_fields; FIELDS; end @@ -7330,10 +7332,12 @@ module ThriftHiveMetastore include ::Thrift::Struct, ::Thrift::Struct_Union TBL = 1 ENVIRONMENT_CONTEXT = 2 + VALIDWRITEIDLIST = 3 FIELDS = { TBL 
=> {:type => ::Thrift::Types::STRUCT, :name => 'tbl', :class => ::Table}, - ENVIRONMENT_CONTEXT => {:type => ::Thrift::Types::STRUCT, :name => 'environment_context', :class => ::EnvironmentContext} + ENVIRONMENT_CONTEXT => {:type => ::Thrift::Types::STRUCT, :name => 'environment_context', :class => ::EnvironmentContext}, + VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList'} } def struct_fields; FIELDS; end @@ -7375,6 +7379,7 @@ module ThriftHiveMetastore NOTNULLCONSTRAINTS = 5 DEFAULTCONSTRAINTS = 6 CHECKCONSTRAINTS = 7 + VALIDWRITEIDLIST = 8 FIELDS = { TBL => {:type => ::Thrift::Types::STRUCT, :name => 'tbl', :class => ::Table}, @@ -7383,7 +7388,8 @@ module ThriftHiveMetastore UNIQUECONSTRAINTS => {:type => ::Thrift::Types::LIST, :name => 'uniqueConstraints', :element => {:type => ::Thrift::Types::STRUCT, :class => ::SQLUniqueConstraint}}, NOTNULLCONSTRAINTS => {:type => ::Thrift::Types::LIST, :name => 'notNullConstraints', :element => {:type => ::Thrift::Types::STRUCT, :class => ::SQLNotNullConstraint}}, DEFAULTCONSTRAINTS => {:type => ::Thrift::Types::LIST, :name => 'defaultConstraints', :element => {:type => ::Thrift::Types::STRUCT, :class => ::SQLDefaultConstraint}}, - CHECKCONSTRAINTS => {:type => ::Thrift::Types::LIST, :name => 'checkConstraints', :element => {:type => ::Thrift::Types::STRUCT, :class => ::SQLCheckConstraint}} + CHECKCONSTRAINTS => {:type => ::Thrift::Types::LIST, :name => 'checkConstraints', :element => {:type => ::Thrift::Types::STRUCT, :class => ::SQLCheckConstraint}}, + VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList'} } def struct_fields; FIELDS; end @@ -8290,11 +8296,13 @@ module ThriftHiveMetastore DBNAME = 1 TBL_NAME = 2 NEW_TBL = 3 + VALIDWRITEIDLIST = 4 FIELDS = { DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbname'}, TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'}, - NEW_TBL => {:type => ::Thrift::Types::STRUCT, :name => 'new_tbl', :class => ::Table} + NEW_TBL => {:type => ::Thrift::Types::STRUCT, :name => 'new_tbl', :class => ::Table}, + VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList'} } def struct_fields; FIELDS; end @@ -8329,12 +8337,14 @@ module ThriftHiveMetastore TBL_NAME = 2 NEW_TBL = 3 ENVIRONMENT_CONTEXT = 4 + VALIDWRITEIDLIST = 5 FIELDS = { DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbname'}, TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'}, NEW_TBL => {:type => ::Thrift::Types::STRUCT, :name => 'new_tbl', :class => ::Table}, - ENVIRONMENT_CONTEXT => {:type => ::Thrift::Types::STRUCT, :name => 'environment_context', :class => ::EnvironmentContext} + ENVIRONMENT_CONTEXT => {:type => ::Thrift::Types::STRUCT, :name => 'environment_context', :class => ::EnvironmentContext}, + VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList'} } def struct_fields; FIELDS; end @@ -8369,12 +8379,14 @@ module ThriftHiveMetastore TBL_NAME = 2 NEW_TBL = 3 CASCADE = 4 + VALIDWRITEIDLIST = 5 FIELDS = { DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbname'}, TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'}, NEW_TBL => {:type => ::Thrift::Types::STRUCT, :name => 'new_tbl', :class => ::Table}, - CASCADE => {:type => ::Thrift::Types::BOOL, :name => 'cascade'} + CASCADE => {:type => ::Thrift::Types::BOOL, :name => 'cascade'}, + VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList'} } def struct_fields; FIELDS; end @@ -8442,9 
+8454,11 @@ module ThriftHiveMetastore class Add_partition_args include ::Thrift::Struct, ::Thrift::Struct_Union NEW_PART = 1 + VALIDWRITEIDLIST = 2 FIELDS = { - NEW_PART => {:type => ::Thrift::Types::STRUCT, :name => 'new_part', :class => ::Partition} + NEW_PART => {:type => ::Thrift::Types::STRUCT, :name => 'new_part', :class => ::Partition}, + VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList'} } def struct_fields; FIELDS; end @@ -8481,10 +8495,12 @@ module ThriftHiveMetastore include ::Thrift::Struct, ::Thrift::Struct_Union NEW_PART = 1 ENVIRONMENT_CONTEXT = 2 + VALIDWRITEIDLIST = 3 FIELDS = { NEW_PART => {:type => ::Thrift::Types::STRUCT, :name => 'new_part', :class => ::Partition}, - ENVIRONMENT_CONTEXT => {:type => ::Thrift::Types::STRUCT, :name => 'environment_context', :class => ::EnvironmentContext} + ENVIRONMENT_CONTEXT => {:type => ::Thrift::Types::STRUCT, :name => 'environment_context', :class => ::EnvironmentContext}, + VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList'} } def struct_fields; FIELDS; end @@ -8520,9 +8536,11 @@ module ThriftHiveMetastore class Add_partitions_args include ::Thrift::Struct, ::Thrift::Struct_Union NEW_PARTS = 1 + VALIDWRITEIDLIST = 2 FIELDS = { - NEW_PARTS => {:type => ::Thrift::Types::LIST, :name => 'new_parts', :element => {:type => ::Thrift::Types::STRUCT, :class => ::Partition}} + NEW_PARTS => {:type => ::Thrift::Types::LIST, :name => 'new_parts', :element => {:type => ::Thrift::Types::STRUCT, :class => ::Partition}}, + VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList'} } def struct_fields; FIELDS; end @@ -8558,9 +8576,11 @@ module ThriftHiveMetastore class Add_partitions_pspec_args include ::Thrift::Struct, ::Thrift::Struct_Union NEW_PARTS = 1 + VALIDWRITEIDLIST = 2 FIELDS = { - NEW_PARTS => {:type => ::Thrift::Types::LIST, :name => 'new_parts', :element => {:type => ::Thrift::Types::STRUCT, :class => ::PartitionSpec}} + NEW_PARTS => {:type => ::Thrift::Types::LIST, :name => 'new_parts', :element => {:type => ::Thrift::Types::STRUCT, :class => ::PartitionSpec}}, + VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList'} } def struct_fields; FIELDS; end @@ -8598,11 +8618,13 @@ module ThriftHiveMetastore DB_NAME = 1 TBL_NAME = 2 PART_VALS = 3 + VALIDWRITEIDLIST = 4 FIELDS = { DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'}, - PART_VALS => {:type => ::Thrift::Types::LIST, :name => 'part_vals', :element => {:type => ::Thrift::Types::STRING}} + PART_VALS => {:type => ::Thrift::Types::LIST, :name => 'part_vals', :element => {:type => ::Thrift::Types::STRING}}, + VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList'} } def struct_fields; FIELDS; end @@ -8679,12 +8701,14 @@ module ThriftHiveMetastore TBL_NAME = 2 PART_VALS = 3 ENVIRONMENT_CONTEXT = 4 + VALIDWRITEIDLIST = 5 FIELDS = { DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'}, PART_VALS => {:type => ::Thrift::Types::LIST, :name => 'part_vals', :element => {:type => ::Thrift::Types::STRING}}, - ENVIRONMENT_CONTEXT => {:type => ::Thrift::Types::STRUCT, :name => 'environment_context', :class => ::EnvironmentContext} + ENVIRONMENT_CONTEXT => {:type => ::Thrift::Types::STRUCT, :name => 'environment_context', :class => ::EnvironmentContext}, + VALIDWRITEIDLIST => {:type => 
::Thrift::Types::STRING, :name => 'validWriteIdList'} } def struct_fields; FIELDS; end @@ -8722,11 +8746,13 @@ module ThriftHiveMetastore DB_NAME = 1 TBL_NAME = 2 PART_NAME = 3 + VALIDWRITEIDLIST = 4 FIELDS = { DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'}, - PART_NAME => {:type => ::Thrift::Types::STRING, :name => 'part_name'} + PART_NAME => {:type => ::Thrift::Types::STRING, :name => 'part_name'}, + VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList'} } def struct_fields; FIELDS; end @@ -8765,12 +8791,14 @@ module ThriftHiveMetastore TBL_NAME = 2 PART_NAME = 3 ENVIRONMENT_CONTEXT = 4 + VALIDWRITEIDLIST = 5 FIELDS = { DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'}, PART_NAME => {:type => ::Thrift::Types::STRING, :name => 'part_name'}, - ENVIRONMENT_CONTEXT => {:type => ::Thrift::Types::STRUCT, :name => 'environment_context', :class => ::EnvironmentContext} + ENVIRONMENT_CONTEXT => {:type => ::Thrift::Types::STRUCT, :name => 'environment_context', :class => ::EnvironmentContext}, + VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList'} } def struct_fields; FIELDS; end @@ -9058,13 +9086,15 @@ module ThriftHiveMetastore SOURCE_TABLE_NAME = 3 DEST_DB = 4 DEST_TABLE_NAME = 5 + VALIDWRITEIDLIST = 6 FIELDS = { PARTITIONSPECS => {:type => ::Thrift::Types::MAP, :name => 'partitionSpecs', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::STRING}}, SOURCE_DB => {:type => ::Thrift::Types::STRING, :name => 'source_db'}, SOURCE_TABLE_NAME => {:type => ::Thrift::Types::STRING, :name => 'source_table_name'}, DEST_DB => {:type => ::Thrift::Types::STRING, :name => 'dest_db'}, - DEST_TABLE_NAME => {:type => ::Thrift::Types::STRING, :name => 'dest_table_name'} + DEST_TABLE_NAME => {:type => ::Thrift::Types::STRING, :name => 'dest_table_name'}, + VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList'} } def struct_fields; FIELDS; end @@ -9106,13 +9136,15 @@ module ThriftHiveMetastore SOURCE_TABLE_NAME = 3 DEST_DB = 4 DEST_TABLE_NAME = 5 + VALIDWRITEIDLIST = 6 FIELDS = { PARTITIONSPECS => {:type => ::Thrift::Types::MAP, :name => 'partitionSpecs', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::STRING}}, SOURCE_DB => {:type => ::Thrift::Types::STRING, :name => 'source_db'}, SOURCE_TABLE_NAME => {:type => ::Thrift::Types::STRING, :name => 'source_table_name'}, DEST_DB => {:type => ::Thrift::Types::STRING, :name => 'dest_db'}, - DEST_TABLE_NAME => {:type => ::Thrift::Types::STRING, :name => 'dest_table_name'} + DEST_TABLE_NAME => {:type => ::Thrift::Types::STRING, :name => 'dest_table_name'}, + VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList'} } def struct_fields; FIELDS; end @@ -10528,9 +10560,11 @@ module ThriftHiveMetastore class Update_table_column_statistics_args include ::Thrift::Struct, ::Thrift::Struct_Union STATS_OBJ = 1 + VALIDWRITEIDLIST = 2 FIELDS = { - STATS_OBJ => {:type => ::Thrift::Types::STRUCT, :name => 'stats_obj', :class => ::ColumnStatistics} + STATS_OBJ => {:type => ::Thrift::Types::STRUCT, :name => 'stats_obj', :class => ::ColumnStatistics}, + VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList'} } def struct_fields; FIELDS; end @@ -10568,9 +10602,11 @@ module ThriftHiveMetastore class 
Update_partition_column_statistics_args include ::Thrift::Struct, ::Thrift::Struct_Union STATS_OBJ = 1 + VALIDWRITEIDLIST = 2 FIELDS = { - STATS_OBJ => {:type => ::Thrift::Types::STRUCT, :name => 'stats_obj', :class => ::ColumnStatistics} + STATS_OBJ => {:type => ::Thrift::Types::STRUCT, :name => 'stats_obj', :class => ::ColumnStatistics}, + VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList'} } def struct_fields; FIELDS; end diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java index c55a680..c82ea95 100644 --- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java +++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java @@ -59,6 +59,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidTxnWriteIdList; import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.metastore.api.*; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; @@ -464,23 +465,23 @@ public void reconnect() throws MetaException { } @Override - public void alter_table(String dbname, String tbl_name, Table new_tbl) throws TException { - alter_table_with_environmentContext(dbname, tbl_name, new_tbl, null); + public void alter_table(String dbname, String tbl_name, Table new_tbl, String validWriteIdList) throws TException { + alter_table_with_environmentContext(dbname, tbl_name, new_tbl, null, validWriteIdList); } @Override public void alter_table(String defaultDatabaseName, String tblName, Table table, - boolean cascade) throws TException { + boolean cascade, String validWriteIdList) throws TException { EnvironmentContext environmentContext = new EnvironmentContext(); if (cascade) { environmentContext.putToProperties(StatsSetupConst.CASCADE, StatsSetupConst.TRUE); } - alter_table_with_environmentContext(defaultDatabaseName, tblName, table, environmentContext); + alter_table_with_environmentContext(defaultDatabaseName, tblName, table, environmentContext, validWriteIdList); } @Override public void alter_table_with_environmentContext(String dbname, String tbl_name, Table new_tbl, - EnvironmentContext envContext) throws InvalidOperationException, MetaException, TException { + EnvironmentContext envContext, String validWriteIdList) throws InvalidOperationException, MetaException, TException { HiveMetaHook hook = getHook(new_tbl); if (hook != null) { hook.preAlterTable(new_tbl, envContext); @@ -488,16 +489,7 @@ public void alter_table_with_environmentContext(String dbname, String tbl_name, AlterTableRequest req = new AlterTableRequest(dbname, tbl_name, new_tbl); req.setCatName(MetaStoreUtils.getDefaultCatalog(conf)); req.setEnvironmentContext(envContext); - client.alter_table_req(req); - } - - @Override - public void alter_table(String catName, String dbName, String tblName, Table newTable, - EnvironmentContext envContext) throws TException { - // This never used to call the hook. Why? There's overload madness in metastore... 
- AlterTableRequest req = new AlterTableRequest(dbName, tblName, newTable); - req.setCatName(catName); - req.setEnvironmentContext(envContext); + req.setValidWriteIdList(validWriteIdList); client.alter_table_req(req); } @@ -513,6 +505,7 @@ public void alter_table(String catName, String dbName, String tbl_name, Table ne req.setCatName(catName); req.setValidWriteIdList(validWriteIds); req.setEnvironmentContext(envContext); + req.setValidWriteIdList(validWriteIds); client.alter_table_req(req); } @@ -768,16 +761,16 @@ public void dropCatalog(String catName) throws TException { * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#add_partition(org.apache.hadoop.hive.metastore.api.Partition) */ @Override - public Partition add_partition(Partition new_part) throws TException { - return add_partition(new_part, null); + public Partition add_partition(Partition new_part, String txnWriteIds) throws TException { + return add_partition(new_part, null, txnWriteIds); } - public Partition add_partition(Partition new_part, EnvironmentContext envContext) + public Partition add_partition(Partition new_part, EnvironmentContext envContext, String txnWriteIds) throws TException { if (new_part != null && !new_part.isSetCatName()) { new_part.setCatName(getDefaultCatalog(conf)); } - Partition p = client.add_partition_with_environment_context(new_part, envContext); + Partition p = client.add_partition_with_environment_context(new_part, envContext, txnWriteIds); return deepCopy(p); } @@ -790,7 +783,7 @@ public Partition add_partition(Partition new_part, EnvironmentContext envContext * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#add_partitions(List) */ @Override - public int add_partitions(List new_parts) throws TException { + public int add_partitions(List new_parts, String txnWriteIds) throws TException { if (new_parts == null || new_parts.contains(null)) { throw new MetaException("Partitions cannot be null."); } @@ -798,12 +791,12 @@ public int add_partitions(List new_parts) throws TException { final String defaultCat = getDefaultCatalog(conf); new_parts.forEach(p -> p.setCatName(defaultCat)); } - return client.add_partitions(new_parts); + return client.add_partitions(new_parts, txnWriteIds); } @Override public List add_partitions( - List parts, boolean ifNotExists, boolean needResults) throws TException { + List parts, boolean ifNotExists, boolean needResults, String txnWriteIds) throws TException { if (parts == null || parts.contains(null)) { throw new MetaException("Partitions cannot be null."); } @@ -820,55 +813,56 @@ public int add_partitions(List new_parts) throws TException { part.getDbName(), part.getTableName(), parts, ifNotExists); req.setCatName(part.isSetCatName() ? part.getCatName() : getDefaultCatalog(conf)); req.setNeedResult(needResults); + req.setValidWriteIdList(txnWriteIds); AddPartitionsResult result = client.add_partitions_req(req); return needResults ? 
FilterUtils.filterPartitionsIfEnabled( isClientFilterEnabled, filterHook, result.getPartitions()) : null; } @Override - public int add_partitions_pspec(PartitionSpecProxy partitionSpec) throws TException { + public int add_partitions_pspec(PartitionSpecProxy partitionSpec, String txnWriteIds) throws TException { if (partitionSpec == null) { throw new MetaException("PartitionSpec cannot be null."); } if (partitionSpec.getCatName() == null) { partitionSpec.setCatName(getDefaultCatalog(conf)); } - return client.add_partitions_pspec(partitionSpec.toPartitionSpec()); + return client.add_partitions_pspec(partitionSpec.toPartitionSpec(), txnWriteIds); } @Override public Partition appendPartition(String db_name, String table_name, - List part_vals) throws TException { - return appendPartition(getDefaultCatalog(conf), db_name, table_name, part_vals); + List part_vals, String validWriteIdList) throws TException { + return appendPartition(getDefaultCatalog(conf), db_name, table_name, part_vals, validWriteIdList); } @Override - public Partition appendPartition(String dbName, String tableName, String partName) + public Partition appendPartition(String dbName, String tableName, String partName, String validWriteIdList) throws TException { - return appendPartition(getDefaultCatalog(conf), dbName, tableName, partName); + return appendPartition(getDefaultCatalog(conf), dbName, tableName, partName, validWriteIdList); } @Override public Partition appendPartition(String catName, String dbName, String tableName, - String name) throws TException { + String name, String validWriteIdList) throws TException { Partition p = client.append_partition_by_name(prependCatalogToDbName( - catName, dbName, conf), tableName, name); + catName, dbName, conf), tableName, name, validWriteIdList); return deepCopy(p); } @Override public Partition appendPartition(String catName, String dbName, String tableName, - List partVals) throws TException { + List partVals, String validWriteIdList) throws TException { Partition p = client.append_partition(prependCatalogToDbName( - catName, dbName, conf), tableName, partVals); + catName, dbName, conf), tableName, partVals, validWriteIdList); return deepCopy(p); } @Deprecated public Partition appendPartition(String dbName, String tableName, List partVals, - EnvironmentContext ec) throws TException { + EnvironmentContext ec, String validWriteIdList) throws TException { return client.append_partition_with_environment_context(prependCatalogToDbName(dbName, conf), - tableName, partVals, ec).deepCopy(); + tableName, partVals, ec, validWriteIdList).deepCopy(); } /** @@ -876,22 +870,23 @@ public Partition appendPartition(String dbName, String tableName, List p * @param partitionSpecs partitions specs of the parent partition to be exchanged * @param destDb the db of the destination table * @param destinationTableName the destination table name + * @param validWriteIdList writeIds snapshot * @return new partition after exchanging */ @Override public Partition exchange_partition(Map partitionSpecs, String sourceDb, String sourceTable, String destDb, - String destinationTableName) throws TException { + String destinationTableName, String validWriteIdList) throws TException { return exchange_partition(partitionSpecs, getDefaultCatalog(conf), sourceDb, sourceTable, - getDefaultCatalog(conf), destDb, destinationTableName); + getDefaultCatalog(conf), destDb, destinationTableName, validWriteIdList); } @Override public Partition exchange_partition(Map partitionSpecs, String sourceCat, String sourceDb, String 
sourceTable, String destCat, - String destDb, String destTableName) throws TException { + String destDb, String destTableName, String validWriteIdList) throws TException { return client.exchange_partition(partitionSpecs, prependCatalogToDbName(sourceCat, sourceDb, conf), - sourceTable, prependCatalogToDbName(destCat, destDb, conf), destTableName); + sourceTable, prependCatalogToDbName(destCat, destDb, conf), destTableName, validWriteIdList); } /** @@ -904,9 +899,9 @@ public Partition exchange_partition(Map partitionSpecs, String s @Override public List exchange_partitions(Map partitionSpecs, String sourceDb, String sourceTable, String destDb, - String destinationTableName) throws TException { + String destinationTableName, String validWriteIdList) throws TException { return exchange_partitions(partitionSpecs, getDefaultCatalog(conf), sourceDb, sourceTable, - getDefaultCatalog(conf), destDb, destinationTableName); + getDefaultCatalog(conf), destDb, destinationTableName, validWriteIdList); } @Override @@ -954,9 +949,9 @@ public AggrStats getAggrColStatsFor(String catName, String dbName, String tblNam @Override public List exchange_partitions(Map partitionSpecs, String sourceCat, String sourceDb, String sourceTable, String destCat, - String destDb, String destTableName) throws TException { + String destDb, String destTableName, String validWriteIdList) throws TException { return client.exchange_partitions(partitionSpecs, prependCatalogToDbName(sourceCat, sourceDb, conf), - sourceTable, prependCatalogToDbName(destCat, destDb, conf), destTableName); + sourceTable, prependCatalogToDbName(destCat, destDb, conf), destTableName, validWriteIdList); } @Override @@ -991,12 +986,12 @@ public void createDatabase(Database db) * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_table(org.apache.hadoop.hive.metastore.api.Table) */ @Override - public void createTable(Table tbl) throws AlreadyExistsException, + public void createTable(Table tbl, String txnWriteIds) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, TException { - createTable(tbl, null); + createTable(tbl, null, txnWriteIds); } - public void createTable(Table tbl, EnvironmentContext envContext) throws AlreadyExistsException, + public void createTable(Table tbl, EnvironmentContext envContext, String txnWriteIds) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, TException { if (!tbl.isSetCatName()) { tbl.setCatName(getDefaultCatalog(conf)); @@ -1008,7 +1003,7 @@ public void createTable(Table tbl, EnvironmentContext envContext) throws Already boolean success = false; try { // Subclasses can override this step (for example, for temporary tables) - create_table_with_environment_context(tbl, envContext); + create_table_with_environment_context(tbl, envContext, txnWriteIds); if (hook != null) { hook.commitCreateTable(tbl); } @@ -1031,7 +1026,8 @@ public void createTableWithConstraints(Table tbl, List uniqueConstraints, List notNullConstraints, List defaultConstraints, - List checkConstraints) + List checkConstraints, + String validWriteIdList) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, TException { @@ -1065,7 +1061,7 @@ public void createTableWithConstraints(Table tbl, try { // Subclasses can override this step (for example, for temporary tables) client.create_table_with_constraints(tbl, primaryKeys, foreignKeys, - uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints); 
+ uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints, validWriteIdList); if (hook != null) { hook.commitCreateTable(tbl); } @@ -2310,7 +2306,7 @@ public void alterDatabase(String catName, String dbName, Database newDb) throws /** {@inheritDoc} */ @Override - public boolean updateTableColumnStatistics(ColumnStatistics statsObj) throws TException { + public boolean updateTableColumnStatistics(ColumnStatistics statsObj, String validWriteIdList) throws TException { if (!statsObj.getStatsDesc().isSetCatName()) { statsObj.getStatsDesc().setCatName(getDefaultCatalog(conf)); } @@ -2318,11 +2314,12 @@ public boolean updateTableColumnStatistics(ColumnStatistics statsObj) throws TEx SetPartitionsStatsRequest req = new SetPartitionsStatsRequest(); req.addToColStats(statsObj); req.setNeedMerge(false); + req.setValidWriteIdList(validWriteIdList); return client.update_table_column_statistics_req(req).isResult(); } @Override - public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj) throws TException { + public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj, String validWriteIdList) throws TException { if (!statsObj.getStatsDesc().isSetCatName()) { statsObj.getStatsDesc().setCatName(getDefaultCatalog(conf)); } @@ -2330,6 +2327,7 @@ public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj) throws SetPartitionsStatsRequest req = new SetPartitionsStatsRequest(); req.addToColStats(statsObj); req.setNeedMerge(false); + req.setValidWriteIdList(validWriteIdList); return client.update_partition_column_statistics_req(req).isResult(); } @@ -2472,16 +2470,16 @@ public Partition getPartition(String catName, String dbName, String tblName, Str return deepCopy(FilterUtils.filterPartitionIfEnabled(isClientFilterEnabled, filterHook, p)); } - public Partition appendPartitionByName(String dbName, String tableName, String partName) + public Partition appendPartitionByName(String dbName, String tableName, String partName, String validWriteIdList) throws InvalidObjectException, AlreadyExistsException, MetaException, TException { - return appendPartitionByName(dbName, tableName, partName, null); + return appendPartitionByName(dbName, tableName, partName, null, validWriteIdList); } public Partition appendPartitionByName(String dbName, String tableName, String partName, - EnvironmentContext envContext) throws InvalidObjectException, AlreadyExistsException, + EnvironmentContext envContext, String validWriteIdList) throws InvalidObjectException, AlreadyExistsException, MetaException, TException { Partition p = client.append_partition_by_name_with_environment_context(dbName, tableName, - partName, envContext); + partName, envContext, validWriteIdList); return deepCopy(p); } @@ -2960,9 +2958,11 @@ public void replRollbackTxn(long srcTxnId, String replPolicy) throws NoSuchTxnEx } @Override - public void commitTxn(long txnid) + public void commitTxn(long txnid, String txnWriteIds) throws NoSuchTxnException, TxnAbortedException, TException { - client.commit_txn(new CommitTxnRequest(txnid)); + CommitTxnRequest rqst = new CommitTxnRequest(txnid); + rqst.setTxnWriteIds(txnWriteIds); + client.commit_txn(rqst); } @Override @@ -3356,10 +3356,10 @@ public GetAllFunctionsResponse getAllFunctions() throws TException { return client.get_all_functions(); } - protected void create_table_with_environment_context(Table tbl, EnvironmentContext envContext) + protected void create_table_with_environment_context(Table tbl, EnvironmentContext envContext, String validWriteIdList) 
throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, TException { - client.create_table_with_environment_context(tbl, envContext); + client.create_table_with_environment_context(tbl, envContext, validWriteIdList); } protected void drop_table_with_environment_context(String catName, String dbname, String name, diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java index 8999d55..6e818d8 100644 --- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java +++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java @@ -862,13 +862,14 @@ void updateCreationMetadata(String catName, String dbName, String tableName, Cre * @param dbName database name * @param tableName table name * @param partVals partition values + * @param validWriteIdList writeIds snapshot * @return the partition object * @throws InvalidObjectException no such table * @throws AlreadyExistsException a partition with these values already exists * @throws MetaException error accessing the RDBMS * @throws TException thrift transport error */ - Partition appendPartition(String dbName, String tableName, List partVals) + Partition appendPartition(String dbName, String tableName, List partVals, String validWriteIdList) throws InvalidObjectException, AlreadyExistsException, MetaException, TException; /** @@ -878,13 +879,14 @@ Partition appendPartition(String dbName, String tableName, List partVals * @param dbName database name * @param tableName table name * @param partVals partition values + * @param validWriteIdList writeIds snapshot * @return the partition object * @throws InvalidObjectException no such table * @throws AlreadyExistsException a partition with these values already exists * @throws MetaException error accessing the RDBMS * @throws TException thrift transport error */ - Partition appendPartition(String catName, String dbName, String tableName, List partVals) + Partition appendPartition(String catName, String dbName, String tableName, List partVals, String validWriteIdList) throws InvalidObjectException, AlreadyExistsException, MetaException, TException; /** @@ -899,7 +901,7 @@ Partition appendPartition(String catName, String dbName, String tableName, List< * @throws MetaException error accessing the RDBMS * @throws TException thrift transport error */ - Partition appendPartition(String dbName, String tableName, String name) + Partition appendPartition(String dbName, String tableName, String name, String validWriteIdList) throws InvalidObjectException, AlreadyExistsException, MetaException, TException; /** @@ -909,13 +911,14 @@ Partition appendPartition(String dbName, String tableName, String name) * @param dbName database name. * @param tableName table name. * @param name name of the partition, should be in the form partkey=partval. + * @param validWriteIdList writeIds snapshot * @return new partition object. * @throws InvalidObjectException No such table. * @throws AlreadyExistsException Partition of this name already exists. 
* @throws MetaException error accessing the RDBMS * @throws TException thrift transport error */ - Partition appendPartition(String catName, String dbName, String tableName, String name) + Partition appendPartition(String catName, String dbName, String tableName, String name, String validWriteIdList) throws InvalidObjectException, AlreadyExistsException, MetaException, TException; /** @@ -933,7 +936,7 @@ Partition appendPartition(String catName, String dbName, String tableName, Strin * @throws TException * Thrift exception */ - Partition add_partition(Partition partition) + Partition add_partition(Partition partition, String validWriteIdList) throws InvalidObjectException, AlreadyExistsException, MetaException, TException; /** @@ -950,7 +953,7 @@ Partition add_partition(Partition partition) * @throws TException * Thrift exception */ - int add_partitions(List partitions) + int add_partitions(List partitions, String validWriteIdList) throws InvalidObjectException, AlreadyExistsException, MetaException, TException; /** @@ -962,7 +965,7 @@ int add_partitions(List partitions) * @throws MetaException error accessing the RDBMS or storage. * @throws TException thrift transport error */ - int add_partitions_pspec(PartitionSpecProxy partitionSpec) + int add_partitions_pspec(PartitionSpecProxy partitionSpec, String validWriteIdList) throws InvalidObjectException, AlreadyExistsException, MetaException, TException; /** @@ -974,7 +977,7 @@ int add_partitions_pspec(PartitionSpecProxy partitionSpec) * @return the partitions that were added, or null if !needResults */ List add_partitions( - List partitions, boolean ifNotExists, boolean needResults) + List partitions, boolean ifNotExists, boolean needResults, String validWriteIdList) throws InvalidObjectException, AlreadyExistsException, MetaException, TException; /** @@ -1013,6 +1016,7 @@ Partition getPartition(String catName, String dbName, String tblName, List partitionSpecs, String sourceDb, String sourceTable, String destdb, - String destTableName) throws MetaException, NoSuchObjectException, + String destTableName, String validWriteIdList) throws MetaException, NoSuchObjectException, InvalidObjectException, TException; /** @@ -1033,6 +1037,7 @@ Partition exchange_partition(Map partitionSpecs, * @param destCat catalog of the destination table, for now must the same as sourceCat * @param destdb database of the destination table * @param destTableName name of the destination table + * @param validWriteIdList writeIds snapshot * @return partition object * @throws MetaException error accessing the RDBMS or storage * @throws NoSuchObjectException no such table, for either source or destination table @@ -1041,7 +1046,7 @@ Partition exchange_partition(Map partitionSpecs, */ Partition exchange_partition(Map partitionSpecs, String sourceCat, String sourceDb, String sourceTable, String destCat, String destdb, - String destTableName) throws MetaException, NoSuchObjectException, + String destTableName, String validWriteIdList) throws MetaException, NoSuchObjectException, InvalidObjectException, TException; /** @@ -1053,6 +1058,7 @@ Partition exchange_partition(Map partitionSpecs, String sourceCa * @param sourceTable name of the source table * @param destdb database of the destination table * @param destTableName name of the destination table + * @param validWriteIdList writeIds snapshot * @throws MetaException error accessing the RDBMS or storage * @throws NoSuchObjectException no such table, for either source or destination table * @throws InvalidObjectException 
error in partition specifications @@ -1061,7 +1067,7 @@ Partition exchange_partition(Map partitionSpecs, String sourceCa */ List exchange_partitions(Map partitionSpecs, String sourceDb, String sourceTable, String destdb, - String destTableName) throws MetaException, NoSuchObjectException, + String destTableName, String validWriteIdList) throws MetaException, NoSuchObjectException, InvalidObjectException, TException; /** @@ -1075,6 +1081,7 @@ Partition exchange_partition(Map partitionSpecs, String sourceCa * @param destCat catalog of the destination table, for now must the same as sourceCat * @param destdb database of the destination table * @param destTableName name of the destination table + * @param validWriteIdList writeIds snapshot * @throws MetaException error accessing the RDBMS or storage * @throws NoSuchObjectException no such table, for either source or destination table * @throws InvalidObjectException error in partition specifications @@ -1083,7 +1090,7 @@ Partition exchange_partition(Map partitionSpecs, String sourceCa */ List exchange_partitions(Map partitionSpecs, String sourceCat, String sourceDb, String sourceTable, String destCat, - String destdb, String destTableName) + String destdb, String destTableName, String validWriteIdList) throws MetaException, NoSuchObjectException, InvalidObjectException, TException; /** @@ -1649,6 +1656,7 @@ boolean isPartitionMarkedForEvent(String catName, String db_name, String tbl_nam /** * @param tbl + * @param validWriteIdList writeIds snapshot * @throws AlreadyExistsException * @throws InvalidObjectException * @throws MetaException @@ -1657,7 +1665,7 @@ boolean isPartitionMarkedForEvent(String catName, String db_name, String tbl_nam * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_table(org.apache.hadoop.hive.metastore.api.Table) */ - void createTable(Table tbl) throws AlreadyExistsException, + void createTable(Table tbl, String validWriteIdList) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, TException; /** @@ -1666,12 +1674,13 @@ void createTable(Table tbl) throws AlreadyExistsException, * @param tblName table name * @param table new table object, should be complete representation of the table, not just the * things you want to change. + * @param validWriteIdList writeIds snapshot * @throws InvalidOperationException something is wrong with the new table object or an * operation was attempted that is not allowed (such as changing partition columns). * @throws MetaException something went wrong, usually in the RDBMS * @throws TException general thrift exception */ - void alter_table(String databaseName, String tblName, Table table) + void alter_table(String databaseName, String tblName, Table table, String validWriteIdList) throws InvalidOperationException, MetaException, TException; /** @@ -1683,14 +1692,15 @@ void alter_table(String databaseName, String tblName, Table table) * @param tblName table name. * @param newTable new table object, should be complete representation of the table, not just the * things you want to change. + * @param validWriteIdList writeIds snapshot * @throws InvalidOperationException something is wrong with the new table object or an * operation was attempted that is not allowed (such as changing partition columns). 
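For reference, a minimal sketch (not part of this patch) of how a caller might build the writeIds snapshot string that the new validWriteIdList parameters expect. It reuses the same ValidReaderWriteIdList construction that appears later in this patch; the table name, high-water mark, and empty open/aborted sets are illustrative:

import java.util.BitSet;

import org.apache.hadoop.hive.common.ValidReaderWriteIdList;
import org.apache.hadoop.hive.common.ValidWriteIdList;

public class WriteIdSnapshotSketch {
  // Serializes a snapshot for "db.tbl" with high-water mark hwm and no open or aborted writeIds;
  // the resulting string is what would be passed as the validWriteIdList argument.
  static String snapshot(String dbName, String tableName, long hwm) {
    ValidWriteIdList writeIds =
        new ValidReaderWriteIdList(dbName + "." + tableName, new long[0], new BitSet(), hwm);
    return writeIds.toString();
  }
}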
* @throws MetaException something went wrong, usually in the RDBMS * @throws TException general thrift exception */ - default void alter_table(String catName, String dbName, String tblName, Table newTable) + default void alter_table(String catName, String dbName, String tblName, Table newTable, String validWriteIdList) throws InvalidOperationException, MetaException, TException { - alter_table(catName, dbName, tblName, newTable, null); + alter_table(catName, dbName, tblName, newTable, null, validWriteIdList); } /** @@ -1701,13 +1711,14 @@ default void alter_table(String catName, String dbName, String tblName, Table ne * @param newTable new table object, should be complete representation of the table, not just the * things you want to change. * @param envContext options for the alter. + * @param validWriteIdList writeIds snapshot * @throws InvalidOperationException something is wrong with the new table object or an * operation was attempted that is not allowed (such as changing partition columns). * @throws MetaException something went wrong, usually in the RDBMS * @throws TException general thrift exception */ void alter_table(String catName, String dbName, String tblName, Table newTable, - EnvironmentContext envContext) + EnvironmentContext envContext, String validWriteIdList) throws InvalidOperationException, MetaException, TException; /** @@ -1716,7 +1727,7 @@ void alter_table(String catName, String dbName, String tblName, Table newTable, */ @Deprecated void alter_table(String defaultDatabaseName, String tblName, Table table, - boolean cascade) throws InvalidOperationException, MetaException, TException; + boolean cascade, String validWriteIdList) throws InvalidOperationException, MetaException, TException; /** * Alter a table. @@ -1725,6 +1736,7 @@ void alter_table(String defaultDatabaseName, String tblName, Table table, * @param table new table object, should be complete representation of the table, not just the * things you want to change. * @param environmentContext options for the alter. + * @param validWriteIdList writeIds snapshot * @throws InvalidOperationException something is wrong with the new table object or an * operation was attempted that is not allowed (such as changing partition columns). * @throws MetaException something went wrong, usually in the RDBMS @@ -1732,12 +1744,9 @@ void alter_table(String defaultDatabaseName, String tblName, Table table, */ @Deprecated void alter_table_with_environmentContext(String databaseName, String tblName, Table table, - EnvironmentContext environmentContext) throws InvalidOperationException, MetaException, + EnvironmentContext environmentContext, String validWriteIdList) throws InvalidOperationException, MetaException, TException; - void alter_table(String catName, String databaseName, String tblName, Table table, - EnvironmentContext environmentContext, String validWriteIdList) - throws InvalidOperationException, MetaException, TException; /** * Create a new database. * @param db database object. 
If the catalog name is null it will be assumed to be @@ -2424,6 +2433,7 @@ String getConfigValue(String name, String defaultValue) /** * Write table level column statistics to persistent store * @param statsObj + * @param validWriteIdList writeIds snapshot * @return boolean indicating the status of the operation * @throws NoSuchObjectException * @throws InvalidObjectException @@ -2431,13 +2441,14 @@ String getConfigValue(String name, String defaultValue) * @throws TException * @throws InvalidInputException */ - boolean updateTableColumnStatistics(ColumnStatistics statsObj) + boolean updateTableColumnStatistics(ColumnStatistics statsObj, String validWriteIdList) throws NoSuchObjectException, InvalidObjectException, MetaException, TException, InvalidInputException; /** * Write partition level column statistics to persistent store * @param statsObj + * @param validWriteIdList writeIds snapshot * @return boolean indicating the status of the operation * @throws NoSuchObjectException * @throws InvalidObjectException @@ -2445,7 +2456,7 @@ boolean updateTableColumnStatistics(ColumnStatistics statsObj) * @throws TException * @throws InvalidInputException */ - boolean updatePartitionColumnStatistics(ColumnStatistics statsObj) + boolean updatePartitionColumnStatistics(ColumnStatistics statsObj, String validWriteIdList) throws NoSuchObjectException, InvalidObjectException, MetaException, TException, InvalidInputException; @@ -3019,7 +3030,7 @@ Function getFunction(String catName, String dbName, String funcName) * aborted. This can result from the transaction timing out. * @throws TException */ - void commitTxn(long txnid) + void commitTxn(long txnid, String writeIds) throws NoSuchTxnException, TxnAbortedException, TException; /** @@ -3548,7 +3559,8 @@ void createTableWithConstraints( List uniqueConstraints, List notNullConstraints, List defaultConstraints, - List checkConstraints) + List checkConstraints, + String validWriteIdList) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, TException; /** diff --git a/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift b/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift index 9dd2580..fae15a9 100644 --- a/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift +++ b/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift @@ -967,6 +967,8 @@ struct CommitTxnRequest { // Information to update the last repl id of table/partition along with commit txn (replication from 2.6 to 3.0) 5: optional ReplLastIdInfo replLastIdInfo, + // snapshot of table writeIds of the transaction + 6: optional string txnWriteIds, } struct ReplTblWriteIdStateRequest { @@ -1900,15 +1902,15 @@ service ThriftHiveMetastore extends fb303.FacebookService // sd.outputFormat (SequenceFileInputFormat (binary) or TextInputFormat) // sd.serdeInfo.serializationLib (SerDe class name eg org.apache.hadoop.hive.serde.simple_meta.MetadataTypedColumnsetSerDe // * See notes on DDL_TIME - void create_table(1:Table tbl) throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3, 4:NoSuchObjectException o4) + void create_table(1:Table tbl, 2:string validWriteIdList) throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3, 4:NoSuchObjectException o4) void create_table_with_environment_context(1:Table tbl, - 2:EnvironmentContext environment_context) + 2:EnvironmentContext environment_context, 3:string validWriteIdList) throws 
(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3, 4:NoSuchObjectException o4) void create_table_with_constraints(1:Table tbl, 2: list primaryKeys, 3: list foreignKeys, 4: list uniqueConstraints, 5: list notNullConstraints, - 6: list defaultConstraints, 7: list checkConstraints) + 6: list defaultConstraints, 7: list checkConstraints, 8: string validWriteIdList) throws (1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3, 4:NoSuchObjectException o4) @@ -1995,13 +1997,13 @@ service ThriftHiveMetastore extends fb303.FacebookService // alter table applies to only future partitions not for existing partitions // * See notes on DDL_TIME - void alter_table(1:string dbname, 2:string tbl_name, 3:Table new_tbl) + void alter_table(1:string dbname, 2:string tbl_name, 3:Table new_tbl, 4:string validWriteIdList) throws (1:InvalidOperationException o1, 2:MetaException o2) void alter_table_with_environment_context(1:string dbname, 2:string tbl_name, - 3:Table new_tbl, 4:EnvironmentContext environment_context) + 3:Table new_tbl, 4:EnvironmentContext environment_context, 5:string validWriteIdList) throws (1:InvalidOperationException o1, 2:MetaException o2) // alter table not only applies to future partitions but also cascade to existing partitions - void alter_table_with_cascade(1:string dbname, 2:string tbl_name, 3:Table new_tbl, 4:bool cascade) + void alter_table_with_cascade(1:string dbname, 2:string tbl_name, 3:Table new_tbl, 4:bool cascade, 5:string validWriteIdList) throws (1:InvalidOperationException o1, 2:MetaException o2) AlterTableResponse alter_table_req(1:AlterTableRequest req) throws (1:InvalidOperationException o1, 2:MetaException o2) @@ -2010,27 +2012,27 @@ service ThriftHiveMetastore extends fb303.FacebookService // the following applies to only tables that have partitions // * See notes on DDL_TIME - Partition add_partition(1:Partition new_part) + Partition add_partition(1:Partition new_part, 2:string validWriteIdList) throws(1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3) Partition add_partition_with_environment_context(1:Partition new_part, - 2:EnvironmentContext environment_context) + 2:EnvironmentContext environment_context,3:string validWriteIdList) throws (1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3) - i32 add_partitions(1:list new_parts) + i32 add_partitions(1:list new_parts, 2:string validWriteIdList) throws(1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3) - i32 add_partitions_pspec(1:list new_parts) + i32 add_partitions_pspec(1:list new_parts, 2:string validWriteIdList) throws(1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3) - Partition append_partition(1:string db_name, 2:string tbl_name, 3:list part_vals) + Partition append_partition(1:string db_name, 2:string tbl_name, 3:list part_vals, 4:string validWriteIdList) throws (1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3) AddPartitionsResult add_partitions_req(1:AddPartitionsRequest request) throws(1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3) Partition append_partition_with_environment_context(1:string db_name, 2:string tbl_name, - 3:list part_vals, 4:EnvironmentContext environment_context) + 3:list part_vals, 4:EnvironmentContext environment_context, 5:string validWriteIdList) throws (1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3) - Partition 
append_partition_by_name(1:string db_name, 2:string tbl_name, 3:string part_name) + Partition append_partition_by_name(1:string db_name, 2:string tbl_name, 3:string part_name, 4:string validWriteIdList) throws (1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3) Partition append_partition_by_name_with_environment_context(1:string db_name, 2:string tbl_name, - 3:string part_name, 4:EnvironmentContext environment_context) + 3:string part_name, 4:EnvironmentContext environment_context,5:string validWriteIdList) throws (1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3) bool drop_partition(1:string db_name, 2:string tbl_name, 3:list part_vals, 4:bool deleteData) throws(1:NoSuchObjectException o1, 2:MetaException o2) @@ -2048,12 +2050,12 @@ service ThriftHiveMetastore extends fb303.FacebookService Partition get_partition(1:string db_name, 2:string tbl_name, 3:list part_vals) throws(1:MetaException o1, 2:NoSuchObjectException o2) Partition exchange_partition(1:map partitionSpecs, 2:string source_db, - 3:string source_table_name, 4:string dest_db, 5:string dest_table_name) + 3:string source_table_name, 4:string dest_db, 5:string dest_table_name, 6:string validWriteIdList) throws(1:MetaException o1, 2:NoSuchObjectException o2, 3:InvalidObjectException o3, 4:InvalidInputException o4) list exchange_partitions(1:map partitionSpecs, 2:string source_db, - 3:string source_table_name, 4:string dest_db, 5:string dest_table_name) + 3:string source_table_name, 4:string dest_db, 5:string dest_table_name, 6:string validWriteIdList) throws(1:MetaException o1, 2:NoSuchObjectException o2, 3:InvalidObjectException o3, 4:InvalidInputException o4) @@ -2201,9 +2203,9 @@ service ThriftHiveMetastore extends fb303.FacebookService // exists for one or more columns, the existing statistics will be overwritten. 
The update APIs // validate that the dbName, tableName, partName, colName[] passed in as part of the ColumnStatistics // struct are valid, throws InvalidInputException/NoSuchObjectException if found to be invalid - bool update_table_column_statistics(1:ColumnStatistics stats_obj) throws (1:NoSuchObjectException o1, + bool update_table_column_statistics(1:ColumnStatistics stats_obj, 2:string validWriteIdList) throws (1:NoSuchObjectException o1, 2:InvalidObjectException o2, 3:MetaException o3, 4:InvalidInputException o4) - bool update_partition_column_statistics(1:ColumnStatistics stats_obj) throws (1:NoSuchObjectException o1, + bool update_partition_column_statistics(1:ColumnStatistics stats_obj, 2:string validWriteIdList) throws (1:NoSuchObjectException o1, 2:InvalidObjectException o2, 3:MetaException o3, 4:InvalidInputException o4) SetPartitionsStatsResponse update_table_column_statistics_req(1:SetPartitionsStatsRequest req) throws (1:NoSuchObjectException o1, diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java index d4aaa5c..7a504bd 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java @@ -385,7 +385,7 @@ public void alterTable(RawStore msdb, Warehouse wh, String catName, String dbnam if (transactionalListeners != null && !transactionalListeners.isEmpty()) { txnAlterTableEventResponses = MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventMessage.EventType.ALTER_TABLE, - new AlterTableEvent(oldt, newt, false, true, + new AlterTableEvent(oldt, newt, writeIdList, false, true, newt.getWriteId(), handler), environmentContext); } @@ -450,7 +450,7 @@ public void alterTable(RawStore msdb, Warehouse wh, String catName, String dbnam // make this call whether the event failed or succeeded. To make this behavior consistent, // this call is made for failed events also. 
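The alter events now carry the writeId snapshot alongside the old and new table objects, so a metastore listener can record which snapshot a DDL ran under. A hedged sketch follows, assuming the patch also adds a matching accessor to AlterTableEvent (the getValidWriteIdList() name below is hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.MetaStoreEventListener;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.events.AlterTableEvent;

public class SnapshotLoggingListener extends MetaStoreEventListener {
  public SnapshotLoggingListener(Configuration conf) {
    super(conf);
  }

  @Override
  public void onAlterTable(AlterTableEvent event) throws MetaException {
    // getValidWriteIdList() is a hypothetical accessor for the snapshot passed into the event.
    System.out.println("ALTER_TABLE on " + event.getNewTable().getTableName()
        + " under snapshot " + event.getValidWriteIdList());
  }
}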
MetaStoreListenerNotifier.notifyEvent(listeners, EventMessage.EventType.ALTER_TABLE, - new AlterTableEvent(oldt, newt, false, success, newt.getWriteId(), handler), + new AlterTableEvent(oldt, newt, writeIdList, false, success, newt.getWriteId(), handler), environmentContext, txnAlterTableEventResponses, msdb); } } @@ -533,7 +533,7 @@ public Partition alterPartition(RawStore msdb, Warehouse wh, String catName, Str MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventMessage.EventType.ALTER_PARTITION, new AlterPartitionEvent(oldPart, new_part, tbl, false, - true, new_part.getWriteId(), handler), + true, new_part.getWriteId(), validWriteIds, handler), environmentContext); @@ -682,7 +682,7 @@ public Partition alterPartition(RawStore msdb, Warehouse wh, String catName, Str MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventMessage.EventType.ALTER_PARTITION, new AlterPartitionEvent(oldPart, new_part, tbl, false, - true, new_part.getWriteId(), handler), + true, new_part.getWriteId(), validWriteIds, handler), environmentContext); } @@ -788,7 +788,7 @@ public Partition alterPartition(RawStore msdb, Warehouse wh, String catName, Str if (transactionalListeners != null && !transactionalListeners.isEmpty()) { MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventMessage.EventType.ALTER_PARTITION, - new AlterPartitionEvent(oldPart, newPart, tbl, false, true, newPart.getWriteId(), handler), + new AlterPartitionEvent(oldPart, newPart, tbl, false, true, newPart.getWriteId(), writeIdList, handler), environmentContext); } } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java index 1a694fb..847cf0b 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java @@ -1884,17 +1884,17 @@ public boolean drop_type(final String name) throws MetaException, NoSuchObjectEx } private void create_table_core(final RawStore ms, final Table tbl, - final EnvironmentContext envContext) + final EnvironmentContext envContext, String validWriteIdList) throws AlreadyExistsException, MetaException, InvalidObjectException, NoSuchObjectException, InvalidInputException { - create_table_core(ms, tbl, envContext, null, null, null, null, null, null); + create_table_core(ms, tbl, envContext, null, null, null, null, null, null, validWriteIdList); } private void create_table_core(final RawStore ms, final Table tbl, final EnvironmentContext envContext, List primaryKeys, List foreignKeys, List uniqueConstraints, List notNullConstraints, List defaultConstraints, - List checkConstraints) + List checkConstraints, String validWriteIdListString) throws AlreadyExistsException, MetaException, InvalidObjectException, NoSuchObjectException, InvalidInputException { @@ -2003,7 +2003,7 @@ private void create_table_core(final RawStore ms, final Table tbl, if (primaryKeys == null && foreignKeys == null && uniqueConstraints == null && notNullConstraints == null && defaultConstraints == null && checkConstraints == null) { - ms.createTable(tbl); + ms.createTable(tbl, validWriteIdListString); } else { // Check that constraints have catalog name properly set first if (primaryKeys != null && !primaryKeys.isEmpty() && !primaryKeys.get(0).isSetCatName()) { @@ -2091,7 +2091,7 @@ private void 
create_table_core(final RawStore ms, final Table tbl, if (!transactionalListeners.isEmpty()) { transactionalListenerResponses = MetaStoreListenerNotifier.notifyEvent(transactionalListeners, - EventType.CREATE_TABLE, new CreateTableEvent(tbl, true, this), envContext); + EventType.CREATE_TABLE, new CreateTableEvent(tbl, validWriteIdListString, true, this), envContext); if (primaryKeys != null && !primaryKeys.isEmpty()) { MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventType.ADD_PRIMARYKEY, new AddPrimaryKeyEvent(primaryKeys, true, this), envContext); @@ -2121,7 +2121,7 @@ private void create_table_core(final RawStore ms, final Table tbl, if (!listeners.isEmpty()) { MetaStoreListenerNotifier.notifyEvent(listeners, EventType.CREATE_TABLE, - new CreateTableEvent(tbl, success, this), envContext, transactionalListenerResponses, ms); + new CreateTableEvent(tbl, validWriteIdListString, success, this), envContext, transactionalListenerResponses, ms); if (primaryKeys != null && !primaryKeys.isEmpty()) { MetaStoreListenerNotifier.notifyEvent(listeners, EventType.ADD_PRIMARYKEY, new AddPrimaryKeyEvent(primaryKeys, success, this), envContext); @@ -2162,21 +2162,21 @@ private void create_table_core(final RawStore ms, final Table tbl, } @Override - public void create_table(final Table tbl) throws AlreadyExistsException, + public void create_table(final Table tbl, String validWriteIdList) throws AlreadyExistsException, MetaException, InvalidObjectException, InvalidInputException { - create_table_with_environment_context(tbl, null); + create_table_with_environment_context(tbl, null, validWriteIdList); } @Override public void create_table_with_environment_context(final Table tbl, - final EnvironmentContext envContext) + final EnvironmentContext envContext, String validWriteIdList) throws AlreadyExistsException, MetaException, InvalidObjectException, InvalidInputException { startFunction("create_table", ": " + tbl.toString()); boolean success = false; Exception ex = null; try { - create_table_core(getMS(), tbl, envContext); + create_table_core(getMS(), tbl, envContext, validWriteIdList); success = true; } catch (NoSuchObjectException e) { LOG.warn("create_table_with_environment_context got ", e); @@ -2199,7 +2199,8 @@ public void create_table_with_constraints(final Table tbl, List uniqueConstraints, List notNullConstraints, List defaultConstraints, - List checkConstraints) + List checkConstraints, + String validWriteIdList) throws AlreadyExistsException, MetaException, InvalidObjectException, InvalidInputException { startFunction("create_table", ": " + tbl.toString()); @@ -2207,7 +2208,7 @@ public void create_table_with_constraints(final Table tbl, Exception ex = null; try { create_table_core(getMS(), tbl, null, primaryKeys, foreignKeys, - uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints); + uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints, validWriteIdList); success = true; } catch (NoSuchObjectException e) { ex = e; @@ -2857,14 +2858,14 @@ private void alterPartitionForTruncate(RawStore ms, String catName, String dbNam MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventType.ALTER_PARTITION, new AlterPartitionEvent(partition, partition, table, true, true, - writeId, this)); + writeId, validWriteIds, this)); } if (!listeners.isEmpty()) { MetaStoreListenerNotifier.notifyEvent(listeners, EventType.ALTER_PARTITION, new AlterPartitionEvent(partition, partition, table, true, true, - writeId, this)); + writeId, validWriteIds, 
this)); } if (writeId > 0) { @@ -2890,14 +2891,14 @@ private void alterTableStatsForTruncate(RawStore ms, String catName, String dbNa if (!transactionalListeners.isEmpty()) { MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventType.ALTER_TABLE, - new AlterTableEvent(table, table, true, true, + new AlterTableEvent(table, table, validWriteIds, true, true, writeId, this)); } if (!listeners.isEmpty()) { MetaStoreListenerNotifier.notifyEvent(listeners, EventType.ALTER_TABLE, - new AlterTableEvent(table, table, true, true, + new AlterTableEvent(table, table, validWriteIds, true, true, writeId, this)); } @@ -3290,7 +3291,7 @@ private boolean doesClientHaveCapability(ClientCapabilities client, ClientCapabi private Partition append_partition_common(RawStore ms, String catName, String dbName, String tableName, List part_vals, - EnvironmentContext envContext) + EnvironmentContext envContext, String validWriteIdList) throws InvalidObjectException, AlreadyExistsException, MetaException, NoSuchObjectException { Partition part = new Partition(); @@ -3356,12 +3357,12 @@ private Partition append_partition_common(RawStore ms, String catName, String db MetaStoreServerUtils.updatePartitionStatsFast(part, tbl, wh, madeDir, false, envContext, true); } - if (ms.addPartition(part)) { + if (ms.addPartition(part, validWriteIdList)) { if (!transactionalListeners.isEmpty()) { transactionalListenerResponses = MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventType.ADD_PARTITION, - new AddPartitionEvent(tbl, part, true, this), + new AddPartitionEvent(tbl, validWriteIdList, part, true, this), envContext); } @@ -3378,7 +3379,7 @@ private Partition append_partition_common(RawStore ms, String catName, String db if (!listeners.isEmpty()) { MetaStoreListenerNotifier.notifyEvent(listeners, EventType.ADD_PARTITION, - new AddPartitionEvent(tbl, part, success, this), + new AddPartitionEvent(tbl, validWriteIdList, part, success, this), envContext, transactionalListenerResponses, ms); } @@ -3400,14 +3401,14 @@ private void firePreEvent(PreEventContext event) throws MetaException { @Override public Partition append_partition(final String dbName, final String tableName, - final List part_vals) throws InvalidObjectException, + final List part_vals, String validWriteIdList) throws InvalidObjectException, AlreadyExistsException, MetaException { - return append_partition_with_environment_context(dbName, tableName, part_vals, null); + return append_partition_with_environment_context(dbName, tableName, part_vals, null, validWriteIdList); } @Override public Partition append_partition_with_environment_context(final String dbName, - final String tableName, final List part_vals, final EnvironmentContext envContext) + final String tableName, final List part_vals, final EnvironmentContext envContext, String validWriteIdList) throws InvalidObjectException, AlreadyExistsException, MetaException { if (part_vals == null || part_vals.isEmpty()) { throw new MetaException("The partition values must not be null or empty."); @@ -3423,7 +3424,7 @@ public Partition append_partition_with_environment_context(final String dbName, Partition ret = null; Exception ex = null; try { - ret = append_partition_common(getMS(), parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, part_vals, envContext); + ret = append_partition_common(getMS(), parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, part_vals, envContext, validWriteIdList); } catch (MetaException | InvalidObjectException | AlreadyExistsException e) { ex = e; throw 
e; @@ -3487,7 +3488,7 @@ public boolean equals(Object obj) { } private List add_partitions_core(final RawStore ms, String catName, - String dbName, String tblName, List parts, final boolean ifNotExists) + String dbName, String tblName, List parts, final boolean ifNotExists, String validWriteIdList) throws TException { logAndAudit("add_partitions"); boolean success = false; @@ -3546,7 +3547,7 @@ public boolean equals(Object obj) { newParts.addAll(createPartitionFolders(partitionsToAdd, tbl, addedPartitions)); if (!newParts.isEmpty()) { - ms.addPartitions(catName, dbName, tblName, newParts); + ms.addPartitions(catName, dbName, tblName, newParts, validWriteIdList); } // Notification is generated for newly created partitions only. The subset of partitions @@ -3555,13 +3556,13 @@ public boolean equals(Object obj) { transactionalListenerResponses = MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventType.ADD_PARTITION, - new AddPartitionEvent(tbl, newParts, true, this)); + new AddPartitionEvent(tbl, validWriteIdList, newParts, true, this)); } if (!listeners.isEmpty()) { MetaStoreListenerNotifier.notifyEvent(listeners, EventType.ADD_PARTITION, - new AddPartitionEvent(tbl, newParts, true, this), + new AddPartitionEvent(tbl, validWriteIdList, newParts, true, this), null, transactionalListenerResponses, ms); @@ -3569,7 +3570,7 @@ public boolean equals(Object obj) { // The request has succeeded but we failed to add these partitions. MetaStoreListenerNotifier.notifyEvent(listeners, EventType.ADD_PARTITION, - new AddPartitionEvent(tbl, existingParts, false, this), + new AddPartitionEvent(tbl, validWriteIdList, existingParts, false, this), null, null, ms); } } @@ -3584,13 +3585,14 @@ public boolean equals(Object obj) { for (ColumnStatistics partColStats: partsColStats) { long writeId = partsWriteIds.get(cnt++); String validWriteIds = null; - if (writeId > 0) { - ValidWriteIdList validWriteIdList = - new ValidReaderWriteIdList(TableName.getDbTable(tbl.getDbName(), - tbl.getTableName()), - new long[0], new BitSet(), writeId); - validWriteIds = validWriteIdList.toString(); - } + // TODO =====to be reworked in HIVE-21637====== +// if (writeId > 0) { +// ValidWriteIdList validWriteIdList = +// new ValidReaderWriteIdList(TableName.getDbTable(tbl.getDbName(), +// tbl.getTableName()), +// new long[0], new BitSet(), writeId); +// validWriteIds = validWriteIdList.toString(); +// } updatePartitonColStatsInternal(tbl, partColStats, validWriteIds, writeId); } @@ -3603,7 +3605,7 @@ public boolean equals(Object obj) { if (!listeners.isEmpty()) { MetaStoreListenerNotifier.notifyEvent(listeners, EventType.ADD_PARTITION, - new AddPartitionEvent(tbl, parts, false, this), + new AddPartitionEvent(tbl, validWriteIdList, parts, false, this), null, null, ms); } } @@ -3801,7 +3803,8 @@ public AddPartitionsResult add_partitions_req(AddPartitionsRequest request) } }); List parts = add_partitions_core(getMS(), request.getCatName(), request.getDbName(), - request.getTblName(), request.getParts(), request.isIfNotExists()); + request.getTblName(), request.getParts(), request.isIfNotExists(), + request.isSetValidWriteIdList()?request.getValidWriteIdList():null); if (request.isNeedResult()) { result.setPartitions(parts); } @@ -3814,7 +3817,7 @@ public AddPartitionsResult add_partitions_req(AddPartitionsRequest request) } @Override - public int add_partitions(final List parts) throws MetaException, + public int add_partitions(final List parts, String validWriteIdList) throws MetaException, InvalidObjectException, 
AlreadyExistsException { startFunction("add_partition"); if (parts == null) { @@ -3835,7 +3838,7 @@ public int add_partitions(final List parts) throws MetaException, } } ret = add_partitions_core(getMS(), parts.get(0).getCatName(), parts.get(0).getDbName(), - parts.get(0).getTableName(), parts, false).size(); + parts.get(0).getTableName(), parts, false, validWriteIdList).size(); assert ret == parts.size(); } catch (MetaException | InvalidObjectException | AlreadyExistsException e) { ex = e; @@ -3851,7 +3854,7 @@ public int add_partitions(final List parts) throws MetaException, } @Override - public int add_partitions_pspec(final List partSpecs) + public int add_partitions_pspec(final List partSpecs, String validWriteIdList) throws TException { logAndAudit("add_partitions_pspec"); @@ -3870,12 +3873,12 @@ public int add_partitions_pspec(final List partSpecs) catName = partSpecs.get(0).getCatName(); } - return add_partitions_pspec_core(getMS(), catName, dbName, tableName, partSpecs, false); + return add_partitions_pspec_core(getMS(), catName, dbName, tableName, partSpecs, false, validWriteIdList); } private int add_partitions_pspec_core(RawStore ms, String catName, String dbName, String tblName, List partSpecs, - boolean ifNotExists) + boolean ifNotExists, String validWriteIdList) throws TException { boolean success = false; if (dbName == null || tblName == null) { @@ -3916,13 +3919,13 @@ private int add_partitions_pspec_core(RawStore ms, String catName, String dbName createPartitionFolders(partitionsToAdd, tbl, addedPartitions); - ms.addPartitions(catName, dbName, tblName, partitionSpecProxy, ifNotExists); + ms.addPartitions(catName, dbName, tblName, partitionSpecProxy, ifNotExists, validWriteIdList); if (!transactionalListeners.isEmpty()) { transactionalListenerResponses = MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventType.ADD_PARTITION, - new AddPartitionEvent(tbl, partitionSpecProxy, true, this)); + new AddPartitionEvent(tbl, validWriteIdList, partitionSpecProxy, true, this)); } success = ms.commitTransaction(); @@ -3936,7 +3939,7 @@ private int add_partitions_pspec_core(RawStore ms, String catName, String dbName if (!listeners.isEmpty()) { MetaStoreListenerNotifier.notifyEvent(listeners, EventType.ADD_PARTITION, - new AddPartitionEvent(tbl, partitionSpecProxy, true, this), + new AddPartitionEvent(tbl, validWriteIdList, partitionSpecProxy, true, this), null, transactionalListenerResponses, ms); } @@ -4074,7 +4077,7 @@ private void initializePartitionParameters(final Table tbl, } private Partition add_partition_core(final RawStore ms, - final Partition part, final EnvironmentContext envContext) + final Partition part, final EnvironmentContext envContext, String validWriteIdList) throws TException { boolean success = false; Table tbl = null; @@ -4101,7 +4104,7 @@ private Partition add_partition_core(final RawStore ms, try { initializeAddedPartition(tbl, part, madeDir, envContext); initializePartitionParameters(tbl, part); - success = ms.addPartition(part); + success = ms.addPartition(part, validWriteIdList); } finally { if (!success && madeDir) { wh.deleteDir(new Path(part.getSd().getLocation()), true, @@ -4116,7 +4119,7 @@ private Partition add_partition_core(final RawStore ms, transactionalListenerResponses = MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventType.ADD_PARTITION, - new AddPartitionEvent(tbl, Arrays.asList(part), true, this), + new AddPartitionEvent(tbl, validWriteIdList, Arrays.asList(part), true, this), envContext); } @@ -4132,7 +4135,7 
@@ private Partition add_partition_core(final RawStore ms, if (!listeners.isEmpty()) { MetaStoreListenerNotifier.notifyEvent(listeners, EventType.ADD_PARTITION, - new AddPartitionEvent(tbl, Arrays.asList(part), success, this), + new AddPartitionEvent(tbl, validWriteIdList, Arrays.asList(part), success, this), envContext, transactionalListenerResponses, ms); @@ -4142,14 +4145,14 @@ private Partition add_partition_core(final RawStore ms, } @Override - public Partition add_partition(final Partition part) + public Partition add_partition(final Partition part, String validWriteIdList) throws InvalidObjectException, AlreadyExistsException, MetaException { - return add_partition_with_environment_context(part, null); + return add_partition_with_environment_context(part, null, validWriteIdList); } @Override public Partition add_partition_with_environment_context( - final Partition part, EnvironmentContext envContext) + final Partition part, EnvironmentContext envContext, String validWriteIdList) throws InvalidObjectException, AlreadyExistsException, MetaException { if (part == null) { @@ -4160,7 +4163,7 @@ public Partition add_partition_with_environment_context( Partition ret = null; Exception ex = null; try { - ret = add_partition_core(getMS(), part, envContext); + ret = add_partition_core(getMS(), part, envContext, validWriteIdList); } catch (MetaException | InvalidObjectException | AlreadyExistsException e) { ex = e; throw e; @@ -4176,8 +4179,8 @@ public Partition add_partition_with_environment_context( @Override public Partition exchange_partition(Map partitionSpecs, String sourceDbName, String sourceTableName, String destDbName, - String destTableName) throws TException { - exchange_partitions(partitionSpecs, sourceDbName, sourceTableName, destDbName, destTableName); + String destTableName, String validWriteIdList) throws TException { + exchange_partitions(partitionSpecs, sourceDbName, sourceTableName, destDbName, destTableName, validWriteIdList); // Wouldn't it make more sense to return the first element of the list returned by the // previous call? return new Partition(); @@ -4186,7 +4189,7 @@ public Partition exchange_partition(Map partitionSpecs, @Override public List exchange_partitions(Map partitionSpecs, String sourceDbName, String sourceTableName, String destDbName, - String destTableName) throws TException { + String destTableName, String validWriteIdList) throws TException { String[] parsedDestDbName = parseDbName(destDbName, conf); String[] parsedSourceDbName = parseDbName(sourceDbName, conf); // No need to check catalog for null as parseDbName() will never return null for the catalog. 
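On the client side, the add_partition path above now accepts the snapshot as an extra argument. A short sketch (illustrative names, assumes a running metastore and an existing partitioned table) of adding a partition through the patched IMetaStoreClient API:

import java.util.Collections;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;

public class AddPartitionWithSnapshotSketch {
  public static void main(String[] args) throws Exception {
    IMetaStoreClient msc = new HiveMetaStoreClient(new HiveConf());
    Table table = msc.getTable("demo_db", "demo_tbl");           // illustrative table

    Partition part = new Partition();
    part.setDbName(table.getDbName());
    part.setTableName(table.getTableName());
    part.setValues(Collections.singletonList("2019-06-01"));     // single partition column assumed
    part.setSd(table.getSd().deepCopy());
    part.getSd().setLocation(table.getSd().getLocation() + "/ds=2019-06-01");

    // New in this patch: the DDL carries the transaction's writeId snapshot.
    String writeIds = "demo_db.demo_tbl:1:9223372036854775807::"; // illustrative serialized snapshot
    msc.add_partition(part, writeIds);
    msc.close();
  }
}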
@@ -4288,7 +4291,7 @@ public Partition exchange_partition(Map partitionSpecs, Path destPartitionPath = new Path(destinationTable.getSd().getLocation(), Warehouse.makePartName(destinationTable.getPartitionKeys(), partition.getValues())); destPartition.getSd().setLocation(destPartitionPath.toString()); - ms.addPartition(destPartition); + ms.addPartition(destPartition, validWriteIdList); destPartitions.add(destPartition); ms.dropPartition(parsedSourceDbName[CAT_NAME], partition.getDbName(), sourceTable.getTableName(), partition.getValues()); @@ -4312,7 +4315,7 @@ public Partition exchange_partition(Map partitionSpecs, transactionalListenerResponsesForAddPartition = MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventType.ADD_PARTITION, - new AddPartitionEvent(destinationTable, destPartitions, true, this)); + new AddPartitionEvent(destinationTable, validWriteIdList, destPartitions, true, this)); for (Partition partition : partitionsToExchange) { DropPartitionEvent dropPartitionEvent = @@ -4335,7 +4338,7 @@ public Partition exchange_partition(Map partitionSpecs, } if (!listeners.isEmpty()) { - AddPartitionEvent addPartitionEvent = new AddPartitionEvent(destinationTable, destPartitions, success, this); + AddPartitionEvent addPartitionEvent = new AddPartitionEvent(destinationTable, validWriteIdList, destPartitions, success, this); MetaStoreListenerNotifier.notifyEvent(listeners, EventType.ADD_PARTITION, addPartitionEvent, @@ -5080,7 +5083,7 @@ private void rename_partition(String catName, String db_name, String tbl_name, MetaStoreListenerNotifier.notifyEvent(listeners, EventType.ALTER_PARTITION, new AlterPartitionEvent(oldPart, new_part, table, false, - true, new_part.getWriteId(), this), + true, new_part.getWriteId(), validWriteIds, this), envContext); } } catch (InvalidObjectException e) { @@ -5181,7 +5184,7 @@ private void alter_partitions_with_environment_context(String catName, String db MetaStoreListenerNotifier.notifyEvent(listeners, EventType.ALTER_PARTITION, new AlterPartitionEvent(oldTmpPart, tmpPart, table, false, - true, writeId, this)); + true, writeId, writeIdList, this)); } } } catch (InvalidObjectException e) { @@ -5210,17 +5213,17 @@ public String getVersion() throws TException { @Override public void alter_table(final String dbname, final String name, - final Table newTable) + final Table newTable, String validWriteIdList) throws InvalidOperationException, MetaException { // Do not set an environment context. 
String[] parsedDbName = parseDbName(dbname, conf); alter_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], name, newTable, - null, null); + null, validWriteIdList); } @Override public void alter_table_with_cascade(final String dbname, final String name, - final Table newTable, final boolean cascade) + final Table newTable, final boolean cascade, String validWriteIdList) throws InvalidOperationException, MetaException { EnvironmentContext envContext = null; if (cascade) { @@ -5229,7 +5232,7 @@ public void alter_table_with_cascade(final String dbname, final String name, } String[] parsedDbName = parseDbName(dbname, conf); alter_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], name, newTable, - envContext, null); + envContext, validWriteIdList); } @Override @@ -5243,11 +5246,11 @@ public AlterTableResponse alter_table_req(AlterTableRequest req) @Override public void alter_table_with_environment_context(final String dbname, final String name, final Table newTable, - final EnvironmentContext envContext) + final EnvironmentContext envContext, String validWriteIdList) throws InvalidOperationException, MetaException { String[] parsedDbName = parseDbName(dbname, conf); alter_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], - name, newTable, envContext, null); + name, newTable, envContext, validWriteIdList); } private void alter_table_core(String catName, String dbname, String name, Table newTable, @@ -5715,13 +5718,13 @@ public Partition get_partition_by_name(final String db_name, final String tbl_na @Override public Partition append_partition_by_name(final String db_name, final String tbl_name, - final String part_name) throws TException { - return append_partition_by_name_with_environment_context(db_name, tbl_name, part_name, null); + final String part_name, String validWriteIdList) throws TException { + return append_partition_by_name_with_environment_context(db_name, tbl_name, part_name, null, validWriteIdList); } @Override public Partition append_partition_by_name_with_environment_context(final String db_name, - final String tbl_name, final String part_name, final EnvironmentContext env_context) + final String tbl_name, final String part_name, final EnvironmentContext env_context, String validWriteIdList) throws TException { String[] parsedDbName = parseDbName(db_name, conf); startFunction("append_partition_by_name", ": tbl=" @@ -5733,7 +5736,7 @@ public Partition append_partition_by_name_with_environment_context(final String try { RawStore ms = getMS(); List partVals = getPartValsFromName(ms, parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, part_name); - ret = append_partition_common(ms, parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, partVals, env_context); + ret = append_partition_common(ms, parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, partVals, env_context, validWriteIdList); } catch (InvalidObjectException | AlreadyExistsException | MetaException e) { ex = e; throw e; @@ -6046,9 +6049,9 @@ public PartitionsStatsResult get_partitions_statistics_req(PartitionsStatsReques } @Override - public boolean update_table_column_statistics(ColumnStatistics colStats) throws TException { + public boolean update_table_column_statistics(ColumnStatistics colStats, String validWriteIdList) throws TException { // Deprecated API, won't work for transactional tables - return updateTableColumnStatsInternal(colStats, null, -1); + return updateTableColumnStatsInternal(colStats, validWriteIdList, -1); } @Override @@ -6089,13 +6092,13 @@ private boolean 
updateTableColumnStatsInternal(ColumnStatistics colStats, if (transactionalListeners != null && !transactionalListeners.isEmpty()) { MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventType.UPDATE_TABLE_COLUMN_STAT, - new UpdateTableColumnStatEvent(colStats, tableObj, parameters, + new UpdateTableColumnStatEvent(colStats, tableObj, parameters, validWriteIds, writeId, this)); } if (!listeners.isEmpty()) { MetaStoreListenerNotifier.notifyEvent(listeners, EventType.UPDATE_TABLE_COLUMN_STAT, - new UpdateTableColumnStatEvent(colStats, tableObj, parameters, + new UpdateTableColumnStatEvent(colStats, tableObj, parameters, validWriteIds, writeId,this)); } } @@ -6155,13 +6158,13 @@ private boolean updatePartitonColStatsInternal(Table tbl, ColumnStatistics colSt if (transactionalListeners != null && !transactionalListeners.isEmpty()) { MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventType.UPDATE_PARTITION_COLUMN_STAT, - new UpdatePartitionColumnStatEvent(colStats, partVals, parameters, tbl, + new UpdatePartitionColumnStatEvent(colStats, partVals, parameters, validWriteIds, tbl, writeId, this)); } if (!listeners.isEmpty()) { MetaStoreListenerNotifier.notifyEvent(listeners, EventType.UPDATE_PARTITION_COLUMN_STAT, - new UpdatePartitionColumnStatEvent(colStats, partVals, parameters, tbl, + new UpdatePartitionColumnStatEvent(colStats, partVals, parameters, validWriteIds, tbl, writeId, this)); } } @@ -6177,9 +6180,9 @@ private boolean updatePartitonColStatsInternal(Table tbl, ColumnStatistics colSt } @Override - public boolean update_partition_column_statistics(ColumnStatistics colStats) throws TException { + public boolean update_partition_column_statistics(ColumnStatistics colStats, String validWriteIdList) throws TException { // Deprecated API. - return updatePartitonColStatsInternal(null, colStats, null, -1); + return updatePartitonColStatsInternal(null, colStats, validWriteIdList, -1); } @@ -7749,7 +7752,7 @@ public void commit_txn(CommitTxnRequest rqst) throws TException { getTxnHandler().commitTxn(rqst); if (listeners != null && !listeners.isEmpty()) { MetaStoreListenerNotifier.notifyEvent(listeners, EventType.COMMIT_TXN, - new CommitTxnEvent(rqst.getTxnid(), this)); + new CommitTxnEvent(rqst.getTxnid(), rqst.getTxnWriteIds(), this)); } } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/Msck.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/Msck.java index c2ba3b0..c983d62 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/Msck.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/Msck.java @@ -288,7 +288,8 @@ public int repair(MsckInfo msckInfo) { if (success) { try { LOG.info("txnId: {} succeeded. 
Committing..", txnId); - getMsc().commitTxn(txnId); + // TODO =====to be reworked in HIVE-21637====== + getMsc().commitTxn(txnId, null); } catch (Exception e) { LOG.warn("Error while committing txnId: {} for table: {}", txnId, qualifiedTableName, e); ret = 1; @@ -385,7 +386,8 @@ public Void execute(int size) throws MetastoreException { addMsgs.add(String.format(addMsgFormat, part.getPartitionName())); currentBatchSize--; } - metastoreClient.add_partitions(partsToAdd, true, false); + // TODO =====to be reworked in HIVE-21637====== + metastoreClient.add_partitions(partsToAdd, true, false, null); // if last batch is successful remove it from partsNotInMs batchWork.removeAll(lastBatch); repairOutput.addAll(addMsgs); diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java index 61019c6..caf8462 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java @@ -991,7 +991,8 @@ public boolean dropType(String typeName) { boolean success = false; try { openTransaction(); - createTable(tbl); + // TODO =====to be reworked in HIVE-21637====== + createTable(tbl, null); // Add constraints. // We need not do a deep retrieval of the Table Column Descriptor while persisting the // constraints since this transaction involving create table is not yet committed. @@ -1024,7 +1025,7 @@ public boolean dropType(String typeName) { } @Override - public void createTable(Table tbl) throws InvalidObjectException, MetaException { + public void createTable(Table tbl, String validWriteIdList) throws InvalidObjectException, MetaException { boolean commited = false; MTable mtbl = null; @@ -2114,7 +2115,7 @@ private CreationMetadata convertToCreationMetadata( } @Override - public boolean addPartitions(String catName, String dbName, String tblName, List parts) + public boolean addPartitions(String catName, String dbName, String tblName, List parts, String validWriteIdList) throws InvalidObjectException, MetaException { boolean success = false; openTransaction(); @@ -2182,7 +2183,7 @@ private boolean isValidPartition( @Override public boolean addPartitions(String catName, String dbName, String tblName, - PartitionSpecProxy partitionSpec, boolean ifNotExists) + PartitionSpecProxy partitionSpec, boolean ifNotExists, String validWriteIdList) throws InvalidObjectException, MetaException { boolean success = false; openTransaction(); @@ -2241,7 +2242,7 @@ public boolean addPartitions(String catName, String dbName, String tblName, } @Override - public boolean addPartition(Partition part) throws InvalidObjectException, + public boolean addPartition(Partition part, String validWriteIdList) throws InvalidObjectException, MetaException { boolean success = false; boolean commited = false; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java index 6a93e26..508be78 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java @@ -181,7 +181,7 @@ boolean alterDatabase(String catalogName, String dbname, Database db) 
boolean dropType(String typeName); - void createTable(Table tbl) throws InvalidObjectException, + void createTable(Table tbl, String validWriteIdList) throws InvalidObjectException, MetaException; /** @@ -225,11 +225,12 @@ Table getTable(String catalogName, String dbName, String tableName, /** * Add a partition. * @param part partition to add + * @param validWriteIdList writeIds snapshot * @return true if the partition was successfully added. * @throws InvalidObjectException the provided partition object is not valid. * @throws MetaException error writing to the RDBMS. */ - boolean addPartition(Partition part) + boolean addPartition(Partition part, String validWriteIdList) throws InvalidObjectException, MetaException; /** @@ -238,12 +239,13 @@ boolean addPartition(Partition part) * @param dbName database name. * @param tblName table name. * @param parts list of partitions to be added. + * @param validWriteIdList writeIds snapshot * @return true if the operation succeeded. * @throws InvalidObjectException never throws this AFAICT * @throws MetaException the partitions don't belong to the indicated table or error writing to * the RDBMS. */ - boolean addPartitions(String catName, String dbName, String tblName, List parts) + boolean addPartitions(String catName, String dbName, String tblName, List parts, String validWriteIdList) throws InvalidObjectException, MetaException; /** @@ -254,12 +256,13 @@ boolean addPartitions(String catName, String dbName, String tblName, List deletedCols = new ArrayList<>(); colStats = HiveAlterHandler.updateOrGetPartitionColumnStats(rawStore, catalogName, dbName, tableName, @@ -223,19 +230,19 @@ static private ColumnStatistics updateStatsForAlterPart(RawStore rawStore, Table } if (colStats != null) { sharedCache.alterPartitionAndStatsInCache(catalogName, dbName, tableName, part.getWriteId(), - part.getValues(), part.getParameters(), colStats.getStatsObj()); + part.getValues(), part.getParameters(), colStats.getStatsObj(), writeIds); } return colStats; } static private void updateStatsForAlterTable(RawStore rawStore, Table tblBefore, Table tblAfter, String catalogName, - String dbName, String tableName) throws Exception { + String dbName, String tableName, ValidWriteIdList writeIds) throws Exception { ColumnStatistics colStats = null; List deletedCols = new ArrayList<>(); if (tblBefore.isSetPartitionKeys()) { List parts = sharedCache.listCachedPartitions(catalogName, dbName, tableName, -1); for (Partition part : parts) { - colStats = updateStatsForAlterPart(rawStore, tblBefore, catalogName, dbName, tableName, part); + colStats = updateStatsForAlterPart(rawStore, tblBefore, catalogName, dbName, tableName, part, writeIds); } } @@ -251,14 +258,13 @@ static private void updateStatsForAlterTable(RawStore rawStore, Table tblBefore, } @VisibleForTesting - public static long updateUsingNotificationEvents(RawStore rawStore, long lastEventId) throws Exception { + public static long updateUsingNotificationEvents(RawStore rawStore, long lastEventId, Configuration conf) throws Exception { LOG.debug("updating cache using notification events starting from event id " + lastEventId); NotificationEventRequest rqst = new NotificationEventRequest(lastEventId); //Add the events which are not related to metadata update rqst.addToEventTypeSkipList(MessageBuilder.INSERT_EVENT); rqst.addToEventTypeSkipList(MessageBuilder.OPEN_TXN_EVENT); - rqst.addToEventTypeSkipList(MessageBuilder.COMMIT_TXN_EVENT); rqst.addToEventTypeSkipList(MessageBuilder.ABORT_TXN_EVENT); 
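Note on the new String parameters threaded through RawStore and the handlers above: the snapshot travels as a serialized string, so every consumer re-parses it with new ValidReaderWriteIdList(String). A minimal round-trip sketch, not part of the patch; the table name, watermark, and the assumption that toString() yields the parseable writeToString() form are illustrative only:

import java.util.BitSet;
import org.apache.hadoop.hive.common.ValidReaderWriteIdList;
import org.apache.hadoop.hive.common.ValidWriteIdList;

public class WriteIdSnapshotRoundTrip {
  public static void main(String[] args) {
    // Build a snapshot for db1.tbl1: high watermark 10, no open or aborted write ids.
    ValidWriteIdList snapshot =
        new ValidReaderWriteIdList("db1.tbl1", new long[0], new BitSet(), 10);

    // This serialized form is what the new validWriteIdList String parameters carry.
    String serialized = snapshot.toString();

    // Consumers re-hydrate it the same way the hunks above do.
    ValidWriteIdList parsed = new ValidReaderWriteIdList(serialized);
    System.out.println(parsed.getTableName() + " hwm=" + parsed.getHighWatermark());
  }
}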
rqst.addToEventTypeSkipList(MessageBuilder.ALLOC_WRITE_ID_EVENT); rqst.addToEventTypeSkipList(MessageBuilder.ACID_WRITE_EVENT); @@ -276,6 +282,8 @@ public static long updateUsingNotificationEvents(RawStore rawStore, long lastEve rqst.addToEventTypeSkipList(MessageBuilder.ALTER_SCHEMA_VERSION_EVENT); rqst.addToEventTypeSkipList(MessageBuilder.DROP_SCHEMA_VERSION_EVENT); + String defaultCat = getDefaultCatalog(conf); + Deadline.startTimer("getNextNotification"); NotificationEventResponse resp = rawStore.getNextNotification(rqst); Deadline.stopTimer(); @@ -308,15 +316,18 @@ public static long updateUsingNotificationEvents(RawStore rawStore, long lastEve case MessageBuilder.ADD_PARTITION_EVENT: AddPartitionMessage addPartMessage = deserializer.getAddPartitionMessage(message); sharedCache.addPartitionsToCache(catalogName, - dbName, tableName, addPartMessage.getPartitionObjs()); + dbName, tableName, addPartMessage.getPartitionObjs(), + addPartMessage.getWriteIds()!=null?new ValidReaderWriteIdList(addPartMessage.getWriteIds()):null); break; case MessageBuilder.ALTER_PARTITION_EVENT: AlterPartitionMessage alterPartitionMessage = deserializer.getAlterPartitionMessage(message); sharedCache.alterPartitionInCache(catalogName, dbName, tableName, - alterPartitionMessage.getPtnObjBefore().getValues(), alterPartitionMessage.getPtnObjAfter()); + alterPartitionMessage.getPtnObjBefore().getValues(), alterPartitionMessage.getPtnObjAfter(), + alterPartitionMessage.getWriteIds()!=null?new ValidReaderWriteIdList(alterPartitionMessage.getWriteIds()):null); //TODO : Use the stat object stored in the alter table message to update the stats in cache. updateStatsForAlterPart(rawStore, alterPartitionMessage.getTableObj(), - catalogName, dbName, tableName, alterPartitionMessage.getPtnObjAfter()); + catalogName, dbName, tableName, alterPartitionMessage.getPtnObjAfter(), + alterPartitionMessage.getWriteIds()!=null?new ValidReaderWriteIdList(alterPartitionMessage.getWriteIds()):null); break; case MessageBuilder.DROP_PARTITION_EVENT: DropPartitionMessage dropPartitionMessage = deserializer.getDropPartitionMessage(message); @@ -327,14 +338,16 @@ public static long updateUsingNotificationEvents(RawStore rawStore, long lastEve case MessageBuilder.CREATE_TABLE_EVENT: CreateTableMessage createTableMessage = deserializer.getCreateTableMessage(message); sharedCache.addTableToCache(catalogName, dbName, - tableName, createTableMessage.getTableObj()); + tableName, createTableMessage.getTableObj(), createTableMessage.getWriteIds()!=null? + new ValidReaderWriteIdList(createTableMessage.getWriteIds()):null, false); break; case MessageBuilder.ALTER_TABLE_EVENT: AlterTableMessage alterTableMessage = deserializer.getAlterTableMessage(message); sharedCache.alterTableInCache(catalogName, dbName, tableName, alterTableMessage.getTableObjAfter()); //TODO : Use the stat object stored in the alter table message to update the stats in cache. 
updateStatsForAlterTable(rawStore, alterTableMessage.getTableObjBefore(), alterTableMessage.getTableObjAfter(), - catalogName, dbName, tableName); + catalogName, dbName, tableName, + alterTableMessage.getWriteIds()!=null?new ValidReaderWriteIdList(alterTableMessage.getWriteIds()):null); break; case MessageBuilder.DROP_TABLE_EVENT: DropTableMessage dropTableMessage = deserializer.getDropTableMessage(message); @@ -386,13 +399,24 @@ public static long updateUsingNotificationEvents(RawStore rawStore, long lastEve UpdatePartitionColumnStatMessage msgPartUpdate = deserializer.getUpdatePartitionColumnStatMessage(message); sharedCache.alterPartitionAndStatsInCache(catalogName, dbName, tableName, msgPartUpdate.getWriteId(), msgPartUpdate.getPartVals(), msgPartUpdate.getParameters(), - msgPartUpdate.getColumnStatistics().getStatsObj()); + msgPartUpdate.getColumnStatistics().getStatsObj(), + msgPartUpdate.getWriteIds()!=null?new ValidReaderWriteIdList(msgPartUpdate.getWriteIds()):null); break; case MessageBuilder.DELETE_PART_COL_STAT_EVENT: DeletePartitionColumnStatMessage msgPart = deserializer.getDeletePartitionColumnStatMessage(message); sharedCache.removePartitionColStatsFromCache(catalogName, dbName, tableName, msgPart.getPartValues(), msgPart.getColName()); break; + case MessageBuilder.COMMIT_TXN_EVENT: + CommitTxnMessage msgCommit = deserializer.getCommitTxnMessage(message); + String txnWriteIdsString = msgCommit.getTxnWriteIds(); + ValidTxnWriteIdList txnWriteIds = new ValidTxnWriteIdList(txnWriteIdsString); + for (String tblName : txnWriteIds.getTableNames()) { + ValidWriteIdList writeIds = txnWriteIds.getTableValidWriteIdList(tblName); + String[] names = writeIds.getTableName().split("\\."); + sharedCache.markTableCommitted(defaultCat, names[0], names[1], writeIds); + } + break; default: LOG.error("Event is not supported for cache invalidation : " + event.getEventType()); } @@ -405,13 +429,14 @@ public static long updateUsingNotificationEvents(RawStore rawStore, long lastEve * This initializes the caches in SharedCache by getting the objects from Metastore DB via * ObjectStore and populating the respective caches */ - static void prewarm(RawStore rawStore) { + static void prewarm(RawStore rawStore, Configuration conf) { if (isCachePrewarmed.get()) { return; } long startTime = System.nanoTime(); LOG.info("Prewarming CachedStore"); long sleepTime = 100; + TxnStore txn = TxnUtils.getTxnStore(conf); while (!isCachePrewarmed.get()) { // Prevents throwing exceptions in our raw store calls since we're not using RawStoreProxy Deadline.registerIfNot(1000000); @@ -477,13 +502,21 @@ static void prewarm(RawStore rawStore) { continue; } Table table; + ValidWriteIdList writeIds; try { + ValidTxnList currentTxnList = TxnCommonUtils.createValidReadTxnList(txn.getOpenTxns(), 0); + GetValidWriteIdsRequest rqst = new GetValidWriteIdsRequest(Arrays.asList(TableName.getDbTable(dbName, tblName))); + rqst.setValidTxnList(currentTxnList.toString()); + writeIds = TxnCommonUtils.createValidReaderWriteIdList(txn.getValidWriteIds(rqst).getTblValidWriteIds().get(0)); table = rawStore.getTable(catName, dbName, tblName); } catch (MetaException e) { LOG.debug(ExceptionUtils.getStackTrace(e)); // It is possible the table is deleted during fetching tables of the database, // in that case, continue with the next table continue; + } catch (NoSuchTxnException e) { + LOG.warn("Cannot find transaction", e); + continue; } List colNames = MetaStoreUtils.getColumnNamesForTable(table); try { @@ -535,8 +568,8 @@ static void 
prewarm(RawStore rawStore) { Deadline.stopTimer(); } // If the table could not cached due to memory limit, stop prewarm - boolean isSuccess = sharedCache.populateTableInCache(table, tableColStats, partitions, partitionColStats, - aggrStatsAllPartitions, aggrStatsAllButDefaultPartition); + boolean isSuccess = sharedCache.populateTableInCache(table, tableColStats, partitions, + partitionColStats, aggrStatsAllPartitions, aggrStatsAllButDefaultPartition, writeIds, false); if (isSuccess) { LOG.trace("Cached Database: {}'s Table: {}.", dbName, tblName); } else { @@ -692,9 +725,11 @@ static void setCacheRefreshPeriod(long time) { static class CacheUpdateMasterWork implements Runnable { private boolean shouldRunPrewarm = true; private final RawStore rawStore; + private Configuration conf; CacheUpdateMasterWork(Configuration conf, boolean shouldRunPrewarm) { + this.conf = conf; this.shouldRunPrewarm = shouldRunPrewarm; String rawStoreClassName = MetastoreConf.getVar(conf, ConfVars.CACHED_RAW_STORE_IMPL, ObjectStore.class.getName()); @@ -713,7 +748,7 @@ public void run() { if (!shouldRunPrewarm) { if (canUseEvents) { try { - triggerUpdateUsingEvent(rawStore); + triggerUpdateUsingEvent(rawStore, conf); } catch (Exception e) { LOG.error("failed to update cache using events ", e); } @@ -727,7 +762,7 @@ public void run() { } } else { try { - triggerPreWarm(rawStore); + triggerPreWarm(rawStore, conf); shouldRunPrewarm = false; } catch (Exception e) { LOG.error("Prewarm failure", e); @@ -737,7 +772,7 @@ public void run() { } void update() { - Deadline.registerIfNot(1000000); +/** Deadline.registerIfNot(1000000); LOG.debug("CachedStore: updating cached objects. Shared cache has been update {} times so far.", sharedCache.getUpdateCount()); try { @@ -776,9 +811,10 @@ void update() { sharedCache.getUpdateCount()); } catch (MetaException e) { LOG.error("Updating CachedStore: error happen when refresh; skipping this iteration", e); - } + }**/ } + /** private void updateDatabases(RawStore rawStore, String catName, List dbNames) { LOG.debug("CachedStore: updating cached database objects for catalog: {}", catName); boolean success = false; @@ -894,7 +930,7 @@ private void updateTablePartitionColStats(RawStore rawStore, String catName, Str Deadline.stopTimer(); // Also save partitions for consistency as they have the stats state. for (Partition part : parts) { - sharedCache.alterPartitionInCache(catName, dbName, tblName, part.getValues(), part); + sharedCache.alterPartitionInCache(catName, dbName, tblName, part.getValues(), part, writeIds); } } committed = rawStore.commitTransaction(); @@ -951,7 +987,7 @@ private static void updateTableAggregatePartitionColStats(RawStore rawStore, Str } catch (MetaException | NoSuchObjectException e) { LOG.info("Updating CachedStore: unable to read aggregate column stats of table: " + tblName, e); } - } + }**/ } @Override @@ -985,7 +1021,7 @@ public boolean commitTransaction() { // consistency in case there is only one metastore. if (canUseEvents) { try { - triggerUpdateUsingEvent(rawStore); + triggerUpdateUsingEvent(rawStore, conf); } catch (Exception e) { //TODO : Not sure how to handle it as the commit is already done in the object store. 
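The COMMIT_TXN_EVENT branch introduced earlier in updateUsingNotificationEvents is what finally flips cached entries to committed: the commit message carries a ValidTxnWriteIdList for the whole transaction, which is split into per-table write-id lists. A condensed sketch of that fan-out, assuming a markTableCommitted-style callback (the real code targets SharedCache, and getTableNames() is used here the same way the patch uses it):

import org.apache.hadoop.hive.common.ValidTxnWriteIdList;
import org.apache.hadoop.hive.common.ValidWriteIdList;

public class CommitTxnFanOut {
  /** Stand-in for SharedCache.markTableCommitted; hypothetical interface for illustration. */
  interface CommitSink {
    void markTableCommitted(String catalog, String db, String table, ValidWriteIdList writeIds);
  }

  static void onCommit(String txnWriteIdsString, String defaultCatalog, CommitSink sink) {
    ValidTxnWriteIdList txnWriteIds = new ValidTxnWriteIdList(txnWriteIdsString);
    for (String fullTableName : txnWriteIds.getTableNames()) {
      ValidWriteIdList writeIds = txnWriteIds.getTableValidWriteIdList(fullTableName);
      // Table names arrive as "db.table"; split the same way the patch does.
      String[] names = writeIds.getTableName().split("\\.");
      sink.markTableCommitted(defaultCatalog, names[0], names[1], writeIds);
    }
  }
}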
LOG.error("Failed to update cache", e); @@ -1155,8 +1191,8 @@ private void validateTableType(Table tbl) { } @Override - public void createTable(Table tbl) throws InvalidObjectException, MetaException { - rawStore.createTable(tbl); + public void createTable(Table tbl, String validWriteIdList) throws InvalidObjectException, MetaException { + rawStore.createTable(tbl, validWriteIdList); // in case of event based cache update, cache will be updated during commit. if (canUseEvents) { return; @@ -1168,7 +1204,8 @@ public void createTable(Table tbl) throws InvalidObjectException, MetaException return; } validateTableType(tbl); - sharedCache.addTableToCache(catName, dbName, tblName, tbl); + ValidWriteIdList writeIds = validWriteIdList!=null?new ValidReaderWriteIdList(validWriteIdList):null; + sharedCache.addTableToCache(catName, dbName, tblName, tbl, writeIds, true); } @Override @@ -1201,7 +1238,8 @@ public Table getTable(String catName, String dbName, String tblName, String vali if (!shouldCacheTable(catName, dbName, tblName) || (canUseEvents && rawStore.isActiveTransaction())) { return rawStore.getTable(catName, dbName, tblName, validWriteIds); } - Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName); + ValidWriteIdList writeIds = validWriteIds!=null?new ValidReaderWriteIdList(validWriteIds):null; + Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName, writeIds); if (tbl == null) { // This table is not yet loaded in cache @@ -1238,8 +1276,8 @@ public Table getTable(String catName, String dbName, String tblName, String vali } @Override - public boolean addPartition(Partition part) throws InvalidObjectException, MetaException { - boolean succ = rawStore.addPartition(part); + public boolean addPartition(Partition part, String validWriteIdList) throws InvalidObjectException, MetaException { + boolean succ = rawStore.addPartition(part, validWriteIdList); // in case of event based cache update, cache will be updated during commit. if (succ && !canUseEvents) { String dbName = normalizeIdentifier(part.getDbName()); @@ -1248,15 +1286,15 @@ public boolean addPartition(Partition part) throws InvalidObjectException, MetaE if (!shouldCacheTable(catName, dbName, tblName)) { return succ; } - sharedCache.addPartitionToCache(catName, dbName, tblName, part); + sharedCache.addPartitionToCache(catName, dbName, tblName, part, validWriteIdList!=null?new ValidReaderWriteIdList(validWriteIdList):null); } return succ; } @Override - public boolean addPartitions(String catName, String dbName, String tblName, List parts) + public boolean addPartitions(String catName, String dbName, String tblName, List parts, String validWriteIdList) throws InvalidObjectException, MetaException { - boolean succ = rawStore.addPartitions(catName, dbName, tblName, parts); + boolean succ = rawStore.addPartitions(catName, dbName, tblName, parts, validWriteIdList); // in case of event based cache update, cache will be updated during commit. 
if (succ && !canUseEvents) { catName = normalizeIdentifier(catName); @@ -1265,15 +1303,16 @@ public boolean addPartitions(String catName, String dbName, String tblName, List if (!shouldCacheTable(catName, dbName, tblName)) { return succ; } - sharedCache.addPartitionsToCache(catName, dbName, tblName, parts); + sharedCache.addPartitionsToCache(catName, dbName, tblName, parts, + validWriteIdList!=null?new ValidReaderWriteIdList(validWriteIdList):null); } return succ; } @Override public boolean addPartitions(String catName, String dbName, String tblName, PartitionSpecProxy partitionSpec, - boolean ifNotExists) throws InvalidObjectException, MetaException { - boolean succ = rawStore.addPartitions(catName, dbName, tblName, partitionSpec, ifNotExists); + boolean ifNotExists, String validWriteIdList) throws InvalidObjectException, MetaException { + boolean succ = rawStore.addPartitions(catName, dbName, tblName, partitionSpec, ifNotExists, validWriteIdList); // in case of event based cache update, cache will be updated during commit. if (succ && !canUseEvents) { catName = normalizeIdentifier(catName); @@ -1285,7 +1324,8 @@ public boolean addPartitions(String catName, String dbName, String tblName, Part PartitionSpecProxy.PartitionIterator iterator = partitionSpec.getPartitionIterator(); while (iterator.hasNext()) { Partition part = iterator.next(); - sharedCache.addPartitionToCache(catName, dbName, tblName, part); + sharedCache.addPartitionToCache(catName, dbName, tblName, part, + validWriteIdList!=null?new ValidReaderWriteIdList(validWriteIdList):null); } } return succ; @@ -1315,7 +1355,8 @@ public Partition getPartition(String catName, String dbName, String tblName, catName, dbName, tblName, part_vals, validWriteIds); } if (validWriteIds != null) { - Table table = sharedCache.getTableFromCache(catName, dbName, tblName); + // TODO =====to be reworked in HIVE-21637====== + Table table = sharedCache.getTableFromCache(catName, dbName, tblName, null); if (table == null) { // The table containing the partition is not yet loaded in cache return rawStore.getPartition( @@ -1338,7 +1379,8 @@ public boolean doesPartitionExist(String catName, String dbName, String tblName, if (!shouldCacheTable(catName, dbName, tblName) || (canUseEvents && rawStore.isActiveTransaction())) { return rawStore.doesPartitionExist(catName, dbName, tblName, partKeys, part_vals); } - Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName); + // TODO =====to be reworked in HIVE-21637====== + Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName, null); if (tbl == null) { // The table containing the partition is not yet loaded in cache return rawStore.doesPartitionExist(catName, dbName, tblName, partKeys, part_vals); @@ -1393,7 +1435,8 @@ public void dropPartitions(String catName, String dbName, String tblName, List tables = new ArrayList<>(); for (String tblName : tblNames) { tblName = normalizeIdentifier(tblName); - Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName); + // TODO =====to be reworked in HIVE-21637====== + Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName, null); if (tbl == null) { tbl = rawStore.getTable(catName, dbName, tblName); } @@ -1558,7 +1604,8 @@ public void updateCreationMetadata(String catName, String dbname, String tablena if (!shouldCacheTable(catName, dbName, tblName) || (canUseEvents && rawStore.isActiveTransaction())) { return rawStore.listPartitionNames(catName, dbName, tblName, max_parts); } - Table tbl = sharedCache.getTableFromCache(catName, 
dbName, tblName); + // TODO =====to be reworked in HIVE-21637====== + Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName, null); if (tbl == null) { // The table is not yet loaded in cache return rawStore.listPartitionNames(catName, dbName, tblName, max_parts); @@ -1582,9 +1629,9 @@ public PartitionValuesResponse listPartitionValues(String catName, String db_nam @Override public Partition alterPartition(String catName, String dbName, String tblName, - List partVals, Partition newPart, String validWriteIds) + List partVals, Partition newPart, String validWriteIdList) throws InvalidObjectException, MetaException { - newPart = rawStore.alterPartition(catName, dbName, tblName, partVals, newPart, validWriteIds); + newPart = rawStore.alterPartition(catName, dbName, tblName, partVals, newPart, validWriteIdList); // in case of event based cache update, cache will be updated during commit. if (canUseEvents) { return newPart; @@ -1595,17 +1642,18 @@ public Partition alterPartition(String catName, String dbName, String tblName, if (!shouldCacheTable(catName, dbName, tblName)) { return newPart; } - sharedCache.alterPartitionInCache(catName, dbName, tblName, partVals, newPart); + sharedCache.alterPartitionInCache(catName, dbName, tblName, partVals, newPart, + validWriteIdList!=null?new ValidReaderWriteIdList(validWriteIdList):null); return newPart; } @Override public List alterPartitions(String catName, String dbName, String tblName, List> partValsList, List newParts, - long writeId, String validWriteIds) + long writeId, String validWriteIdList) throws InvalidObjectException, MetaException { newParts = rawStore.alterPartitions( - catName, dbName, tblName, partValsList, newParts, writeId, validWriteIds); + catName, dbName, tblName, partValsList, newParts, writeId, validWriteIdList); // in case of event based cache update, cache will be updated during commit. 
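The same null-guarded conversion, validWriteIdList != null ? new ValidReaderWriteIdList(validWriteIdList) : null, is inlined at every write-path call site in these hunks. A tiny helper like the following (purely illustrative, not part of the patch) would keep the add/alter paths shorter:

import org.apache.hadoop.hive.common.ValidReaderWriteIdList;
import org.apache.hadoop.hive.common.ValidWriteIdList;

final class WriteIds {
  private WriteIds() {}

  /** Parses the serialized snapshot, tolerating the null that legacy callers still pass. */
  static ValidWriteIdList parseOrNull(String validWriteIdList) {
    return validWriteIdList != null ? new ValidReaderWriteIdList(validWriteIdList) : null;
  }
}

Usage would then read, for example, sharedCache.alterPartitionInCache(catName, dbName, tblName, partVals, newPart, WriteIds.parseOrNull(validWriteIdList)).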
if (canUseEvents) { return newParts; @@ -1616,7 +1664,8 @@ public Partition alterPartition(String catName, String dbName, String tblName, if (!shouldCacheTable(catName, dbName, tblName)) { return newParts; } - sharedCache.alterPartitionsInCache(catName, dbName, tblName, partValsList, newParts); + sharedCache.alterPartitionsInCache(catName, dbName, tblName, partValsList, newParts, + validWriteIdList!=null?new ValidReaderWriteIdList(validWriteIdList):null); return newParts; } @@ -1665,7 +1714,8 @@ public boolean getPartitionsByExpr(String catName, String dbName, String tblName return rawStore.getPartitionsByExpr(catName, dbName, tblName, expr, defaultPartitionName, maxParts, result); } List partNames = new LinkedList<>(); - Table table = sharedCache.getTableFromCache(catName, dbName, tblName); + // TODO =====to be reworked in HIVE-21637====== + Table table = sharedCache.getTableFromCache(catName, dbName, tblName, null); if (table == null) { // The table is not yet loaded in cache return rawStore.getPartitionsByExpr(catName, dbName, tblName, expr, defaultPartitionName, maxParts, result); @@ -1697,7 +1747,8 @@ public int getNumPartitionsByExpr(String catName, String dbName, String tblName, } String defaultPartName = MetastoreConf.getVar(getConf(), ConfVars.DEFAULTPARTITIONNAME); List partNames = new LinkedList<>(); - Table table = sharedCache.getTableFromCache(catName, dbName, tblName); + // TODO =====to be reworked in HIVE-21637====== + Table table = sharedCache.getTableFromCache(catName, dbName, tblName, null); if (table == null) { // The table is not yet loaded in cache return rawStore.getNumPartitionsByExpr(catName, dbName, tblName, expr); @@ -1729,7 +1780,8 @@ public int getNumPartitionsByExpr(String catName, String dbName, String tblName, if (!shouldCacheTable(catName, dbName, tblName) || (canUseEvents && rawStore.isActiveTransaction())) { return rawStore.getPartitionsByNames(catName, dbName, tblName, partNames); } - Table table = sharedCache.getTableFromCache(catName, dbName, tblName); + // TODO =====to be reworked in HIVE-21637====== + Table table = sharedCache.getTableFromCache(catName, dbName, tblName, null); if (table == null) { // The table is not yet loaded in cache return rawStore.getPartitionsByNames(catName, dbName, tblName, partNames); @@ -1916,7 +1968,8 @@ public Partition getPartitionWithAuth(String catName, String dbName, String tblN if (!shouldCacheTable(catName, dbName, tblName) || (canUseEvents && rawStore.isActiveTransaction())) { return rawStore.getPartitionWithAuth(catName, dbName, tblName, partVals, userName, groupNames); } - Table table = sharedCache.getTableFromCache(catName, dbName, tblName); + // TODO =====to be reworked in HIVE-21637====== + Table table = sharedCache.getTableFromCache(catName, dbName, tblName, null); if (table == null) { // The table is not yet loaded in cache return rawStore.getPartitionWithAuth(catName, dbName, tblName, partVals, userName, groupNames); @@ -1943,7 +1996,8 @@ public Partition getPartitionWithAuth(String catName, String dbName, String tblN if (!shouldCacheTable(catName, dbName, tblName) || (canUseEvents && rawStore.isActiveTransaction())) { return rawStore.getPartitionsWithAuth(catName, dbName, tblName, maxParts, userName, groupNames); } - Table table = sharedCache.getTableFromCache(catName, dbName, tblName); + // TODO =====to be reworked in HIVE-21637====== + Table table = sharedCache.getTableFromCache(catName, dbName, tblName, null); if (table == null) { // The table is not yet loaded in cache return 
rawStore.getPartitionsWithAuth(catName, dbName, tblName, maxParts, userName, groupNames); @@ -1972,7 +2026,8 @@ public Partition getPartitionWithAuth(String catName, String dbName, String tblN if (!shouldCacheTable(catName, dbName, tblName) || (canUseEvents && rawStore.isActiveTransaction())) { return rawStore.listPartitionNamesPs(catName, dbName, tblName, partSpecs, maxParts); } - Table table = sharedCache.getTableFromCache(catName, dbName, tblName); + // TODO =====to be reworked in HIVE-21637====== + Table table = sharedCache.getTableFromCache(catName, dbName, tblName, null); if (table == null) { // The table is not yet loaded in cache return rawStore.listPartitionNamesPs(catName, dbName, tblName, partSpecs, maxParts); @@ -2001,7 +2056,8 @@ public Partition getPartitionWithAuth(String catName, String dbName, String tblN if (!shouldCacheTable(catName, dbName, tblName) || (canUseEvents && rawStore.isActiveTransaction())) { return rawStore.listPartitionsPsWithAuth(catName, dbName, tblName, partSpecs, maxParts, userName, groupNames); } - Table table = sharedCache.getTableFromCache(catName, dbName, tblName); + // TODO =====to be reworked in HIVE-21637====== + Table table = sharedCache.getTableFromCache(catName, dbName, tblName, null); if (table == null) { // The table is not yet loaded in cache return rawStore.listPartitionsPsWithAuth(catName, dbName, tblName, partSpecs, maxParts, userName, groupNames); @@ -2090,7 +2146,8 @@ private static void updateTableColumnsStatsInternal(Configuration conf, ColumnSt if (!shouldCacheTable(catName, dbName, tblName)) { return; } - Table table = sharedCache.getTableFromCache(catName, dbName, tblName); + // TODO =====to be reworked in HIVE-21637====== + Table table = sharedCache.getTableFromCache(catName, dbName, tblName, null); if (table == null) { // The table is not yet loaded in cache return; @@ -2153,7 +2210,8 @@ public ColumnStatistics getTableColumnStatistics( return rawStore.getTableColumnStatistics( catName, dbName, tblName, colNames, validWriteIds); } - Table table = sharedCache.getTableFromCache(catName, dbName, tblName); + // TODO =====to be reworked in HIVE-21637====== + Table table = sharedCache.getTableFromCache(catName, dbName, tblName, null); if (table == null) { // The table is not yet loaded in cache return rawStore.getTableColumnStatistics( @@ -2189,10 +2247,10 @@ public boolean deleteTableColumnStatistics(String catName, String dbName, String @Override public Map updatePartitionColumnStatistics(ColumnStatistics colStats, - List partVals, String validWriteIds, long writeId) + List partVals, String validWriteIdList, long writeId) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { Map newParams = rawStore.updatePartitionColumnStatistics( - colStats, partVals, validWriteIds, writeId); + colStats, partVals, validWriteIdList, writeId); // in case of event based cache update, cache is updated during commit txn if (newParams != null && !canUseEvents) { String catName = colStats.getStatsDesc().isSetCatName() ? 
@@ -2204,7 +2262,8 @@ public boolean deleteTableColumnStatistics(String catName, String dbName, String } Partition part = getPartition(catName, dbName, tblName, partVals); part.setParameters(newParams); - sharedCache.alterPartitionInCache(catName, dbName, tblName, partVals, part); + sharedCache.alterPartitionInCache(catName, dbName, tblName, partVals, part, + validWriteIdList!=null?new ValidReaderWriteIdList(validWriteIdList):null); sharedCache.updatePartitionColStatsInCache(catName, dbName, tblName, partVals, colStats.getStatsObj()); } return newParams; @@ -2275,7 +2334,8 @@ public AggrStats get_aggr_stats_for(String catName, String dbName, String tblNam return rawStore.get_aggr_stats_for( catName, dbName, tblName, partNames, colNames, writeIdList); } - Table table = sharedCache.getTableFromCache(catName, dbName, tblName); + // TODO =====to be reworked in HIVE-21637====== + Table table = sharedCache.getTableFromCache(catName, dbName, tblName, null); if (table == null) { // The table is not yet loaded in cache return rawStore.get_aggr_stats_for( @@ -2717,9 +2777,10 @@ public int getDatabaseCount() throws MetaException { if (!shouldCacheTable(catName, dbName, tblName)) { return constraintNames; } + // TODO =====to be reworked in HIVE-21637====== sharedCache.addTableToCache(StringUtils.normalizeIdentifier(tbl.getCatName()), StringUtils.normalizeIdentifier(tbl.getDbName()), - StringUtils.normalizeIdentifier(tbl.getTableName()), tbl); + StringUtils.normalizeIdentifier(tbl.getTableName()), tbl, null, true); return constraintNames; } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java index 05cf70b..da8d3ba 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java @@ -34,6 +34,7 @@ import java.util.TreeMap; import org.apache.hadoop.hive.common.ValidReaderWriteIdList; +import org.apache.hadoop.hive.common.ValidTxnWriteIdList; import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.Warehouse; @@ -57,6 +58,7 @@ import org.apache.hadoop.hive.metastore.utils.StringUtils; import org.apache.hadoop.hive.ql.util.IncrementalObjectSizeEstimator; import org.apache.hadoop.hive.ql.util.IncrementalObjectSizeEstimator.ObjectEstimator; +import org.apache.hive.common.util.TxnIdUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -158,12 +160,20 @@ private static ObjectEstimator getMemorySizeEstimator(Class clazz) { private Map> aggrColStatsCache = new ConcurrentHashMap>(); private AtomicBoolean isAggrPartitionColStatsCacheDirty = new AtomicBoolean(false); + private ValidWriteIdList txnWriteIds; + private boolean committed; - TableWrapper(Table t, byte[] sdHash, String location, Map parameters) { + TableWrapper(Table t, byte[] sdHash, String location, Map parameters, ValidWriteIdList txnWriteIds) { + this(t, sdHash, location, parameters, txnWriteIds, true); + } + + TableWrapper(Table t, byte[] sdHash, String location, Map parameters, ValidWriteIdList txnWriteIds, boolean committed) { this.t = t; this.sdHash = sdHash; this.location = location; this.parameters = parameters; + this.txnWriteIds = txnWriteIds; + this.committed = committed; } public Table getTable() { 
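The new txnWriteIds and committed fields on TableWrapper (and on PartitionWrapper below) exist so the cache can answer one question: is this entry at least as new as the reader's snapshot, and does it come from a committed transaction? A sketch of that predicate, using TxnIdUtils.compare the way the later getTableFromCache change does; the comparator-style semantics of compare are an assumption stated in the comment:

import org.apache.hadoop.hive.common.ValidWriteIdList;
import org.apache.hive.common.util.TxnIdUtils;

public class CacheFreshness {
  /**
   * True when a cached entry tagged with cachedWriteIds/committed may serve a reader
   * whose snapshot is requestedWriteIds. A null requested snapshot means the caller is
   * not transactional and accepts whatever is cached.
   */
  static boolean isUsable(ValidWriteIdList requestedWriteIds,
                          ValidWriteIdList cachedWriteIds, boolean committed) {
    if (requestedWriteIds == null) {
      return true;
    }
    if (cachedWriteIds == null || !committed) {
      return false;
    }
    // compare(a, b) > 0 is read here as "a is newer than b", mirroring the patch's usage.
    return TxnIdUtils.compare(requestedWriteIds, cachedWriteIds) <= 0;
  }
}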
@@ -202,10 +212,10 @@ boolean sameDatabase(String catName, String dbName) { return catName.equals(t.getCatName()) && dbName.equals(t.getDbName()); } - void cachePartition(Partition part, SharedCache sharedCache) { + void cachePartition(Partition part, SharedCache sharedCache, ValidWriteIdList writeIds) { try { tableLock.writeLock().lock(); - PartitionWrapper wrapper = makePartitionWrapper(part, sharedCache); + PartitionWrapper wrapper = makePartitionWrapper(part, sharedCache, writeIds); partitionCache.put(CacheUtils.buildPartitionCacheKey(part.getValues()), wrapper); isPartitionCacheDirty.set(true); // Invalidate cached aggregate stats @@ -217,11 +227,11 @@ void cachePartition(Partition part, SharedCache sharedCache) { } } - boolean cachePartitions(Iterable parts, SharedCache sharedCache, boolean fromPrewarm) { + boolean cachePartitions(Iterable parts, SharedCache sharedCache, boolean fromPrewarm, ValidWriteIdList writeIds) { try { tableLock.writeLock().lock(); for (Partition part : parts) { - PartitionWrapper ptnWrapper = makePartitionWrapper(part, sharedCache); + PartitionWrapper ptnWrapper = makePartitionWrapper(part, sharedCache, writeIds); if (maxCacheSizeInBytes > 0) { ObjectEstimator ptnWrapperSizeEstimator = getMemorySizeEstimator(PartitionWrapper.class); @@ -342,18 +352,18 @@ public void removePartitions(List> partVals, SharedCache sharedCach } } - public void alterPartition(List partVals, Partition newPart, SharedCache sharedCache) { + public void alterPartition(List partVals, Partition newPart, SharedCache sharedCache, ValidWriteIdList writeIds) { try { tableLock.writeLock().lock(); removePartition(partVals, sharedCache); - cachePartition(newPart, sharedCache); + cachePartition(newPart, sharedCache, writeIds); } finally { tableLock.writeLock().unlock(); } } public void alterPartitionAndStats(List partVals, SharedCache sharedCache, long writeId, - Map parameters, List colStatsObjs) { + Map parameters, List colStatsObjs, ValidWriteIdList writeIds) { try { tableLock.writeLock().lock(); PartitionWrapper partitionWrapper = partitionCache.get(CacheUtils.buildPartitionCacheKey(partVals)); @@ -365,7 +375,7 @@ public void alterPartitionAndStats(List partVals, SharedCache sharedCach newPart.setParameters(parameters); newPart.setWriteId(writeId); removePartition(partVals, sharedCache); - cachePartition(newPart, sharedCache); + cachePartition(newPart, sharedCache, writeIds); updatePartitionColStats(partVals, colStatsObjs); } finally { tableLock.writeLock().unlock(); @@ -373,20 +383,20 @@ public void alterPartitionAndStats(List partVals, SharedCache sharedCach } public void alterPartitions(List> partValsList, List newParts, - SharedCache sharedCache) { + SharedCache sharedCache, ValidWriteIdList writeIds) { try { tableLock.writeLock().lock(); for (int i = 0; i < partValsList.size(); i++) { List partVals = partValsList.get(i); Partition newPart = newParts.get(i); - alterPartition(partVals, newPart, sharedCache); + alterPartition(partVals, newPart, sharedCache, writeIds); } } finally { tableLock.writeLock().unlock(); } } - public void refreshPartitions(List partitions, SharedCache sharedCache) { + public void refreshPartitions(List partitions, SharedCache sharedCache, ValidWriteIdList writeIds) { Map newPartitionCache = new HashMap(); try { tableLock.writeLock().lock(); @@ -403,7 +413,7 @@ public void refreshPartitions(List partitions, SharedCache sharedCach sharedCache.decrSd(wrapper.getSdHash()); } } - wrapper = makePartitionWrapper(part, sharedCache); + wrapper = makePartitionWrapper(part, 
sharedCache, writeIds); newPartitionCache.put(key, wrapper); } partitionCache = newPartitionCache; @@ -854,7 +864,7 @@ private void updateTableObj(Table newTable, SharedCache sharedCache) { } } - private PartitionWrapper makePartitionWrapper(Partition part, SharedCache sharedCache) { + private PartitionWrapper makePartitionWrapper(Partition part, SharedCache sharedCache, ValidWriteIdList writeIds) { Partition partCopy = part.deepCopy(); PartitionWrapper wrapper; if (part.getSd() != null) { @@ -862,12 +872,24 @@ private PartitionWrapper makePartitionWrapper(Partition part, SharedCache shared StorageDescriptor sd = part.getSd(); sharedCache.increSd(sd, sdHash); partCopy.setSd(null); - wrapper = new PartitionWrapper(partCopy, sdHash, sd.getLocation(), sd.getParameters()); + wrapper = new PartitionWrapper(partCopy, sdHash, sd.getLocation(), sd.getParameters(), writeIds); } else { - wrapper = new PartitionWrapper(partCopy, null, null, null); + wrapper = new PartitionWrapper(partCopy, null, null, null, writeIds); } return wrapper; } + + ValidWriteIdList getWriteIds() { + return txnWriteIds; + } + + boolean isCommitted() { + return committed; + } + + void setCommitted(boolean committed) { + this.committed = committed; + } } static class PartitionWrapper { @@ -875,12 +897,20 @@ private PartitionWrapper makePartitionWrapper(Partition part, SharedCache shared String location; Map parameters; byte[] sdHash; + private ValidWriteIdList txnWriteIds; + private boolean committed; - PartitionWrapper(Partition p, byte[] sdHash, String location, Map parameters) { + PartitionWrapper(Partition p, byte[] sdHash, String location, Map parameters, ValidWriteIdList writeIds) { + this(p, sdHash, location, parameters, writeIds, true); + } + + PartitionWrapper(Partition p, byte[] sdHash, String location, Map parameters, ValidWriteIdList writeIds, boolean committed) { this.p = p; this.sdHash = sdHash; this.location = location; this.parameters = parameters; + this.txnWriteIds = writeIds; + this.committed = committed; } public Partition getPartition() { @@ -898,6 +928,18 @@ public String getLocation() { public Map getParameters() { return parameters; } + + ValidWriteIdList getWriteIds() { + return txnWriteIds; + } + + boolean isCommitted() { + return committed; + } + + void setCommitted(boolean committed) { + this.committed = committed; + } } static class StorageDescriptorWrapper { @@ -1175,7 +1217,8 @@ public int getCachedDatabaseCount() { public boolean populateTableInCache(Table table, ColumnStatistics tableColStats, List partitions, List partitionColStats, - AggrStats aggrStatsAllPartitions, AggrStats aggrStatsAllButDefaultPartition) { + AggrStats aggrStatsAllPartitions, AggrStats aggrStatsAllButDefaultPartition, + ValidWriteIdList writeIds, boolean committed) { String catName = StringUtils.normalizeIdentifier(table.getCatName()); String dbName = StringUtils.normalizeIdentifier(table.getDbName()); String tableName = StringUtils.normalizeIdentifier(table.getTableName()); @@ -1184,7 +1227,7 @@ public boolean populateTableInCache(Table table, ColumnStatistics tableColStats, if (tablesDeletedDuringPrewarm.contains(CacheUtils.buildTableKey(catName, dbName, tableName))) { return false; } - TableWrapper tblWrapper = createTableWrapper(catName, dbName, tableName, table); + TableWrapper tblWrapper = createTableWrapper(catName, dbName, tableName, table, writeIds, committed); if (maxCacheSizeInBytes > 0) { ObjectEstimator tblWrapperSizeEstimator = getMemorySizeEstimator(TableWrapper.class); long estimatedMemUsage = 
tblWrapperSizeEstimator.estimate(tblWrapper, sizeEstimators); @@ -1209,7 +1252,7 @@ public boolean populateTableInCache(Table table, ColumnStatistics tableColStats, } else { if (partitions != null) { // If the partitions were not added due to memory limit, return false - if (!tblWrapper.cachePartitions(partitions, this, true)) { + if (!tblWrapper.cachePartitions(partitions, this, true, writeIds)) { return false; } } @@ -1259,13 +1302,20 @@ public void completeTableCachePrewarm() { } } - public Table getTableFromCache(String catName, String dbName, String tableName) { + public Table getTableFromCache(String catName, String dbName, String tableName, ValidWriteIdList writeIds) { Table t = null; try { cacheLock.readLock().lock(); TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tableName)); if (tblWrapper != null) { + if (writeIds!=null) { + // If the request writeIds is newer than the cached version + if (tblWrapper.getWriteIds()==null || !tblWrapper.isCommitted() || + tblWrapper.getWriteIds()!=null && TxnIdUtils.compare(writeIds, tblWrapper.getWriteIds()) > 0) { + return null; + } + } t = CacheUtils.assemble(tblWrapper, this); } } finally { @@ -1274,20 +1324,37 @@ public Table getTableFromCache(String catName, String dbName, String tableName) return t; } - public TableWrapper addTableToCache(String catName, String dbName, String tblName, Table tbl) { + public void addTableToCache(String catName, String dbName, String tblName, Table tbl, ValidWriteIdList writeIds, boolean committed) { try { cacheLock.writeLock().lock(); - TableWrapper wrapper = createTableWrapper(catName, dbName, tblName, tbl); - tableCache.put(CacheUtils.buildTableKey(catName, dbName, tblName), wrapper); + String key = CacheUtils.buildTableKey(catName, dbName, tblName); + if (writeIds != null) { + TableWrapper wrapper = tableCache.get(key); + // skip if cached writeId is newer + if (wrapper!=null && wrapper.getWriteIds()!=null && TxnIdUtils.compare(writeIds, wrapper.getWriteIds()) < 0) { + return; + } + } + TableWrapper wrapper = createTableWrapper(catName, dbName, tblName, tbl, writeIds, committed); + tableCache.put(key, wrapper); isTableCacheDirty.set(true); - return wrapper; } finally { cacheLock.writeLock().unlock(); } } + public void markTableCommitted(String catName, String dbName, String tblName, ValidWriteIdList writeIds) { + String key = CacheUtils.buildTableKey(catName, dbName, tblName); + if (tableCache.containsKey(key)) { + TableWrapper wrapper = tableCache.get(key); + if (writeIds!=null && wrapper.getWriteIds()!=null && TxnIdUtils.compare(writeIds, wrapper.getWriteIds())==0) { + wrapper.setCommitted(true); + } + } + } + private TableWrapper createTableWrapper(String catName, String dbName, String tblName, - Table tbl) { + Table tbl, ValidWriteIdList txnWriteIds, boolean committed) { TableWrapper wrapper; Table tblCopy = tbl.deepCopy(); tblCopy.setCatName(normalizeIdentifier(catName)); @@ -1303,9 +1370,9 @@ private TableWrapper createTableWrapper(String catName, String dbName, String tb StorageDescriptor sd = tbl.getSd(); increSd(sd, sdHash); tblCopy.setSd(null); - wrapper = new TableWrapper(tblCopy, sdHash, sd.getLocation(), sd.getParameters()); + wrapper = new TableWrapper(tblCopy, sdHash, sd.getLocation(), sd.getParameters(), txnWriteIds, committed); } else { - wrapper = new TableWrapper(tblCopy, null, null, null); + wrapper = new TableWrapper(tblCopy, null, null, null, txnWriteIds, committed); } return wrapper; } @@ -1457,7 +1524,8 @@ public boolean 
refreshTablesInCache(String catName, String dbName, List t if (tblWrapper != null) { tblWrapper.updateTableObj(tbl, this); } else { - tblWrapper = createTableWrapper(catName, dbName, tblName, tbl); + // TODO =====to be reworked in HIVE-21637====== + tblWrapper = createTableWrapper(catName, dbName, tblName, tbl, null, true); } newCacheForDB.put(CacheUtils.buildTableKey(catName, dbName, tblName), tblWrapper); } @@ -1589,12 +1657,12 @@ public int getCachedTableCount() { return tableMetas; } - public void addPartitionToCache(String catName, String dbName, String tblName, Partition part) { + public void addPartitionToCache(String catName, String dbName, String tblName, Partition part, ValidWriteIdList writeIds) { try { cacheLock.readLock().lock(); TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { - tblWrapper.cachePartition(part, this); + tblWrapper.cachePartition(part, this, writeIds); } } finally { cacheLock.readLock().unlock(); @@ -1602,12 +1670,12 @@ public void addPartitionToCache(String catName, String dbName, String tblName, P } public void addPartitionsToCache(String catName, String dbName, String tblName, - Iterable parts) { + Iterable parts, ValidWriteIdList writeIds) { try { cacheLock.readLock().lock(); TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { - tblWrapper.cachePartitions(parts, this, false); + tblWrapper.cachePartitions(parts, this, false, writeIds); } } finally { cacheLock.readLock().unlock(); @@ -1688,12 +1756,12 @@ public void removePartitionsFromCache(String catName, String dbName, String tblN } public void alterPartitionInCache(String catName, String dbName, String tblName, - List partVals, Partition newPart) { + List partVals, Partition newPart, ValidWriteIdList writeIds) { try { cacheLock.readLock().lock(); TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { - tblWrapper.alterPartition(partVals, newPart, this); + tblWrapper.alterPartition(partVals, newPart, this, writeIds); } } finally { cacheLock.readLock().unlock(); @@ -1702,12 +1770,12 @@ public void alterPartitionInCache(String catName, String dbName, String tblName, public void alterPartitionAndStatsInCache(String catName, String dbName, String tblName, long writeId, List partVals, Map parameters, - List colStatsObjs) { + List colStatsObjs, ValidWriteIdList writeIds) { try { cacheLock.readLock().lock(); TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { - tblWrapper.alterPartitionAndStats(partVals, this, writeId, parameters, colStatsObjs); + tblWrapper.alterPartitionAndStats(partVals, this, writeId, parameters, colStatsObjs, writeIds); } } finally { cacheLock.readLock().unlock(); @@ -1715,12 +1783,12 @@ public void alterPartitionAndStatsInCache(String catName, String dbName, String } public void alterPartitionsInCache(String catName, String dbName, String tblName, - List> partValsList, List newParts) { + List> partValsList, List newParts, ValidWriteIdList writeIds) { try { cacheLock.readLock().lock(); TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { - tblWrapper.alterPartitions(partValsList, newParts, this); + tblWrapper.alterPartitions(partValsList, newParts, this, writeIds); } } finally { cacheLock.readLock().unlock(); @@ -1728,12 +1796,12 @@ public void 
alterPartitionsInCache(String catName, String dbName, String tblName } public void refreshPartitionsInCache(String catName, String dbName, String tblName, - List partitions) { + List partitions, ValidWriteIdList writeIds) { try { cacheLock.readLock().lock(); TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { - tblWrapper.refreshPartitions(partitions, this); + tblWrapper.refreshPartitions(partitions, this, writeIds); } } finally { cacheLock.readLock().unlock(); diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/PartitionBuilder.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/PartitionBuilder.java index d6ee673..7874b51 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/PartitionBuilder.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/PartitionBuilder.java @@ -113,7 +113,8 @@ public Partition build(Configuration conf) throws MetaException { public Partition addToTable(IMetaStoreClient client, Configuration conf) throws TException { Partition p = build(conf); - client.add_partition(p); + // TODO =====to be reworked in HIVE-21637====== + client.add_partition(p, null); return p; } } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/TableBuilder.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/TableBuilder.java index fed3dda..df94a70 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/TableBuilder.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/TableBuilder.java @@ -19,6 +19,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidTxnWriteIdList; import org.apache.hadoop.hive.metastore.IMetaStoreClient; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.Warehouse; @@ -215,9 +216,9 @@ public Table build(Configuration conf) throws MetaException { return t; } - public Table create(IMetaStoreClient client, Configuration conf) throws TException { + public Table create(IMetaStoreClient client, Configuration conf, String txnWriteIds) throws TException { Table t = build(conf); - client.createTable(t); + client.createTable(t, txnWriteIds); return t; } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/AddPartitionEvent.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/AddPartitionEvent.java index d4542d7..56375d7 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/AddPartitionEvent.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/AddPartitionEvent.java @@ -36,28 +36,31 @@ private final Table table; private final List partitions; private PartitionSpecProxy partitionSpecProxy; + private final String writeIds; - public AddPartitionEvent(Table table, List partitions, boolean status, + public AddPartitionEvent(Table table, String writeIds, List partitions, boolean status, IHMSHandler handler) { super(status, handler); this.table = 
table; this.partitions = partitions; this.partitionSpecProxy = null; + this.writeIds = writeIds; } - public AddPartitionEvent(Table table, Partition partition, boolean status, IHMSHandler handler) { - this(table, Arrays.asList(partition), status, handler); + public AddPartitionEvent(Table table, String writeIds, Partition partition, boolean status, IHMSHandler handler) { + this(table, writeIds, Arrays.asList(partition), status, handler); } /** * Alternative constructor to use PartitionSpec APIs. */ - public AddPartitionEvent(Table table, PartitionSpecProxy partitionSpec, boolean status, + public AddPartitionEvent(Table table, String writeIds, PartitionSpecProxy partitionSpec, boolean status, IHMSHandler handler) { super(status, handler); this.table = table; this.partitions = null; this.partitionSpecProxy = partitionSpec; + this.writeIds = writeIds; } /** @@ -81,4 +84,10 @@ public Table getTable() { } } + /*** + * @return writeIds as string + */ + public String getValidWriteIdList() { + return writeIds; + } } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/AlterPartitionEvent.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/AlterPartitionEvent.java index 499c6e4..94730d4 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/AlterPartitionEvent.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/AlterPartitionEvent.java @@ -33,15 +33,17 @@ private final Table table; private final boolean isTruncateOp; private Long writeId; + private final String writeIds; public AlterPartitionEvent(Partition oldPart, Partition newPart, Table table, boolean isTruncateOp, - boolean status, Long writeId, IHMSHandler handler) { + boolean status, Long writeId, String writeIds, IHMSHandler handler) { super(status, handler); this.oldPart = oldPart; this.newPart = newPart; this.table = table; this.isTruncateOp = isTruncateOp; this.writeId = writeId; + this.writeIds = writeIds; } /** @@ -78,4 +80,11 @@ public boolean getIsTruncateOp() { public Long getWriteId() { return writeId; } + + /*** + * @return writeIds as string + */ + public String getValidWriteIdList() { + return writeIds; + } } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/AlterTableEvent.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/AlterTableEvent.java index 541fbe4..8138a92 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/AlterTableEvent.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/AlterTableEvent.java @@ -32,14 +32,16 @@ private final Table oldTable; private final boolean isTruncateOp; private Long writeId; + private final String writeIds; - public AlterTableEvent (Table oldTable, Table newTable, boolean isTruncateOp, boolean status, + public AlterTableEvent (Table oldTable, Table newTable, String writeIds, boolean isTruncateOp, boolean status, Long writeId, IHMSHandler handler) { super (status, handler); this.oldTable = oldTable; this.newTable = newTable; this.isTruncateOp = isTruncateOp; this.writeId = writeId; + this.writeIds = writeIds; } /** @@ -66,4 +68,11 @@ public boolean getIsTruncateOp() { public Long getWriteId() { return writeId; } + + /*** + * @return writeIds as string + */ + public String getValidWriteIdList() 
{ + return writeIds; + } } \ No newline at end of file diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/CommitTxnEvent.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/CommitTxnEvent.java index ba382cd..291801e 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/CommitTxnEvent.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/CommitTxnEvent.java @@ -31,15 +31,17 @@ public class CommitTxnEvent extends ListenerEvent { private final Long txnId; + private final String txnWriteIds; /** * * @param transactionId Unique identification for the transaction just got committed. * @param handler handler that is firing the event */ - public CommitTxnEvent(Long transactionId, IHMSHandler handler) { + public CommitTxnEvent(Long transactionId, String txnWriteIds, IHMSHandler handler) { super(true, handler); this.txnId = transactionId; + this.txnWriteIds = txnWriteIds; } /** @@ -48,4 +50,8 @@ public CommitTxnEvent(Long transactionId, IHMSHandler handler) { public Long getTxnId() { return txnId; } + + public String getTxnWriteIds() { + return txnWriteIds; + } } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/CreateTableEvent.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/CreateTableEvent.java index 4f5e887..d041a58 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/CreateTableEvent.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/CreateTableEvent.java @@ -28,10 +28,12 @@ public class CreateTableEvent extends ListenerEvent { private final Table table; + private final String writeIds; - public CreateTableEvent (Table table, boolean status, IHMSHandler handler) { + public CreateTableEvent (Table table, String writeIds, boolean status, IHMSHandler handler) { super (status, handler); this.table = table; + this.writeIds = writeIds; } /** @@ -40,4 +42,11 @@ public CreateTableEvent (Table table, boolean status, IHMSHandler handler) { public Table getTable () { return table; } + + /*** + * @return writeIds as string + */ + public String getValidWriteIdList() { + return writeIds; + } } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/UpdatePartitionColumnStatEvent.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/UpdatePartitionColumnStatEvent.java index ba61a08..977465f 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/UpdatePartitionColumnStatEvent.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/UpdatePartitionColumnStatEvent.java @@ -39,6 +39,7 @@ private Map parameters; private List partVals; private Table tableObj; + private String writeIds; /** * @param statsObj Columns statistics Info. @@ -48,7 +49,7 @@ * @param writeId writeId for the query. 
* @param handler handler that is firing the event */ - public UpdatePartitionColumnStatEvent(ColumnStatistics statsObj, List partVals, Map parameters, + public UpdatePartitionColumnStatEvent(ColumnStatistics statsObj, List partVals, Map parameters, String writeIds, Table tableObj, long writeId, IHMSHandler handler) { super(true, handler); this.partColStats = statsObj; @@ -56,6 +57,7 @@ public UpdatePartitionColumnStatEvent(ColumnStatistics statsObj, List pa this.parameters = parameters; this.partVals = partVals; this.tableObj = tableObj; + this.writeIds = writeIds; } /** @@ -64,13 +66,14 @@ public UpdatePartitionColumnStatEvent(ColumnStatistics statsObj, List pa * @param handler handler that is firing the event */ public UpdatePartitionColumnStatEvent(ColumnStatistics statsObj, List partVals, - Table tableObj, IHMSHandler handler) { + Table tableObj, String writeIds, IHMSHandler handler) { super(true, handler); this.partColStats = statsObj; this.partVals = partVals; this.writeId = 0; this.parameters = null; this.tableObj = tableObj; + this.writeIds = writeIds; } public ColumnStatistics getPartColStats() { @@ -90,4 +93,8 @@ public long getWriteId() { } public Table getTableObj() { return tableObj; } + + public String getValidWriteIdList() { + return writeIds; + } } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/UpdateTableColumnStatEvent.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/UpdateTableColumnStatEvent.java index 71300ab..004ec62 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/UpdateTableColumnStatEvent.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/UpdateTableColumnStatEvent.java @@ -38,6 +38,7 @@ private long writeId; private Map parameters; private Table tableObj; + private String writeIds; /** * @param colStats Columns statistics Info. 
@@ -47,13 +48,14 @@
    * @param handler handler that is firing the event
    */
   public UpdateTableColumnStatEvent(ColumnStatistics colStats, Table tableObj,
-                                    Map<String, String> parameters,
+                                    Map<String, String> parameters, String writeIds,
                                     long writeId, IHMSHandler handler) {
     super(true, handler);
     this.colStats = colStats;
     this.writeId = writeId;
     this.parameters = parameters;
     this.tableObj = tableObj;
+    this.writeIds = writeIds;
   }
 
   /**
@@ -83,4 +85,8 @@ public long getWriteId() {
   public Table getTableObj() { return tableObj; }
+
+  public String getValidWriteIdList() {
+    return writeIds;
+  }
 }
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/AddPartitionMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/AddPartitionMessage.java
index 3262b52..a3a2cd2 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/AddPartitionMessage.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/AddPartitionMessage.java
@@ -65,4 +65,5 @@ public EventMessage checkValid() {
    */
   public abstract Iterable<PartitionFiles> getPartitionFilesIter();
 
+  public abstract String getWriteIds();
 }
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/AlterPartitionMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/AlterPartitionMessage.java
index a1ba01a..809eafd 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/AlterPartitionMessage.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/AlterPartitionMessage.java
@@ -67,5 +67,7 @@ public EventMessage checkValid() {
   }
 
   public abstract Long getWriteId();
+
+  public abstract String getWriteIds();
 }
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/AlterTableMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/AlterTableMessage.java
index bbc01c1..32185e8 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/AlterTableMessage.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/AlterTableMessage.java
@@ -57,4 +57,6 @@ public EventMessage checkValid() {
   }
 
   public abstract Long getWriteId();
+
+  public abstract String getWriteIds();
 }
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/CommitTxnMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/CommitTxnMessage.java
index 9733039..af00b82 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/CommitTxnMessage.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/CommitTxnMessage.java
@@ -39,6 +39,8 @@ protected CommitTxnMessage() {
    */
   public abstract Long getTxnId();
 
+  public abstract String getTxnWriteIds();
+
   public abstract List<Long> getWriteIds();
 
   public abstract List<String> getDatabases();
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/CreateTableMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/CreateTableMessage.java
index 49732ff..d3943a9 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/CreateTableMessage.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/CreateTableMessage.java
@@ -44,6 +44,8 @@ protected CreateTableMessage() {
    */
   public abstract Iterable<String> getFiles();
 
+  public abstract String getWriteIds();
+
   @Override
   public EventMessage checkValid() {
     if (getTable() == null)
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/MessageBuilder.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/MessageBuilder.java
index aa83da4..eaa6a31 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/MessageBuilder.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/MessageBuilder.java
@@ -178,14 +178,14 @@ public DropDatabaseMessage buildDropDatabaseMessage(Database db) {
     return new JSONDropDatabaseMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, db, now());
   }
 
-  public CreateTableMessage buildCreateTableMessage(Table table, Iterator<String> fileIter) {
-    return new JSONCreateTableMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, table, fileIter, now());
+  public CreateTableMessage buildCreateTableMessage(Table table, Iterator<String> fileIter, String writeIds) {
+    return new JSONCreateTableMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, table, fileIter, now(), writeIds);
   }
 
   public AlterTableMessage buildAlterTableMessage(Table before, Table after, boolean isTruncateOp,
-      Long writeId) {
+      Long writeId, String writeIds) {
     return new JSONAlterTableMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, before, after,
-        isTruncateOp, writeId, now());
+        isTruncateOp, writeId, now(), writeIds);
   }
 
   public DropTableMessage buildDropTableMessage(Table table) {
@@ -193,15 +193,15 @@ public DropTableMessage buildDropTableMessage(Table table) {
   }
 
   public AddPartitionMessage buildAddPartitionMessage(Table table,
-      Iterator<Partition> partitionsIterator, Iterator<PartitionFiles> partitionFileIter) {
+      Iterator<Partition> partitionsIterator, Iterator<PartitionFiles> partitionFileIter, String writeIds) {
     return new JSONAddPartitionMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, table,
-        partitionsIterator, partitionFileIter, now());
+        partitionsIterator, partitionFileIter, now(), writeIds);
   }
 
   public AlterPartitionMessage buildAlterPartitionMessage(Table table, Partition before,
-      Partition after, boolean isTruncateOp, Long writeId) {
+      Partition after, boolean isTruncateOp, Long writeId, String writeIds) {
     return new JSONAlterPartitionMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL,
-        table, before, after, isTruncateOp, writeId, now());
+        table, before, after, isTruncateOp, writeId, now(), writeIds);
   }
 
   public DropPartitionMessage buildDropPartitionMessage(Table table,
@@ -266,8 +266,8 @@ public OpenTxnMessage buildOpenTxnMessage(Long fromTxnId, Long toTxnId) {
     return new JSONOpenTxnMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, fromTxnId, toTxnId, now());
   }
 
-  public CommitTxnMessage buildCommitTxnMessage(Long txnId) {
-    return new JSONCommitTxnMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, txnId, now());
+  public CommitTxnMessage buildCommitTxnMessage(Long txnId, String txnWriteIds) {
+    return new JSONCommitTxnMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, txnId, txnWriteIds, now());
   }
 
   public
AbortTxnMessage buildAbortTxnMessage(Long txnId) { @@ -289,9 +289,10 @@ public AcidWriteMessage buildAcidWriteMessage(AcidWriteEvent acidWriteEvent, public JSONUpdateTableColumnStatMessage buildUpdateTableColumnStatMessage(ColumnStatistics colStats, Table tableObj, Map parameters, - long writeId) { + long writeId, + String writeIds) { return new JSONUpdateTableColumnStatMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, now(), - colStats, tableObj, parameters, writeId); + colStats, tableObj, parameters, writeId, writeIds); } public JSONDeleteTableColumnStatMessage buildDeleteTableColumnStatMessage(String dbName, String colName) { @@ -300,9 +301,9 @@ public JSONDeleteTableColumnStatMessage buildDeleteTableColumnStatMessage(String public JSONUpdatePartitionColumnStatMessage buildUpdatePartitionColumnStatMessage(ColumnStatistics colStats, List partVals, Map parameters, - Table tableObj, long writeId) { + Table tableObj, long writeId, String writeIds) { return new JSONUpdatePartitionColumnStatMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, now(), colStats, partVals, - parameters, tableObj, writeId); + parameters, tableObj, writeId, writeIds); } public JSONDeletePartitionColumnStatMessage buildDeletePartitionColumnStatMessage(String dbName, String colName, diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/UpdatePartitionColumnStatMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/UpdatePartitionColumnStatMessage.java index e92a0dc..f685bc4 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/UpdatePartitionColumnStatMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/UpdatePartitionColumnStatMessage.java @@ -41,4 +41,6 @@ protected UpdatePartitionColumnStatMessage() { public abstract List getPartVals(); public abstract Table getTableObject() throws Exception; + + public abstract String getWriteIds(); } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/UpdateTableColumnStatMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/UpdateTableColumnStatMessage.java index e3f049c..118faa7 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/UpdateTableColumnStatMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/UpdateTableColumnStatMessage.java @@ -38,4 +38,6 @@ protected UpdateTableColumnStatMessage() { public abstract Map getParameters(); public abstract Table getTableObject() throws Exception; + + public abstract String getWriteIds(); } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddPartitionMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddPartitionMessage.java index 6494cb8..202dd25 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddPartitionMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddPartitionMessage.java @@ -58,6 +58,9 @@ @JsonProperty List partitionFiles; + @JsonProperty + String writeIds; + /** * Default Constructor. Required for Jackson. 
*/ @@ -69,13 +72,14 @@ public JSONAddPartitionMessage() { */ public JSONAddPartitionMessage(String server, String servicePrincipal, Table tableObj, Iterator partitionsIterator, Iterator partitionFileIter, - Long timestamp) { + Long timestamp, String writeIds) { this.server = server; this.servicePrincipal = servicePrincipal; this.db = tableObj.getDbName(); this.table = tableObj.getTableName(); this.tableType = tableObj.getTableType(); this.timestamp = timestamp; + this.writeIds = writeIds; partitions = new ArrayList<>(); partitionListJson = new ArrayList<>(); Partition partitionObj; @@ -174,4 +178,8 @@ public String toString() { return partitionFiles; } + @Override + public String getWriteIds() { + return writeIds; + } } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAlterPartitionMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAlterPartitionMessage.java index 414402f..58bd8ff 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAlterPartitionMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAlterPartitionMessage.java @@ -48,6 +48,9 @@ @JsonProperty String partitionObjBeforeJson, partitionObjAfterJson; + @JsonProperty + String writeIds; + /** * Default constructor, needed for Jackson. */ @@ -55,7 +58,7 @@ public JSONAlterPartitionMessage() { } public JSONAlterPartitionMessage(String server, String servicePrincipal, Table tableObj, - Partition partitionObjBefore, Partition partitionObjAfter, boolean isTruncateOp, Long writeId, Long timestamp) { + Partition partitionObjBefore, Partition partitionObjAfter, boolean isTruncateOp, Long writeId, Long timestamp, String writeIds) { this.server = server; this.servicePrincipal = servicePrincipal; this.db = tableObj.getDbName(); @@ -65,6 +68,7 @@ public JSONAlterPartitionMessage(String server, String servicePrincipal, Table t this.timestamp = timestamp; this.keyValues = MessageBuilder.getPartitionKeyValues(tableObj, partitionObjBefore); this.writeId = writeId; + this.writeIds = writeIds; try { this.tableObjJson = MessageBuilder.createTableObjJson(tableObj); this.partitionObjBeforeJson = MessageBuilder.createPartitionObjJson(partitionObjBefore); @@ -145,6 +149,11 @@ public String getPartitionObjAfterJson() { } @Override + public String getWriteIds() { + return writeIds; + } + + @Override public Long getWriteId() { return writeId == null ? 0 : writeId; } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAlterTableMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAlterTableMessage.java index 8c621b2..e5d4f66 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAlterTableMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAlterTableMessage.java @@ -39,6 +39,9 @@ @JsonProperty Long timestamp, writeId; + @JsonProperty + String writeIds; + /** * Default constructor, needed for Jackson. 
*/ @@ -46,7 +49,7 @@ public JSONAlterTableMessage() { } public JSONAlterTableMessage(String server, String servicePrincipal, Table tableObjBefore, Table tableObjAfter, - boolean isTruncateOp, Long writeId, Long timestamp) { + boolean isTruncateOp, Long writeId, Long timestamp, String writeIds) { this.server = server; this.servicePrincipal = servicePrincipal; this.db = tableObjBefore.getDbName(); @@ -55,6 +58,7 @@ public JSONAlterTableMessage(String server, String servicePrincipal, Table table this.isTruncateOp = Boolean.toString(isTruncateOp); this.timestamp = timestamp; this.writeId = writeId; + this.writeIds = writeIds; try { this.tableObjBeforeJson = MessageBuilder.createTableObjJson(tableObjBefore); this.tableObjAfterJson = MessageBuilder.createTableObjJson(tableObjAfter); @@ -125,6 +129,11 @@ public Long getWriteId() { } @Override + public String getWriteIds() { + return writeIds; + } + + @Override public String toString() { try { return JSONMessageDeserializer.mapper.writeValueAsString(this); diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCommitTxnMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCommitTxnMessage.java index 482fc8e..ceea78d 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCommitTxnMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCommitTxnMessage.java @@ -50,6 +50,9 @@ private List writeIds; @JsonProperty + private String txnWriteIds; + + @JsonProperty private List databases, tables, partitions, tableObjs, partitionObjs, files; /** @@ -58,9 +61,10 @@ public JSONCommitTxnMessage() { } - public JSONCommitTxnMessage(String server, String servicePrincipal, Long txnid, Long timestamp) { + public JSONCommitTxnMessage(String server, String servicePrincipal, Long txnid, String txnWriteIds, Long timestamp) { this.timestamp = timestamp; this.txnid = txnid; + this.txnWriteIds = txnWriteIds; this.server = server; this.servicePrincipal = servicePrincipal; this.databases = null; @@ -78,6 +82,11 @@ public Long getTxnId() { } @Override + public String getTxnWriteIds() { + return txnWriteIds; + } + + @Override public Long getTimestamp() { return timestamp; } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCreateTableMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCreateTableMessage.java index 145ee4b..7b2e930 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCreateTableMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCreateTableMessage.java @@ -41,6 +41,8 @@ Long timestamp; @JsonProperty List files; + @JsonProperty + String writeIds; /** * Default constructor, needed for Jackson. 
@@ -49,25 +51,26 @@ public JSONCreateTableMessage() { } public JSONCreateTableMessage(String server, String servicePrincipal, String db, String table, - String tableType, Long timestamp) { + String tableType, Long timestamp, String writeIds) { this.server = server; this.servicePrincipal = servicePrincipal; this.db = db; this.table = table; this.tableType = tableType; this.timestamp = timestamp; + this.writeIds = writeIds; checkValid(); } public JSONCreateTableMessage(String server, String servicePrincipal, String db, String table, - Long timestamp) { - this(server, servicePrincipal, db, table, null, timestamp); + Long timestamp, String writeIds) { + this(server, servicePrincipal, db, table, null, timestamp, writeIds); } public JSONCreateTableMessage(String server, String servicePrincipal, Table tableObj, - Iterator fileIter, Long timestamp) { + Iterator fileIter, Long timestamp, String writeIds) { this(server, servicePrincipal, tableObj.getDbName(), tableObj.getTableName(), - tableObj.getTableType(), timestamp); + tableObj.getTableType(), timestamp, writeIds); try { this.tableObjJson = MessageBuilder.createTableObjJson(tableObj); } catch (TException e) { @@ -115,6 +118,11 @@ public Table getTableObj() throws Exception { return (Table) MessageBuilder.getTObj(tableObjJson,Table.class); } + @Override + public String getWriteIds() { + return writeIds; + } + public String getTableObjJson() { return tableObjJson; } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONUpdatePartitionColumnStatMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONUpdatePartitionColumnStatMessage.java index fd7fe00..2e4d9de 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONUpdatePartitionColumnStatMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONUpdatePartitionColumnStatMessage.java @@ -52,6 +52,9 @@ @JsonProperty private String tableObjJson; + @JsonProperty + private String writeIds; + /** * Default constructor, needed for Jackson. 
*/ @@ -61,7 +64,7 @@ public JSONUpdatePartitionColumnStatMessage() { public JSONUpdatePartitionColumnStatMessage(String server, String servicePrincipal, Long timestamp, ColumnStatistics colStats, List partVals, Map parameters, - Table tableObj, long writeId) { + Table tableObj, long writeId, String writeIds) { this.timestamp = timestamp; this.server = server; this.servicePrincipal = servicePrincipal; @@ -75,6 +78,7 @@ public JSONUpdatePartitionColumnStatMessage(String server, String servicePrincip throw new IllegalArgumentException("Could not serialize JSONUpdatePartitionColumnStatMessage : ", e); } this.parameters = parameters; + this.writeIds = writeIds; } @Override @@ -127,6 +131,11 @@ public Table getTableObject() throws Exception { } @Override + public String getWriteIds() { + return writeIds; + } + + @Override public String toString() { try { return JSONMessageDeserializer.mapper.writeValueAsString(this); diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONUpdateTableColumnStatMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONUpdateTableColumnStatMessage.java index 275d204..40636c8 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONUpdateTableColumnStatMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONUpdateTableColumnStatMessage.java @@ -47,6 +47,9 @@ @JsonProperty private String tableObjJson; + @JsonProperty + private String writeIds; + /** * Default constructor, needed for Jackson. */ @@ -55,7 +58,7 @@ public JSONUpdateTableColumnStatMessage() { public JSONUpdateTableColumnStatMessage(String server, String servicePrincipal, Long timestamp, ColumnStatistics colStats, Table tableObj, Map parameters, - long writeId) { + long writeId, String writeIds) { this.timestamp = timestamp; this.server = server; this.servicePrincipal = servicePrincipal; @@ -68,6 +71,7 @@ public JSONUpdateTableColumnStatMessage(String server, String servicePrincipal, throw new IllegalArgumentException("Could not serialize JSONUpdateTableColumnStatMessage : ", e); } this.parameters = parameters; + this.writeIds = writeIds; } @Override @@ -115,6 +119,11 @@ public Long getWriteId() { } @Override + public String getWriteIds() { + return writeIds; + } + + @Override public String toString() { try { return JSONMessageDeserializer.mapper.writeValueAsString(this); diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/SmokeTest.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/SmokeTest.java index 9cdf271..3e0ded4 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/SmokeTest.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/SmokeTest.java @@ -68,13 +68,14 @@ private void runTest(IMetaStoreClient client) throws TException { .create(client, conf); LOG.info("Going to create table " + tableName); + // TODO =====to be reworked in HIVE-21637====== Table table = new TableBuilder() .inDb(db) .setTableName(tableName) .addCol("col1", ColumnType.INT_TYPE_NAME) .addCol("col2", ColumnType.TIMESTAMP_TYPE_NAME) .addPartCol("pcol1", ColumnType.STRING_TYPE_NAME) - .create(client, conf); + .create(client, conf, null); LOG.info("Going to create partition with value " + 
partValue); Partition part = new PartitionBuilder() diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java index fd85af9..4032889 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java @@ -1320,7 +1320,7 @@ public void commitTxn(CommitTxnRequest rqst) if (transactionalListeners != null) { MetaStoreListenerNotifier.notifyEventWithDirectSql(transactionalListeners, - EventMessage.EventType.COMMIT_TXN, new CommitTxnEvent(txnid, null), dbConn, sqlGenerator); + EventMessage.EventType.COMMIT_TXN, new CommitTxnEvent(txnid, rqst.getTxnWriteIds(), null), dbConn, sqlGenerator); } LOG.debug("Going to commit"); diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java index f202832..80f3200 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java @@ -245,8 +245,8 @@ public boolean dropType(String typeName) { } @Override - public void createTable(Table tbl) throws InvalidObjectException, MetaException { - objectStore.createTable(tbl); + public void createTable(Table tbl, String validWriteIdList) throws InvalidObjectException, MetaException { + objectStore.createTable(tbl, validWriteIdList); } @Override @@ -268,9 +268,9 @@ public Table getTable(String catName, String dbName, String tableName, String wr } @Override - public boolean addPartition(Partition part) + public boolean addPartition(Partition part, String validWriteIdList) throws InvalidObjectException, MetaException { - return objectStore.addPartition(part); + return objectStore.addPartition(part, validWriteIdList); } @Override @@ -812,13 +812,14 @@ public boolean doesPartitionExist(String catName, String dbName, String tableNam } @Override - public boolean addPartitions(String catName, String dbName, String tblName, List parts) + public boolean addPartitions(String catName, String dbName, String tblName, List parts, String validWriteIdList) throws InvalidObjectException, MetaException { - return objectStore.addPartitions(catName, dbName, tblName, parts); + return objectStore.addPartitions(catName, dbName, tblName, parts, validWriteIdList); } @Override - public boolean addPartitions(String catName, String dbName, String tblName, PartitionSpecProxy partitionSpec, boolean ifNotExists) throws InvalidObjectException, MetaException { + public boolean addPartitions(String catName, String dbName, String tblName, PartitionSpecProxy partitionSpec, boolean ifNotExists, + String validWriteIdList) throws InvalidObjectException, MetaException { return false; } diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java index 1a7ce04..c433470 100644 --- 
a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java @@ -228,7 +228,7 @@ public boolean dropType(String typeName) { } @Override - public void createTable(Table tbl) throws InvalidObjectException, MetaException { + public void createTable(Table tbl, String validWriteIdList) throws InvalidObjectException, MetaException { } @@ -252,7 +252,7 @@ public Table getTable(String catalogName, String dbName, String tableName, } @Override - public boolean addPartition(Partition part) throws InvalidObjectException, MetaException { + public boolean addPartition(Partition part, String validWriteIdList) throws InvalidObjectException, MetaException { return false; } @@ -809,13 +809,14 @@ public boolean doesPartitionExist(String catName, String dbName, String tableNam } @Override - public boolean addPartitions(String catName, String dbName, String tblName, List parts) + public boolean addPartitions(String catName, String dbName, String tblName, List parts, String validWriteIdList) throws InvalidObjectException, MetaException { return false; } @Override - public boolean addPartitions(String catName, String dbName, String tblName, PartitionSpecProxy partitionSpec, boolean ifNotExists) throws InvalidObjectException, MetaException { + public boolean addPartitions(String catName, String dbName, String tblName, PartitionSpecProxy partitionSpec, boolean ifNotExists, + String validWriteIdList) throws InvalidObjectException, MetaException { return false; } diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java index 459c7c2..9b4d9f3 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java @@ -57,6 +57,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidTxnWriteIdList; import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.metastore.api.*; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; @@ -393,25 +394,25 @@ public void reconnect() throws MetaException { * org.apache.hadoop.hive.metastore.api.Table) */ @Override - public void alter_table(String dbname, String tbl_name, Table new_tbl) + public void alter_table(String dbname, String tbl_name, Table new_tbl, String validWriteIdList) throws InvalidOperationException, MetaException, TException { - alter_table_with_environmentContext(dbname, tbl_name, new_tbl, null); + alter_table_with_environmentContext(dbname, tbl_name, new_tbl, null, validWriteIdList); } @Override public void alter_table(String defaultDatabaseName, String tblName, Table table, - boolean cascade) throws InvalidOperationException, MetaException, TException { + boolean cascade, String validWriteIdList) throws InvalidOperationException, MetaException, TException { EnvironmentContext environmentContext = new EnvironmentContext(); if (cascade) { environmentContext.putToProperties(StatsSetupConst.CASCADE, StatsSetupConst.TRUE); } - 
alter_table_with_environmentContext(defaultDatabaseName, tblName, table, environmentContext); + alter_table_with_environmentContext(defaultDatabaseName, tblName, table, environmentContext, validWriteIdList); } @Override public void alter_table_with_environmentContext(String dbname, String tbl_name, Table new_tbl, - EnvironmentContext envContext) throws InvalidOperationException, MetaException, TException { - client.alter_table_with_environment_context(dbname, tbl_name, new_tbl, envContext); + EnvironmentContext envContext, String validWriteIdList) throws InvalidOperationException, MetaException, TException { + client.alter_table_with_environment_context(dbname, tbl_name, new_tbl, envContext, validWriteIdList); } /** @@ -618,6 +619,7 @@ public String getMetaConf(String key) throws TException { /** * @param new_part + * @param validWriteIdList writeIds snapshot * @return the added partition * @throws InvalidObjectException * @throws AlreadyExistsException @@ -626,18 +628,19 @@ public String getMetaConf(String key) throws TException { * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#add_partition(org.apache.hadoop.hive.metastore.api.Partition) */ @Override - public Partition add_partition(Partition new_part) throws TException { - return add_partition(new_part, null); + public Partition add_partition(Partition new_part, String validWriteIdList) throws TException { + return add_partition(new_part, null, validWriteIdList); } - public Partition add_partition(Partition new_part, EnvironmentContext envContext) + public Partition add_partition(Partition new_part, EnvironmentContext envContext, String validWriteIdList) throws TException { - Partition p = client.add_partition_with_environment_context(new_part, envContext); + Partition p = client.add_partition_with_environment_context(new_part, envContext, validWriteIdList); return fastpath ? p : deepCopy(p); } /** * @param new_parts + * @param validWriteIdList writeIds snapshot * @throws InvalidObjectException * @throws AlreadyExistsException * @throws MetaException @@ -645,13 +648,13 @@ public Partition add_partition(Partition new_part, EnvironmentContext envContext * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#add_partitions(List) */ @Override - public int add_partitions(List new_parts) throws TException { - return client.add_partitions(new_parts); + public int add_partitions(List new_parts, String validWriteIdList) throws TException { + return client.add_partitions(new_parts, validWriteIdList); } @Override public List add_partitions( - List parts, boolean ifNotExists, boolean needResults) throws TException { + List parts, boolean ifNotExists, boolean needResults, String validWriteIdList) throws TException { if (parts.isEmpty()) { return needResults ? new ArrayList<>() : null; } @@ -659,19 +662,21 @@ public int add_partitions(List new_parts) throws TException { AddPartitionsRequest req = new AddPartitionsRequest( part.getDbName(), part.getTableName(), parts, ifNotExists); req.setNeedResult(needResults); + req.setValidWriteIdList(validWriteIdList); AddPartitionsResult result = client.add_partitions_req(req); return needResults ? 
filterHook.filterPartitions(result.getPartitions()) : null; } @Override - public int add_partitions_pspec(PartitionSpecProxy partitionSpec) throws TException { - return client.add_partitions_pspec(partitionSpec.toPartitionSpec()); + public int add_partitions_pspec(PartitionSpecProxy partitionSpec, String validWriteIdList) throws TException { + return client.add_partitions_pspec(partitionSpec.toPartitionSpec(), validWriteIdList); } /** * @param table_name * @param db_name * @param part_vals + * @param validWriteIdList writeIds snapshot * @return the appended partition * @throws InvalidObjectException * @throws AlreadyExistsException @@ -682,27 +687,27 @@ public int add_partitions_pspec(PartitionSpecProxy partitionSpec) throws TExcept */ @Override public Partition appendPartition(String db_name, String table_name, - List part_vals) throws TException { + List part_vals, String validWriteIdList) throws TException { return appendPartition(db_name, table_name, part_vals, null); } public Partition appendPartition(String db_name, String table_name, List part_vals, - EnvironmentContext envContext) throws TException { + EnvironmentContext envContext, String validWriteIdList) throws TException { Partition p = client.append_partition_with_environment_context(db_name, table_name, - part_vals, envContext); + part_vals, envContext, validWriteIdList); return fastpath ? p : deepCopy(p); } @Override - public Partition appendPartition(String dbName, String tableName, String partName) + public Partition appendPartition(String dbName, String tableName, String partName, String validWriteIdList) throws TException { - return appendPartition(dbName, tableName, partName, (EnvironmentContext)null); + return appendPartition(dbName, tableName, partName, (EnvironmentContext)null, validWriteIdList); } public Partition appendPartition(String dbName, String tableName, String partName, - EnvironmentContext envContext) throws TException { + EnvironmentContext envContext, String validWriteIdList) throws TException { Partition p = client.append_partition_by_name_with_environment_context(dbName, tableName, - partName, envContext); + partName, envContext, validWriteIdList); return fastpath ? 
p : deepCopy(p); } @@ -711,15 +716,16 @@ public Partition appendPartition(String dbName, String tableName, String partNam * @param partitionSpecs partitions specs of the parent partition to be exchanged * @param destDb the db of the destination table * @param destinationTableName the destination table name + * @param validWriteIdList writeIds snapshot * @return new partition after exchanging */ @Override public Partition exchange_partition(Map partitionSpecs, String sourceDb, String sourceTable, String destDb, - String destinationTableName) throws MetaException, + String destinationTableName, String validWriteIdList) throws MetaException, NoSuchObjectException, InvalidObjectException, TException { return client.exchange_partition(partitionSpecs, sourceDb, sourceTable, - destDb, destinationTableName); + destDb, destinationTableName, validWriteIdList); } /** @@ -727,15 +733,16 @@ public Partition exchange_partition(Map partitionSpecs, * @param partitionSpecs partitions specs of the parent partition to be exchanged * @param destDb the db of the destination table * @param destinationTableName the destination table name + * @param validWriteIdList writeIds snapshot * @return new partitions after exchanging */ @Override public List exchange_partitions(Map partitionSpecs, String sourceDb, String sourceTable, String destDb, - String destinationTableName) throws MetaException, + String destinationTableName, String validWriteIdList) throws MetaException, NoSuchObjectException, InvalidObjectException, TException { return client.exchange_partitions(partitionSpecs, sourceDb, sourceTable, - destDb, destinationTableName); + destDb, destinationTableName, validWriteIdList); } @Override @@ -767,12 +774,12 @@ public void createDatabase(Database db) * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_table(org.apache.hadoop.hive.metastore.api.Table) */ @Override - public void createTable(Table tbl) throws AlreadyExistsException, + public void createTable(Table tbl, String txnWriteIds) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, TException { - createTable(tbl, null); + createTable(tbl, null, txnWriteIds); } - public void createTable(Table tbl, EnvironmentContext envContext) throws AlreadyExistsException, + public void createTable(Table tbl, EnvironmentContext envContext, String txnWriteIds) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, TException { HiveMetaHook hook = getHook(tbl); if (hook != null) { @@ -804,7 +811,8 @@ public void createTableWithConstraints(Table tbl, List uniqueConstraints, List notNullConstraints, List defaultConstraints, - List checkConstraints) + List checkConstraints, + String validWriteIdList) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, TException { HiveMetaHook hook = getHook(tbl); @@ -815,7 +823,7 @@ public void createTableWithConstraints(Table tbl, try { // Subclasses can override this step (for example, for temporary tables) client.create_table_with_constraints(tbl, primaryKeys, foreignKeys, - uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints); + uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints, validWriteIdList); if (hook != null) { hook.commitCreateTable(tbl); } @@ -1724,20 +1732,20 @@ public void alterDatabase(String dbName, Database db) @Override @Deprecated //use setPartitionColumnStatistics instead - public boolean 
updateTableColumnStatistics(ColumnStatistics statsObj) + public boolean updateTableColumnStatistics(ColumnStatistics statsObj, String validWriteIdList) throws NoSuchObjectException, InvalidObjectException, MetaException, TException, InvalidInputException{ - return client.update_table_column_statistics(statsObj); + return client.update_table_column_statistics(statsObj, validWriteIdList); } /** {@inheritDoc} */ @Override @Deprecated //use setPartitionColumnStatistics instead - public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj) + public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj, String validWriteIdList) throws NoSuchObjectException, InvalidObjectException, MetaException, TException, InvalidInputException{ - return client.update_partition_column_statistics(statsObj); + return client.update_partition_column_statistics(statsObj, validWriteIdList); } /** {@inheritDoc} */ @@ -1854,16 +1862,16 @@ public Partition getPartition(String db, String tableName, String partName) return fastpath ? p : deepCopy(filterHook.filterPartition(p)); } - public Partition appendPartitionByName(String dbName, String tableName, String partName) + public Partition appendPartitionByName(String dbName, String tableName, String partName, String validWriteIdList) throws InvalidObjectException, AlreadyExistsException, MetaException, TException { - return appendPartitionByName(dbName, tableName, partName, null); + return appendPartitionByName(dbName, tableName, partName, null, validWriteIdList); } public Partition appendPartitionByName(String dbName, String tableName, String partName, - EnvironmentContext envContext) throws InvalidObjectException, AlreadyExistsException, + EnvironmentContext envContext, String validWriteIdList) throws InvalidObjectException, AlreadyExistsException, MetaException, TException { Partition p = client.append_partition_by_name_with_environment_context(dbName, tableName, - partName, envContext); + partName, envContext, validWriteIdList); return fastpath ? 
p : deepCopy(p); } @@ -2304,7 +2312,7 @@ public void replRollbackTxn(long srcTxnId, String replPolicy) throws NoSuchTxnEx } @Override - public void commitTxn(long txnid) + public void commitTxn(long txnid, String txnWriteIds) throws NoSuchTxnException, TxnAbortedException, TException { client.commit_txn(new CommitTxnRequest(txnid)); } @@ -2668,7 +2676,7 @@ public GetAllFunctionsResponse getAllFunctions() protected void create_table_with_environment_context(Table tbl, EnvironmentContext envContext) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, TException { - client.create_table_with_environment_context(tbl, envContext); + client.create_table_with_environment_context(tbl, envContext, null); } protected void drop_table_with_environment_context(String dbname, String name, @@ -3155,14 +3163,14 @@ public void updateCreationMetadata(String catName, String dbName, String tableNa @Override public Partition appendPartition(String catName, String dbName, String tableName, - List partVals) throws InvalidObjectException, + List partVals, String validWriteIdList) throws InvalidObjectException, AlreadyExistsException, MetaException, TException { throw new UnsupportedOperationException(); } @Override public Partition appendPartition(String catName, String dbName, String tableName, - String name) throws InvalidObjectException, + String name, String validWriteIdList) throws InvalidObjectException, AlreadyExistsException, MetaException, TException { throw new UnsupportedOperationException(); } @@ -3177,7 +3185,7 @@ public Partition getPartition(String catName, String dbName, String tblName, @Override public Partition exchange_partition(Map partitionSpecs, String sourceCat, String sourceDb, String sourceTable, String destCat, - String destdb, String destTableName) throws MetaException, + String destdb, String destTableName, String validWriteIdList) throws MetaException, NoSuchObjectException, InvalidObjectException, TException { throw new UnsupportedOperationException(); } @@ -3185,7 +3193,7 @@ public Partition exchange_partition(Map partitionSpecs, String s @Override public List exchange_partitions(Map partitionSpecs, String sourceCat, String sourceDb, String sourceTable, String destCat, - String destdb, String destTableName) throws + String destdb, String destTableName, String validWriteIdList) throws MetaException, NoSuchObjectException, InvalidObjectException, TException { throw new UnsupportedOperationException(); } @@ -3316,13 +3324,6 @@ public boolean isPartitionMarkedForEvent(String catName, String db_name, String } @Override - public void alter_table(String catName, String dbName, String tblName, Table newTable, - EnvironmentContext envContext) throws InvalidOperationException, - MetaException, TException { - throw new UnsupportedOperationException(); - } - - @Override public void dropDatabase(String catName, String dbName, boolean deleteData, boolean ignoreUnknownDb, boolean cascade) throws NoSuchObjectException, InvalidOperationException, MetaException, TException { diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java index 6c7fe11..0f82a8b 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java +++ 
b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java @@ -208,7 +208,7 @@ public void addNotificationEvent(NotificationEvent entry) throws MetaException { } @Override - public void createTable(Table tbl) throws InvalidObjectException, MetaException { + public void createTable(Table tbl, String validWriteIdList) throws InvalidObjectException, MetaException { if (callerVerifier != null) { CallerArguments args = new CallerArguments(tbl.getDbName()); args.tblName = tbl.getTableName(); @@ -218,7 +218,7 @@ public void createTable(Table tbl) throws InvalidObjectException, MetaException + args.dbName + " table: " + args.tblName); } } - super.createTable(tbl); + super.createTable(tbl, validWriteIdList); } @Override diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/NonCatCallsWithCatalog.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/NonCatCallsWithCatalog.java index 377a550..0c803ba 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/NonCatCallsWithCatalog.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/NonCatCallsWithCatalog.java @@ -168,7 +168,7 @@ public void setUp() throws Exception { for (Table t : testTables) { t.unsetCatName(); - client.createTable(t); + client.createTable(t, null); } // Create partitions for the partitioned table @@ -178,7 +178,7 @@ public void setUp() throws Exception { .addValue("a" + i) .build(conf); p.unsetCatName(); - client.add_partition(p); + client.add_partition(p, null); } } @@ -282,7 +282,7 @@ public void tablesCreateDropAlterTruncate() throws TException, URISyntaxExceptio */ Table t = builder.build(conf); t.unsetCatName(); - client.createTable(t); + client.createTable(t, null); } // Add partitions for the partitioned table @@ -295,7 +295,7 @@ public void tablesCreateDropAlterTruncate() throws TException, URISyntaxExceptio .addValue(partVals[i]) .build(conf); p.unsetCatName(); - client.add_partition(p); + client.add_partition(p, null); } // Get tables, make sure the locations are correct @@ -355,13 +355,13 @@ public void tablesCreateDropAlterTruncate() throws TException, URISyntaxExceptio // Test altering the table Table t = client.getTable(dbName, tableNames[0]).deepCopy(); t.getParameters().put("test", "test"); - client.alter_table(dbName, tableNames[0], t); + client.alter_table(dbName, tableNames[0], t, null); t = client.getTable(dbName, tableNames[0]).deepCopy(); Assert.assertEquals("test", t.getParameters().get("test")); // Alter a table in the wrong catalog try { - client.alter_table(DEFAULT_DATABASE_NAME, tableNames[0], t); + client.alter_table(DEFAULT_DATABASE_NAME, tableNames[0], t, null); Assert.fail(); } catch (InvalidOperationException e) { // NOP @@ -416,7 +416,7 @@ public void tablesGetExists() throws TException { .addCol("col2_" + i, ColumnType.INT_TYPE_NAME) .build(conf); table.unsetCatName(); - client.createTable(table); + client.createTable(table, null); } Set tables = new HashSet<>(client.getTables(dbName, "*e_in_other_*")); @@ -452,7 +452,7 @@ public void tablesList() throws TException { if (i == 0) builder.addTableParam("the_key", "the_value"); Table table = builder.build(conf); table.unsetCatName(); - client.createTable(table); + client.createTable(table, null); } String filter = hive_metastoreConstants.HIVE_FILTER_FIELD_PARAMS + "the_key=\"the_value\""; @@ -481,7 +481,7 @@ public void 
getTableMeta() throws TException { .addCol("name", "string") .build(conf); table.unsetCatName(); - client.createTable(table); + client.createTable(table, null); TableMeta tableMeta = new TableMeta(dbName, tableNames[i], TableType.MANAGED_TABLE.name()); tableMeta.setCatName(expectedCatalog()); expected.add(tableMeta); @@ -514,7 +514,7 @@ public void addPartitions() throws TException { .addPartCol("partcol", "string") .build(conf); table.unsetCatName(); - client.createTable(table); + client.createTable(table, null); Partition[] parts = new Partition[5]; for (int i = 0; i < parts.length; i++) { @@ -524,9 +524,9 @@ public void addPartitions() throws TException { .build(conf); parts[i].unsetCatName(); } - client.add_partition(parts[0]); - Assert.assertEquals(2, client.add_partitions(Arrays.asList(parts[1], parts[2]))); - client.add_partitions(Arrays.asList(parts[3], parts[4]), true, false); + client.add_partition(parts[0], null); + Assert.assertEquals(2, client.add_partitions(Arrays.asList(parts[1], parts[2]), null)); + client.add_partitions(Arrays.asList(parts[3], parts[4]), true, false, null); for (int i = 0; i < parts.length; i++) { Partition fetched = client.getPartition(dbName, tableName, @@ -558,7 +558,7 @@ public void getPartitions() throws TException { .addTableParam("PARTITION_LEVEL_PRIVILEGE", "true") .build(conf); table.unsetCatName(); - client.createTable(table); + client.createTable(table, null); Partition[] parts = new Partition[5]; for (int i = 0; i < parts.length; i++) { @@ -568,7 +568,7 @@ public void getPartitions() throws TException { .build(conf); parts[i].unsetCatName(); } - client.add_partitions(Arrays.asList(parts)); + client.add_partitions(Arrays.asList(parts), null); Partition fetched = client.getPartition(dbName, tableName, Collections.singletonList("a0")); @@ -607,7 +607,7 @@ public void listPartitions() throws TException { .addPartCol("partcol", "string") .build(conf); table.unsetCatName(); - client.createTable(table); + client.createTable(table, null); Partition[] parts = new Partition[5]; for (int i = 0; i < parts.length; i++) { @@ -617,7 +617,7 @@ public void listPartitions() throws TException { .build(conf); parts[i].unsetCatName(); } - client.add_partitions(Arrays.asList(parts)); + client.add_partitions(Arrays.asList(parts), null); List fetched = client.listPartitions(dbName, tableName, (short)-1); Assert.assertEquals(parts.length, fetched.size()); @@ -674,7 +674,7 @@ public void alterPartitions() throws TException { .addPartCol("partcol", "string") .build(conf); table.unsetCatName(); - client.createTable(table); + client.createTable(table, null); Partition[] parts = new Partition[5]; for (int i = 0; i < 5; i++) { @@ -685,7 +685,7 @@ public void alterPartitions() throws TException { .build(conf); parts[i].unsetCatName(); } - client.add_partitions(Arrays.asList(parts)); + client.add_partitions(Arrays.asList(parts), null); Partition newPart = client.getPartition(dbName, tableName, Collections.singletonList("a0")); @@ -742,7 +742,7 @@ public void dropPartitions() throws TException { .addPartCol("partcol", "string") .build(conf); table.unsetCatName(); - client.createTable(table); + client.createTable(table, null); Partition[] parts = new Partition[2]; for (int i = 0; i < parts.length; i++) { @@ -752,7 +752,7 @@ public void dropPartitions() throws TException { .build(conf); parts[i].unsetCatName(); } - client.add_partitions(Arrays.asList(parts)); + client.add_partitions(Arrays.asList(parts), null); List fetched = client.listPartitions(dbName, tableName, (short)-1); 
Assert.assertEquals(parts.length, fetched.size()); @@ -1000,7 +1000,7 @@ public void createTableWithConstraints() throws TException { .build(conf); for (SQLCheckConstraint cccol : cc) cccol.unsetCatName(); - client.createTableWithConstraints(table, pk, fk, uc, nn, dv, cc); + client.createTableWithConstraints(table, pk, fk, uc, nn, dv, cc, null); PrimaryKeysRequest pkRqst = new PrimaryKeysRequest(parentTable.getDbName(), parentTable.getTableName()); diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java index 23faa74..81c866b 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java @@ -220,13 +220,13 @@ protected void creatEnv(Configuration conf) throws Exception { .setTableName(TAB1) .addCol("id", "int") .addCol("name", "string") - .create(client, conf); + .create(client, conf, null); Table tab2 = new TableBuilder() .setDbName(DBNAME1) .setTableName(TAB2) .addCol("id", "int") .addPartCol("name", "string") - .create(client, conf); + .create(client, conf, null); new PartitionBuilder() .inTable(tab2) .addValue("value1") diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestGetPartitionsUsingProjectionAndFilterSpecs.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestGetPartitionsUsingProjectionAndFilterSpecs.java index bc43f3d..9b6d4aa 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestGetPartitionsUsingProjectionAndFilterSpecs.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestGetPartitionsUsingProjectionAndFilterSpecs.java @@ -151,7 +151,7 @@ private void createTestTables() throws TException { .setSerdeLib("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe") .setInputFormat("org.apache.hadoop.hive.ql.io.HiveInputFormat") .setOutputFormat("org.apache.hadoop.hive.ql.io.HiveOutputFormat") - .create(client, conf); + .create(client, conf, null); Table table = client.getTable(dbName, tblName); Assert.assertTrue("Table " + dbName + "." 
+ tblName + " does not exist", @@ -163,7 +163,7 @@ private void createTestTables() throws TException { partitions.add(createPartition(Arrays.asList("WA", "Seattle"), table)); partitions.add(createPartition(Arrays.asList("AZ", "Phoenix"), table)); - client.add_partitions(partitions); + client.add_partitions(partitions, null); } private Partition createPartition(List vals, Table table) throws MetaException { @@ -551,7 +551,7 @@ public void testNonStandardPartitions() throws TException { .addBucketCol("ns_c1") .addSortCol("ns_c2", 1) .addTableParam("tblparamKey", "Partitions of this table are not located within table directory") - .create(client, conf); + .create(client, conf, null); Table table = client.getTable(dbName, testTblName); Assert.assertNotNull("Unable to create a test table ", table); @@ -562,7 +562,7 @@ public void testNonStandardPartitions() throws TException { partitions.add(createPartition(Arrays.asList("p2", "Seattle"), table)); partitions.add(createPartition(Arrays.asList("p2", "Phoenix"), table)); - client.add_partitions(partitions); + client.add_partitions(partitions, null); // change locations of two of the partitions outside table directory List testPartitions = client.listPartitions(dbName, testTblName, (short) -1); Assert.assertEquals(4, testPartitions.size()); diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java index 0c4c84c..b1e7fb3 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java @@ -225,7 +225,7 @@ private static void partitionTester(HiveMetaStoreClient client, Configuration co .setSkewedColValueLocationMaps(Collections.singletonMap(skewedColValue, "location1")) .addPartCol("ds", ColumnType.STRING_TYPE_NAME) .addPartCol("hr", ColumnType.STRING_TYPE_NAME) - .create(client, conf); + .create(client, conf, null); if (isThriftClient) { // the createTable() above does not update the location in the 'tbl' @@ -253,13 +253,13 @@ private static void partitionTester(HiveMetaStoreClient client, Configuration co exceptionThrown = true; } assertTrue("getPartition() should have thrown NoSuchObjectException", exceptionThrown); - Partition retp = client.add_partition(part); + Partition retp = client.add_partition(part, null); assertNotNull("Unable to create partition " + part, retp); - Partition retp2 = client.add_partition(part2); + Partition retp2 = client.add_partition(part2, null); assertNotNull("Unable to create partition " + part2, retp2); - Partition retp3 = client.add_partition(part3); + Partition retp3 = client.add_partition(part3, null); assertNotNull("Unable to create partition " + part3, retp3); - Partition retp4 = client.add_partition(part4); + Partition retp4 = client.add_partition(part4, null); assertNotNull("Unable to create partition " + part4, retp4); Partition part_get = client.getPartition(dbName, tblName, part.getValues()); @@ -277,7 +277,7 @@ private static void partitionTester(HiveMetaStoreClient client, Configuration co Partition part6 = makePartitionObject(dbName, tblName, vals6, tbl, "/part5"); part6.getSd().setCols(null); LOG.info("Creating partition will null field schema"); - client.add_partition(part6); + client.add_partition(part6, null); LOG.info("Listing all partitions for table " + dbName + 
"." + tblName); final List partitions = client.listPartitions(dbName, tblName, (short) -1); boolean foundPart = false; @@ -365,7 +365,7 @@ private static void partitionTester(HiveMetaStoreClient client, Configuration co assertFalse(fs.exists(partPath)); // Test append_partition_by_name - client.appendPartition(dbName, tblName, partName); + client.appendPartition(dbName, tblName, partName, null); Partition part5 = client.getPartition(dbName, tblName, part.getValues()); assertTrue("Append partition by name failed", part5.getValues().equals(vals)); Path part5Path = new Path(part5.getSd().getLocation()); @@ -378,7 +378,7 @@ private static void partitionTester(HiveMetaStoreClient client, Configuration co // add the partition again so that drop table with a partition can be // tested - retp = client.add_partition(part); + retp = client.add_partition(part, null); assertNotNull("Unable to create partition " + part, retp); // test add_partitions @@ -392,13 +392,13 @@ private static void partitionTester(HiveMetaStoreClient client, Configuration co Exception savedException; // add_partitions(empty list) : ok, normal operation - client.add_partitions(new ArrayList<>()); + client.add_partitions(new ArrayList<>(), null); // add_partitions(1,2,3) : ok, normal operation Partition mpart1 = makePartitionObject(dbName, tblName, mvals1, tbl, "/mpart1"); Partition mpart2 = makePartitionObject(dbName, tblName, mvals2, tbl, "/mpart2"); Partition mpart3 = makePartitionObject(dbName, tblName, mvals3, tbl, "/mpart3"); - client.add_partitions(Arrays.asList(mpart1,mpart2,mpart3)); + client.add_partitions(Arrays.asList(mpart1,mpart2,mpart3), null); // do DDL time munging if thrift mode adjust(client, mpart1, dbName, tblName, isThriftClient); @@ -419,7 +419,7 @@ private static void partitionTester(HiveMetaStoreClient client, Configuration co // add_partitions(5,4) : err = duplicate keyvals on mpart4 savedException = null; try { - client.add_partitions(Arrays.asList(mpart5,mpart4)); + client.add_partitions(Arrays.asList(mpart5,mpart4), null); } catch (Exception e) { savedException = e; } finally { @@ -431,7 +431,7 @@ private static void partitionTester(HiveMetaStoreClient client, Configuration co assertFalse(fs.exists(new Path(mpart4.getSd().getLocation()))); // add_partitions(5) : ok - client.add_partitions(Arrays.asList(mpart5)); + client.add_partitions(Arrays.asList(mpart5), null); // do DDL time munging if thrift mode adjust(client, mpart5, dbName, tblName, isThriftClient); @@ -450,8 +450,8 @@ private static void partitionTester(HiveMetaStoreClient client, Configuration co // still exist tbl.setParameters(new HashMap<>()); tbl.getParameters().put("EXTERNAL", "TRUE"); - client.createTable(tbl); - retp = client.add_partition(part); + client.createTable(tbl, null); + retp = client.add_partition(part, null); assertTrue(fs.exists(partPath)); client.dropPartition(dbName, tblName, part.getValues(), true); assertTrue(fs.exists(partPath)); @@ -608,7 +608,7 @@ public void testAlterTableCascade() throws Throwable { cols.add(new FieldSchema("new_col", ColumnType.STRING_TYPE_NAME, "")); tbl.getSd().setCols(cols); //add new column with cascade option - client.alter_table(dbName, tblName, tbl, true); + client.alter_table(dbName, tblName, tbl, true, null); // Table tbl2 = client.getTable(dbName, tblName); assertEquals("Unexpected number of cols", 3, tbl2.getSd().getCols().size()); @@ -626,7 +626,7 @@ public void testAlterTableCascade() throws Throwable { cols.add(new FieldSchema("new_col2", ColumnType.STRING_TYPE_NAME, "")); 
tbl.getSd().setCols(cols); //add new column with no cascade option - client.alter_table(dbName, tblName, tbl, false); + client.alter_table(dbName, tblName, tbl, false, null); tbl2 = client.getTable(dbName, tblName); assertEquals("Unexpected number of cols", 4, tbl2.getSd().getCols().size()); assertEquals("Unexpected column name", "new_col2", tbl2.getSd().getCols().get(3).getName()); @@ -849,7 +849,7 @@ public void testAlterViewParititon() throws Throwable { .setTableName(tblName) .addCol("name", ColumnType.STRING_TYPE_NAME) .addCol("income", ColumnType.INT_TYPE_NAME) - .create(client, conf); + .create(client, conf, null); if (isThriftClient) { // the createTable() above does not update the location in the 'tbl' @@ -882,7 +882,7 @@ public void testAlterViewParititon() throws Throwable { viewSd.setSerdeInfo(new SerDeInfo()); viewSd.getSerdeInfo().setParameters(new HashMap<>()); - client.createTable(view); + client.createTable(view, null); if (isThriftClient) { // the createTable() above does not update the location in the 'tbl' @@ -901,7 +901,7 @@ public void testAlterViewParititon() throws Throwable { part.setValues(vals); part.setParameters(new HashMap<>()); - client.add_partition(part); + client.add_partition(part, null); Partition part2 = client.getPartition(dbName, viewName, part.getValues()); @@ -947,7 +947,7 @@ public void testAlterPartition() throws Throwable { .addSerdeParam(ColumnType.SERIALIZATION_FORMAT, "1") .addPartCol("ds", ColumnType.STRING_TYPE_NAME) .addPartCol("hr", ColumnType.INT_TYPE_NAME) - .create(client, conf); + .create(client, conf, null); if (isThriftClient) { // the createTable() above does not update the location in the 'tbl' @@ -966,7 +966,7 @@ public void testAlterPartition() throws Throwable { part.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo()); part.getSd().setLocation(tbl.getSd().getLocation() + "/part1"); - client.add_partition(part); + client.add_partition(part, null); Partition part2 = client.getPartition(dbName, tblName, part.getValues()); @@ -1022,7 +1022,7 @@ public void testRenamePartition() throws Throwable { .addCol("income", ColumnType.INT_TYPE_NAME) .addPartCol("ds", ColumnType.STRING_TYPE_NAME) .addPartCol("hr", ColumnType.INT_TYPE_NAME) - .create(client, conf); + .create(client, conf, null); if (isThriftClient) { // the createTable() above does not update the location in the 'tbl' @@ -1044,7 +1044,7 @@ public void testRenamePartition() throws Throwable { part.getSd().setNumBuckets(12); part.getSd().getSerdeInfo().getParameters().put("abc", "1"); - client.add_partition(part); + client.add_partition(part, null); part.setValues(tmp_vals); client.renamePartition(dbName, tblName, vals, part); @@ -1395,7 +1395,7 @@ public void testSimpleTable() throws Exception { .setNumBuckets(1) .addBucketCol("name") .addStorageDescriptorParam("test_param_1", "Use this for comments etc") - .create(client, conf); + .create(client, conf, null); if (isThriftClient) { // the createTable() above does not update the location in the 'tbl' @@ -1442,7 +1442,7 @@ public void testSimpleTable() throws Exception { } tbl2.unsetId(); - client.createTable(tbl2); + client.createTable(tbl2, null); if (isThriftClient) { tbl2 = client.getTable(tbl2.getDbName(), tbl2.getTableName()); } @@ -1686,7 +1686,7 @@ public void testColumnStatistics() throws Throwable { colStats.setStatsObj(statsObjs); // write stats objs persistently - client.updateTableColumnStatistics(colStats); + client.updateTableColumnStatistics(colStats, null); // retrieve the stats obj that was just written 
ColumnStatisticsObj colStats2 = client.getTableColumnStatistics( @@ -1712,7 +1712,7 @@ public void testColumnStatistics() throws Throwable { colStats.setStatsObj(statsObjs); // update table level column stats - client.updateTableColumnStatistics(colStats); + client.updateTableColumnStatistics(colStats, null); // query column stats for column whose stats were updated in the previous call colStats2 = client.getTableColumnStatistics( @@ -1746,7 +1746,7 @@ public void testColumnStatistics() throws Throwable { colStats.setStatsDesc(statsDesc); colStats.setStatsObj(statsObjs); - client.updatePartitionColumnStatistics(colStats); + client.updatePartitionColumnStatistics(colStats, null); colStats2 = client.getPartitionColumnStatistics(dbName, tblName, Lists.newArrayList(partName), Lists.newArrayList(colName[1])).get(partName).get(0); @@ -1795,7 +1795,7 @@ public void testGetSchemaWithNoClassDefFoundError() throws TException { .setTableName(tblName) .addCol("name", ColumnType.STRING_TYPE_NAME, "") .setSerdeLib("no.such.class") - .create(client, conf); + .create(client, conf, null); client.getSchema(dbName, tblName); } @@ -1841,7 +1841,7 @@ public void testCreateTableSettingId() throws Exception { .build(conf); table.setId(1); try { - client.createTable(table); + client.createTable(table, null); Assert.fail("An error should happen when setting the id" + " to create a table"); } catch (InvalidObjectException e) { @@ -1876,7 +1876,7 @@ public void testAlterTable() throws Exception { boolean failed = false; try { - client.createTable(tbl); + client.createTable(tbl, null); } catch (InvalidObjectException ex) { failed = true; } @@ -1893,7 +1893,7 @@ public void testAlterTable() throws Exception { tbl.getSd().setCols(invColsInvType); boolean failChecker = false; try { - client.createTable(tbl); + client.createTable(tbl, null); } catch (InvalidObjectException ex) { failChecker = true; } @@ -1909,7 +1909,7 @@ public void testAlterTable() throws Exception { // create a valid table tbl.setTableName(tblName); tbl.getSd().setCols(cols); - client.createTable(tbl); + client.createTable(tbl, null); if (isThriftClient) { tbl = client.getTable(tbl.getDbName(), tbl.getTableName()); @@ -1921,7 +1921,7 @@ public void testAlterTable() throws Exception { try { tbl2.setTableName(invTblName); tbl2.getSd().setCols(invCols); - client.alter_table(dbName, tblName, tbl2); + client.alter_table(dbName, tblName, tbl2, null); } catch (InvalidOperationException ex) { failed = true; } @@ -1939,7 +1939,7 @@ public void testAlterTable() throws Exception { } tbl_pk.setPartitionKeys(partitionKeys); try { - client.alter_table(dbName, tblName, tbl_pk); + client.alter_table(dbName, tblName, tbl_pk, null); } catch (InvalidOperationException ex) { failed = true; } @@ -1954,7 +1954,7 @@ public void testAlterTable() throws Exception { } tbl_pk.setPartitionKeys(partitionKeys); try { - client.alter_table(dbName, tblName, tbl_pk); + client.alter_table(dbName, tblName, tbl_pk, null); } catch (InvalidOperationException ex) { failed = true; } @@ -1966,7 +1966,7 @@ public void testAlterTable() throws Exception { tbl2.setTableName(tblName + "_renamed"); tbl2.getSd().setCols(cols); tbl2.getSd().setNumBuckets(32); - client.alter_table(dbName, tblName, tbl2); + client.alter_table(dbName, tblName, tbl2, null); Table tbl3 = client.getTable(dbName, tbl2.getTableName()); assertEquals("Alter table didn't succeed. 
Num buckets is different ", tbl2.getSd().getNumBuckets(), tbl3.getSd().getNumBuckets()); @@ -1986,7 +1986,7 @@ public void testAlterTable() throws Exception { tbl_pk.getSd().setCols(invColsInvType); failed = false; try { - client.alter_table(dbName, tbl2.getTableName(), tbl_pk); + client.alter_table(dbName, tbl2.getTableName(), tbl_pk, null); } catch (InvalidOperationException ex) { failed = true; } @@ -2034,7 +2034,7 @@ public void testComplexTable() throws Exception { .setNumBuckets(1) .addBucketCol("name") .addStorageDescriptorParam("test_param_1","Use this for comments etc") - .create(client, conf); + .create(client, conf, null); Table tbl2 = client.getTable(dbName, tblName); assertEquals(tbl2.getDbName(), dbName); @@ -2108,7 +2108,7 @@ public void testTableDatabase() throws Exception { .setTableName(tblName_1) .addCol("name", ColumnType.STRING_TYPE_NAME) .addCol("income", ColumnType.INT_TYPE_NAME) - .create(client, conf); + .create(client, conf, null); tbl = client.getTable(dbName, tblName_1); @@ -2205,7 +2205,7 @@ public void testPartitionFilter() throws Exception { .addPartCol("p1", ColumnType.STRING_TYPE_NAME) .addPartCol("p2", ColumnType.STRING_TYPE_NAME) .addPartCol("p3", ColumnType.INT_TYPE_NAME) - .create(client, conf); + .create(client, conf, null); tbl = client.getTable(dbName, tblName); @@ -2376,7 +2376,7 @@ public void testFilterSinglePartition() throws Exception { .addCol("c1", ColumnType.STRING_TYPE_NAME) .addCol("c2", ColumnType.INT_TYPE_NAME) .addPartCol("p1", ColumnType.STRING_TYPE_NAME) - .create(client, conf); + .create(client, conf, null); tbl = client.getTable(dbName, tblName); @@ -2426,7 +2426,7 @@ public void testFilterLastPartition() throws Exception { .addCol("c2", ColumnType.INT_TYPE_NAME) .addPartCol("p1", ColumnType.STRING_TYPE_NAME) .addPartCol("p2", ColumnType.STRING_TYPE_NAME) - .create(client, conf); + .create(client, conf, null); tbl = client.getTable(dbName, tblName); @@ -2476,7 +2476,7 @@ private void add_partition(HiveMetaStoreClient client, Table table, part.getSd().setSerdeInfo(table.getSd().getSerdeInfo()); part.getSd().setLocation(table.getSd().getLocation() + location); - client.add_partition(part); + client.add_partition(part, null); } /** @@ -2651,7 +2651,7 @@ private Table createTableForTestFilter(String dbName, String tableName, String o .setTableParams(tableParams) .setOwner(owner) .setLastAccessTime(lastAccessTime) - .create(client, conf); + .create(client, conf, null); if (isThriftClient) { // the createTable() above does not update the location in the 'tbl' @@ -2683,7 +2683,7 @@ public void testConcurrentMetastores() throws Exception { .setTableName(tblName) .addCol("c1", ColumnType.STRING_TYPE_NAME) .addCol("c2", ColumnType.INT_TYPE_NAME) - .create(client, conf); + .create(client, conf, null); // get the table from the client, verify the name is correct Table tbl2 = client.getTable(dbName, tblName); @@ -2895,7 +2895,7 @@ private void createTable(String dbName, String tableName) throws TException { .setTableName(tableName) .addCol("foo", "string") .addCol("bar", "string") - .create(client, conf); + .create(client, conf, null); } private void createMaterializedView(String dbName, String tableName, Set tablesUsed) @@ -2907,7 +2907,7 @@ private void createMaterializedView(String dbName, String tableName, Set .addMaterializedViewReferencedTables(tablesUsed) .addCol("foo", "string") .addCol("bar", "string") - .create(client, conf); + .create(client, conf, null); } private List createPartitions(String dbName, Table tbl, @@ -2927,7 +2927,7 @@ 
private void createMaterializedView(String dbName, String tableName, Set exceptionThrown = true; } assertTrue("getPartition() should have thrown NoSuchObjectException", exceptionThrown); - Partition retp = client.add_partition(part); + Partition retp = client.add_partition(part, null); assertNotNull("Unable to create partition " + part, retp); partitions.add(retp); } @@ -2954,7 +2954,7 @@ private void createMaterializedView(String dbName, String tableName, Set .addCol("income", ColumnType.INT_TYPE_NAME) .addPartCol("ds", ColumnType.STRING_TYPE_NAME) .addPartCol("hr", ColumnType.STRING_TYPE_NAME) - .create(client, conf); + .create(client, conf, null); if (isThriftClient) { // the createTable() above does not update the location in the 'tbl' @@ -3218,7 +3218,7 @@ public void testValidateTableCols() throws Throwable { .setTableName(tblName) .addCol("name", ColumnType.STRING_TYPE_NAME) .addCol("income", ColumnType.INT_TYPE_NAME) - .create(client, conf); + .create(client, conf, null); if (isThriftClient) { tbl = client.getTable(dbName, tblName); diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java index ebbd1c7..428d53c 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java @@ -109,7 +109,7 @@ private static void createTable(HiveMetaStoreClient hmsc, boolean enablePartitio tableParameters.put("hive.hcatalog.partition.spec.grouping.enabled", enablePartitionGrouping? "true":"false"); Table table = new Table(tableName, dbName, "", 0, 0, 0, storageDescriptor, partColumns, tableParameters, "", "", ""); - hmsc.createTable(table); + hmsc.createTable(table, null); Assert.assertTrue("Table " + dbName + "." + tableName + " does not exist", hmsc.tableExists(dbName, tableName)); @@ -144,7 +144,7 @@ private static void populatePartitions(HiveMetaStoreClient hmsc, Table table, Li // Add partitions located in the table-directory (i.e. default). List values = Arrays.asList(datePrefix + i, blurb); sd.setLocation(getPartitionPath(table, values)); - hmsc.add_partition(new Partition(values, dbName, tableName, 0, 0, sd, null)); + hmsc.add_partition(new Partition(values, dbName, tableName, 0, 0, sd, null), null); } } } @@ -233,7 +233,7 @@ public void testAddPartitions() { StorageDescriptor targetTableSd = new StorageDescriptor(targetTable.getSd()); targetTableSd.setLocation( targetTableSd.getLocation().replace( tableName, targetTableName)); - hmsc.createTable(targetTable); + hmsc.createTable(targetTable, null); // Get partition-list from source. PartitionSpecProxy partitionsForAddition @@ -242,7 +242,7 @@ public void testAddPartitions() { partitionsForAddition.setRootLocation(targetTableSd.getLocation()); Assert.assertEquals("Unexpected number of partitions added. ", - partitionsForAddition.size(), hmsc.add_partitions_pspec(partitionsForAddition)); + partitionsForAddition.size(), hmsc.add_partitions_pspec(partitionsForAddition, null)); // Check that the added partitions are as expected. 
PartitionSpecProxy clonedPartitions = hmsc.listPartitionSpecs(dbName, targetTableName, -1); @@ -289,7 +289,7 @@ public void testFetchingPartitionsWithDifferentSchemas() { List fields = table.getSd().getCols(); fields.add(new FieldSchema("goo", "string", "Entirely new column. Doesn't apply to older partitions.")); table.getSd().setCols(fields); - hmsc.alter_table(dbName, tableName, table); + hmsc.alter_table(dbName, tableName, table, null); // Check that the change stuck. table = hmsc.getTable(dbName,tableName); Assert.assertEquals("Unexpected number of table columns.", diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java index 4cdc035..5b3df7f 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java @@ -73,7 +73,7 @@ public void testTxns() throws Exception { Assert.assertEquals(2L, (long) tids.get(1)); Assert.assertEquals(3L, (long) tids.get(2)); client.rollbackTxn(1); - client.commitTxn(2); + client.commitTxn(2, null); ValidTxnList validTxns = client.getValidTxns(); Assert.assertFalse(validTxns.isTxnValid(1)); Assert.assertTrue(validTxns.isTxnValid(2)); @@ -88,7 +88,7 @@ public void testOpenTxnNotExcluded() throws Exception { Assert.assertEquals(2L, (long) tids.get(1)); Assert.assertEquals(3L, (long) tids.get(2)); client.rollbackTxn(1); - client.commitTxn(2); + client.commitTxn(2, null); ValidTxnList validTxns = client.getValidTxns(3); Assert.assertFalse(validTxns.isTxnValid(1)); Assert.assertTrue(validTxns.isTxnValid(2)); @@ -111,7 +111,7 @@ public void testTxNWithKeyValue() throws Exception { try { client.createDatabase(db); - client.createTable(tbl); + client.createTable(tbl, null); tbl = client.getTable(dbName, tblName); stm.executeUpdate( @@ -169,7 +169,7 @@ public void testTxNWithKeyWrongPrefix() throws Exception { Table tbl = new TableBuilder().setDbName(dbName).setTableName(tblName) .addCol("id", "int").addCol("name", "string") .setType(TableType.MANAGED_TABLE.name()).build(conf); - client.createTable(tbl); + client.createTable(tbl, null); tbl = client.getTable(dbName, tblName); client.commitTxnWithKeyValue(1, tbl.getId(), "mykey", @@ -259,7 +259,7 @@ public void testLocksWithTxn() throws Exception { client.heartbeat(txnid, 1); - client.commitTxn(txnid); + client.commitTxn(txnid, null); } @Test @@ -302,7 +302,7 @@ public void stringifyValidTxns() throws Exception { @Test public void testOpenTxnWithType() throws Exception { long txnId = client.openTxn("me", TxnType.DEFAULT); - client.commitTxn(txnId); + client.commitTxn(txnId, null); ValidTxnList validTxns = client.getValidTxns(); Assert.assertTrue(validTxns.isTxnValid(txnId)); } diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java index 38b3f6e..a0aa434 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java @@ -118,7 +118,7 @@ 
public void testEnvironmentContext() throws Exception { CreateDatabaseEvent dbEvent = (CreateDatabaseEvent)(notifyList.get(listSize - 1)); assert dbEvent.getStatus(); - msc.createTable(table, envContext); + msc.createTable(table, envContext, null); listSize++; assertEquals(notifyList.size(), listSize); CreateTableEvent tblEvent = (CreateTableEvent)(notifyList.get(listSize - 1)); @@ -128,7 +128,7 @@ public void testEnvironmentContext() throws Exception { table = msc.getTable(dbName, tblName); partition.getSd().setLocation(table.getSd().getLocation() + "/part1"); - msc.add_partition(partition, envContext); + msc.add_partition(partition, envContext, null); listSize++; assertEquals(notifyList.size(), listSize); AddPartitionEvent partEvent = (AddPartitionEvent)(notifyList.get(listSize-1)); @@ -137,7 +137,7 @@ public void testEnvironmentContext() throws Exception { List partVals = new ArrayList<>(); partVals.add("2012"); - msc.appendPartition(dbName, tblName, partVals, envContext); + msc.appendPartition(dbName, tblName, partVals, envContext, null); listSize++; assertEquals(notifyList.size(), listSize); AddPartitionEvent appendPartEvent = (AddPartitionEvent)(notifyList.get(listSize-1)); @@ -145,7 +145,7 @@ public void testEnvironmentContext() throws Exception { assertEquals(envContext, appendPartEvent.getEnvironmentContext()); table.setTableName(renamed); - msc.alter_table_with_environmentContext(dbName, tblName, table, envContext); + msc.alter_table_with_environmentContext(dbName, tblName, table, envContext, null); listSize++; assertEquals(notifyList.size(), listSize); AlterTableEvent alterTableEvent = (AlterTableEvent) notifyList.get(listSize-1); @@ -153,7 +153,7 @@ public void testEnvironmentContext() throws Exception { assertEquals(envContext, alterTableEvent.getEnvironmentContext()); table.setTableName(tblName); - msc.alter_table_with_environmentContext(dbName, renamed, table, envContext); + msc.alter_table_with_environmentContext(dbName, renamed, table, envContext, null); listSize++; assertEquals(notifyList.size(), listSize); diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHmsServerAuthorization.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHmsServerAuthorization.java index 19fd634..13a6b32 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHmsServerAuthorization.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHmsServerAuthorization.java @@ -152,13 +152,13 @@ protected void creatEnv(Configuration conf) throws Exception { .setTableName(TAB1) .addCol("id", "int") .addCol("name", "string") - .create(client, conf); + .create(client, conf, null); Table tab2 = new TableBuilder() .setDbName(dbName1) .setTableName(TAB2) .addCol("id", "int") .addPartCol("name", "string") - .create(client, conf); + .create(client, conf, null); new PartitionBuilder() .inTable(tab2) .addValue("value1") diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartition.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartition.java index 00fae25..3ebfd57 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartition.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartition.java @@ -72,13 +72,13 @@ public void 
testMarkingPartitionSet() throws TException, InterruptedException { .setTableName(tableName) .addCol("a", "string") .addPartCol("b", "string") - .create(msc, conf); + .create(msc, conf, null); Partition part = new PartitionBuilder() .inTable(table) .addValue("2011") .build(conf); - msc.add_partition(part); + msc.add_partition(part, null); Map kvs = new HashMap<>(); kvs.put("b", "'2011'"); msc.markPartitionForEvent(dbName, tableName, kvs, PartitionEventType.LOAD_DONE); diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java index b919eef..d8cee63 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java @@ -96,7 +96,7 @@ public void testEndFunctionListener() throws Exception { .setTableName(tblName) .addCol("a", "string") .addPartCol("b", "string") - .create(msc, conf); + .create(msc, conf, null); try { msc.getTable(dbName, unknownTable); } catch (Exception e1) { diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java index fe64a91..992f303 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java @@ -220,7 +220,7 @@ public void testListener() throws Exception { .setTableName(tblName) .addCol("a", "string") .addPartCol("b", "string") - .create(msc, conf); + .create(msc, conf, null); PreCreateTableEvent preTblEvent = (PreCreateTableEvent)(preNotifyList.get(preNotifyList.size() - 1)); listSize++; Table tbl = msc.getTable(dbName, tblName); @@ -259,7 +259,7 @@ public void testListener() throws Exception { currentTime, table.getSd(), table.getParameters()); Partition partition3 = new Partition(Arrays.asList("20110103"), dbName, "tmptbl", currentTime, currentTime, table.getSd(), table.getParameters()); - hmsClient.add_partitions(Arrays.asList(partition1, partition2, partition3)); + hmsClient.add_partitions(Arrays.asList(partition1, partition2, partition3), null); ++listSize; AddPartitionEvent multiplePartitionEvent = (AddPartitionEvent)(notifyList.get(listSize-1)); assertEquals("Unexpected table value.", table, multiplePartitionEvent.getTable()); @@ -295,7 +295,7 @@ public void testListener() throws Exception { part_vals.add("c=2012"); int preEventListSize; preEventListSize = preNotifyList.size() + 1; - Partition newPart = msc.appendPartition(dbName, tblName, part_vals); + Partition newPart = msc.appendPartition(dbName, tblName, part_vals, null); listSize++; assertEquals(notifyList.size(), listSize); @@ -312,7 +312,7 @@ public void testListener() throws Exception { Table renamedTable = new Table(table); renamedTable.setTableName(renamed); - msc.alter_table(dbName, tblName, renamedTable); + msc.alter_table(dbName, tblName, renamedTable, null); listSize++; assertEquals(notifyList.size(), listSize); PreAlterTableEvent preAlterTableE = (PreAlterTableEvent) preNotifyList.get(preNotifyList.size() - 1); @@ -328,13 
+328,13 @@ public void testListener() throws Exception { //change the table name back table = new Table(renamedTable); table.setTableName(tblName); - msc.alter_table(dbName, renamed, table); + msc.alter_table(dbName, renamed, table, null); listSize++; assertEquals(notifyList.size(), listSize); table = msc.getTable(dbName, tblName); table.getSd().addToCols(new FieldSchema("c", "int", "")); - msc.alter_table(dbName, tblName, table); + msc.alter_table(dbName, tblName, table, null); listSize++; assertEquals(notifyList.size(), listSize); preAlterTableE = (PreAlterTableEvent) preNotifyList.get(preNotifyList.size() - 1); diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java index 546422d..c9e1edd 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java @@ -91,7 +91,7 @@ public void testEventStatus() throws Exception { .setTableName(tableName) .addCol("id", "int") .addPartCol("ds", "string") - .create(msc, conf); + .create(msc, conf, null); listSize += 1; notifyList = DummyListener.notifyList; assertEquals(notifyList.size(), listSize); diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java index 0e814bc..07a6044 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java @@ -245,7 +245,7 @@ public void testTableOps() throws MetaException, InvalidObjectException, NoSuchO params.put("EXTERNAL", "false"); Table tbl1 = new Table(TABLE1, DB1, "owner", 1, 2, 3, sd1, null, params, null, null, "MANAGED_TABLE"); - objectStore.createTable(tbl1); + objectStore.createTable(tbl1, null); List tables = objectStore.getAllTables(DEFAULT_CATALOG_NAME, DB1); Assert.assertEquals(1, tables.size()); @@ -272,7 +272,7 @@ public void testTableOps() throws MetaException, InvalidObjectException, NoSuchO Assert.assertEquals("Owner of table was not altered", newTbl1.getOwner(), alteredTable.getOwner()); Assert.assertEquals("Owner type of table was not altered", newTbl1.getOwnerType(), alteredTable.getOwnerType()); - objectStore.createTable(tbl1); + objectStore.createTable(tbl1, null); tables = objectStore.getAllTables(DEFAULT_CATALOG_NAME, DB1); Assert.assertEquals(2, tables.size()); @@ -343,17 +343,17 @@ public void testPartitionOps() throws MetaException, InvalidObjectException, Table tbl1 = new Table(TABLE1, DB1, "owner", 1, 2, 3, sd, Arrays.asList(partitionKey1, partitionKey2), tableParams, null, null, "MANAGED_TABLE"); - objectStore.createTable(tbl1); + objectStore.createTable(tbl1, null); HashMap partitionParams = new HashMap<>(); partitionParams.put("PARTITION_LEVEL_PRIVILEGE", "true"); List value1 = Arrays.asList("US", "CA"); Partition part1 = new Partition(value1, DB1, TABLE1, 111, 111, sd, partitionParams); part1.setCatName(DEFAULT_CATALOG_NAME); - objectStore.addPartition(part1); + objectStore.addPartition(part1, null); List value2 = 
Arrays.asList("US", "MA"); Partition part2 = new Partition(value2, DB1, TABLE1, 222, 222, sd, partitionParams); part2.setCatName(DEFAULT_CATALOG_NAME); - objectStore.addPartition(part2); + objectStore.addPartition(part2, null); Deadline.startTimer("getPartition"); List partitions = objectStore.getPartitions(DEFAULT_CATALOG_NAME, DB1, TABLE1, 10); @@ -401,7 +401,7 @@ public void testConcurrentDropPartitions() throws MetaException, InvalidObjectEx Table tbl1 = new Table(TABLE1, DB1, "owner", 1, 2, 3, sd, Arrays.asList(partitionKey1, partitionKey2), tableParams, null, null, "MANAGED_TABLE"); - objectStore.createTable(tbl1); + objectStore.createTable(tbl1, null); HashMap partitionParams = new HashMap<>(); partitionParams.put("PARTITION_LEVEL_PRIVILEGE", "true"); @@ -414,7 +414,7 @@ public void testConcurrentDropPartitions() throws MetaException, InvalidObjectEx for (List n : partNames) { Partition p = new Partition(n, DB1, TABLE1, 111, 111, sd, partitionParams); p.setCatName(DEFAULT_CATALOG_NAME); - objectStore.addPartition(p); + objectStore.addPartition(p, null); } int numThreads = 2; @@ -581,7 +581,7 @@ private void createPartitionedTable(boolean withPrivileges, boolean withStatisti .addCol("test_skewed_col", "int", "test skewed col comment") .addCol("test_sort_col", "int", "test sort col comment") .build(conf); - objectStore.createTable(tbl1); + objectStore.createTable(tbl1, null); PrivilegeBag privilegeBag = new PrivilegeBag(); // Create partitions for the partitioned table @@ -595,7 +595,7 @@ private void createPartitionedTable(boolean withPrivileges, boolean withStatisti .addSkewedColName("test_skewed_col") .addSortCol("test_sort_col", 1) .build(conf); - objectStore.addPartition(part); + objectStore.addPartition(part, null); if (withPrivileges) { HiveObjectRef partitionReference = new HiveObjectRefBuilder().buildPartitionReference(part); diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStoreStatementVerify.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStoreStatementVerify.java index 0d9b1bc..f9326b7 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStoreStatementVerify.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStoreStatementVerify.java @@ -130,7 +130,7 @@ public void testGetTableMetaFetchGroup() throws MetaException, InvalidObjectExce db.setCatalogName("hive"); objectStore.createDatabase(db); - objectStore.createTable(makeTable(DB1, TBL1)); + objectStore.createTable(makeTable(DB1, TBL1), null); List tableMeta = objectStore.getTableMeta("hive", "*", "*", Collections.emptyList()); Assert.assertEquals("Number of items for tableMeta is incorrect", 1, tableMeta.size()); diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java index 27c5bba..fb18750 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java @@ -145,7 +145,7 @@ public void testPartitionOps() throws Exception { partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, 
Collections.emptyMap(), null, null, null); - store.createTable(table); + store.createTable(table, null); Deadline.startTimer("getPartition"); for (int i = 0; i < 10; i++) { @@ -156,7 +156,7 @@ public void testPartitionOps() throws Exception { Partition part = new Partition(partVal, dbName, tableName, (int) now, (int) now, psd, Collections.emptyMap()); part.setCatName(DEFAULT_CATALOG_NAME); - store.addPartition(part); + store.addPartition(part, null); ColumnStatistics cs = new ColumnStatistics(); ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, dbName, tableName); desc.setLastAnalyzed(now); diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestPartitionManagement.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestPartitionManagement.java index 059c166..ab2cf3c 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestPartitionManagement.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestPartitionManagement.java @@ -167,7 +167,7 @@ public void tearDown() throws Exception { tb.addPartCol(partKeys.get(i), partKeyTypes.get(i)); } } - Table table = tb.create(client, conf); + Table table = tb.create(client, conf, null); if (partKeys != null) { for (List partVal : partVals) { @@ -224,7 +224,7 @@ public void testPartitionDiscoveryDisabledByDefault() throws TException, IOExcep // table property is set to false, so no change expected table.getParameters().put(PartitionManagementTask.DISCOVER_PARTITIONS_TBLPROPERTY, "false"); - client.alter_table(dbName, tableName, table); + client.alter_table(dbName, tableName, table, null); runPartitionManagementTask(conf); partitions = client.listPartitions(dbName, tableName, (short) -1); assertEquals(3, partitions.size()); @@ -259,7 +259,7 @@ public void testPartitionDiscoveryEnabledBothTableTypes() throws TException, IOE // table property is set to true, we expect 5 partitions table.getParameters().put(PartitionManagementTask.DISCOVER_PARTITIONS_TBLPROPERTY, "true"); - client.alter_table(dbName, tableName, table); + client.alter_table(dbName, tableName, table, null); runPartitionManagementTask(conf); partitions = client.listPartitions(dbName, tableName, (short) -1); assertEquals(5, partitions.size()); @@ -267,7 +267,7 @@ public void testPartitionDiscoveryEnabledBothTableTypes() throws TException, IOE // change table type to external, delete a partition directory and make sure partition discovery works table.getParameters().put("EXTERNAL", "true"); table.setTableType(TableType.EXTERNAL_TABLE.name()); - client.alter_table(dbName, tableName, table); + client.alter_table(dbName, tableName, table, null); boolean deleted = fs.delete(newPart1.getParent(), true); assertTrue(deleted); assertEquals(4, fs.listStatus(tablePath).length); @@ -325,7 +325,7 @@ public void testPartitionDiscoveryNonDefaultCatalog() throws TException, IOExcep fs.mkdirs(newPart2); assertEquals(5, fs.listStatus(tablePath).length); table.getParameters().put(PartitionManagementTask.DISCOVER_PARTITIONS_TBLPROPERTY, "true"); - client.alter_table(catName, dbName, tableName, table); + client.alter_table(catName, dbName, tableName, table, null); // default catalog in conf is 'hive' but we are using 'cat3' as catName for this test, so msck should not fix // anything for this one runPartitionManagementTask(conf); @@ -364,7 +364,7 @@ public void testPartitionDiscoveryDBPattern() throws TException, IOException { 
fs.mkdirs(newPart2); assertEquals(5, fs.listStatus(tablePath).length); table.getParameters().put(PartitionManagementTask.DISCOVER_PARTITIONS_TBLPROPERTY, "true"); - client.alter_table(dbName, tableName, table); + client.alter_table(dbName, tableName, table, null); // no match for this db pattern, so we will see only 3 partitions conf.set(MetastoreConf.ConfVars.PARTITION_MANAGEMENT_DATABASE_PATTERN.getVarname(), "*dbfoo*"); runPartitionManagementTask(conf); @@ -403,7 +403,7 @@ public void testPartitionDiscoveryTablePattern() throws TException, IOException fs.mkdirs(newPart2); assertEquals(5, fs.listStatus(tablePath).length); table.getParameters().put(PartitionManagementTask.DISCOVER_PARTITIONS_TBLPROPERTY, "true"); - client.alter_table(dbName, tableName, table); + client.alter_table(dbName, tableName, table, null); // no match for this table pattern, so we will see only 3 partitions conf.set(MetastoreConf.ConfVars.PARTITION_MANAGEMENT_TABLE_PATTERN.getVarname(), "*tblfoo*"); runPartitionManagementTask(conf); @@ -446,7 +446,7 @@ public void testPartitionDiscoveryTransactionalTable() table.getParameters().put(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, "true"); table.getParameters().put(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES, TransactionalValidationListener.INSERTONLY_TRANSACTIONAL_PROPERTY); - client.alter_table(dbName, tableName, table); + client.alter_table(dbName, tableName, table, null); runPartitionManagementTask(conf); partitions = client.listPartitions(dbName, tableName, (short) -1); @@ -515,7 +515,7 @@ public void testPartitionRetention() throws TException, IOException, Interrupted assertEquals(5, fs.listStatus(tablePath).length); table.getParameters().put(PartitionManagementTask.DISCOVER_PARTITIONS_TBLPROPERTY, "true"); table.getParameters().put(PartitionManagementTask.PARTITION_RETENTION_PERIOD_TBLPROPERTY, "20000ms"); - client.alter_table(dbName, tableName, table); + client.alter_table(dbName, tableName, table, null); runPartitionManagementTask(conf); partitions = client.listPartitions(dbName, tableName, (short) -1); @@ -555,7 +555,7 @@ public void testPartitionDiscoverySkipInvalidPath() throws TException, IOExcepti table.getParameters().put(PartitionManagementTask.DISCOVER_PARTITIONS_TBLPROPERTY, "true"); // empty retention period basically means disabled table.getParameters().put(PartitionManagementTask.PARTITION_RETENTION_PERIOD_TBLPROPERTY, ""); - client.alter_table(dbName, tableName, table); + client.alter_table(dbName, tableName, table, null); // there is one partition with invalid path which will get skipped runPartitionManagementTask(conf); diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestRetryingHMSHandler.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestRetryingHMSHandler.java index b9fa89e..45da635 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestRetryingHMSHandler.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestRetryingHMSHandler.java @@ -74,7 +74,7 @@ public void testRetryingHMSHandler() throws Exception { .setDbName(dbName) .setTableName(tblName) .addCol("c1", ColumnType.STRING_TYPE_NAME) - .create(msc, conf); + .create(msc, conf, null); Assert.assertEquals(4, AlternateFailurePreListener.getCallCount()); } diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestStats.java 
b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestStats.java index 03378ba..27b81a5 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestStats.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestStats.java @@ -154,7 +154,7 @@ public void tearDown() throws TException { "Must provide partition values for partitioned table"; tb.addPartCol(partKey, ColumnType.STRING_TYPE_NAME); } - Table table = tb.create(client, conf); + Table table = tb.create(client, conf, null); if (partKey != null) { for (String partVal : partVals) { diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java index 8caa929..e5912df 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java @@ -112,21 +112,21 @@ public void setUp() throws Exception { objectStore.createDatabase(db2); // For each database object, create one partitioned and one unpartitioned table db1Utbl1 = createUnpartitionedTableObject(db1); - objectStore.createTable(db1Utbl1); + objectStore.createTable(db1Utbl1, null); db1Ptbl1 = createPartitionedTableObject(db1); - objectStore.createTable(db1Ptbl1); + objectStore.createTable(db1Ptbl1, null); db2Utbl1 = createUnpartitionedTableObject(db2); - objectStore.createTable(db2Utbl1); + objectStore.createTable(db2Utbl1, null); db2Ptbl1 = createPartitionedTableObject(db2); - objectStore.createTable(db2Ptbl1); + objectStore.createTable(db2Ptbl1, null); // Create partitions for cs_db1's partitioned table db1Ptbl1Ptns = createPartitionObjects(db1Ptbl1).getPartitions(); db1Ptbl1PtnNames = createPartitionObjects(db1Ptbl1).getPartitionNames(); - objectStore.addPartitions(db1Ptbl1.getCatName(), db1Ptbl1.getDbName(), db1Ptbl1.getTableName(), db1Ptbl1Ptns); + objectStore.addPartitions(db1Ptbl1.getCatName(), db1Ptbl1.getDbName(), db1Ptbl1.getTableName(), db1Ptbl1Ptns, null); // Create partitions for cs_db2's partitioned table db2Ptbl1Ptns = createPartitionObjects(db2Ptbl1).getPartitions(); db2Ptbl1PtnNames = createPartitionObjects(db2Ptbl1).getPartitionNames(); - objectStore.addPartitions(db2Ptbl1.getCatName(), db2Ptbl1.getDbName(), db2Ptbl1.getTableName(), db2Ptbl1Ptns); + objectStore.addPartitions(db2Ptbl1.getCatName(), db2Ptbl1.getDbName(), db2Ptbl1.getTableName(), db2Ptbl1Ptns, null); objectStore.shutdown(); } @@ -167,7 +167,7 @@ public void testPrewarm() throws Exception { ObjectStore objectStore = (ObjectStore) cachedStore.getRawStore(); // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); - CachedStore.prewarm(objectStore); + CachedStore.prewarm(objectStore, conf); List allDatabases = cachedStore.getAllDatabases(DEFAULT_CATALOG_NAME); Assert.assertEquals(2, allDatabases.size()); Assert.assertTrue(allDatabases.contains(db1.getName())); @@ -213,7 +213,7 @@ public void testPrewarmBlackList() throws Exception { ObjectStore objectStore = (ObjectStore) cachedStore.getRawStore(); // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); - CachedStore.prewarm(objectStore); + CachedStore.prewarm(objectStore, conf); SharedCache sharedCache = CachedStore.getSharedCache(); // cachedStore.getAllTables falls back to 
objectStore when whitelist/blacklist is set List db1Tables = sharedCache.listCachedTableNames(DEFAULT_CATALOG_NAME, db1.getName()); @@ -237,7 +237,7 @@ public void testPrewarmWhiteList() throws Exception { ObjectStore objectStore = (ObjectStore) cachedStore.getRawStore(); // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); - CachedStore.prewarm(objectStore); + CachedStore.prewarm(objectStore, conf); SharedCache sharedCache = CachedStore.getSharedCache(); // cachedStore.getAllTables falls back to objectStore when whitelist/blacklist is set List db1Tables = sharedCache.listCachedTableNames(DEFAULT_CATALOG_NAME, db1.getName()); @@ -261,7 +261,7 @@ public void testPrewarmMemoryEstimation() throws Exception { ObjectStore objectStore = (ObjectStore) cachedStore.getRawStore(); // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); - CachedStore.prewarm(objectStore); + CachedStore.prewarm(objectStore, conf); SharedCache sharedCache = CachedStore.getSharedCache(); List db1Tables = sharedCache.listCachedTableNames(DEFAULT_CATALOG_NAME, db1.getName()); Assert.assertEquals(2, db1Tables.size()); @@ -282,7 +282,7 @@ public void testCacheUpdate() throws Exception { ObjectStore objectStore = (ObjectStore) cachedStore.getRawStore(); // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); - CachedStore.prewarm(objectStore); + CachedStore.prewarm(objectStore, conf); // Drop basedb1's unpartitioned table objectStore.dropTable(DEFAULT_CATALOG_NAME, db1Utbl1.getDbName(), db1Utbl1.getTableName()); Deadline.startTimer(""); @@ -316,16 +316,16 @@ public void testCacheUpdate() throws Exception { // Create a new unpartitioned table under basedb1 Table db1Utbl2 = createUnpartitionedTableObject(db1); db1Utbl2.setTableName(db1.getName() + "_unptntbl2"); - objectStore.createTable(db1Utbl2); + objectStore.createTable(db1Utbl2, null); // Add a new partition to db1PartitionedTable // Create partitions for cs_db1's partitioned table db1Ptbl1Ptns = createPartitionObjects(db1Ptbl1).getPartitions(); Deadline.startTimer(""); - objectStore.addPartition(db1Ptbl1Ptns.get(0)); - objectStore.addPartition(db1Ptbl1Ptns.get(1)); - objectStore.addPartition(db1Ptbl1Ptns.get(2)); - objectStore.addPartition(db1Ptbl1Ptns.get(3)); - objectStore.addPartition(db1Ptbl1Ptns.get(4)); + objectStore.addPartition(db1Ptbl1Ptns.get(0), null); + objectStore.addPartition(db1Ptbl1Ptns.get(1), null); + objectStore.addPartition(db1Ptbl1Ptns.get(2), null); + objectStore.addPartition(db1Ptbl1Ptns.get(3), null); + objectStore.addPartition(db1Ptbl1Ptns.get(4), null); updateCache(cachedStore); allDatabases = cachedStore.getAllDatabases(DEFAULT_CATALOG_NAME); Assert.assertEquals(2, allDatabases.size()); @@ -371,7 +371,7 @@ public void testCreateAndGetDatabase() throws Exception { db = objectStore.getDatabase(DEFAULT_CATALOG_NAME, dbName); // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); - CachedStore.prewarm(objectStore); + CachedStore.prewarm(objectStore, conf); // Read database via CachedStore Database dbRead = cachedStore.getDatabase(DEFAULT_CATALOG_NAME, dbName); Assert.assertEquals(db, dbRead); @@ -411,7 +411,7 @@ public void testDropDatabase() throws Exception { db = objectStore.getDatabase(DEFAULT_CATALOG_NAME, dbName); // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); - CachedStore.prewarm(objectStore); + CachedStore.prewarm(objectStore, conf); // Read database via CachedStore Database dbRead = cachedStore.getDatabase(DEFAULT_CATALOG_NAME, dbName); Assert.assertEquals(db, dbRead); @@ 
-452,7 +452,7 @@ public void testAlterDatabase() throws Exception { ObjectStore objectStore = (ObjectStore) cachedStore.getRawStore(); // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); - CachedStore.prewarm(objectStore); + CachedStore.prewarm(objectStore, conf); // Read database via CachedStore List allDatabases = cachedStore.getAllDatabases(DEFAULT_CATALOG_NAME); Assert.assertEquals(2, allDatabases.size()); @@ -492,7 +492,7 @@ public void testCreateAndGetTable() throws Exception { ObjectStore objectStore = (ObjectStore) cachedStore.getRawStore(); // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); - CachedStore.prewarm(objectStore); + CachedStore.prewarm(objectStore, conf); // Read database via CachedStore List allDatabases = cachedStore.getAllDatabases(DEFAULT_CATALOG_NAME); Assert.assertEquals(2, allDatabases.size()); @@ -504,7 +504,7 @@ public void testCreateAndGetTable() throws Exception { // Create a new unpartitioned table under db1 Table db1Utbl2 = createUnpartitionedTableObject(db1); db1Utbl2.setTableName(db1.getName() + "_unptntbl2"); - cachedStore.createTable(db1Utbl2); + cachedStore.createTable(db1Utbl2, null); db1Tables = cachedStore.getAllTables(DEFAULT_CATALOG_NAME, db1.getName()); Assert.assertEquals(3, db1Tables.size()); db1Utbl2 = cachedStore.getTable(DEFAULT_CATALOG_NAME, db1Utbl2.getDbName(), db1Utbl2.getTableName()); @@ -513,7 +513,7 @@ public void testCreateAndGetTable() throws Exception { // Create a new unpartitioned table under basedb2 via ObjectStore Table db2Utbl2 = createUnpartitionedTableObject(db2); db2Utbl2.setTableName(db2.getName() + "_unptntbl2"); - objectStore.createTable(db2Utbl2); + objectStore.createTable(db2Utbl2, null); db2Utbl2 = objectStore.getTable(DEFAULT_CATALOG_NAME, db2Utbl2.getDbName(), db2Utbl2.getTableName()); updateCache(cachedStore); db2Tables = cachedStore.getAllTables(DEFAULT_CATALOG_NAME, db2.getName()); @@ -541,7 +541,7 @@ public void testGetAllTablesPrewarmMemoryLimit() throws Exception { ObjectStore objectStore = (ObjectStore) cachedStore.getRawStore(); // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); - CachedStore.prewarm(objectStore); + CachedStore.prewarm(objectStore, conf); SharedCache sharedCache = CachedStore.getSharedCache(); List db1Tables = sharedCache.listCachedTableNames(DEFAULT_CATALOG_NAME, db1.getName()); Assert.assertEquals(2, db1Tables.size()); @@ -567,7 +567,7 @@ public void testGetAllTablesBlacklist() throws Exception { ObjectStore objectStore = (ObjectStore) cachedStore.getRawStore(); // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); - CachedStore.prewarm(objectStore); + CachedStore.prewarm(objectStore, conf); SharedCache sharedCache = CachedStore.getSharedCache(); // cachedStore.getAllTables falls back to objectStore when whitelist/blacklist is set List db1Tables = sharedCache.listCachedTableNames(DEFAULT_CATALOG_NAME, db1.getName()); @@ -593,7 +593,7 @@ public void testGetAllTablesWhitelist() throws Exception { ObjectStore objectStore = (ObjectStore) cachedStore.getRawStore(); // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); - CachedStore.prewarm(objectStore); + CachedStore.prewarm(objectStore, conf); SharedCache sharedCache = CachedStore.getSharedCache(); // cachedStore.getAllTables falls back to objectStore when whitelist/blacklist is set List db1Tables = sharedCache.listCachedTableNames(DEFAULT_CATALOG_NAME, db1.getName()); @@ -617,7 +617,7 @@ public void testGetTableByPattern() throws Exception { ObjectStore objectStore = 
(ObjectStore) cachedStore.getRawStore(); // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); - CachedStore.prewarm(objectStore); + CachedStore.prewarm(objectStore, conf); List db1Tables = cachedStore.getTables(DEFAULT_CATALOG_NAME, db1.getName(), "cs_db1.*"); Assert.assertEquals(2, db1Tables.size()); db1Tables = cachedStore.getTables(DEFAULT_CATALOG_NAME, db1.getName(), "cs_db1.un*"); @@ -639,7 +639,7 @@ public void testAlterTable() throws Exception { ObjectStore objectStore = (ObjectStore) cachedStore.getRawStore(); // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); - CachedStore.prewarm(objectStore); + CachedStore.prewarm(objectStore, conf); List db1Tables = cachedStore.getAllTables(DEFAULT_CATALOG_NAME, db1.getName()); Assert.assertEquals(2, db1Tables.size()); List db2Tables = cachedStore.getAllTables(DEFAULT_CATALOG_NAME, db2.getName()); @@ -683,7 +683,7 @@ public void testDropTable() throws Exception { ObjectStore objectStore = (ObjectStore) cachedStore.getRawStore(); // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); - CachedStore.prewarm(objectStore); + CachedStore.prewarm(objectStore, conf); List db1Tables = cachedStore.getAllTables(DEFAULT_CATALOG_NAME, db1.getName()); Assert.assertEquals(2, db1Tables.size()); List db2Tables = cachedStore.getAllTables(DEFAULT_CATALOG_NAME, db2.getName()); @@ -807,15 +807,15 @@ public void testSharedStoreTable() { newTbl1.setSd(newSd1); newTbl1.setPartitionKeys(new ArrayList<>()); - sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db1", "tbl1", tbl1); - sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db1", "tbl2", tbl2); - sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db1", "tbl3", tbl3); - sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db2", "tbl1", tbl1); + sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db1", "tbl1", tbl1, null, true); + sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db1", "tbl2", tbl2, null, true); + sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db1", "tbl3", tbl3, null, true); + sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db2", "tbl1", tbl1, null, true); Assert.assertEquals(sharedCache.getCachedTableCount(), 4); Assert.assertEquals(sharedCache.getSdCache().size(), 2); - Table t = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, "db1", "tbl1"); + Table t = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, "db1", "tbl1", null); Assert.assertEquals(t.getSd().getLocation(), "loc1"); sharedCache.removeTableFromCache(DEFAULT_CATALOG_NAME, "db1", "tbl1"); @@ -855,9 +855,9 @@ public void testSharedStorePartition() { cols.add(col2); List ptnCols = new ArrayList(); Table tbl1 = createTestTbl(dbName, tbl1Name, owner, cols, ptnCols); - sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, tbl1); + sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, tbl1, null, true); Table tbl2 = createTestTbl(dbName, tbl2Name, owner, cols, ptnCols); - sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, dbName, tbl2Name, tbl2); + sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, dbName, tbl2Name, tbl2, null, true); Partition part1 = new Partition(); StorageDescriptor sd1 = new StorageDescriptor(); @@ -909,10 +909,10 @@ public void testSharedStorePartition() { newPart1.setSd(newSd1); newPart1.setValues(Arrays.asList("201701")); - sharedCache.addPartitionToCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, part1); - sharedCache.addPartitionToCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, part2); - sharedCache.addPartitionToCache(DEFAULT_CATALOG_NAME, 
dbName, tbl1Name, part3); - sharedCache.addPartitionToCache(DEFAULT_CATALOG_NAME, dbName, tbl2Name, part1); + sharedCache.addPartitionToCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, part1, null); + sharedCache.addPartitionToCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, part2, null); + sharedCache.addPartitionToCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, part3, null); + sharedCache.addPartitionToCache(DEFAULT_CATALOG_NAME, dbName, tbl2Name, part1, null); Partition t = sharedCache.getPartitionFromCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, Arrays.asList("201701")); Assert.assertEquals(t.getSd().getLocation(), "loc1"); @@ -921,7 +921,7 @@ public void testSharedStorePartition() { t = sharedCache.getPartitionFromCache(DEFAULT_CATALOG_NAME, dbName, tbl2Name, Arrays.asList("201701")); Assert.assertNull(t); - sharedCache.alterPartitionInCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, Arrays.asList("201701"), newPart1); + sharedCache.alterPartitionInCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, Arrays.asList("201701"), newPart1, null); t = sharedCache.getPartitionFromCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, Arrays.asList("201701")); Assert.assertEquals(t.getSd().getLocation(), "loc1new"); cachedStore.shutdown(); @@ -954,7 +954,7 @@ public void testAggrStatsRepeatedRead() throws Exception { Table tbl = new Table(tblName, dbName, null, 0, 0, 0, sd, partCols, new HashMap<>(), null, null, TableType.MANAGED_TABLE.toString()); tbl.setCatName(DEFAULT_CATALOG_NAME); - cachedStore.createTable(tbl); + cachedStore.createTable(tbl, null); List partVals1 = new ArrayList<>(); partVals1.add("1"); @@ -963,10 +963,10 @@ public void testAggrStatsRepeatedRead() throws Exception { Partition ptn1 = new Partition(partVals1, dbName, tblName, 0, 0, sd, new HashMap<>()); ptn1.setCatName(DEFAULT_CATALOG_NAME); - cachedStore.addPartition(ptn1); + cachedStore.addPartition(ptn1, null); Partition ptn2 = new Partition(partVals2, dbName, tblName, 0, 0, sd, new HashMap<>()); ptn2.setCatName(DEFAULT_CATALOG_NAME); - cachedStore.addPartition(ptn2); + cachedStore.addPartition(ptn2, null); ColumnStatistics stats = new ColumnStatistics(); ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(true, dbName, tblName); @@ -1037,7 +1037,7 @@ public void testPartitionAggrStats() throws Exception { Table tbl = new Table(tblName, dbName, null, 0, 0, 0, sd, partCols, new HashMap<>(), null, null, TableType.MANAGED_TABLE.toString()); tbl.setCatName(DEFAULT_CATALOG_NAME); - cachedStore.createTable(tbl); + cachedStore.createTable(tbl, null); List partVals1 = new ArrayList<>(); partVals1.add("1"); @@ -1046,10 +1046,10 @@ public void testPartitionAggrStats() throws Exception { Partition ptn1 = new Partition(partVals1, dbName, tblName, 0, 0, sd, new HashMap<>()); ptn1.setCatName(DEFAULT_CATALOG_NAME); - cachedStore.addPartition(ptn1); + cachedStore.addPartition(ptn1, null); Partition ptn2 = new Partition(partVals2, dbName, tblName, 0, 0, sd, new HashMap<>()); ptn2.setCatName(DEFAULT_CATALOG_NAME); - cachedStore.addPartition(ptn2); + cachedStore.addPartition(ptn2, null); ColumnStatistics stats = new ColumnStatistics(); ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(true, dbName, tblName); @@ -1116,7 +1116,7 @@ public void testPartitionAggrStatsBitVector() throws Exception { Table tbl = new Table(tblName, dbName, null, 0, 0, 0, sd, partCols, new HashMap<>(), null, null, TableType.MANAGED_TABLE.toString()); tbl.setCatName(DEFAULT_CATALOG_NAME); - cachedStore.createTable(tbl); + cachedStore.createTable(tbl, null); List partVals1 = 
new ArrayList<>(); partVals1.add("1"); @@ -1125,10 +1125,10 @@ public void testPartitionAggrStatsBitVector() throws Exception { Partition ptn1 = new Partition(partVals1, dbName, tblName, 0, 0, sd, new HashMap<>()); ptn1.setCatName(DEFAULT_CATALOG_NAME); - cachedStore.addPartition(ptn1); + cachedStore.addPartition(ptn1, null); Partition ptn2 = new Partition(partVals2, dbName, tblName, 0, 0, sd, new HashMap<>()); ptn2.setCatName(DEFAULT_CATALOG_NAME); - cachedStore.addPartition(ptn2); + cachedStore.addPartition(ptn2, null); ColumnStatistics stats = new ColumnStatistics(); ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(true, dbName, tblName); @@ -1236,7 +1236,7 @@ public Object call() { Callable c = new Callable() { public Object call() { Table tbl = createTestTbl(dbNames.get(0), tblName, "user1", cols, ptnCols); - sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, tbl); + sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, tbl, null, true); return null; } }; @@ -1244,7 +1244,7 @@ public Object call() { } executor.invokeAll(tasks); for (String tblName : tblNames) { - Table tbl = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName); + Table tbl = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, null); Assert.assertNotNull(tbl); Assert.assertEquals(tblName, tbl.getTableName()); } @@ -1253,14 +1253,14 @@ public Object call() { List ptnVals = new ArrayList(Arrays.asList("aaa", "bbb", "ccc", "ddd", "eee")); tasks.clear(); for (String tblName : tblNames) { - Table tbl = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName); + Table tbl = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, null); for (String ptnVal : ptnVals) { Map partParams = new HashMap(); Callable c = new Callable() { public Object call() { Partition ptn = new Partition(Arrays.asList(ptnVal), dbNames.get(0), tblName, 0, 0, tbl.getSd(), partParams); - sharedCache.addPartitionToCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, ptn); + sharedCache.addPartitionToCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, ptn, null); return null; } }; @@ -1296,14 +1296,14 @@ public Object call() { } } for (String tblName : addPtnTblNames) { - Table tbl = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName); + Table tbl = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, null); for (String ptnVal : newPtnVals) { Map partParams = new HashMap(); Callable c = new Callable() { public Object call() { Partition ptn = new Partition(Arrays.asList(ptnVal), dbNames.get(0), tblName, 0, 0, tbl.getSd(), partParams); - sharedCache.addPartitionToCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, ptn); + sharedCache.addPartitionToCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, ptn, null); return null; } }; diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCatalogCaching.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCatalogCaching.java index 423dce8..583af42 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCatalogCaching.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCatalogCaching.java @@ -87,7 +87,7 @@ public void defaultHiveOnly() throws Exception { CachedStore.stopCacheUpdateService(1); 
cachedStore.resetCatalogCache(); - CachedStore.prewarm(objectStore); + CachedStore.prewarm(objectStore, conf); // Only the hive catalog should be cached List cachedCatalogs = cachedStore.getCatalogs(); @@ -107,7 +107,7 @@ public void cacheAll() throws Exception { // prewarm gets the conf object cachedStore.resetCatalogCache(); - CachedStore.prewarm(objectStore); + CachedStore.prewarm(objectStore, conf); // All the catalogs should be cached List cachedCatalogs = cachedStore.getCatalogs(); @@ -130,7 +130,7 @@ public void cacheSome() throws Exception { // prewarm gets the conf object cachedStore.resetCatalogCache(); - CachedStore.prewarm(objectStore); + CachedStore.prewarm(objectStore, conf); // All the catalogs should be cached List cachedCatalogs = cachedStore.getCatalogs(); diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAddPartitions.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAddPartitions.java index a15f5ea..08a1c08 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAddPartitions.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAddPartitions.java @@ -113,7 +113,7 @@ public void testAddPartition() throws Exception { Table table = createTable(); Partition partition = buildPartition(Lists.newArrayList(DEFAULT_YEAR_VALUE), getYearPartCol(), 1); - Partition resultPart = client.add_partition(partition); + Partition resultPart = client.add_partition(partition, null); Assert.assertNotNull(resultPart); verifyPartition(table, "year=2017", Lists.newArrayList(DEFAULT_YEAR_VALUE), 1); } @@ -125,7 +125,7 @@ public void testAddPartitionTwoValues() throws Exception { Table table = createTable(DB_NAME, TABLE_NAME, getYearAndMonthPartCols(), tableLocation); Partition partition = buildPartition(Lists.newArrayList("2017", "march"), getYearAndMonthPartCols(), 1); - client.add_partition(partition); + client.add_partition(partition, null); verifyPartition(table, "year=2017/month=march", Lists.newArrayList("2017", "march"), 1); } @@ -151,7 +151,7 @@ public void addPartitionOtherCatalog() throws TException { .addCol("id", "int") .addCol("name", "string") .addPartCol("partcol", "string") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); Partition[] parts = new Partition[5]; for (int i = 0; i < parts.length; i++) { @@ -160,9 +160,9 @@ public void addPartitionOtherCatalog() throws TException { .addValue("a" + i) .build(metaStore.getConf()); } - client.add_partition(parts[0]); - Assert.assertEquals(2, client.add_partitions(Arrays.asList(parts[1], parts[2]))); - client.add_partitions(Arrays.asList(parts), true, false); + client.add_partition(parts[0], null); + Assert.assertEquals(2, client.add_partitions(Arrays.asList(parts[1], parts[2]), null)); + client.add_partitions(Arrays.asList(parts), true, false, null); for (int i = 0; i < parts.length; i++) { Partition fetched = client.getPartition(catName, dbName, tableName, @@ -184,7 +184,7 @@ public void noSuchCatalog() throws TException { .addCol("id", "int") .addCol("name", "string") .addPartCol("partcol", "string") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); Partition part = new PartitionBuilder() .inTable(table) @@ -192,7 +192,7 @@ public void noSuchCatalog() throws TException { .build(metaStore.getConf()); // Explicitly mis-set the catalog name 
part.setCatName("nosuch"); - client.add_partition(part); + client.add_partition(part, null); } @Test @@ -209,7 +209,7 @@ public void testAddPartitionWithDefaultAttributes() throws Exception { .addCol("test_value", "string", "test col value") .build(metaStore.getConf()); - client.add_partition(partition); + client.add_partition(partition, null); // Check if the default values are set for all unfilled attributes Partition part = client.getPartition(DB_NAME, TABLE_NAME, "year=2017"); @@ -232,7 +232,7 @@ public void testAddPartitionUpperCase() throws Exception { createTable(DB_NAME, TABLE_NAME, getMonthPartCol(), tableLocation); Partition partition = buildPartition(Lists.newArrayList("APRIL"), getMonthPartCol(), 1); - client.add_partition(partition); + client.add_partition(partition, null); Partition part = client.getPartition(DB_NAME, TABLE_NAME, "month=APRIL"); Assert.assertNotNull(part); @@ -254,7 +254,7 @@ public void testAddPartitionUpperCaseDBAndTableName() throws Exception { // Create partition with table name 'TEST_ADD_PART_TABLE' and db name 'TEST_PARTITION_DB' Partition partition = buildPartition(DB_NAME.toUpperCase(), tableName.toUpperCase(), "2013", tableLocation + "/year=2013"); - client.add_partition(partition); + client.add_partition(partition, null); // Validate the partition attributes // The db and table name should be all lower case: 'test_partition_db' and @@ -274,42 +274,42 @@ public void testAddPartitionUpperCaseDBAndTableName() throws Exception { public void testAddPartitionNonExistingDb() throws Exception { Partition partition = buildPartition("nonexistingdb", TABLE_NAME, DEFAULT_YEAR_VALUE); - client.add_partition(partition); + client.add_partition(partition, null); } @Test(expected = InvalidObjectException.class) public void testAddPartitionNonExistingTable() throws Exception { Partition partition = buildPartition(DB_NAME, "nonexistingtable", DEFAULT_YEAR_VALUE); - client.add_partition(partition); + client.add_partition(partition, null); } @Test(expected = MetaException.class) public void testAddPartitionNullDb() throws Exception { Partition partition = buildPartition(null, TABLE_NAME, DEFAULT_YEAR_VALUE); - client.add_partition(partition); + client.add_partition(partition, null); } @Test(expected = MetaException.class) public void testAddPartitionNullTable() throws Exception { Partition partition = buildPartition(DB_NAME, null, DEFAULT_YEAR_VALUE); - client.add_partition(partition); + client.add_partition(partition, null); } @Test(expected = InvalidObjectException.class) public void testAddPartitionEmptyDb() throws Exception { Partition partition = buildPartition("", TABLE_NAME, DEFAULT_YEAR_VALUE); - client.add_partition(partition); + client.add_partition(partition, null); } @Test(expected = InvalidObjectException.class) public void testAddPartitionEmptyTable() throws Exception { Partition partition = buildPartition(DB_NAME, "", DEFAULT_YEAR_VALUE); - client.add_partition(partition); + client.add_partition(partition, null); } @Test(expected = AlreadyExistsException.class) @@ -318,8 +318,8 @@ public void testAddPartitionAlreadyExists() throws Exception { createTable(); Partition partition1 = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE); Partition partition2 = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE); - client.add_partition(partition1); - client.add_partition(partition2); + client.add_partition(partition1, null); + client.add_partition(partition2, null); } @Test @@ -330,8 +330,8 @@ public void testAddPartitionsWithSameNameCaseSensitive() 
throws Exception { Partition partition1 = buildPartition(Lists.newArrayList("may"), getMonthPartCol(), 1); Partition partition2 = buildPartition(Lists.newArrayList("MAY"), getMonthPartCol(), 2); - client.add_partition(partition1); - client.add_partition(partition2); + client.add_partition(partition1, null); + client.add_partition(partition2, null); Partition part = client.getPartition(DB_NAME, TABLE_NAME, "month=MAY"); Assert.assertEquals(DEFAULT_PARAM_VALUE + "2", @@ -346,7 +346,7 @@ public void testAddPartitionNullSd() throws Exception { createTable(); Partition partition = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE); partition.setSd(null); - client.add_partition(partition); + client.add_partition(partition, null); } @Test @@ -355,7 +355,7 @@ public void testAddPartitionNullColsInSd() throws Exception { createTable(); Partition partition = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE); partition.getSd().setCols(null); - client.add_partition(partition); + client.add_partition(partition, null); // TODO: Not sure that this is the correct behavior. It doesn't make sense to create the // partition without column info. This should be investigated later. @@ -371,7 +371,7 @@ public void testAddPartitionEmptyColsInSd() throws Exception { createTable(); Partition partition = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE); partition.getSd().setCols(new ArrayList<>()); - client.add_partition(partition); + client.add_partition(partition, null); // TODO: Not sure that this is the correct behavior. It doesn't make sense to create the // partition without column info. This should be investigated later. @@ -387,7 +387,7 @@ public void testAddPartitionNullColTypeInSd() throws Exception { createTable(); Partition partition = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE); partition.getSd().getCols().get(0).setType(null); - client.add_partition(partition); + client.add_partition(partition, null); } @Test(expected = MetaException.class) @@ -396,7 +396,7 @@ public void testAddPartitionNullColNameInSd() throws Exception { createTable(); Partition partition = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE); partition.getSd().getCols().get(0).setName(null); - client.add_partition(partition); + client.add_partition(partition, null); } @Test @@ -405,7 +405,7 @@ public void testAddPartitionInvalidColTypeInSd() throws Exception { createTable(); Partition partition = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE); partition.getSd().getCols().get(0).setType("xyz"); - client.add_partition(partition); + client.add_partition(partition, null); // TODO: Not sure that this is the correct behavior. It doesn't make sense to create the // partition with column with invalid type. This should be investigated later. 
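// [Editor's note - illustrative sketch, not part of this patch] The hunks above and
// below convert every single-partition call in TestAddPartitions to the new overload
// that carries a trailing write-id argument, which these tests always pass as null.
// A minimal, hedged sketch of how a caller holding an actual write-id list string
// might invoke the same overload (the parameter type and name below are assumptions
// inferred from this diff, not definitions from the patch):

import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Partition;

class AddPartitionWriteIdSketch {
  // Forwards the (possibly null) validWriteIdList exactly as these tests do; passing
  // null preserves the pre-patch behaviour exercised throughout this file.
  static Partition addPartition(IMetaStoreClient client, Partition partition,
      String validWriteIdList) throws Exception {
    return client.add_partition(partition, validWriteIdList);
  }
}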
@@ -421,7 +421,7 @@ public void testAddPartitionEmptySerdeInfo() throws Exception { createTable(); Partition partition = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE); partition.getSd().setSerdeInfo(null); - client.add_partition(partition); + client.add_partition(partition, null); } @Test @@ -429,7 +429,7 @@ public void testAddPartitionNullLocation() throws Exception { createTable(DB_NAME, TABLE_NAME, metaStore.getWarehouseRoot() + "/addparttest2"); Partition partition = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE, null); - client.add_partition(partition); + client.add_partition(partition, null); Partition part = client.getPartition(DB_NAME, TABLE_NAME, "year=2017"); Assert.assertEquals(metaStore.getWarehouseRoot() + "/addparttest2/year=2017", part.getSd().getLocation()); @@ -441,7 +441,7 @@ public void testAddPartitionEmptyLocation() throws Exception { createTable(DB_NAME, TABLE_NAME, metaStore.getWarehouseRoot() + "/addparttest3"); Partition partition = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE, ""); - client.add_partition(partition); + client.add_partition(partition, null); Partition part = client.getPartition(DB_NAME, TABLE_NAME, "year=2017"); Assert.assertEquals(metaStore.getWarehouseRoot() + "/addparttest3/year=2017", part.getSd().getLocation()); @@ -453,7 +453,7 @@ public void testAddPartitionNullLocationInTableToo() throws Exception { createTable(DB_NAME, TABLE_NAME, null); Partition partition = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE, null); - client.add_partition(partition); + client.add_partition(partition, null); Partition part = client.getPartition(DB_NAME, TABLE_NAME, "year=2017"); Assert.assertEquals( metaStore.getWarehouseRoot() + "/test_partition_db.db/test_partition_table/year=2017", @@ -467,7 +467,7 @@ public void testAddPartitionForView() throws Exception { String tableName = "test_add_partition_view"; createView(tableName); Partition partition = buildPartition(DB_NAME, tableName, DEFAULT_YEAR_VALUE); - client.add_partition(partition); + client.add_partition(partition, null); } @Test @@ -478,7 +478,7 @@ public void testAddPartitionsForViewNullPartLocation() throws Exception { Partition partition = buildPartition(DB_NAME, tableName, DEFAULT_YEAR_VALUE); partition.getSd().setLocation(null); List partitions = Lists.newArrayList(partition); - client.add_partitions(partitions); + client.add_partitions(partitions, null); Partition part = client.getPartition(DB_NAME, tableName, "year=2017"); Assert.assertNull(part.getSd().getLocation()); } @@ -491,7 +491,7 @@ public void testAddPartitionsForViewNullPartSd() throws Exception { Partition partition = buildPartition(DB_NAME, tableName, DEFAULT_YEAR_VALUE); partition.setSd(null); List partitions = Lists.newArrayList(partition); - client.add_partitions(partitions); + client.add_partitions(partitions, null); Partition part = client.getPartition(DB_NAME, tableName, "year=2017"); Assert.assertNull(part.getSd()); } @@ -504,7 +504,7 @@ public void testAddPartitionForExternalTable() throws Exception { String partitionLocation = tableLocation + "/addparttest"; createExternalTable(tableName, tableLocation); Partition partition = buildPartition(DB_NAME, tableName, DEFAULT_YEAR_VALUE, partitionLocation); - client.add_partition(partition); + client.add_partition(partition, null); Partition resultPart = client.getPartition(DB_NAME, tableName, Lists.newArrayList(DEFAULT_YEAR_VALUE)); Assert.assertNotNull(resultPart); @@ -518,7 +518,7 @@ public void testAddPartitionForExternalTableNullLocation() 
throws Exception { String tableName = "part_add_ext_table"; createExternalTable(tableName, null); Partition partition = buildPartition(DB_NAME, tableName, DEFAULT_YEAR_VALUE, null); - client.add_partition(partition); + client.add_partition(partition, null); Partition resultPart = client.getPartition(DB_NAME, tableName, Lists.newArrayList(DEFAULT_YEAR_VALUE)); Assert.assertNotNull(resultPart); @@ -534,7 +534,7 @@ public void testAddPartitionTooManyValues() throws Exception { createTable(); Partition partition = buildPartition(Lists.newArrayList(DEFAULT_YEAR_VALUE, "march"), getYearAndMonthPartCols(), 1); - client.add_partition(partition); + client.add_partition(partition, null); } @Test(expected = MetaException.class) @@ -545,9 +545,9 @@ public void testAddPartitionNoPartColOnTable() throws Exception { .setTableName(TABLE_NAME) .addCol("test_id", "int", "test col id") .addCol("test_value", "string", "test col value") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); Partition partition = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE); - client.add_partition(partition); + client.add_partition(partition, null); } @Test(expected=MetaException.class) @@ -560,7 +560,7 @@ public void testAddPartitionNoColInPartition() throws Exception { .addValue(DEFAULT_YEAR_VALUE) .setLocation(metaStore.getWarehouseRoot() + "/addparttest") .build(metaStore.getConf()); - client.add_partition(partition); + client.add_partition(partition, null); } @Test @@ -574,7 +574,7 @@ public void testAddPartitionDifferentNamesAndTypesInColAndTableCol() throws Exce .addCol("time", "int") .build(metaStore.getConf()); - client.add_partition(partition); + client.add_partition(partition, null); Partition part = client.getPartition(DB_NAME, TABLE_NAME, "year=1000"); Assert.assertNotNull(part); Assert.assertEquals(TABLE_NAME, part.getTableName()); @@ -592,7 +592,7 @@ public void testAddPartitionNoValueInPartition() throws Exception { .addCol(YEAR_COL_NAME, DEFAULT_COL_TYPE) .setLocation(metaStore.getWarehouseRoot() + "/addparttest") .build(metaStore.getConf()); - client.add_partition(partition); + client.add_partition(partition, null); } @Test(expected = MetaException.class) @@ -600,13 +600,13 @@ public void testAddPartitionMorePartColInTable() throws Exception { createTable(DB_NAME, TABLE_NAME, getYearAndMonthPartCols(), null); Partition partition = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE); - client.add_partition(partition); + client.add_partition(partition, null); } @Test(expected = MetaException.class) public void testAddPartitionNullPartition() throws Exception { - client.add_partition(null); + client.add_partition(null, null); } @Test(expected = MetaException.class) @@ -615,7 +615,7 @@ public void testAddPartitionNullValues() throws Exception { createTable(); Partition partition = buildPartition(DB_NAME, TABLE_NAME, null); partition.setValues(null); - client.add_partition(partition); + client.add_partition(partition, null); } @Test @@ -623,7 +623,7 @@ public void testAddPartitionEmptyValue() throws Exception { createTable(); Partition partition = buildPartition(DB_NAME, TABLE_NAME, ""); - client.add_partition(partition); + client.add_partition(partition, null); List partitionNames = client.listPartitionNames(DB_NAME, TABLE_NAME, (short) 10); Assert.assertNotNull(partitionNames); Assert.assertTrue(partitionNames.size() == 1); @@ -636,7 +636,7 @@ public void testAddPartitionSetInvalidLocation() throws Exception { createTable(); Partition partition = 
buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE, "%^#$$%#$testlocation/part1"); - client.add_partition(partition); + client.add_partition(partition, null); } // Tests for int add_partitions(List partitions) method @@ -653,7 +653,7 @@ public void testAddPartitions() throws Exception { partitions.add(partition1); partitions.add(partition2); partitions.add(partition3); - int numberOfCreatedParts = client.add_partitions(partitions); + int numberOfCreatedParts = client.add_partitions(partitions, null); Assert.assertEquals(3, numberOfCreatedParts); verifyPartition(table, "year=2017", Lists.newArrayList("2017"), 1); @@ -678,7 +678,7 @@ public void testAddPartitionsMultipleValues() throws Exception { partitions.add(partition1); partitions.add(partition2); partitions.add(partition3); - client.add_partitions(partitions); + client.add_partitions(partitions, null); verifyPartition(table, "year=2017/month=march", Lists.newArrayList("2017", "march"), 1); verifyPartition(table, "year=2017/month=june", Lists.newArrayList("2017", "june"), 2); @@ -699,7 +699,7 @@ public void testAddPartitionsWithDefaultAttributes() throws Exception { .addCol("test_value", "string", "test col value") .build(metaStore.getConf()); - client.add_partitions(Lists.newArrayList(partition)); + client.add_partitions(Lists.newArrayList(partition), null); // Check if the default values are set for all unfilled attributes List parts = @@ -731,7 +731,7 @@ public void testAddPartitionsUpperCaseDBAndTableName() throws Exception { tableLocation + "/year=2017"); Partition partition2 = buildPartition(DB_NAME.toUpperCase(), tableName.toUpperCase(), "2018", tableLocation + "/year=2018"); - client.add_partitions(Lists.newArrayList(partition1, partition2)); + client.add_partitions(Lists.newArrayList(partition1, partition2), null); // Validate the partitions attributes // The db and table name should be all lower case: 'test_partition_db' and @@ -766,7 +766,7 @@ public void testAddPartitionsUpperCaseDBAndTableNameInOnePart() throws Exception tableLocation + "/year=2018"); Partition partition3 = buildPartition(DB_NAME, tableName, "2019", tableLocation + "/year=2019"); try { - client.add_partitions(Lists.newArrayList(partition1, partition2, partition3)); + client.add_partitions(Lists.newArrayList(partition1, partition2, partition3), null); Assert.fail("MetaException should have been thrown."); } catch (MetaException e) { // Expected exception @@ -780,13 +780,13 @@ public void testAddPartitionsUpperCaseDBAndTableNameInOnePart() throws Exception @Test(expected = MetaException.class) public void testAddPartitionsNullList() throws Exception { - client.add_partitions(null); + client.add_partitions(null, null); } @Test public void testAddPartitionsEmptyList() throws Exception { - client.add_partitions(new ArrayList<>()); + client.add_partitions(new ArrayList<>(), null); } @Test(expected = MetaException.class) @@ -805,7 +805,7 @@ public void testAddPartitionsDifferentTable() throws Exception { partitions.add(partition1); partitions.add(partition2); partitions.add(partition3); - client.add_partitions(partitions); + client.add_partitions(partitions, null); } @Test @@ -824,7 +824,7 @@ public void testAddPartitionsDifferentDBs() throws Exception { partitions.add(partition2); partitions.add(partition3); try { - client.add_partitions(partitions); + client.add_partitions(partitions, null); Assert.fail("MetaException should have been thrown."); } catch (MetaException e) { // Expected exception @@ -840,7 +840,7 @@ public void 
testAddPartitionsDuplicateInTheList() throws Exception { Lists.newArrayList("2014", "2015", "2017", "2017", "2018", "2019")); try { - client.add_partitions(partitions); + client.add_partitions(partitions, null); Assert.fail("MetaException should have happened."); } catch (MetaException e) { // Expected exception @@ -867,7 +867,7 @@ public void testAddPartitionsWithSameNameInTheListCaseSensitive() throws Excepti partitions.add(partition1); partitions.add(partition2); partitions.add(partition3); - client.add_partitions(partitions); + client.add_partitions(partitions, null); List parts = client.listPartitionNames(DB_NAME, TABLE_NAME, MAX); Assert.assertEquals(3, parts.size()); @@ -883,13 +883,13 @@ public void testAddPartitionsAlreadyExists() throws Exception { String tableLocation = metaStore.getWarehouseRoot() + "/" + TABLE_NAME; Partition partition = buildPartition(DB_NAME, TABLE_NAME, "2016", tableLocation + "/year=2016a"); - client.add_partition(partition); + client.add_partition(partition, null); List partitions = buildPartitions(DB_NAME, TABLE_NAME, Lists.newArrayList("2014", "2015", "2016", "2017", "2018")); try { - client.add_partitions(partitions); + client.add_partitions(partitions, null); Assert.fail("AlreadyExistsException should have happened."); } catch (AlreadyExistsException e) { // Expected exception @@ -914,7 +914,7 @@ public void testAddPartitionsNonExistingTable() throws Exception { List partitions = new ArrayList<>(); partitions.add(partition1); partitions.add(partition2); - client.add_partitions(partitions); + client.add_partitions(partitions, null); } @Test(expected = InvalidObjectException.class) @@ -927,7 +927,7 @@ public void testAddPartitionsNonExistingDb() throws Exception { List partitions = new ArrayList<>(); partitions.add(partition1); partitions.add(partition2); - client.add_partitions(partitions); + client.add_partitions(partitions, null); } @Test(expected = MetaException.class) @@ -941,7 +941,7 @@ public void testAddPartitionsNullDb() throws Exception { List partitions = new ArrayList<>(); partitions.add(partition1); partitions.add(partition2); - client.add_partitions(partitions); + client.add_partitions(partitions, null); } @Test(expected = MetaException.class) @@ -954,7 +954,7 @@ public void testAddPartitionsEmptyDb() throws Exception { List partitions = new ArrayList<>(); partitions.add(partition1); partitions.add(partition2); - client.add_partitions(partitions); + client.add_partitions(partitions, null); } @Test(expected = MetaException.class) @@ -968,7 +968,7 @@ public void testAddPartitionsNullTable() throws Exception { List partitions = new ArrayList<>(); partitions.add(partition1); partitions.add(partition2); - client.add_partitions(partitions); + client.add_partitions(partitions, null); } @Test(expected = MetaException.class) @@ -981,7 +981,7 @@ public void testAddPartitionsEmptyTable() throws Exception { List partitions = new ArrayList<>(); partitions.add(partition1); partitions.add(partition2); - client.add_partitions(partitions); + client.add_partitions(partitions, null); } @Test @@ -1009,7 +1009,7 @@ public void testAddPartitionsOneInvalid() throws Exception { partitions.add(partition5); try { - client.add_partitions(partitions); + client.add_partitions(partitions, null); Assert.fail("MetaException should have happened."); } catch (MetaException e) { // Expected exception @@ -1031,7 +1031,7 @@ public void testAddPartitionsNullSd() throws Exception { partition.setSd(null); List partitions = new ArrayList<>(); partitions.add(partition); - 
client.add_partitions(partitions); + client.add_partitions(partitions, null); } @Test @@ -1040,7 +1040,7 @@ public void testAddPartitionsNullColsInSd() throws Exception { createTable(); Partition partition = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE); partition.getSd().setCols(null); - client.add_partitions(Lists.newArrayList(partition)); + client.add_partitions(Lists.newArrayList(partition), null); // TODO: Not sure that this is the correct behavior. It doesn't make sense to create the // partition without column info. This should be investigated later. @@ -1056,7 +1056,7 @@ public void testAddPartitionsEmptyColsInSd() throws Exception { createTable(); Partition partition = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE); partition.getSd().setCols(new ArrayList<>()); - client.add_partitions(Lists.newArrayList(partition)); + client.add_partitions(Lists.newArrayList(partition), null); // TODO: Not sure that this is the correct behavior. It doesn't make sense to create the // partition without column info. This should be investigated later. @@ -1072,7 +1072,7 @@ public void testAddPartitionsNullColTypeInSd() throws Exception { createTable(); Partition partition = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE); partition.getSd().getCols().get(0).setType(null); - client.add_partitions(Lists.newArrayList(partition)); + client.add_partitions(Lists.newArrayList(partition), null); } @Test(expected = MetaException.class) @@ -1081,7 +1081,7 @@ public void testAddPartitionsNullColNameInSd() throws Exception { createTable(); Partition partition = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE); partition.getSd().getCols().get(0).setName(null); - client.add_partitions(Lists.newArrayList(partition)); + client.add_partitions(Lists.newArrayList(partition), null); } @Test @@ -1090,7 +1090,7 @@ public void testAddPartitionsInvalidColTypeInSd() throws Exception { createTable(); Partition partition = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE); partition.getSd().getCols().get(0).setType("xyz"); - client.add_partitions(Lists.newArrayList(partition)); + client.add_partitions(Lists.newArrayList(partition), null); // TODO: Not sure that this is the correct behavior. It doesn't make sense to create the // partition with column with invalid type. This should be investigated later. 
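// [Editor's note - illustrative sketch, not part of this patch] The list-based calls in
// this file follow the same pattern: add_partitions(List, writeIdList) returns the count
// of created partitions, while the ifNotExists/needResults variant
// add_partitions(List, boolean, boolean, writeIdList) returns the created partitions
// themselves; both gain the trailing argument, with null used everywhere in these tests.
// A hedged example of the four-argument form (local variable names are assumptions
// chosen for readability):

import java.util.List;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Partition;

class AddPartitionsWriteIdSketch {
  static List<Partition> addIdempotently(IMetaStoreClient client, List<Partition> parts,
      String validWriteIdList) throws Exception {
    boolean ifNotExists = true;   // tolerate partitions that already exist, as testAddPartsAlreadyExistsIfExistsTrue does
    boolean needResults = true;   // ask for the newly created partitions back
    return client.add_partitions(parts, ifNotExists, needResults, validWriteIdList);
  }
}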
@@ -1106,7 +1106,7 @@ public void testAddPartitionsEmptySerdeInfo() throws Exception { createTable(); Partition partition = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE); partition.getSd().setSerdeInfo(null); - client.add_partitions(Lists.newArrayList(partition)); + client.add_partitions(Lists.newArrayList(partition), null); } @Test @@ -1119,7 +1119,7 @@ public void testAddPartitionNullAndEmptyLocation() throws Exception { partitions.add(partition1); partitions.add(partition2); - client.add_partitions(partitions); + client.add_partitions(partitions, null); Partition part1 = client.getPartition(DB_NAME, TABLE_NAME, "year=2017"); Assert.assertEquals(metaStore.getWarehouseRoot() + "/addparttest2/year=2017", @@ -1138,7 +1138,7 @@ public void testAddPartitionsNullLocationInTableToo() throws Exception { List partitions = new ArrayList<>(); Partition partition = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE, null); partitions.add(partition); - client.add_partitions(partitions); + client.add_partitions(partitions, null); Partition part = client.getPartition(DB_NAME, TABLE_NAME, "year=2017"); Assert.assertEquals( @@ -1154,7 +1154,7 @@ public void testAddPartitionsForView() throws Exception { createView(tableName); Partition partition = buildPartition(DB_NAME, tableName, DEFAULT_YEAR_VALUE); List partitions = Lists.newArrayList(partition); - client.add_partitions(partitions); + client.add_partitions(partitions, null); } @Test @@ -1168,7 +1168,7 @@ public void testAddPartitionsForExternalTable() throws Exception { Partition partition1 = buildPartition(DB_NAME, tableName, "2017", location1); Partition partition2 = buildPartition(DB_NAME, tableName, "2018", location2); List partitions = Lists.newArrayList(partition1, partition2); - client.add_partitions(partitions); + client.add_partitions(partitions, null); List resultParts = client.getPartitionsByNames(DB_NAME, tableName, Lists.newArrayList("year=2017", "year=2018")); @@ -1191,7 +1191,7 @@ public void testAddPartitionsForExternalTableNullLocation() throws Exception { Partition partition1 = buildPartition(DB_NAME, tableName, "2017", null); Partition partition2 = buildPartition(DB_NAME, tableName, "2018", null); List partitions = Lists.newArrayList(partition1, partition2); - client.add_partitions(partitions); + client.add_partitions(partitions, null); List resultParts = client.getPartitionsByNames(DB_NAME, tableName, Lists.newArrayList("year=2017", "year=2018")); @@ -1221,7 +1221,7 @@ public void testAddPartitionsNoValueInPartition() throws Exception { .build(metaStore.getConf()); List partitions = new ArrayList<>(); partitions.add(partition); - client.add_partitions(partitions); + client.add_partitions(partitions, null); } @Test(expected=MetaException.class) @@ -1231,7 +1231,7 @@ public void testAddPartitionsMorePartColInTable() throws Exception { Partition partition = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE); List partitions = new ArrayList<>(); partitions.add(partition); - client.add_partitions(partitions); + client.add_partitions(partitions, null); } @Test(expected = MetaException.class) @@ -1239,7 +1239,7 @@ public void testAddPartitionsNullPartition() throws Exception { List partitions = new ArrayList<>(); partitions.add(null); - client.add_partitions(partitions); + client.add_partitions(partitions, null); } @Test(expected = MetaException.class) @@ -1250,7 +1250,7 @@ public void testAddPartitionsNullValues() throws Exception { partition.setValues(null); List partitions = new ArrayList<>(); 
partitions.add(partition); - client.add_partitions(partitions); + client.add_partitions(partitions, null); } @Test @@ -1260,7 +1260,7 @@ public void testAddPartitionsEmptyValue() throws Exception { Partition partition = buildPartition(DB_NAME, TABLE_NAME, ""); List partitions = new ArrayList<>(); partitions.add(partition); - client.add_partitions(partitions); + client.add_partitions(partitions, null); List partitionNames = client.listPartitionNames(DB_NAME, TABLE_NAME, MAX); Assert.assertNotNull(partitionNames); @@ -1282,7 +1282,7 @@ public void testAddPartitionsInvalidLocation() throws Exception { List partitions = buildPartitions(DB_NAME, TABLE_NAME, valuesAndLocations); try { - client.add_partitions(partitions); + client.add_partitions(partitions, null); Assert.fail("MetaException should have happened."); } catch (MetaException e) { @@ -1316,7 +1316,7 @@ public void testAddPartitionsMoreThanThreadCountsOneFails() throws Exception { } try { - client.add_partitions(partitions); + client.add_partitions(partitions, null); Assert.fail("MetaException should have happened."); } catch (MetaException e) { @@ -1348,7 +1348,7 @@ public void testAddParts() throws Exception { partitions.add(partition2); partitions.add(partition3); - List addedPartitions = client.add_partitions(partitions, false, false); + List addedPartitions = client.add_partitions(partitions, false, false, null); Assert.assertNull(addedPartitions); verifyPartition(table, "year=2017", Lists.newArrayList("2017"), 1); verifyPartition(table, "year=2016", Lists.newArrayList("2016"), 2); @@ -1372,7 +1372,7 @@ public void testAddPartsMultipleValues() throws Exception { partitions.add(partition1); partitions.add(partition2); partitions.add(partition3); - List addedPartitions = client.add_partitions(partitions, false, true); + List addedPartitions = client.add_partitions(partitions, false, true, null); Assert.assertNotNull(addedPartitions); Assert.assertEquals(3, addedPartitions.size()); verifyPartition(table, "year=2017/month=march", Lists.newArrayList("2017", "march"), 1); @@ -1383,14 +1383,14 @@ public void testAddPartsMultipleValues() throws Exception { @Test(expected = MetaException.class) public void testAddPartsNullList() throws Exception { - client.add_partitions(null, false, false); + client.add_partitions(null, false, false, null); } @Test public void testAddPartsEmptyList() throws Exception { List addedPartitions = - client.add_partitions(new ArrayList<>(), false, true); + client.add_partitions(new ArrayList<>(), false, true, null); Assert.assertNotNull(addedPartitions); Assert.assertTrue(addedPartitions.isEmpty()); } @@ -1411,7 +1411,7 @@ public void testAddPartsDifferentTable() throws Exception { partitions.add(partition1); partitions.add(partition2); partitions.add(partition3); - client.add_partitions(partitions, false, false); + client.add_partitions(partitions, false, false, null); } @Test @@ -1430,7 +1430,7 @@ public void testAddPartsDifferentDBs() throws Exception { partitions.add(partition2); partitions.add(partition3); try { - client.add_partitions(partitions, false, false); + client.add_partitions(partitions, false, false, null); Assert.fail("MetaException should have been thrown."); } catch (MetaException e) { // Expected exception @@ -1451,7 +1451,7 @@ public void testAddPartsDuplicateInTheList() throws Exception { partitions.add(partition1); partitions.add(partition2); partitions.add(partition3); - client.add_partitions(partitions, true, false); + client.add_partitions(partitions, true, false, null); } 
@Test(expected = AlreadyExistsException.class) @@ -1459,7 +1459,7 @@ public void testAddPartsAlreadyExists() throws Exception { createTable(); Partition partition = buildPartition(DB_NAME, TABLE_NAME, "2017"); - client.add_partition(partition); + client.add_partition(partition, null); Partition partition1 = buildPartition(DB_NAME, TABLE_NAME, "2015"); Partition partition2 = buildPartition(DB_NAME, TABLE_NAME, "2017"); @@ -1469,7 +1469,7 @@ public void testAddPartsAlreadyExists() throws Exception { partitions.add(partition1); partitions.add(partition2); partitions.add(partition3); - client.add_partitions(partitions, false, false); + client.add_partitions(partitions, false, false, null); } @Test @@ -1477,7 +1477,7 @@ public void testAddPartsAlreadyExistsIfExistsTrue() throws Exception { createTable(); Partition partition = buildPartition(DB_NAME, TABLE_NAME, "2017"); - client.add_partition(partition); + client.add_partition(partition, null); Partition partition1 = buildPartition(DB_NAME, TABLE_NAME, "2015"); Partition partition2 = buildPartition(DB_NAME, TABLE_NAME, "2017"); @@ -1487,7 +1487,7 @@ public void testAddPartsAlreadyExistsIfExistsTrue() throws Exception { partitions.add(partition1); partitions.add(partition2); partitions.add(partition3); - List addedPartitions = client.add_partitions(partitions, true, true); + List addedPartitions = client.add_partitions(partitions, true, true, null); Assert.assertEquals(2, addedPartitions.size()); List partitionNames = client.listPartitionNames(DB_NAME, TABLE_NAME, MAX); Assert.assertEquals(3, partitionNames.size()); @@ -1501,7 +1501,7 @@ public void testAddPartsNullPartition() throws Exception { List partitions = new ArrayList<>(); partitions.add(null); - client.add_partitions(partitions, false, false); + client.add_partitions(partitions, false, false, null); } // Helper methods @@ -1531,7 +1531,7 @@ private Table createTable(String dbName, String tableName, List par .setStoredAsSubDirectories(false) .addSerdeParam("partTestSerdeParamKey", "partTestSerdeParamValue") .setLocation(location) - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); return client.getTable(dbName, tableName); } @@ -1544,7 +1544,7 @@ private void createExternalTable(String tableName, String location) throws Excep .addPartCol(YEAR_COL_NAME, DEFAULT_COL_TYPE) .addTableParam("EXTERNAL", "TRUE") .setLocation(location) - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); } private Partition buildPartition(String dbName, String tableName, String value) @@ -1731,6 +1731,6 @@ private void createView(String tableName) throws Exception { .addCol("test_value", "string", "test col value") .addPartCol(YEAR_COL_NAME, DEFAULT_COL_TYPE) .setLocation(null) - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); } } diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAddPartitionsFromPartSpec.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAddPartitionsFromPartSpec.java index 2564349..39265b0 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAddPartitionsFromPartSpec.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAddPartitionsFromPartSpec.java @@ -120,7 +120,7 @@ public void testAddPartitionSpec() throws Exception { String rootPath = table.getSd().getLocation() + 
"/addpartspectest/"; PartitionSpecProxy partitionSpec = buildPartitionSpec(DB_NAME, TABLE_NAME, rootPath, partitions); - client.add_partitions_pspec(partitionSpec); + client.add_partitions_pspec(partitionSpec, null); verifyPartition(table, "year=2013", Lists.newArrayList("2013"), 1); verifyPartition(table, "year=2014", Lists.newArrayList("2014"), 2); @@ -139,7 +139,7 @@ public void testAddPartitionSpecWithSharedSD() throws Exception { String location = table.getSd().getLocation() + "/sharedSDTest/"; PartitionSpecProxy partitionSpecProxy = buildPartitionSpecWithSharedSD(partitions, buildSD(location)); - client.add_partitions_pspec(partitionSpecProxy); + client.add_partitions_pspec(partitionSpecProxy, null); verifyPartitionSharedSD(table, "year=2013", Lists.newArrayList("2013"), 1); verifyPartitionSharedSD(table, "year=2014", Lists.newArrayList("2014"), 2); @@ -162,7 +162,7 @@ public void testAddPartitionSpecWithSharedSDUpperCaseDBAndTableName() throws Exc partitionList.setSd(buildSD(location)); partitionSpec.setSharedSDPartitionSpec(partitionList); PartitionSpecProxy partitionSpecProxy = PartitionSpecProxy.Factory.get(partitionSpec); - client.add_partitions_pspec(partitionSpecProxy); + client.add_partitions_pspec(partitionSpecProxy, null); Partition part = client.getPartition(DB_NAME, TABLE_NAME, "year=2013"); Assert.assertNotNull(part); @@ -188,7 +188,7 @@ public void testAddPartitionSpecsMultipleValues() throws Exception { List partitionsWithoutSD = Lists.newArrayList(partition3, partition4); PartitionSpecProxy partitionSpec = buildPartitionSpec(partitions, partitionsWithoutSD); - client.add_partitions_pspec(partitionSpec); + client.add_partitions_pspec(partitionSpec, null); verifyPartition(table, "year=2002/month=march", Lists.newArrayList("2002", "march"), 1); verifyPartition(table, "year=2003/month=april", Lists.newArrayList("2003", "april"), 2); @@ -214,7 +214,7 @@ public void testAddPartitionSpecUpperCaseDBAndTableName() throws Exception { String rootPath = tableLocation + "/addpartspectest/"; PartitionSpecProxy partitionSpec = buildPartitionSpec(DB_NAME.toUpperCase(), tableName.toUpperCase(), rootPath, partitions); - client.add_partitions_pspec(partitionSpec); + client.add_partitions_pspec(partitionSpec, null); // Validate the partition attributes // The db and table name should be all lower case: 'test_partition_db' and @@ -252,7 +252,7 @@ public void testAddPartitionSpecUpperCaseDBAndTableNameInOnePart() throws Except String rootPath = tableLocation + "/addpartspectest/"; PartitionSpecProxy partitionSpec = buildPartitionSpec(DB_NAME, tableName, rootPath, partitions); - client.add_partitions_pspec(partitionSpec); + client.add_partitions_pspec(partitionSpec, null); Partition part = client.getPartition(DB_NAME, tableName, "year=2013"); Assert.assertNotNull(part); @@ -271,7 +271,7 @@ public void testAddPartitionSpecUpperCaseDBAndTableNameInOnePart() throws Except @Test(expected = MetaException.class) public void testAddPartitionSpecNullSpec() throws Exception { - client.add_partitions_pspec(null); + client.add_partitions_pspec(null, null); } @Test @@ -280,7 +280,7 @@ public void testAddPartitionSpecEmptyPartList() throws Exception { createTable(); List partitions = new ArrayList<>(); PartitionSpecProxy partitionSpec = buildPartitionSpec(DB_NAME, TABLE_NAME, null, partitions); - client.add_partitions_pspec(partitionSpec); + client.add_partitions_pspec(partitionSpec, null); } @Test(expected = MetaException.class) @@ -289,7 +289,7 @@ public void testAddPartitionSpecNullPartList() throws 
Exception { createTable(); List partitions = null; PartitionSpecProxy partitionSpec = buildPartitionSpec(DB_NAME, TABLE_NAME, null, partitions); - client.add_partitions_pspec(partitionSpec); + client.add_partitions_pspec(partitionSpec, null); } @Test(expected = MetaException.class) @@ -299,7 +299,7 @@ public void testAddPartitionSpecNoDB() throws Exception { Partition partition = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE); PartitionSpecProxy partitionSpecProxy = buildPartitionSpec(null, TABLE_NAME, null, Lists.newArrayList(partition)); - client.add_partitions_pspec(partitionSpecProxy); + client.add_partitions_pspec(partitionSpecProxy, null); } @Test(expected = MetaException.class) @@ -309,7 +309,7 @@ public void testAddPartitionSpecNoTable() throws Exception { Partition partition = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE); PartitionSpecProxy partitionSpecProxy = buildPartitionSpec(DB_NAME, null, null, Lists.newArrayList(partition)); - client.add_partitions_pspec(partitionSpecProxy); + client.add_partitions_pspec(partitionSpecProxy, null); } @Test(expected = MetaException.class) @@ -321,7 +321,7 @@ public void testAddPartitionSpecNoDBAndTableInPartition() throws Exception { partition.setTableName(null); PartitionSpecProxy partitionSpecProxy = buildPartitionSpec(DB_NAME, TABLE_NAME, null, Lists.newArrayList(partition)); - client.add_partitions_pspec(partitionSpecProxy); + client.add_partitions_pspec(partitionSpecProxy, null); } @Test @@ -335,7 +335,7 @@ public void testAddPartitionSpecDBAndTableSetFromSpecProxy() throws Exception { buildPartitionSpec(null, null, null, Lists.newArrayList(partition)); partitionSpecProxy.setDbName(DB_NAME); partitionSpecProxy.setTableName(TABLE_NAME); - client.add_partitions_pspec(partitionSpecProxy); + client.add_partitions_pspec(partitionSpecProxy, null); Partition resultPart = client.getPartition(DB_NAME, TABLE_NAME, Lists.newArrayList(DEFAULT_YEAR_VALUE)); @@ -353,7 +353,7 @@ public void testAddPartitionSpecWithSharedSDDBAndTableSetFromSpecProxy() throws buildPartitionSpecWithSharedSD(Lists.newArrayList(partition), buildSD(location)); partitionSpecProxy.setDbName(DB_NAME); partitionSpecProxy.setTableName(TABLE_NAME); - client.add_partitions_pspec(partitionSpecProxy); + client.add_partitions_pspec(partitionSpecProxy, null); Partition resultPart = client.getPartition(DB_NAME, TABLE_NAME, Lists.newArrayList(DEFAULT_YEAR_VALUE)); @@ -367,7 +367,7 @@ public void testAddPartitionSpecEmptyDB() throws Exception { Partition partition = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE); PartitionSpecProxy partitionSpecProxy = buildPartitionSpec("", TABLE_NAME, null, Lists.newArrayList(partition)); - client.add_partitions_pspec(partitionSpecProxy); + client.add_partitions_pspec(partitionSpecProxy, null); } @Test(expected = InvalidObjectException.class) @@ -377,7 +377,7 @@ public void testAddPartitionSpecEmptyTable() throws Exception { Partition partition = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE); PartitionSpecProxy partitionSpecProxy = buildPartitionSpec(DB_NAME, "", null, Lists.newArrayList(partition)); - client.add_partitions_pspec(partitionSpecProxy); + client.add_partitions_pspec(partitionSpecProxy, null); } @Test(expected = InvalidObjectException.class) @@ -387,7 +387,7 @@ public void testAddPartitionSpecNonExistingDB() throws Exception { Partition partition = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE); PartitionSpecProxy partitionSpecProxy = buildPartitionSpec("nonexistingdb", TABLE_NAME, null, 
Lists.newArrayList(partition)); - client.add_partitions_pspec(partitionSpecProxy); + client.add_partitions_pspec(partitionSpecProxy, null); } @Test(expected = InvalidObjectException.class) @@ -397,7 +397,7 @@ public void testAddPartitionSpecNonExistingTable() throws Exception { Partition partition = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE); PartitionSpecProxy partitionSpecProxy = buildPartitionSpec(DB_NAME, "nonexistingtable", null, Lists.newArrayList(partition)); - client.add_partitions_pspec(partitionSpecProxy); + client.add_partitions_pspec(partitionSpecProxy, null); } @Test @@ -414,7 +414,7 @@ public void testAddPartitionSpecDiffDBName() throws Exception { PartitionSpecProxy partitionSpecProxy = buildPartitionSpec(DB_NAME, TABLE_NAME, null, partitions); try { - client.add_partitions_pspec(partitionSpecProxy); + client.add_partitions_pspec(partitionSpecProxy, null); Assert.fail("MetaException should have been thrown."); } catch (MetaException e) { // Expected exception @@ -434,7 +434,7 @@ public void testAddPartitionSpecNullPart() throws Exception { partitions.add(partition2); PartitionSpecProxy partitionSpecProxy = buildPartitionSpec(DB_NAME, TABLE_NAME, null, partitions); - client.add_partitions_pspec(partitionSpecProxy); + client.add_partitions_pspec(partitionSpecProxy, null); } @Test @@ -448,7 +448,7 @@ public void testAddPartitionSpecUnsupportedPartSpecType() throws Exception { partitionSpec.setSharedSDPartitionSpec(null); try { PartitionSpecProxy bubu = PartitionSpecProxy.Factory.get(partitionSpec); - client.add_partitions_pspec(bubu); + client.add_partitions_pspec(bubu, null); Assert.fail("AssertionError should have been thrown."); } catch (AssertionError e) { // Expected error @@ -475,7 +475,7 @@ public void testAddPartitionSpecBothTypeSet() throws Exception { partitionSpec.setSharedSDPartitionSpec(partitionSpecWithSharedSD); PartitionSpecProxy partitionSpecProxy = PartitionSpecProxy.Factory.get(partitionSpec); - client.add_partitions_pspec(partitionSpecProxy); + client.add_partitions_pspec(partitionSpecProxy, null); List partitionNames = client.listPartitionNames(DB_NAME, TABLE_NAME, MAX); Assert.assertNotNull(partitionNames); @@ -493,7 +493,7 @@ public void testAddPartitionSpecSetRootPath() throws Exception { Partition partition = buildPartition(DB_NAME, TABLE_NAME, "2007", rootPath + "part2007/"); PartitionSpecProxy partitionSpecProxy = buildPartitionSpec(DB_NAME, TABLE_NAME, rootPath1, Lists.newArrayList(partition)); - client.add_partitions_pspec(partitionSpecProxy); + client.add_partitions_pspec(partitionSpecProxy, null); Partition resultPart = client.getPartition(DB_NAME, TABLE_NAME, Lists.newArrayList("2007")); Assert.assertEquals(rootPath + "part2007", resultPart.getSd().getLocation()); @@ -510,7 +510,7 @@ public void testAddPartitionSpecChangeRootPath() throws Exception { PartitionSpecProxy partitionSpecProxy = buildPartitionSpec(DB_NAME, TABLE_NAME, rootPath, Lists.newArrayList(partition)); partitionSpecProxy.setRootLocation(rootPath1); - client.add_partitions_pspec(partitionSpecProxy); + client.add_partitions_pspec(partitionSpecProxy, null); Partition resultPart = client.getPartition(DB_NAME, TABLE_NAME, Lists.newArrayList("2007")); Assert.assertEquals(rootPath1 + "part2007", resultPart.getSd().getLocation()); @@ -527,7 +527,7 @@ public void testAddPartitionSpecChangeRootPathFromNull() throws Exception { PartitionSpecProxy partitionSpecProxy = buildPartitionSpec(DB_NAME, TABLE_NAME, null, Lists.newArrayList(partition)); 
partitionSpecProxy.setRootLocation(rootPath1); - client.add_partitions_pspec(partitionSpecProxy); + client.add_partitions_pspec(partitionSpecProxy, null); } @Test(expected = MetaException.class) @@ -539,7 +539,7 @@ public void testAddPartitionSpecChangeRootPathToNull() throws Exception { PartitionSpecProxy partitionSpecProxy = buildPartitionSpec(DB_NAME, TABLE_NAME, rootPath, Lists.newArrayList(partition)); partitionSpecProxy.setRootLocation(null); - client.add_partitions_pspec(partitionSpecProxy); + client.add_partitions_pspec(partitionSpecProxy, null); } @Test(expected = MetaException.class) @@ -553,7 +553,7 @@ public void testAddPartitionSpecChangeRootPathDiffInSd() throws Exception { PartitionSpecProxy partitionSpecProxy = buildPartitionSpec(DB_NAME, TABLE_NAME, rootPath, Lists.newArrayList(partition)); partitionSpecProxy.setRootLocation(rootPath2); - client.add_partitions_pspec(partitionSpecProxy); + client.add_partitions_pspec(partitionSpecProxy, null); } @Test @@ -567,7 +567,7 @@ public void testAddPartitionSpecWithSharedSDChangeRootPath() throws Exception { PartitionSpecProxy partitionSpecProxy = buildPartitionSpecWithSharedSD(Lists.newArrayList(partition), buildSD(rootPath)); partitionSpecProxy.setRootLocation(rootPath1); - client.add_partitions_pspec(partitionSpecProxy); + client.add_partitions_pspec(partitionSpecProxy, null); Partition resultPart = client.getPartition(DB_NAME, TABLE_NAME, Lists.newArrayList("2014")); Assert.assertEquals(rootPath1 + "partwithoutsd0", resultPart.getSd().getLocation()); @@ -582,7 +582,7 @@ public void testAddPartitionSpecWithSharedSDWithoutRelativePath() throws Excepti String location = table.getSd().getLocation() + "/sharedSDTest/"; PartitionSpecProxy partitionSpecProxy = buildPartitionSpecWithSharedSD(Lists.newArrayList(partition), buildSD(location)); - client.add_partitions_pspec(partitionSpecProxy); + client.add_partitions_pspec(partitionSpecProxy, null); Partition part = client.getPartition(DB_NAME, TABLE_NAME, "year=2014"); Assert.assertNotNull(part); @@ -598,7 +598,7 @@ public void testAddPartitionSpecPartAlreadyExists() throws Exception { String tableLocation = metaStore.getWarehouseRoot() + "/" + TABLE_NAME; Partition partition = buildPartition(DB_NAME, TABLE_NAME, "2016", tableLocation + "/year=2016a"); - client.add_partition(partition); + client.add_partition(partition, null); List partitions = buildPartitions(DB_NAME, TABLE_NAME, Lists.newArrayList("2014", "2015", "2016", "2017", "2018")); @@ -606,7 +606,7 @@ public void testAddPartitionSpecPartAlreadyExists() throws Exception { buildPartitionSpec(DB_NAME, TABLE_NAME, null, partitions); try { - client.add_partitions_pspec(partitionSpecProxy); + client.add_partitions_pspec(partitionSpecProxy, null); Assert.fail("AlreadyExistsException should have happened."); } catch (AlreadyExistsException e) { // Expected exception @@ -630,7 +630,7 @@ public void testAddPartitionSpecPartDuplicateInSpec() throws Exception { PartitionSpecProxy partitionSpecProxy = buildPartitionSpec(DB_NAME, TABLE_NAME, null, partitions); try { - client.add_partitions_pspec(partitionSpecProxy); + client.add_partitions_pspec(partitionSpecProxy, null); Assert.fail("MetaException should have happened."); } catch (MetaException e) { // Expected exception @@ -654,7 +654,7 @@ public void testAddPartitionSpecPartDuplicateInSpecs() throws Exception { PartitionWithoutSD partitionWithoutSD = buildPartitionWithoutSD(Lists.newArrayList("2002"), 0); PartitionSpecProxy partitionSpecProxy = 
buildPartitionSpec(Lists.newArrayList(partition), Lists.newArrayList(partitionWithoutSD)); - client.add_partitions_pspec(partitionSpecProxy); + client.add_partitions_pspec(partitionSpecProxy, null); } @Test(expected = MetaException.class) @@ -665,7 +665,7 @@ public void testAddPartitionSpecNullSd() throws Exception { partition.setSd(null); PartitionSpecProxy partitionSpecProxy = buildPartitionSpec(DB_NAME, TABLE_NAME, null, Lists.newArrayList(partition)); - client.add_partitions_pspec(partitionSpecProxy); + client.add_partitions_pspec(partitionSpecProxy, null); } @Test(expected = MetaException.class) @@ -676,7 +676,7 @@ public void testAddPartitionSpecWithSharedSDNullSd() throws Exception { StorageDescriptor sd = null; PartitionSpecProxy partitionSpecProxy = buildPartitionSpecWithSharedSD(Lists.newArrayList(partition), sd); - client.add_partitions_pspec(partitionSpecProxy); + client.add_partitions_pspec(partitionSpecProxy, null); } @Test(expected = MetaException.class) @@ -688,7 +688,7 @@ public void testAddPartitionSpecWithSharedSDNullLocation() throws Exception { String location = null; PartitionSpecProxy partitionSpecProxy = buildPartitionSpecWithSharedSD(Lists.newArrayList(partition), buildSD(location)); - client.add_partitions_pspec(partitionSpecProxy); + client.add_partitions_pspec(partitionSpecProxy, null); } @Test(expected = MetaException.class) @@ -698,7 +698,7 @@ public void testAddPartitionSpecWithSharedSDEmptyLocation() throws Exception { PartitionWithoutSD partition = buildPartitionWithoutSD(Lists.newArrayList("2002"), 0); partition.setRelativePath("year2002"); PartitionSpecProxy partitionSpecProxy = buildPartitionSpecWithSharedSD(Lists.newArrayList(partition), buildSD("")); - client.add_partitions_pspec(partitionSpecProxy); + client.add_partitions_pspec(partitionSpecProxy, null); } @Test(expected = MetaException.class) @@ -711,7 +711,7 @@ public void testAddPartitionSpecWithSharedSDInvalidSD() throws Exception { sd.setLocation(table.getSd().getLocation() + "/nullLocationTest/"); PartitionSpecProxy partitionSpecProxy = buildPartitionSpecWithSharedSD(Lists.newArrayList(partition), sd); - client.add_partitions_pspec(partitionSpecProxy); + client.add_partitions_pspec(partitionSpecProxy, null); } @Test @@ -721,7 +721,7 @@ public void testAddPartitionSpecNullLocation() throws Exception { Partition partition = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE, null); PartitionSpecProxy partitionSpecProxy = buildPartitionSpec(DB_NAME, TABLE_NAME, null, Lists.newArrayList(partition)); - client.add_partitions_pspec(partitionSpecProxy); + client.add_partitions_pspec(partitionSpecProxy, null); Partition resultPart = client.getPartition(DB_NAME, TABLE_NAME, Lists.newArrayList(DEFAULT_YEAR_VALUE)); @@ -737,7 +737,7 @@ public void testAddPartitionSpecEmptyLocation() throws Exception { Partition partition = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE, ""); PartitionSpecProxy partitionSpecProxy = buildPartitionSpec(DB_NAME, TABLE_NAME, null, Lists.newArrayList(partition)); - client.add_partitions_pspec(partitionSpecProxy); + client.add_partitions_pspec(partitionSpecProxy, null); Partition resultPart = client.getPartition(DB_NAME, TABLE_NAME, Lists.newArrayList(DEFAULT_YEAR_VALUE)); @@ -753,7 +753,7 @@ public void testAddPartitionSpecEmptyLocationInTableToo() throws Exception { Partition partition = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE, ""); PartitionSpecProxy partitionSpecProxy = buildPartitionSpec(DB_NAME, TABLE_NAME, null, Lists.newArrayList(partition)); - 
client.add_partitions_pspec(partitionSpecProxy); + client.add_partitions_pspec(partitionSpecProxy, null); Partition resultPart = client.getPartition(DB_NAME, TABLE_NAME, Lists.newArrayList(DEFAULT_YEAR_VALUE)); @@ -770,7 +770,7 @@ public void testAddPartitionSpecForView() throws Exception { Partition partition = buildPartition(DB_NAME, tableName, DEFAULT_YEAR_VALUE); PartitionSpecProxy partitionSpecProxy = buildPartitionSpec(DB_NAME, tableName, null, Lists.newArrayList(partition)); - client.add_partitions_pspec(partitionSpecProxy); + client.add_partitions_pspec(partitionSpecProxy, null); } @Test @@ -782,7 +782,7 @@ public void testAddPartitionSpecForViewNullPartLocation() throws Exception { partition.getSd().setLocation(null); PartitionSpecProxy partitionSpecProxy = buildPartitionSpec(DB_NAME, tableName, null, Lists.newArrayList(partition)); - client.add_partitions_pspec(partitionSpecProxy); + client.add_partitions_pspec(partitionSpecProxy, null); Partition part = client.getPartition(DB_NAME, tableName, "year=2017"); Assert.assertNull(part.getSd().getLocation()); } @@ -796,7 +796,7 @@ public void testAddPartitionsForViewNullPartSd() throws Exception { partition.setSd(null); PartitionSpecProxy partitionSpecProxy = buildPartitionSpec(DB_NAME, tableName, null, Lists.newArrayList(partition)); - client.add_partitions_pspec(partitionSpecProxy); + client.add_partitions_pspec(partitionSpecProxy, null); Partition part = client.getPartition(DB_NAME, tableName, "year=2017"); Assert.assertNull(part.getSd()); } @@ -810,7 +810,7 @@ public void testAddPartitionSpecWithSharedSDNoValue() throws Exception { String location = table.getSd().getLocation() + "/nullValueTest/"; PartitionSpecProxy partitionSpecProxy = buildPartitionSpecWithSharedSD(Lists.newArrayList(partition), buildSD(location)); - client.add_partitions_pspec(partitionSpecProxy); + client.add_partitions_pspec(partitionSpecProxy, null); } @Test(expected=MetaException.class) @@ -826,7 +826,7 @@ public void testAddPartitionSpecNoValue() throws Exception { PartitionSpecProxy partitionSpecProxy = buildPartitionSpec(DB_NAME, TABLE_NAME, null, Lists.newArrayList(partition)); - client.add_partitions_pspec(partitionSpecProxy); + client.add_partitions_pspec(partitionSpecProxy, null); } @Test(expected = MetaException.class) @@ -837,7 +837,7 @@ public void testAddPartitionSpecNullValues() throws Exception { partition.setValues(null); PartitionSpecProxy partitionSpecProxy = buildPartitionSpec(DB_NAME, TABLE_NAME, null, Lists.newArrayList(partition)); - client.add_partitions_pspec(partitionSpecProxy); + client.add_partitions_pspec(partitionSpecProxy, null); } @Test @@ -850,7 +850,7 @@ public void testAddPartitionSpecWithSharedSDEmptyValue() throws Exception { String location = table.getSd().getLocation() + "/nullValueTest/"; PartitionSpecProxy partitionSpecProxy = buildPartitionSpecWithSharedSD(Lists.newArrayList(partition), buildSD(location)); - client.add_partitions_pspec(partitionSpecProxy); + client.add_partitions_pspec(partitionSpecProxy, null); List partitionNames = client.listPartitionNames(DB_NAME, TABLE_NAME, MAX); Assert.assertNotNull(partitionNames); @@ -866,7 +866,7 @@ public void testAddPartitionSpecMoreValues() throws Exception { buildPartition(Lists.newArrayList("2017", "march"), getYearAndMonthPartCols(), 1); PartitionSpecProxy partitionSpecProxy = buildPartitionSpec(DB_NAME, TABLE_NAME, null, Lists.newArrayList(partition)); - client.add_partitions_pspec(partitionSpecProxy); + client.add_partitions_pspec(partitionSpecProxy, null); } @Test @@ 
-880,7 +880,7 @@ public void testAddPartitionSpecWithSharedSDNoRelativePath() throws Exception { String location = table.getSd().getLocation() + "/noRelativePath/"; PartitionSpecProxy partitionSpecProxy = buildPartitionSpecWithSharedSD( Lists.newArrayList(partition1, partition2), buildSD(location)); - client.add_partitions_pspec(partitionSpecProxy); + client.add_partitions_pspec(partitionSpecProxy, null); Partition resultPart1 = client.getPartition(DB_NAME, TABLE_NAME, Lists.newArrayList("2007")); Assert.assertEquals(location + "null", resultPart1.getSd().getLocation()); @@ -913,7 +913,7 @@ public void testAddPartitionSpecOneInvalid() throws Exception { buildPartitionSpec(DB_NAME, TABLE_NAME, null, partitions); try { - client.add_partitions_pspec(partitionSpecProxy); + client.add_partitions_pspec(partitionSpecProxy, null); Assert.fail("MetaException should have happened."); } catch (MetaException e) { // Expected exception @@ -943,7 +943,7 @@ public void testAddPartitionSpecInvalidLocation() throws Exception { buildPartitionSpec(DB_NAME, TABLE_NAME, null, partitions); try { - client.add_partitions_pspec(partitionSpecProxy); + client.add_partitions_pspec(partitionSpecProxy, null); Assert.fail("MetaException should have happened."); } catch (MetaException e) { // Expected exception @@ -979,7 +979,7 @@ public void testAddPartitionSpecMoreThanThreadCountsOneFails() throws Exception buildPartitionSpec(DB_NAME, TABLE_NAME, null, partitions); try { - client.add_partitions_pspec(partitionSpecProxy); + client.add_partitions_pspec(partitionSpecProxy, null); Assert.fail("MetaException should have happened."); } catch (MetaException e) { // Expected exception @@ -1019,7 +1019,7 @@ private Table createTable(String dbName, String tableName, List par .setStoredAsSubDirectories(false) .addSerdeParam("partTestSerdeParamKey", "partTestSerdeParamValue") .setLocation(location) - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); return client.getTable(dbName, tableName); } @@ -1262,6 +1262,6 @@ private void createView(String tableName) throws Exception { .addCol("test_value", "string", "test col value") .addPartCol(YEAR_COL_NAME, DEFAULT_COL_TYPE) .setLocation(null) - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); } } diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java index 4fc3688..e1bd426 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java @@ -130,7 +130,7 @@ private Table createTestTable(IMetaStoreClient client, String dbName, String tab table.putToParameters("PARTITION_LEVEL_PRIVILEGE", "true"); } - client.createTable(table); + client.createTable(table, null); return table; } @@ -138,7 +138,7 @@ private void addPartition(IMetaStoreClient client, Table table, List val throws TException { PartitionBuilder partitionBuilder = new PartitionBuilder().inTable(table); values.forEach(val -> partitionBuilder.addValue(val)); - client.add_partition(partitionBuilder.build(metaStore.getConf())); + client.add_partition(partitionBuilder.build(metaStore.getConf()), null); } private List> createTable4PartColsParts(IMetaStoreClient client) throws @@ -251,7 +251,7 
@@ public void otherCatalog() throws TException { .addCol("id", "int") .addCol("name", "string") .addPartCol("partcol", "string") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); Partition[] parts = new Partition[5]; for (int i = 0; i < 5; i++) { @@ -261,7 +261,7 @@ public void otherCatalog() throws TException { .setLocation(MetaStoreTestUtils.getTestWarehouseDir("b" + i)) .build(metaStore.getConf()); } - client.add_partitions(Arrays.asList(parts)); + client.add_partitions(Arrays.asList(parts), null); Partition newPart = client.getPartition(catName, dbName, tableName, Collections.singletonList("a0")); @@ -314,7 +314,7 @@ public void deprecatedCalls() throws TException { .addCol("id", "int") .addCol("name", "string") .addPartCol("partcol", "string") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); Partition[] parts = new Partition[5]; for (int i = 0; i < 5; i++) { @@ -324,7 +324,7 @@ public void deprecatedCalls() throws TException { .setLocation(MetaStoreTestUtils.getTestWarehouseDir("a" + i)) .build(metaStore.getConf()); } - client.add_partitions(Arrays.asList(parts)); + client.add_partitions(Arrays.asList(parts), null); Partition newPart = client.getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a0")); diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAppendPartitions.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAppendPartitions.java index 462584a..0e6335a 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAppendPartitions.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAppendPartitions.java @@ -118,7 +118,7 @@ public void testAppendPartition() throws Exception { Table table = tableWithPartitions; Partition appendedPart = - client.appendPartition(table.getDbName(), table.getTableName(), partitionValues); + client.appendPartition(table.getDbName(), table.getTableName(), partitionValues, null); Assert.assertNotNull(appendedPart); Partition partition = @@ -137,7 +137,7 @@ public void testAppendPartitionToExternalTable() throws Exception { Table table = externalTable; Partition appendedPart = - client.appendPartition(table.getDbName(), table.getTableName(), partitionValues); + client.appendPartition(table.getDbName(), table.getTableName(), partitionValues, null); Assert.assertNotNull(appendedPart); Partition partition = @@ -157,9 +157,9 @@ public void testAppendPartitionMultiplePartitions() throws Exception { Table table = tableWithPartitions; - client.appendPartition(table.getDbName(), table.getTableName(), partitionValues1); - client.appendPartition(table.getDbName(), table.getTableName(), partitionValues2); - client.appendPartition(table.getDbName(), table.getTableName(), partitionValues3); + client.appendPartition(table.getDbName(), table.getTableName(), partitionValues1, null); + client.appendPartition(table.getDbName(), table.getTableName(), partitionValues2, null); + client.appendPartition(table.getDbName(), table.getTableName(), partitionValues3, null); verifyPartitionNames(table, Lists.newArrayList("year=2017/month=may", "year=2018/month=may", "year=2017/month=june", @@ -171,7 +171,7 @@ public void testAppendPartitionToTableWithoutPartCols() throws Exception { List partitionValues = Lists.newArrayList("2017", "may"); Table table = tableNoPartColumns; - 
client.appendPartition(table.getDbName(), table.getTableName(), partitionValues); + client.appendPartition(table.getDbName(), table.getTableName(), partitionValues, null); } @Test(expected = MetaException.class) @@ -179,7 +179,7 @@ public void testAppendPartitionToView() throws Exception { List partitionValues = Lists.newArrayList("2017", "may"); Table table = tableView; - client.appendPartition(table.getDbName(), table.getTableName(), partitionValues); + client.appendPartition(table.getDbName(), table.getTableName(), partitionValues, null); } @Test(expected = AlreadyExistsException.class) @@ -187,63 +187,63 @@ public void testAppendPartitionAlreadyExists() throws Exception { List partitionValues = Lists.newArrayList("2017", "april"); Table table = tableWithPartitions; - client.appendPartition(table.getDbName(), table.getTableName(), partitionValues); + client.appendPartition(table.getDbName(), table.getTableName(), partitionValues, null); } @Test(expected = InvalidObjectException.class) public void testAppendPartitionNonExistingDB() throws Exception { List partitionValues = Lists.newArrayList("2017", "may"); - client.appendPartition("nonexistingdb", tableWithPartitions.getTableName(), partitionValues); + client.appendPartition("nonexistingdb", tableWithPartitions.getTableName(), partitionValues, null); } @Test(expected = InvalidObjectException.class) public void testAppendPartitionNonExistingTable() throws Exception { List partitionValues = Lists.newArrayList("2017", "may"); - client.appendPartition(tableWithPartitions.getDbName(), "nonexistingtable", partitionValues); + client.appendPartition(tableWithPartitions.getDbName(), "nonexistingtable", partitionValues, null); } @Test(expected = InvalidObjectException.class) public void testAppendPartitionEmptyDB() throws Exception { List partitionValues = Lists.newArrayList("2017", "may"); - client.appendPartition("", tableWithPartitions.getTableName(), partitionValues); + client.appendPartition("", tableWithPartitions.getTableName(), partitionValues, null); } @Test(expected = InvalidObjectException.class) public void testAppendPartitionEmptyTable() throws Exception { List partitionValues = Lists.newArrayList("2017", "may"); - client.appendPartition(tableWithPartitions.getDbName(), "", partitionValues); + client.appendPartition(tableWithPartitions.getDbName(), "", partitionValues, null); } @Test(expected = MetaException.class) public void testAppendPartitionNullDB() throws Exception { List partitionValues = Lists.newArrayList("2017", "may"); - client.appendPartition(null, tableWithPartitions.getTableName(), partitionValues); + client.appendPartition(null, tableWithPartitions.getTableName(), partitionValues, null); } @Test(expected = MetaException.class) public void testAppendPartitionNullTable() throws Exception { List partitionValues = Lists.newArrayList("2017", "may"); - client.appendPartition(tableWithPartitions.getDbName(), null, partitionValues); + client.appendPartition(tableWithPartitions.getDbName(), null, partitionValues, null); } @Test(expected = MetaException.class) public void testAppendPartitionEmptyPartValues() throws Exception { Table table = tableWithPartitions; - client.appendPartition(table.getDbName(), table.getTableName(), new ArrayList<>()); + client.appendPartition(table.getDbName(), table.getTableName(), new ArrayList<>(), null); } @Test(expected = MetaException.class) public void testAppendPartitionNullPartValues() throws Exception { Table table = tableWithPartitions; - client.appendPartition(table.getDbName(), 
table.getTableName(), (List) null); + client.appendPartition(table.getDbName(), table.getTableName(), (List) null, null); } @Test @@ -253,7 +253,7 @@ public void testAppendPartitionLessPartValues() throws Exception { Table table = tableWithPartitions; try { - client.appendPartition(table.getDbName(), table.getTableName(), partitionValues); + client.appendPartition(table.getDbName(), table.getTableName(), partitionValues, null); Assert.fail("Exception should have been thrown."); } catch (MetaException e) { // Expected exception @@ -271,7 +271,7 @@ public void testAppendPartitionMorePartValues() throws Exception { Table table = tableWithPartitions; try { - client.appendPartition(table.getDbName(), table.getTableName(), partitionValues); + client.appendPartition(table.getDbName(), table.getTableName(), partitionValues, null); Assert.fail("Exception should have been thrown."); } catch (MetaException e) { // Expected exception @@ -291,7 +291,7 @@ public void testAppendPart() throws Exception { String partitionName = "year=2017/month=may"; Partition appendedPart = - client.appendPartition(table.getDbName(), table.getTableName(), partitionName); + client.appendPartition(table.getDbName(), table.getTableName(), partitionName, null); Assert.assertNotNull(appendedPart); Partition partition = client.getPartition(table.getDbName(), table.getTableName(), @@ -310,7 +310,7 @@ public void testAppendPartToExternalTable() throws Exception { String partitionName = "year=2017/month=may"; Partition appendedPart = - client.appendPartition(table.getDbName(), table.getTableName(), partitionName); + client.appendPartition(table.getDbName(), table.getTableName(), partitionName, null); Assert.assertNotNull(appendedPart); Partition partition = client.getPartition(table.getDbName(), table.getTableName(), @@ -329,9 +329,9 @@ public void testAppendPartMultiplePartitions() throws Exception { String partitionName3 = "year=2017/month=june"; Table table = tableWithPartitions; - client.appendPartition(table.getDbName(), table.getTableName(), partitionName1); - client.appendPartition(table.getDbName(), table.getTableName(), partitionName2); - client.appendPartition(table.getDbName(), table.getTableName(), partitionName3); + client.appendPartition(table.getDbName(), table.getTableName(), partitionName1, null); + client.appendPartition(table.getDbName(), table.getTableName(), partitionName2, null); + client.appendPartition(table.getDbName(), table.getTableName(), partitionName3, null); verifyPartitionNames(table, Lists.newArrayList(partitionName1, partitionName2, partitionName3, "year=2017/month=march", "year=2017/month=april", "year=2018/month=march")); @@ -342,7 +342,7 @@ public void testAppendPartToTableWithoutPartCols() throws Exception { String partitionName = "year=2017/month=may"; Table table = tableNoPartColumns; - client.appendPartition(table.getDbName(), table.getTableName(), partitionName); + client.appendPartition(table.getDbName(), table.getTableName(), partitionName, null); } @Test(expected = MetaException.class) @@ -350,7 +350,7 @@ public void testAppendPartToView() throws Exception { String partitionName = "year=2017/month=may"; Table table = tableView; - client.appendPartition(table.getDbName(), table.getTableName(), partitionName); + client.appendPartition(table.getDbName(), table.getTableName(), partitionName, null); } @Test(expected = AlreadyExistsException.class) @@ -358,63 +358,63 @@ public void testAppendPartAlreadyExists() throws Exception { String partitionName = "year=2017/month=april"; Table table = 
tableWithPartitions; - client.appendPartition(table.getDbName(), table.getTableName(), partitionName); + client.appendPartition(table.getDbName(), table.getTableName(), partitionName, null); } @Test(expected = InvalidObjectException.class) public void testAppendPartNonExistingDB() throws Exception { String partitionName = "year=2017/month=april"; - client.appendPartition("nonexistingdb", tableWithPartitions.getTableName(), partitionName); + client.appendPartition("nonexistingdb", tableWithPartitions.getTableName(), partitionName, null); } @Test(expected = InvalidObjectException.class) public void testAppendPartNonExistingTable() throws Exception { String partitionName = "year=2017/month=april"; - client.appendPartition(tableWithPartitions.getDbName(), "nonexistingtable", partitionName); + client.appendPartition(tableWithPartitions.getDbName(), "nonexistingtable", partitionName, null); } @Test(expected = InvalidObjectException.class) public void testAppendPartEmptyDB() throws Exception { String partitionName = "year=2017/month=april"; - client.appendPartition("", tableWithPartitions.getTableName(), partitionName); + client.appendPartition("", tableWithPartitions.getTableName(), partitionName, null); } @Test(expected = InvalidObjectException.class) public void testAppendPartEmptyTable() throws Exception { String partitionName = "year=2017/month=april"; - client.appendPartition(tableWithPartitions.getDbName(), "", partitionName); + client.appendPartition(tableWithPartitions.getDbName(), "", partitionName, null); } @Test(expected = MetaException.class) public void testAppendPartNullDB() throws Exception { String partitionName = "year=2017/month=april"; - client.appendPartition(null, tableWithPartitions.getTableName(), partitionName); + client.appendPartition(null, tableWithPartitions.getTableName(), partitionName, null); } @Test(expected = MetaException.class) public void testAppendPartNullTable() throws Exception { String partitionName = "year=2017/month=april"; - client.appendPartition(tableWithPartitions.getDbName(), null, partitionName); + client.appendPartition(tableWithPartitions.getDbName(), null, partitionName, null); } @Test(expected = MetaException.class) public void testAppendPartEmptyPartName() throws Exception { Table table = tableWithPartitions; - client.appendPartition(table.getDbName(), table.getTableName(), ""); + client.appendPartition(table.getDbName(), table.getTableName(), "", null); } @Test(expected = MetaException.class) public void testAppendPartNullPartName() throws Exception { Table table = tableWithPartitions; - client.appendPartition(table.getDbName(), table.getTableName(), (String) null); + client.appendPartition(table.getDbName(), table.getTableName(), (String) null, null); } @Test(expected = InvalidObjectException.class) @@ -422,7 +422,7 @@ public void testAppendPartLessPartValues() throws Exception { String partitionName = "year=2019"; Table table = tableWithPartitions; - client.appendPartition(table.getDbName(), table.getTableName(), partitionName); + client.appendPartition(table.getDbName(), table.getTableName(), partitionName, null); } @Test @@ -430,7 +430,7 @@ public void testAppendPartMorePartValues() throws Exception { String partitionName = "year=2019/month=march/day=12"; Table table = tableWithPartitions; - client.appendPartition(table.getDbName(), table.getTableName(), partitionName); + client.appendPartition(table.getDbName(), table.getTableName(), partitionName, null); } @Test(expected = InvalidObjectException.class) @@ -438,7 +438,7 @@ public void 
testAppendPartInvalidPartName() throws Exception { String partitionName = "invalidpartname"; Table table = tableWithPartitions; - client.appendPartition(table.getDbName(), table.getTableName(), partitionName); + client.appendPartition(table.getDbName(), table.getTableName(), partitionName, null); } @Test(expected = InvalidObjectException.class) @@ -446,7 +446,7 @@ public void testAppendPartWrongColumnInPartName() throws Exception { String partitionName = "year=2019/honap=march"; Table table = tableWithPartitions; - client.appendPartition(table.getDbName(), table.getTableName(), partitionName); + client.appendPartition(table.getDbName(), table.getTableName(), partitionName, null); } @Test @@ -471,10 +471,10 @@ public void otherCatalog() throws TException { .addCol("id", "int") .addCol("name", "string") .addPartCol("partcol", "string") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); Partition created = - client.appendPartition(catName, dbName, tableName, Collections.singletonList("a1")); + client.appendPartition(catName, dbName, tableName, Collections.singletonList("a1"), null); Assert.assertEquals(1, created.getValuesSize()); Assert.assertEquals("a1", created.getValues().get(0)); Partition fetched = @@ -493,7 +493,7 @@ public void otherCatalog() throws TException { @Test(expected = InvalidObjectException.class) public void testAppendPartitionBogusCatalog() throws Exception { client.appendPartition("nosuch", DB_NAME, tableWithPartitions.getTableName(), - Lists.newArrayList("2017", "may")); + Lists.newArrayList("2017", "may"), null); } @Test(expected = InvalidObjectException.class) @@ -546,7 +546,7 @@ private Table createTable(String tableName, List partCols, Map 0") .build(metaStore.getConf()); - client.createTableWithConstraints(table, null, null, null, null, null, cc); + client.createTableWithConstraints(table, null, null, null, null, null, cc, null); CheckConstraintsRequest rqst = new CheckConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); List fetched = client.getCheckConstraints(rqst); Assert.assertEquals(1, fetched.size()); @@ -271,7 +271,7 @@ public void createTableWithConstraintsPkInOtherCatalog() throws TException { .setCheckExpression("> 0") .build(metaStore.getConf()); - client.createTableWithConstraints(table, null, null, null, null, null, cc); + client.createTableWithConstraints(table, null, null, null, null, null, cc, null); CheckConstraintsRequest rqst = new CheckConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); List fetched = client.getCheckConstraints(rqst); Assert.assertEquals(1, fetched.size()); diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestDatabases.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestDatabases.java index 86b69f0..274cb32 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestDatabases.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestDatabases.java @@ -338,7 +338,7 @@ public void testDropDatabaseWithTable() throws Exception { .setDbName(database.getName()) .setTableName("test_table") .addCol("test_col", "int") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); client.dropDatabase(database.getName(), true, true, false); } @@ -351,7 +351,7 @@ public void testDropDatabaseWithTableCascade() throws Exception 
{ .setDbName(database.getName()) .setTableName("test_table") .addCol("test_col", "int") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); client.dropDatabase(database.getName(), true, true, true); Assert.assertFalse("The directory should be removed", diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestDefaultConstraint.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestDefaultConstraint.java index f3e026c..c145d83 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestDefaultConstraint.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestDefaultConstraint.java @@ -103,7 +103,7 @@ public void setUp() throws Exception { .setTableName("test_table_1") .addCol("col1", "int") .addCol("col2", "varchar(32)") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); testTables[1] = new TableBuilder() @@ -111,7 +111,7 @@ public void setUp() throws Exception { .setTableName("test_table_2") .addCol("col1", "int") .addCol("col2", "varchar(32)") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); testTables[2] = new TableBuilder() @@ -119,7 +119,7 @@ public void setUp() throws Exception { .setTableName("test_table_3") .addCol("col1", "int") .addCol("col2", "varchar(32)") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); // Reload tables from the MetaStore for(int i=0; i < testTables.length; i++) { @@ -235,7 +235,7 @@ public void createTableWithConstraintsPk() throws TException { .setDefaultVal(0) .build(metaStore.getConf()); - client.createTableWithConstraints(table, null, null, null, null, dv, null); + client.createTableWithConstraints(table, null, null, null, null, dv, null, null); DefaultConstraintsRequest rqst = new DefaultConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); List fetched = client.getDefaultConstraints(rqst); Assert.assertEquals(1, fetched.size()); @@ -271,7 +271,7 @@ public void createTableWithConstraintsPkInOtherCatalog() throws TException { .setDefaultVal(0) .build(metaStore.getConf()); - client.createTableWithConstraints(table, null, null, null, null, dv, null); + client.createTableWithConstraints(table, null, null, null, null, dv, null, null); DefaultConstraintsRequest rqst = new DefaultConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); List fetched = client.getDefaultConstraints(rqst); Assert.assertEquals(1, fetched.size()); diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestDropPartitions.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestDropPartitions.java index 2a566e8..d8ca55a 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestDropPartitions.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestDropPartitions.java @@ -522,7 +522,7 @@ public void otherCatalog() throws TException { .addCol("id", "int") .addCol("name", "string") .addPartCol("partcol", "string") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); Partition[] parts = new Partition[2]; for (int i = 0; i < parts.length; i++) { @@ -531,7 +531,7 @@ public void 
otherCatalog() throws TException { .addValue("a" + i) .build(metaStore.getConf()); } - client.add_partitions(Arrays.asList(parts)); + client.add_partitions(Arrays.asList(parts), null); List fetched = client.listPartitions(catName, dbName, tableName, (short)-1); Assert.assertEquals(parts.length, fetched.size()); @@ -576,7 +576,7 @@ private Table createTable(String tableName, List partCols, .setPartCols(partCols) .setLocation(metaStore.getWarehouseRoot() + "/" + tableName) .setTableParams(tableParams) - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); return table; } diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestExchangePartitions.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestExchangePartitions.java index 1a2b7e4..ba70bb0 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestExchangePartitions.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestExchangePartitions.java @@ -109,7 +109,7 @@ public void testExchangePartitions() throws Exception { Map partitionSpecs = getPartitionSpec(partitions[1]); List exchangedPartitions = client.exchange_partitions(partitionSpecs, sourceTable.getDbName(), - sourceTable.getTableName(), destTable.getDbName(), destTable.getTableName()); + sourceTable.getTableName(), destTable.getDbName(), destTable.getTableName(), null); Assert.assertEquals(1, exchangedPartitions.size()); String partitionName = @@ -134,7 +134,7 @@ public void testExchangePartitionsDestTableHasPartitions() throws Exception { Map partitionSpecs = getPartitionSpec(partitions[1]); client.exchange_partitions(partitionSpecs, DB_NAME, sourceTable.getTableName(), DB_NAME, - destTable.getTableName()); + destTable.getTableName(), null); checkExchangedPartitions(sourceTable, destTable, Lists.newArrayList(partitions[1])); checkRemainingPartitions(sourceTable, destTable, @@ -157,7 +157,7 @@ public void testExchangePartitionsYearSet() throws Exception { Map partitionSpecs = getPartitionSpec(Lists.newArrayList("2017", "", "")); List exchangedPartitions = client.exchange_partitions(partitionSpecs, sourceTable.getDbName(), - sourceTable.getTableName(), destTable.getDbName(), destTable.getTableName()); + sourceTable.getTableName(), destTable.getDbName(), destTable.getTableName(), null); Assert.assertEquals(4, exchangedPartitions.size()); List exchangedPartNames = new ArrayList<>(); @@ -180,7 +180,7 @@ public void testExchangePartitionsYearAndMonthSet() throws Exception { Map partitionSpecs = getPartitionSpec(Lists.newArrayList("2017", "march", "")); client.exchange_partitions(partitionSpecs, sourceTable.getDbName(), - sourceTable.getTableName(), destTable.getDbName(), destTable.getTableName()); + sourceTable.getTableName(), destTable.getDbName(), destTable.getTableName(), null); checkExchangedPartitions(sourceTable, destTable, Lists.newArrayList(partitions[0], partitions[1])); @@ -197,7 +197,7 @@ public void testExchangePartitionsBetweenDBs() throws Exception { Map partitionSpecs = getPartitionSpec(Lists.newArrayList("2017", "march", "")); client.exchange_partitions(partitionSpecs, sourceTable.getDbName(), sourceTable.getTableName(), - dest.getDbName(), dest.getTableName()); + dest.getDbName(), dest.getTableName(), null); checkExchangedPartitions(sourceTable, dest, Lists.newArrayList(partitions[0], partitions[1])); checkRemainingPartitions(sourceTable, 
dest, @@ -219,7 +219,7 @@ public void testExchangePartitionsCustomTableLocations() throws Exception { Map partitionSpecs = getPartitionSpec(parts[1]); client.exchange_partitions(partitionSpecs, source.getDbName(), source.getTableName(), - dest.getDbName(), dest.getTableName()); + dest.getDbName(), dest.getTableName(), null); checkExchangedPartitions(source, dest, Lists.newArrayList(parts[1])); checkRemainingPartitions(source, dest, Lists.newArrayList(parts[0])); @@ -241,7 +241,7 @@ public void testExchangePartitionsCustomTableAndPartLocation() throws Exception Map partitionSpecs = getPartitionSpec(parts[1]); try { client.exchange_partitions(partitionSpecs, source.getDbName(), - source.getTableName(), dest.getDbName(), dest.getTableName()); + source.getTableName(), dest.getDbName(), dest.getTableName(), null); Assert.fail("MetaException should have been thrown."); } catch (MetaException e) { // Expected exception as FileNotFoundException will occur if the partitions have custom @@ -269,7 +269,7 @@ public void testExchangePartitionsCustomPartLocation() throws Exception { Map partitionSpecs = getPartitionSpec(parts[1]); try { client.exchange_partitions(partitionSpecs, source.getDbName(), - source.getTableName(), dest.getDbName(), dest.getTableName()); + source.getTableName(), dest.getDbName(), dest.getTableName(), null); Assert.fail("MetaException should have been thrown."); } catch (MetaException e) { // Expected exception as FileNotFoundException will occur if the partitions have custom @@ -289,7 +289,7 @@ public void testExchangePartitionsNonExistingPartLocation() throws Exception { Map partitionSpecs = getPartitionSpec(partitions[1]); metaStore.cleanWarehouseDirs(); client.exchange_partitions(partitionSpecs, sourceTable.getDbName(), - sourceTable.getTableName(), destTable.getDbName(), destTable.getTableName()); + sourceTable.getTableName(), destTable.getDbName(), destTable.getTableName(), null); } @Test(expected = MetaException.class) @@ -297,7 +297,7 @@ public void testExchangePartitionsNonExistingSourceTable() throws Exception { Map partitionSpecs = getPartitionSpec(partitions[1]); client.exchange_partitions(partitionSpecs, DB_NAME, "nonexistingtable", destTable.getDbName(), - destTable.getTableName()); + destTable.getTableName(), null); } @Test(expected = MetaException.class) @@ -305,7 +305,7 @@ public void testExchangePartitionsNonExistingSourceDB() throws Exception { Map partitionSpecs = getPartitionSpec(partitions[1]); client.exchange_partitions(partitionSpecs, "nonexistingdb", sourceTable.getTableName(), - destTable.getDbName(), destTable.getTableName()); + destTable.getDbName(), destTable.getTableName(), null); } @Test(expected = MetaException.class) @@ -313,7 +313,7 @@ public void testExchangePartitionsNonExistingDestTable() throws Exception { Map partitionSpecs = getPartitionSpec(partitions[1]); client.exchange_partitions(partitionSpecs, sourceTable.getDbName(), sourceTable.getTableName(), - DB_NAME, "nonexistingtable"); + DB_NAME, "nonexistingtable", null); } @Test(expected = MetaException.class) @@ -321,7 +321,7 @@ public void testExchangePartitionsNonExistingDestDB() throws Exception { Map partitionSpecs = getPartitionSpec(partitions[1]); client.exchange_partitions(partitionSpecs, sourceTable.getDbName(), sourceTable.getTableName(), - "nonexistingdb", destTable.getTableName()); + "nonexistingdb", destTable.getTableName(), null); } @Test(expected = MetaException.class) @@ -329,7 +329,7 @@ public void testExchangePartitionsEmptySourceTable() throws Exception { Map 
partitionSpecs = getPartitionSpec(partitions[1]); client.exchange_partitions(partitionSpecs, DB_NAME, "", destTable.getDbName(), - destTable.getTableName()); + destTable.getTableName(), null); } @Test(expected = MetaException.class) @@ -337,7 +337,7 @@ public void testExchangePartitionsEmptySourceDB() throws Exception { Map partitionSpecs = getPartitionSpec(partitions[1]); client.exchange_partitions(partitionSpecs, "", sourceTable.getTableName(), - destTable.getDbName(), destTable.getTableName()); + destTable.getDbName(), destTable.getTableName(), null); } @Test(expected = MetaException.class) @@ -345,7 +345,7 @@ public void testExchangePartitionsEmptyDestTable() throws Exception { Map partitionSpecs = getPartitionSpec(partitions[1]); client.exchange_partitions(partitionSpecs, sourceTable.getDbName(), sourceTable.getTableName(), - DB_NAME, ""); + DB_NAME, "", null); } @Test(expected = MetaException.class) @@ -353,7 +353,7 @@ public void testExchangePartitionsEmptyDestDB() throws Exception { Map partitionSpecs = getPartitionSpec(partitions[1]); client.exchange_partitions(partitionSpecs, sourceTable.getDbName(), sourceTable.getTableName(), - "", destTable.getTableName()); + "", destTable.getTableName(), null); } @Test(expected = MetaException.class) @@ -361,7 +361,7 @@ public void testExchangePartitionsNullSourceTable() throws Exception { Map partitionSpecs = getPartitionSpec(partitions[1]); client.exchange_partitions(partitionSpecs, DB_NAME, null, destTable.getDbName(), - destTable.getTableName()); + destTable.getTableName(), null); } @Test(expected = MetaException.class) @@ -369,7 +369,7 @@ public void testExchangePartitionsNullSourceDB() throws Exception { Map partitionSpecs = getPartitionSpec(partitions[1]); client.exchange_partitions(partitionSpecs, null, sourceTable.getTableName(), - destTable.getDbName(), destTable.getTableName()); + destTable.getDbName(), destTable.getTableName(), null); } @Test(expected = MetaException.class) @@ -377,7 +377,7 @@ public void testExchangePartitionsNullDestTable() throws Exception { Map partitionSpecs = getPartitionSpec(partitions[1]); client.exchange_partitions(partitionSpecs, sourceTable.getDbName(), sourceTable.getTableName(), - DB_NAME, null); + DB_NAME, null, null); } @Test(expected = MetaException.class) @@ -385,7 +385,7 @@ public void testExchangePartitionsNullDestDB() throws Exception { Map partitionSpecs = getPartitionSpec(partitions[1]); client.exchange_partitions(partitionSpecs, sourceTable.getDbName(), sourceTable.getTableName(), - null, destTable.getTableName()); + null, destTable.getTableName(), null); } @Test(expected = MetaException.class) @@ -393,13 +393,13 @@ public void testExchangePartitionsEmptyPartSpec() throws Exception { Map partitionSpecs = new HashMap<>(); client.exchange_partitions(partitionSpecs, sourceTable.getDbName(), - sourceTable.getTableName(), destTable.getDbName(), destTable.getTableName()); + sourceTable.getTableName(), destTable.getDbName(), destTable.getTableName(), null); } @Test(expected = MetaException.class) public void testExchangePartitionsNullPartSpec() throws Exception { client.exchange_partitions(null, sourceTable.getDbName(), sourceTable.getTableName(), null, - destTable.getTableName()); + destTable.getTableName(), null); } @Test(expected = MetaException.class) @@ -407,11 +407,11 @@ public void testExchangePartitionsPartAlreadyExists() throws Exception { Partition partition = buildPartition(destTable, Lists.newArrayList("2017", "march", "22"), null); - client.add_partition(partition); + 
client.add_partition(partition, null); Map partitionSpecs = getPartitionSpec(partitions[1]); client.exchange_partitions(partitionSpecs, DB_NAME, sourceTable.getTableName(), DB_NAME, - destTable.getTableName()); + destTable.getTableName(), null); } @Test @@ -419,12 +419,12 @@ public void testExchangePartitionsOneFail() throws Exception { Partition partition = buildPartition(destTable, Lists.newArrayList("2017", "march", "22"), null); - client.add_partition(partition); + client.add_partition(partition, null); Map partitionSpecs = getPartitionSpec(Lists.newArrayList("2017", "", "")); try { client.exchange_partitions(partitionSpecs, DB_NAME, sourceTable.getTableName(), DB_NAME, - destTable.getTableName()); + destTable.getTableName(), null); Assert.fail( "Exception should have been thrown as one of the partitions already exists in the dest table."); } catch (MetaException e) { @@ -456,7 +456,7 @@ public void testExchangePartitionsDifferentColsInTables() throws Exception { Map partitionSpecs = getPartitionSpec(partitions[1]); client.exchange_partitions(partitionSpecs, sourceTable.getDbName(), sourceTable.getTableName(), - dest.getDbName(), dest.getTableName()); + dest.getDbName(), dest.getTableName(), null); } @Test(expected = MetaException.class) @@ -469,7 +469,7 @@ public void testExchangePartitionsDifferentColNameInTables() throws Exception { Map partitionSpecs = getPartitionSpec(partitions[1]); client.exchange_partitions(partitionSpecs, sourceTable.getDbName(), sourceTable.getTableName(), - dest.getDbName(), dest.getTableName()); + dest.getDbName(), dest.getTableName(), null); } @Test(expected = MetaException.class) @@ -482,7 +482,7 @@ public void testExchangePartitionsDifferentColTypesInTables() throws Exception { Map partitionSpecs = getPartitionSpec(partitions[1]); client.exchange_partitions(partitionSpecs, sourceTable.getDbName(), sourceTable.getTableName(), - dest.getDbName(), dest.getTableName()); + dest.getDbName(), dest.getTableName(), null); } @Test(expected = MetaException.class) @@ -495,7 +495,7 @@ public void testExchangePartitionsDifferentPartColsInTables() throws Exception { Map partitionSpecs = getPartitionSpec(partitions[1]); client.exchange_partitions(partitionSpecs, sourceTable.getDbName(), - sourceTable.getTableName(), dest.getDbName(), dest.getTableName()); + sourceTable.getTableName(), dest.getDbName(), dest.getTableName(), null); } @Test(expected = MetaException.class) @@ -509,7 +509,7 @@ public void testExchangePartitionsDifferentPartColNameInTables() throws Exceptio Map partitionSpecs = getPartitionSpec(partitions[1]); client.exchange_partitions(partitionSpecs, sourceTable.getDbName(), - sourceTable.getTableName(), dest.getDbName(), dest.getTableName()); + sourceTable.getTableName(), dest.getDbName(), dest.getTableName(), null); } @Test(expected = MetaException.class) @@ -523,7 +523,7 @@ public void testExchangePartitionsDifferentPartColTypesInTables() throws Excepti Map partitionSpecs = getPartitionSpec(partitions[1]); client.exchange_partitions(partitionSpecs, sourceTable.getDbName(), - sourceTable.getTableName(), dest.getDbName(), dest.getTableName()); + sourceTable.getTableName(), dest.getDbName(), dest.getTableName(), null); } @Test @@ -533,7 +533,7 @@ public void testExchangePartitionsLessValueInPartSpec() throws Exception { partitionSpecs.put(YEAR_COL_NAME, "2017"); partitionSpecs.put(MONTH_COL_NAME, "march"); client.exchange_partitions(partitionSpecs, sourceTable.getDbName(), - sourceTable.getTableName(), destTable.getDbName(), destTable.getTableName()); + 
sourceTable.getTableName(), destTable.getDbName(), destTable.getTableName(), null); checkExchangedPartitions(sourceTable, destTable, Lists.newArrayList(partitions[0], partitions[1])); checkRemainingPartitions(sourceTable, destTable, @@ -549,7 +549,7 @@ public void testExchangePartitionsMoreValueInPartSpec() throws Exception { partitionSpecs.put(DAY_COL_NAME, "22"); partitionSpecs.put("hour", "18"); client.exchange_partitions(partitionSpecs, sourceTable.getDbName(), - sourceTable.getTableName(), destTable.getDbName(), destTable.getTableName()); + sourceTable.getTableName(), destTable.getDbName(), destTable.getTableName(), null); checkExchangedPartitions(sourceTable, destTable, Lists.newArrayList(partitions[1])); checkRemainingPartitions(sourceTable, destTable, Lists.newArrayList(partitions[0], partitions[2], partitions[3], partitions[4])); @@ -563,7 +563,7 @@ public void testExchangePartitionsDifferentValuesInPartSpec() throws Exception { partitionSpecs.put("honap", "march"); partitionSpecs.put("nap", "22"); client.exchange_partitions(partitionSpecs, sourceTable.getDbName(), - sourceTable.getTableName(), destTable.getDbName(), destTable.getTableName()); + sourceTable.getTableName(), destTable.getDbName(), destTable.getTableName(), null); checkExchangedPartitions(sourceTable, destTable, Lists.newArrayList(partitions[0], partitions[1], partitions[2], partitions[3])); checkRemainingPartitions(sourceTable, destTable, Lists.newArrayList(partitions[4])); @@ -577,7 +577,7 @@ public void testExchangePartitionsNonExistingValuesInPartSpec() throws Exception partitionSpecs.put("honap", "march"); partitionSpecs.put("nap", "22"); client.exchange_partitions(partitionSpecs, sourceTable.getDbName(), - sourceTable.getTableName(), destTable.getDbName(), destTable.getTableName()); + sourceTable.getTableName(), destTable.getDbName(), destTable.getTableName(), null); } @Test @@ -589,7 +589,7 @@ public void testExchangePartitionsOnlyMonthSetInPartSpec() throws Exception { partitionSpecs.put(DAY_COL_NAME, ""); try { client.exchange_partitions(partitionSpecs, sourceTable.getDbName(), - sourceTable.getTableName(), destTable.getDbName(), destTable.getTableName()); + sourceTable.getTableName(), destTable.getDbName(), destTable.getTableName(), null); Assert.fail("MetaException should have been thrown."); } catch (MetaException e) { // Expected exception @@ -610,7 +610,7 @@ public void testExchangePartitionsYearAndDaySetInPartSpec() throws Exception { partitionSpecs.put(DAY_COL_NAME, "22"); try { client.exchange_partitions(partitionSpecs, sourceTable.getDbName(), - sourceTable.getTableName(), destTable.getDbName(), destTable.getTableName()); + sourceTable.getTableName(), destTable.getDbName(), destTable.getTableName(), null); Assert.fail("MetaException should have been thrown."); } catch (MetaException e) { // Expected exception @@ -628,7 +628,7 @@ public void testExchangePartitionsNoPartExists() throws Exception { Map partitionSpecs = getPartitionSpec(Lists.newArrayList("2017", "march", "25")); client.exchange_partitions(partitionSpecs, sourceTable.getDbName(), - sourceTable.getTableName(), destTable.getDbName(), destTable.getTableName()); + sourceTable.getTableName(), destTable.getDbName(), destTable.getTableName(), null); } @Test(expected = MetaException.class) @@ -636,7 +636,7 @@ public void testExchangePartitionsNoPartExistsYearAndMonthSet() throws Exception Map partitionSpecs = getPartitionSpec(Lists.newArrayList("2017", "august", "")); client.exchange_partitions(partitionSpecs, sourceTable.getDbName(), - 
sourceTable.getTableName(), destTable.getDbName(), destTable.getTableName()); + sourceTable.getTableName(), destTable.getDbName(), destTable.getTableName(), null); } // Tests for the Partition exchange_partition(Map partitionSpecs, String @@ -648,7 +648,7 @@ public void testExchangePartition() throws Exception { Map partitionSpecs = getPartitionSpec(partitions[1]); Partition exchangedPartition = client.exchange_partition(partitionSpecs, sourceTable.getDbName(), - sourceTable.getTableName(), destTable.getDbName(), destTable.getTableName()); + sourceTable.getTableName(), destTable.getDbName(), destTable.getTableName(), null); Assert.assertEquals(new Partition(), exchangedPartition); checkExchangedPartitions(sourceTable, destTable, Lists.newArrayList(partitions[1])); @@ -667,7 +667,7 @@ public void testExchangePartitionDestTableHasPartitions() throws Exception { Map partitionSpecs = getPartitionSpec(partitions[1]); client.exchange_partition(partitionSpecs, DB_NAME, sourceTable.getTableName(), DB_NAME, - destTable.getTableName()); + destTable.getTableName(), null); checkExchangedPartitions(sourceTable, destTable, Lists.newArrayList(partitions[1])); checkRemainingPartitions(sourceTable, destTable, @@ -690,7 +690,7 @@ public void testExchangePartitionYearSet() throws Exception { Map partitionSpecs = getPartitionSpec(Lists.newArrayList("2017", "", "")); Partition exchangedPartition = client.exchange_partition(partitionSpecs, sourceTable.getDbName(), - sourceTable.getTableName(), destTable.getDbName(), destTable.getTableName()); + sourceTable.getTableName(), destTable.getDbName(), destTable.getTableName(), null); Assert.assertEquals(new Partition(), exchangedPartition); checkExchangedPartitions(sourceTable, destTable, @@ -703,7 +703,7 @@ public void testExchangePartitionYearAndMonthSet() throws Exception { Map partitionSpecs = getPartitionSpec(Lists.newArrayList("2017", "march", "")); client.exchange_partition(partitionSpecs, sourceTable.getDbName(), - sourceTable.getTableName(), destTable.getDbName(), destTable.getTableName()); + sourceTable.getTableName(), destTable.getDbName(), destTable.getTableName(), null); checkExchangedPartitions(sourceTable, destTable, Lists.newArrayList(partitions[0], partitions[1])); @@ -720,7 +720,7 @@ public void testExchangePartitionBetweenDBs() throws Exception { Map partitionSpecs = getPartitionSpec(Lists.newArrayList("2017", "march", "")); client.exchange_partition(partitionSpecs, sourceTable.getDbName(), sourceTable.getTableName(), - dest.getDbName(), dest.getTableName()); + dest.getDbName(), dest.getTableName(), null); checkExchangedPartitions(sourceTable, dest, Lists.newArrayList(partitions[0], partitions[1])); checkRemainingPartitions(sourceTable, dest, @@ -742,7 +742,7 @@ public void testExchangePartitionCustomTableLocations() throws Exception { Map partitionSpecs = getPartitionSpec(parts[1]); client.exchange_partition(partitionSpecs, source.getDbName(), source.getTableName(), - dest.getDbName(), dest.getTableName()); + dest.getDbName(), dest.getTableName(), null); checkExchangedPartitions(source, dest, Lists.newArrayList(parts[1])); checkRemainingPartitions(source, dest, Lists.newArrayList(parts[0])); @@ -764,7 +764,7 @@ public void testExchangePartitionCustomTableAndPartLocation() throws Exception { Map partitionSpecs = getPartitionSpec(parts[1]); try { client.exchange_partition(partitionSpecs, source.getDbName(), - source.getTableName(), dest.getDbName(), dest.getTableName()); + source.getTableName(), dest.getDbName(), dest.getTableName(), null); 
Assert.fail("MetaException should have been thrown."); } catch (MetaException e) { // Expected exception as FileNotFoundException will occur if the partitions have custom @@ -792,7 +792,7 @@ public void testExchangePartitionCustomPartLocation() throws Exception { Map partitionSpecs = getPartitionSpec(parts[1]); try { client.exchange_partition(partitionSpecs, source.getDbName(), - source.getTableName(), dest.getDbName(), dest.getTableName()); + source.getTableName(), dest.getDbName(), dest.getTableName(), null); Assert.fail("MetaException should have been thrown."); } catch (MetaException e) { // Expected exception as FileNotFoundException will occur if the partitions have custom @@ -812,7 +812,7 @@ public void testExchangePartitionNonExistingPartLocation() throws Exception { Map partitionSpecs = getPartitionSpec(partitions[1]); metaStore.cleanWarehouseDirs(); client.exchange_partition(partitionSpecs, sourceTable.getDbName(), - sourceTable.getTableName(), destTable.getDbName(), destTable.getTableName()); + sourceTable.getTableName(), destTable.getDbName(), destTable.getTableName(), null); } @Test(expected = MetaException.class) @@ -820,7 +820,7 @@ public void testExchangePartitionNonExistingSourceTable() throws Exception { Map partitionSpecs = getPartitionSpec(partitions[1]); client.exchange_partition(partitionSpecs, DB_NAME, "nonexistingtable", destTable.getDbName(), - destTable.getTableName()); + destTable.getTableName(), null); } @Test(expected = MetaException.class) @@ -828,7 +828,7 @@ public void testExchangePartitionNonExistingSourceDB() throws Exception { Map partitionSpecs = getPartitionSpec(partitions[1]); client.exchange_partition(partitionSpecs, "nonexistingdb", sourceTable.getTableName(), - destTable.getDbName(), destTable.getTableName()); + destTable.getDbName(), destTable.getTableName(), null); } @Test(expected = MetaException.class) @@ -836,7 +836,7 @@ public void testExchangePartitionNonExistingDestTable() throws Exception { Map partitionSpecs = getPartitionSpec(partitions[1]); client.exchange_partition(partitionSpecs, sourceTable.getDbName(), sourceTable.getTableName(), - DB_NAME, "nonexistingtable"); + DB_NAME, "nonexistingtable", null); } @Test(expected = MetaException.class) @@ -844,7 +844,7 @@ public void testExchangePartitionNonExistingDestDB() throws Exception { Map partitionSpecs = getPartitionSpec(partitions[1]); client.exchange_partition(partitionSpecs, sourceTable.getDbName(), sourceTable.getTableName(), - "nonexistingdb", destTable.getTableName()); + "nonexistingdb", destTable.getTableName(), null); } @Test(expected = MetaException.class) @@ -852,7 +852,7 @@ public void testExchangePartitionEmptySourceTable() throws Exception { Map partitionSpecs = getPartitionSpec(partitions[1]); client.exchange_partition(partitionSpecs, DB_NAME, "", destTable.getDbName(), - destTable.getTableName()); + destTable.getTableName(), null); } @Test(expected = MetaException.class) @@ -860,7 +860,7 @@ public void testExchangePartitionEmptySourceDB() throws Exception { Map partitionSpecs = getPartitionSpec(partitions[1]); client.exchange_partition(partitionSpecs, "", sourceTable.getTableName(), destTable.getDbName(), - destTable.getTableName()); + destTable.getTableName(), null); } @Test(expected = MetaException.class) @@ -868,7 +868,7 @@ public void testExchangePartitionEmptyDestTable() throws Exception { Map partitionSpecs = getPartitionSpec(partitions[1]); client.exchange_partition(partitionSpecs, sourceTable.getDbName(), sourceTable.getTableName(), - DB_NAME, ""); + DB_NAME, "", 
null); } @Test(expected = MetaException.class) @@ -876,7 +876,7 @@ public void testExchangePartitionEmptyDestDB() throws Exception { Map partitionSpecs = getPartitionSpec(partitions[1]); client.exchange_partition(partitionSpecs, sourceTable.getDbName(), sourceTable.getTableName(), - "", destTable.getTableName()); + "", destTable.getTableName(), null); } @Test(expected = MetaException.class) @@ -884,7 +884,7 @@ public void testExchangePartitionNullSourceTable() throws Exception { Map partitionSpecs = getPartitionSpec(partitions[1]); client.exchange_partition(partitionSpecs, DB_NAME, null, destTable.getDbName(), - destTable.getTableName()); + destTable.getTableName(), null); } @Test(expected = MetaException.class) @@ -892,7 +892,7 @@ public void testExchangePartitionNullSourceDB() throws Exception { Map partitionSpecs = getPartitionSpec(partitions[1]); client.exchange_partition(partitionSpecs, null, sourceTable.getTableName(), - destTable.getDbName(), destTable.getTableName()); + destTable.getDbName(), destTable.getTableName(), null); } @Test(expected = MetaException.class) @@ -900,7 +900,7 @@ public void testExchangePartitionNullDestTable() throws Exception { Map partitionSpecs = getPartitionSpec(partitions[1]); client.exchange_partition(partitionSpecs, sourceTable.getDbName(), sourceTable.getTableName(), - DB_NAME, null); + DB_NAME, null, null); } @Test(expected = MetaException.class) @@ -908,7 +908,7 @@ public void testExchangePartitionNullDestDB() throws Exception { Map partitionSpecs = getPartitionSpec(partitions[1]); client.exchange_partition(partitionSpecs, sourceTable.getDbName(), sourceTable.getTableName(), - null, destTable.getTableName()); + null, destTable.getTableName(), null); } @Test(expected = MetaException.class) @@ -916,14 +916,14 @@ public void testExchangePartitionEmptyPartSpec() throws Exception { Map partitionSpecs = new HashMap<>(); client.exchange_partition(partitionSpecs, sourceTable.getDbName(), - sourceTable.getTableName(), destTable.getDbName(), destTable.getTableName()); + sourceTable.getTableName(), destTable.getDbName(), destTable.getTableName(), null); } @Test(expected = MetaException.class) public void testExchangePartitionNullPartSpec() throws Exception { client.exchange_partition(null, sourceTable.getDbName(), sourceTable.getTableName(), null, - destTable.getTableName()); + destTable.getTableName(), null); } @Test(expected = MetaException.class) @@ -931,11 +931,11 @@ public void testExchangePartitionPartAlreadyExists() throws Exception { Partition partition = buildPartition(destTable, Lists.newArrayList("2017", "march", "22"), null); - client.add_partition(partition); + client.add_partition(partition, null); Map partitionSpecs = getPartitionSpec(partitions[1]); client.exchange_partition(partitionSpecs, DB_NAME, sourceTable.getTableName(), DB_NAME, - destTable.getTableName()); + destTable.getTableName(), null); } @Test @@ -943,12 +943,12 @@ public void testExchangePartitionOneFail() throws Exception { Partition partition = buildPartition(destTable, Lists.newArrayList("2017", "march", "22"), null); - client.add_partition(partition); + client.add_partition(partition, null); Map partitionSpecs = getPartitionSpec(Lists.newArrayList("2017", "", "")); try { client.exchange_partition(partitionSpecs, DB_NAME, sourceTable.getTableName(), DB_NAME, - destTable.getTableName()); + destTable.getTableName(), null); Assert.fail( "Exception should have been thrown as one of the partitions already exists in the dest table."); } catch (MetaException e) { @@ -980,7 +980,7 @@ 
public void testExchangePartitionDifferentColsInTables() throws Exception { Map partitionSpecs = getPartitionSpec(partitions[1]); client.exchange_partition(partitionSpecs, sourceTable.getDbName(), sourceTable.getTableName(), - dest.getDbName(), dest.getTableName()); + dest.getDbName(), dest.getTableName(), null); } @Test(expected = MetaException.class) @@ -993,7 +993,7 @@ public void testExchangePartitionDifferentColNameInTables() throws Exception { Map partitionSpecs = getPartitionSpec(partitions[1]); client.exchange_partition(partitionSpecs, sourceTable.getDbName(), sourceTable.getTableName(), - dest.getDbName(), dest.getTableName()); + dest.getDbName(), dest.getTableName(), null); } @Test(expected = MetaException.class) @@ -1006,7 +1006,7 @@ public void testExchangePartitionDifferentColTypesInTables() throws Exception { Map partitionSpecs = getPartitionSpec(partitions[1]); client.exchange_partition(partitionSpecs, sourceTable.getDbName(), sourceTable.getTableName(), - dest.getDbName(), dest.getTableName()); + dest.getDbName(), dest.getTableName(), null); } @Test(expected = MetaException.class) @@ -1019,7 +1019,7 @@ public void testExchangePartitionDifferentPartColsInTables() throws Exception { Map partitionSpecs = getPartitionSpec(partitions[1]); client.exchange_partition(partitionSpecs, sourceTable.getDbName(), - sourceTable.getTableName(), dest.getDbName(), dest.getTableName()); + sourceTable.getTableName(), dest.getDbName(), dest.getTableName(), null); } @Test(expected = MetaException.class) @@ -1033,7 +1033,7 @@ public void testExchangePartitionDifferentPartColNameInTables() throws Exception Map partitionSpecs = getPartitionSpec(partitions[1]); client.exchange_partition(partitionSpecs, sourceTable.getDbName(), - sourceTable.getTableName(), dest.getDbName(), dest.getTableName()); + sourceTable.getTableName(), dest.getDbName(), dest.getTableName(), null); } @Test(expected = MetaException.class) @@ -1047,7 +1047,7 @@ public void testExchangePartitionDifferentPartColTypesInTables() throws Exceptio Map partitionSpecs = getPartitionSpec(partitions[1]); client.exchange_partition(partitionSpecs, sourceTable.getDbName(), - sourceTable.getTableName(), dest.getDbName(), dest.getTableName()); + sourceTable.getTableName(), dest.getDbName(), dest.getTableName(), null); } @Test @@ -1057,7 +1057,7 @@ public void testExchangePartitionLessValueInPartSpec() throws Exception { partitionSpecs.put(YEAR_COL_NAME, "2017"); partitionSpecs.put(MONTH_COL_NAME, "march"); client.exchange_partition(partitionSpecs, sourceTable.getDbName(), - sourceTable.getTableName(), destTable.getDbName(), destTable.getTableName()); + sourceTable.getTableName(), destTable.getDbName(), destTable.getTableName(), null); checkExchangedPartitions(sourceTable, destTable, Lists.newArrayList(partitions[0], partitions[1])); checkRemainingPartitions(sourceTable, destTable, @@ -1073,7 +1073,7 @@ public void testExchangePartitionMoreValueInPartSpec() throws Exception { partitionSpecs.put(DAY_COL_NAME, "22"); partitionSpecs.put("hour", "18"); client.exchange_partition(partitionSpecs, sourceTable.getDbName(), - sourceTable.getTableName(), destTable.getDbName(), destTable.getTableName()); + sourceTable.getTableName(), destTable.getDbName(), destTable.getTableName(), null); checkExchangedPartitions(sourceTable, destTable, Lists.newArrayList(partitions[1])); checkRemainingPartitions(sourceTable, destTable, Lists.newArrayList(partitions[0], partitions[2], partitions[3], partitions[4])); @@ -1087,7 +1087,7 @@ public void 
testExchangePartitionDifferentValuesInPartSpec() throws Exception { partitionSpecs.put("honap", "march"); partitionSpecs.put("nap", "22"); client.exchange_partition(partitionSpecs, sourceTable.getDbName(), - sourceTable.getTableName(), destTable.getDbName(), destTable.getTableName()); + sourceTable.getTableName(), destTable.getDbName(), destTable.getTableName(), null); checkExchangedPartitions(sourceTable, destTable, Lists.newArrayList(partitions[0], partitions[1], partitions[2], partitions[3])); checkRemainingPartitions(sourceTable, destTable, Lists.newArrayList(partitions[4])); @@ -1101,7 +1101,7 @@ public void testExchangePartitionNonExistingValuesInPartSpec() throws Exception partitionSpecs.put("honap", "march"); partitionSpecs.put("nap", "22"); client.exchange_partition(partitionSpecs, sourceTable.getDbName(), - sourceTable.getTableName(), destTable.getDbName(), destTable.getTableName()); + sourceTable.getTableName(), destTable.getDbName(), destTable.getTableName(), null); } @Test @@ -1113,7 +1113,7 @@ public void testExchangePartitionOnlyMonthSetInPartSpec() throws Exception { partitionSpecs.put(DAY_COL_NAME, ""); try { client.exchange_partition(partitionSpecs, sourceTable.getDbName(), - sourceTable.getTableName(), destTable.getDbName(), destTable.getTableName()); + sourceTable.getTableName(), destTable.getDbName(), destTable.getTableName(), null); Assert.fail("MetaException should have been thrown."); } catch (MetaException e) { // Expected exception @@ -1134,7 +1134,7 @@ public void testExchangePartitionYearAndDaySetInPartSpec() throws Exception { partitionSpecs.put(DAY_COL_NAME, "22"); try { client.exchange_partition(partitionSpecs, sourceTable.getDbName(), - sourceTable.getTableName(), destTable.getDbName(), destTable.getTableName()); + sourceTable.getTableName(), destTable.getDbName(), destTable.getTableName(), null); Assert.fail("MetaException should have been thrown."); } catch (MetaException e) { // Expected exception @@ -1152,7 +1152,7 @@ public void testExchangePartitionNoPartExists() throws Exception { Map partitionSpecs = getPartitionSpec(Lists.newArrayList("2017", "march", "25")); client.exchange_partition(partitionSpecs, sourceTable.getDbName(), - sourceTable.getTableName(), destTable.getDbName(), destTable.getTableName()); + sourceTable.getTableName(), destTable.getDbName(), destTable.getTableName(), null); } @Test(expected = MetaException.class) @@ -1160,7 +1160,7 @@ public void testExchangePartitionNoPartExistsYearAndMonthSet() throws Exception Map partitionSpecs = getPartitionSpec(Lists.newArrayList("2017", "august", "")); client.exchange_partition(partitionSpecs, sourceTable.getDbName(), - sourceTable.getTableName(), destTable.getDbName(), destTable.getTableName()); + sourceTable.getTableName(), destTable.getDbName(), destTable.getTableName(), null); } // Helper methods @@ -1194,7 +1194,7 @@ private Table createTable(String dbName, String tableName, List par .setCols(cols) .setPartCols(partCols) .setLocation(location) - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); return client.getTable(dbName, tableName); } @@ -1210,7 +1210,7 @@ private Table createTable(String dbName, String tableName, List par Partition partition5 = buildPartition(sourceTable, Lists.newArrayList("2018", "april", "23"), null); client.add_partitions( - Lists.newArrayList(partition1, partition2, partition3, partition4, partition5)); + Lists.newArrayList(partition1, partition2, partition3, partition4, partition5), null); Partition[] parts = new Partition[5]; 
parts[0] = @@ -1229,7 +1229,7 @@ private Table createTable(String dbName, String tableName, List par private Partition createPartition(Table table, List values, String location) throws Exception { Partition partition = buildPartition(table, values, location); - client.add_partition(partition); + client.add_partition(partition, null); return client.getPartition(DB_NAME, table.getTableName(), partition.getValues()); } diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestForeignKey.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestForeignKey.java index b058dd2..1d0848e 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestForeignKey.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestForeignKey.java @@ -105,7 +105,7 @@ public void setUp() throws Exception { .setTableName("test_table_1") .addCol("col1", "int") .addCol("col2", "varchar(32)") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); testTables[1] = new TableBuilder() @@ -113,7 +113,7 @@ public void setUp() throws Exception { .setTableName("test_table_2") .addCol("col1", "int") .addCol("col2", "varchar(32)") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); testTables[2] = new TableBuilder() @@ -121,7 +121,7 @@ public void setUp() throws Exception { .setTableName("test_table_3") .addCol("col1", "int") .addCol("col2", "varchar(32)") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); testTables[3] = new TableBuilder() @@ -129,7 +129,7 @@ public void setUp() throws Exception { .setTableName("test_table_4") .addCol("col1", "int") .addCol("col2", "varchar(32)") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); // Reload tables from the MetaStore for(int i=0; i < testTables.length; i++) { @@ -352,7 +352,7 @@ public void createTableWithConstraints() throws TException { .setConstraintName(constraintName) .build(metaStore.getConf()); - client.createTableWithConstraints(table, null, fk, null, null, null, null); + client.createTableWithConstraints(table, null, fk, null, null, null, null, null); ForeignKeysRequest rqst = new ForeignKeysRequest(parentTable.getDbName(), parentTable .getTableName(), @@ -399,7 +399,7 @@ public void createTableWithConstraintsInOtherCatalog() throws TException { .setConstraintName(constraintName) .build(metaStore.getConf()); - client.createTableWithConstraints(table, null, fk, null, null, null, null); + client.createTableWithConstraints(table, null, fk, null, null, null, null, null); ForeignKeysRequest rqst = new ForeignKeysRequest(parentTable.getDbName(), parentTable .getTableName(), diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestGetPartitions.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestGetPartitions.java index 4d7f7c1..7211af3 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestGetPartitions.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestGetPartitions.java @@ -121,7 +121,7 @@ private Table createTestTable(IMetaStoreClient client, String dbName, String tab table.putToParameters("PARTITION_LEVEL_PRIVILEGE", "true"); } - 
client.createTable(table); + client.createTable(table, null); return table; } @@ -129,7 +129,7 @@ private void addPartition(IMetaStoreClient client, Table table, List val throws TException { PartitionBuilder partitionBuilder = new PartitionBuilder().inTable(table); values.forEach(val -> partitionBuilder.addValue(val)); - client.add_partition(partitionBuilder.build(metaStore.getConf())); + client.add_partition(partitionBuilder.build(metaStore.getConf()), null); } private void createTable3PartCols1PartGeneric(IMetaStoreClient client, boolean authOn) @@ -547,7 +547,7 @@ public void otherCatalog() throws TException { .addCol("name", "string") .addPartCol("partcol", "string") .addTableParam("PARTITION_LEVEL_PRIVILEGE", "true") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); Partition[] parts = new Partition[5]; for (int i = 0; i < parts.length; i++) { @@ -556,7 +556,7 @@ public void otherCatalog() throws TException { .addValue("a" + i) .build(metaStore.getConf()); } - client.add_partitions(Arrays.asList(parts)); + client.add_partitions(Arrays.asList(parts), null); Partition fetched = client.getPartition(catName, dbName, tableName, Collections.singletonList("a0")); diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestGetTableMeta.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestGetTableMeta.java index 7720aa2..1054e71 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestGetTableMeta.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestGetTableMeta.java @@ -152,7 +152,7 @@ private TableMeta createTestTable(String dbName, String tableName, TableType typ throws Exception { Table table = createTable(dbName, tableName, type); table.getParameters().put("comment", comment); - client.createTable(table); + client.createTable(table, null); TableMeta tableMeta = new TableMeta(dbName, tableName, type.name()); tableMeta.setComments(comment); tableMeta.setCatName("hive"); @@ -162,7 +162,7 @@ private TableMeta createTestTable(String dbName, String tableName, TableType typ private TableMeta createTestTable(String dbName, String tableName, TableType type) throws Exception { Table table = createTable(dbName, tableName, type); - client.createTable(table); + client.createTable(table, null); TableMeta tableMeta = new TableMeta(dbName, tableName, type.name()); tableMeta.setCatName("hive"); return tableMeta; @@ -305,7 +305,7 @@ public void tablesInDifferentCatalog() throws TException { .setTableName(tableNames[i]) .addCol("id", "int") .addCol("name", "string") - .build(metaStore.getConf())); + .build(metaStore.getConf()), null); TableMeta tableMeta = new TableMeta(dbName, tableNames[i], TableType.MANAGED_TABLE.name()); tableMeta.setCatName(catName); expected.add(tableMeta); diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestListPartitions.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestListPartitions.java index 34ceb34..fb400d1 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestListPartitions.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestListPartitions.java @@ -135,7 +135,7 @@ private Table createTestTable(IMetaStoreClient client, String 
dbName, String tab table.putToParameters("PARTITION_LEVEL_PRIVILEGE", "true"); } - client.createTable(table); + client.createTable(table, null); return table; } @@ -143,7 +143,7 @@ private void addPartition(IMetaStoreClient client, Table table, List val throws TException { PartitionBuilder partitionBuilder = new PartitionBuilder().inTable(table); values.forEach(val -> partitionBuilder.addValue(val)); - client.add_partition(partitionBuilder.build(metaStore.getConf())); + client.add_partition(partitionBuilder.build(metaStore.getConf()), null); } private void createTable3PartCols1PartGeneric(IMetaStoreClient client, boolean authOn) @@ -1414,7 +1414,7 @@ public void otherCatalog() throws TException { .addCol("id", "int") .addCol("name", "string") .addPartCol("partcol", "string") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); Partition[] parts = new Partition[5]; for (int i = 0; i < parts.length; i++) { @@ -1423,7 +1423,7 @@ public void otherCatalog() throws TException { .addValue("a" + i) .build(metaStore.getConf()); } - client.add_partitions(Arrays.asList(parts)); + client.add_partitions(Arrays.asList(parts), null); List fetched = client.listPartitions(catName, dbName, tableName, -1); Assert.assertEquals(parts.length, fetched.size()); diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestNotNullConstraint.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestNotNullConstraint.java index b32eeda..d4fde3b 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestNotNullConstraint.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestNotNullConstraint.java @@ -103,7 +103,7 @@ public void setUp() throws Exception { .setTableName("test_table_1") .addCol("col1", "int") .addCol("col2", "varchar(32)") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); testTables[1] = new TableBuilder() @@ -111,7 +111,7 @@ public void setUp() throws Exception { .setTableName("test_table_2") .addCol("col1", "int") .addCol("col2", "varchar(32)") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); testTables[2] = new TableBuilder() @@ -119,7 +119,7 @@ public void setUp() throws Exception { .setTableName("test_table_3") .addCol("col1", "int") .addCol("col2", "varchar(32)") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); // Reload tables from the MetaStore for(int i=0; i < testTables.length; i++) { @@ -230,7 +230,7 @@ public void createTableWithConstraintsPk() throws TException { .setConstraintName(constraintName) .build(metaStore.getConf()); - client.createTableWithConstraints(table, null, null, null, nn, null, null); + client.createTableWithConstraints(table, null, null, null, nn, null, null, null); NotNullConstraintsRequest rqst = new NotNullConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); List fetched = client.getNotNullConstraints(rqst); @@ -265,7 +265,7 @@ public void createTableWithConstraintsPkInOtherCatalog() throws TException { .addColumn("col1") .build(metaStore.getConf()); - client.createTableWithConstraints(table, null, null, null, nn, null, null); + client.createTableWithConstraints(table, null, null, null, nn, null, null, null); NotNullConstraintsRequest rqst = new NotNullConstraintsRequest(table.getCatName(), table.getDbName(), 
table.getTableName()); List fetched = client.getNotNullConstraints(rqst); diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestPrimaryKey.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestPrimaryKey.java index c33572b..91f9b22 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestPrimaryKey.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestPrimaryKey.java @@ -103,7 +103,7 @@ public void setUp() throws Exception { .setTableName("test_table_1") .addCol("col1", "int") .addCol("col2", "varchar(32)") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); testTables[1] = new TableBuilder() @@ -111,7 +111,7 @@ public void setUp() throws Exception { .setTableName("test_table_2") .addCol("col1", "int") .addCol("col2", "varchar(32)") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); testTables[2] = new TableBuilder() @@ -119,7 +119,7 @@ public void setUp() throws Exception { .setTableName("test_table_3") .addCol("col1", "int") .addCol("col2", "varchar(32)") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); // Reload tables from the MetaStore for(int i=0; i < testTables.length; i++) { @@ -292,7 +292,7 @@ public void createTableWithConstraintsPk() throws TException { .setConstraintName(constraintName) .build(metaStore.getConf()); - client.createTableWithConstraints(table, pk, null, null, null, null, null); + client.createTableWithConstraints(table, pk, null, null, null, null, null, null); PrimaryKeysRequest rqst = new PrimaryKeysRequest(table.getDbName(), table.getTableName()); rqst.setCatName(table.getCatName()); List fetched = client.getPrimaryKeys(rqst); @@ -329,7 +329,7 @@ public void createTableWithConstraintsPkInOtherCatalog() throws TException { .addColumn("col1") .build(metaStore.getConf()); - client.createTableWithConstraints(table, pk, null, null, null, null, null); + client.createTableWithConstraints(table, pk, null, null, null, null, null, null); PrimaryKeysRequest rqst = new PrimaryKeysRequest(table.getDbName(), table.getTableName()); rqst.setCatName(table.getCatName()); List fetched = client.getPrimaryKeys(rqst); diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java index 6c8c943..0c65095 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java @@ -124,20 +124,20 @@ public void setUp() throws Exception { new TableBuilder() .setTableName("test_table") .addCol("test_col", "int") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); testTables[1] = new TableBuilder() .setTableName("test_view") .addCol("test_col", "int") .setType("VIRTUAL_VIEW") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); testTables[2] = new TableBuilder() .setTableName("test_table_to_find_1") .addCol("test_col", "int") - .create(client, metaStore.getConf()); + .create(client, 
metaStore.getConf(), null); testTables[3] = new TableBuilder() @@ -145,7 +145,7 @@ public void setUp() throws Exception { .addCol("test_col1", "int") .addCol("test_col2", "int") .addPartCol("test_part_col", "int") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); testTables[4] = new TableBuilder() @@ -154,7 +154,7 @@ public void setUp() throws Exception { .setLocation(metaStore.getWarehouseRoot() + "/external/table_dir") .addTableParam("EXTERNAL", "TRUE") .setType("EXTERNAL_TABLE") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); new DatabaseBuilder().setName(OTHER_DATABASE).create(client, metaStore.getConf()); @@ -164,7 +164,7 @@ public void setUp() throws Exception { .setDbName(OTHER_DATABASE) .setTableName("test_table") .addCol("test_col", "int") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); // Create partitions for the partitioned table for(int i=0; i < 2; i++) { @@ -224,7 +224,7 @@ public void tearDown() throws Exception { public void testCreateGetDeleteTable() throws Exception { // Try to create a table with all of the parameters set Table table = getTableWithAllParametersSet(); - client.createTable(table); + client.createTable(table, null); table.unsetId(); Table createdTable = client.getTable(table.getDbName(), table.getTableName()); @@ -271,7 +271,7 @@ public void testCreateTableDefaultValues() throws Exception { sd.setSerdeInfo(new SerDeInfo()); table.setSd(sd); - client.createTable(table); + client.createTable(table, null); Table createdTable = client.getTable(table.getDbName(), table.getTableName()); Assert.assertEquals("Comparing OwnerType", PrincipalType.USER, createdTable.getOwnerType()); @@ -344,7 +344,7 @@ public void testCreateTableDefaultLocationInSpecificDatabase() throws Exception sd.setSerdeInfo(new SerDeInfo()); table.setSd(sd); - client.createTable(table); + client.createTable(table, null); Table createdTable = client.getTable(table.getDbName(), table.getTableName()); Assert.assertEquals("Storage descriptor location", metaStore.getWarehouseRoot() + "/" + table.getDbName() + ".db/" + table.getTableName(), @@ -365,7 +365,7 @@ public void testCreateTableDefaultValuesView() throws Exception { sd.setSerdeInfo(new SerDeInfo()); table.setSd(sd); - client.createTable(table); + client.createTable(table, null); Table createdTable = client.getTable(table.getDbName(), table.getTableName()); // No location should be created for views @@ -378,7 +378,7 @@ public void testCreateTableNullDatabase() throws Exception { Table table = testTables[0]; table.setDbName(null); - client.createTable(table); + client.createTable(table, null); } @Test(expected = MetaException.class) @@ -386,7 +386,7 @@ public void testCreateTableNullTableName() throws Exception { Table table = testTables[0]; table.setTableName(null); - client.createTable(table); + client.createTable(table, null); } @Test(expected = InvalidObjectException.class) @@ -394,7 +394,7 @@ public void testCreateTableInvalidTableName() throws Exception { Table table = testTables[0]; table.setTableName("test_table;"); - client.createTable(table); + client.createTable(table, null); } @Test(expected = InvalidObjectException.class) @@ -402,7 +402,7 @@ public void testCreateTableEmptyName() throws Exception { Table table = testTables[0]; table.setTableName(""); - client.createTable(table); + client.createTable(table, null); } @Test(expected = MetaException.class) @@ -410,7 +410,7 @@ public void testCreateTableNullStorageDescriptor() 
throws Exception { Table table = testTables[0]; table.setSd(null); - client.createTable(table); + client.createTable(table, null); } private Table getNewTable() throws MetaException { @@ -424,7 +424,7 @@ private Table getNewTable() throws MetaException { public void testCreateTableInvalidStorageDescriptorNullColumns() throws Exception { Table table = getNewTable(); table.getSd().setCols(null); - client.createTable(table); + client.createTable(table, null); } @Test(expected = MetaException.class) @@ -432,7 +432,7 @@ public void testCreateTableInvalidStorageDescriptorNullSerdeInfo() throws Except Table table = getNewTable(); table.getSd().setSerdeInfo(null); - client.createTable(table); + client.createTable(table, null); } @Test(expected = MetaException.class) @@ -440,7 +440,7 @@ public void testCreateTableInvalidStorageDescriptorNullColumnType() throws Excep Table table = getNewTable(); table.getSd().getCols().get(0).setType(null); - client.createTable(table); + client.createTable(table, null); } @Test(expected = InvalidObjectException.class) @@ -448,7 +448,7 @@ public void testCreateTableInvalidStorageDescriptorInvalidColumnType() throws Ex Table table = getNewTable(); table.getSd().getCols().get(0).setType("xyz"); - client.createTable(table); + client.createTable(table, null); } @Test(expected = InvalidObjectException.class) @@ -456,7 +456,7 @@ public void testCreateTableNoSuchDatabase() throws Exception { Table table = testTables[0]; table.setDbName("no_such_database"); - client.createTable(table); + client.createTable(table, null); } @Test(expected = AlreadyExistsException.class) @@ -464,7 +464,7 @@ public void testCreateTableAlreadyExists() throws Exception { Table table = testTables[0]; table.unsetId(); - client.createTable(table); + client.createTable(table, null); } @Test(expected = NoSuchObjectException.class) @@ -532,7 +532,7 @@ public void testDropTableCaseInsensitive() throws Exception { table.unsetId(); // Test in mixed case - client.createTable(table); + client.createTable(table, null); client.dropTable("DeFaUlt", "TeST_tAbLE"); try { client.getTable(table.getDbName(), table.getTableName()); @@ -554,7 +554,7 @@ public void testDropTableDeleteDir() throws Exception { metaStore.isPathExists(new Path(table.getSd().getLocation()))); table.unsetId(); - client.createTable(table); + client.createTable(table, null); client.dropTable(table.getDbName(), table.getTableName(), false, false); Assert.assertTrue("Table path should be kept", @@ -697,7 +697,7 @@ public void testAlterTable() throws Exception { // Partition keys can not be set, but getTableWithAllParametersSet is added one, so remove for // this test newTable.setPartitionKeys(originalTable.getPartitionKeys()); - client.alter_table(originalDatabase, originalTableName, newTable); + client.alter_table(originalDatabase, originalTableName, newTable, null); Table alteredTable = client.getTable(originalDatabase, originalTableName); // The extra parameters will be added on server side, so check that the required ones are @@ -729,7 +729,7 @@ public void testAlterTableRename() throws Exception { // Do not change the location, so it is tested that the location will be changed even if the // location is not set to null, just remain the same newTable.setTableName("new_table"); - client.alter_table(originalDatabase, originalTableName, newTable); + client.alter_table(originalDatabase, originalTableName, newTable, null); List tableNames = client.getTables(originalDatabase, originalTableName); Assert.assertEquals("Original table should be 
removed", 0, tableNames.size()); Assert.assertFalse("Original table directory should be removed", @@ -756,7 +756,7 @@ public void testAlterTableChangingDatabase() throws Exception { Table newTable = originalTable.deepCopy(); newTable.setDbName(OTHER_DATABASE); - client.alter_table(originalDatabase, originalTableName, newTable); + client.alter_table(originalDatabase, originalTableName, newTable, null); List tableNames = client.getTables(originalDatabase, originalTableName); Assert.assertEquals("Original table should be removed", 0, tableNames.size()); Assert.assertFalse("Original table directory should be removed", @@ -783,7 +783,7 @@ public void testAlterTableExternalTable() throws Exception { Table newTable = originalTable.deepCopy(); newTable.setTableName("new_external_table_for_test"); - client.alter_table(originalDatabase, originalTableName, newTable); + client.alter_table(originalDatabase, originalTableName, newTable, null); List tableNames = client.getTables(originalDatabase, originalTableName); Assert.assertEquals("Original table should be removed", 0, tableNames.size()); Assert.assertTrue("Original table directory should be kept", @@ -812,7 +812,7 @@ public void testAlterTableExternalTableChangeLocation() throws Exception { // Change the location, and see the results Table newTable = originalTable.deepCopy(); newTable.getSd().setLocation(newTable.getSd().getLocation() + "_modified"); - client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable); + client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable, null); Table alteredTable = client.getTable(newTable.getDbName(), newTable.getTableName()); Assert.assertTrue("Original table directory should be kept", metaStore.isPathExists(new Path(originalTable.getSd().getLocation()))); @@ -849,7 +849,7 @@ public void testAlterTableChangeCols() throws Exception { // Add a new column cols.add(new FieldSchema("new_col", "int", null)); // Store the changes - client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable); + client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable, null); Table alteredTable = client.getTable(newTable.getDbName(), newTable.getTableName()); Assert.assertTrue("Original table directory should be kept", metaStore.isPathExists(new Path(originalTable.getSd().getLocation()))); @@ -862,7 +862,7 @@ public void testAlterTableChangeCols() throws Exception { newTable.getPartitionKeys().get(0).setType("string"); newTable.getPartitionKeys().get(0).setComment("changed comment"); - client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable); + client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable, null); alteredTable = client.getTable(newTable.getDbName(), newTable.getTableName()); // The following data might be changed alteredTable.setParameters(newTable.getParameters()); @@ -879,7 +879,7 @@ public void testAlterTableCascade() throws Exception { cols.add(new FieldSchema("new_col_1", "int", null)); // Run without cascade - client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable, false); + client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable, false, null); Table alteredTable = client.getTable(newTable.getDbName(), newTable.getTableName()); Assert.assertEquals("The table data should be changed", newTable, alteredTable); @@ -892,7 +892,7 @@ public void testAlterTableCascade() throws Exception { // Run with cascade 
cols.add(new FieldSchema("new_col_2", "int", null)); - client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable, true); + client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable, true, null); alteredTable = client.getTable(newTable.getDbName(), newTable.getTableName()); Assert.assertEquals("The table data should be changed", newTable, alteredTable); @@ -908,7 +908,7 @@ public void testAlterTableCascade() throws Exception { EnvironmentContext context = new EnvironmentContext(); context.putToProperties(StatsSetupConst.CASCADE, "true"); client.alter_table_with_environmentContext(originalTable.getDbName(), - originalTable.getTableName(), newTable, context); + originalTable.getTableName(), newTable, context, null); alteredTable = client.getTable(newTable.getDbName(), newTable.getTableName()); Assert.assertEquals("The table data should be changed", newTable, alteredTable); @@ -926,7 +926,7 @@ public void testAlterTableNullDatabaseInNew() throws Exception { Table newTable = originalTable.deepCopy(); newTable.setDbName(null); - client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable); + client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable, null); } @Test @@ -936,7 +936,7 @@ public void testAlterTableNullTableNameInNew() throws Exception { newTable.setTableName(null); try { - client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable); + client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable, null); Assert.fail("Expected exception"); } catch (MetaException | TProtocolException ex) { // Expected. @@ -948,7 +948,7 @@ public void testAlterTableInvalidTableNameInNew() throws Exception { Table originalTable = testTables[0]; Table newTable = originalTable.deepCopy(); newTable.setTableName("test_table;"); - client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable); + client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable, null); } @Test(expected = InvalidOperationException.class) @@ -957,7 +957,7 @@ public void testAlterTableEmptyTableNameInNew() throws Exception { Table newTable = originalTable.deepCopy(); newTable.setTableName(""); - client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable); + client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable, null); } @Test(expected = MetaException.class) @@ -966,7 +966,7 @@ public void testAlterTableNullStorageDescriptorInNew() throws Exception { Table newTable = originalTable.deepCopy(); newTable.setSd(null); - client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable); + client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable, null); } @Test @@ -974,7 +974,7 @@ public void testAlterTableNullDatabase() throws Exception { Table originalTable = testTables[0]; Table newTable = originalTable.deepCopy(); try { - client.alter_table(null, originalTable.getTableName(), newTable); + client.alter_table(null, originalTable.getTableName(), newTable, null); Assert.fail("Expected exception"); } catch (MetaException | TProtocolException ex) { } @@ -986,7 +986,7 @@ public void testAlterTableNullTableName() throws Exception { Table newTable = originalTable.deepCopy(); try { - client.alter_table(originalTable.getDbName(), null, newTable); + client.alter_table(originalTable.getDbName(), null, newTable, null); Assert.fail("Expected 
exception"); } catch (MetaException | TProtocolException ex) { // Expected. @@ -997,7 +997,7 @@ public void testAlterTableNullTableName() throws Exception { public void testAlterTableNullNewTable() throws Exception { Table originalTable = testTables[0]; try { - client.alter_table(originalTable.getDbName(), originalTable.getTableName(), null); + client.alter_table(originalTable.getDbName(), originalTable.getTableName(), null, null); // TODO: Should be checked on server side. On Embedded metastore it throws // NullPointerException, on Remote metastore it throws TTransportException Assert.fail("Expected a NullPointerException or TTransportException to be thrown"); @@ -1014,7 +1014,7 @@ public void testAlterTableInvalidStorageDescriptorNullCols() throws Exception { Table newTable = originalTable.deepCopy(); newTable.getSd().setCols(null); - client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable); + client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable, null); } @Test(expected = MetaException.class) @@ -1023,7 +1023,7 @@ public void testAlterTableInvalidStorageDescriptorNullSerdeInfo() throws Excepti Table newTable = originalTable.deepCopy(); newTable.getSd().setSerdeInfo(null); - client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable); + client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable, null); } @Test(expected = MetaException.class) @@ -1032,7 +1032,7 @@ public void testAlterTableInvalidStorageDescriptorNullColumnType() throws Except Table newTable = originalTable.deepCopy(); newTable.getSd().getCols().get(0).setType(null); - client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable); + client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable, null); } @Test(expected = MetaException.class) @@ -1041,7 +1041,7 @@ public void testAlterTableInvalidStorageDescriptorNullLocation() throws Exceptio Table newTable = originalTable.deepCopy(); newTable.getSd().setLocation(null); - client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable); + client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable, null); } @Test(expected = InvalidOperationException.class) @@ -1050,7 +1050,7 @@ public void testAlterTableInvalidStorageDescriptorInvalidColumnType() throws Exc Table newTable = originalTable.deepCopy(); newTable.getSd().getCols().get(0).setType("xyz"); - client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable); + client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable, null); } @Test(expected = InvalidOperationException.class) @@ -1059,7 +1059,7 @@ public void testAlterTableInvalidStorageDescriptorAddPartitionColumns() throws E Table newTable = originalTable.deepCopy(); newTable.addToPartitionKeys(new FieldSchema("new_part", "int", "comment")); - client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable); + client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable, null); } @Test(expected = InvalidOperationException.class) @@ -1068,7 +1068,7 @@ public void testAlterTableInvalidStorageDescriptorAlterPartitionColumnName() thr Table newTable = originalTable.deepCopy(); newTable.getPartitionKeys().get(0).setName("altered_name"); - client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable); + client.alter_table(originalTable.getDbName(), 
originalTable.getTableName(), newTable, null); } @Test(expected = InvalidOperationException.class) @@ -1076,7 +1076,7 @@ public void testAlterTableInvalidStorageDescriptorRemovePartitionColumn() throws Table originalTable = partitionedTable; Table newTable = originalTable.deepCopy(); newTable.getPartitionKeys().remove(0); - client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable); + client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable, null); } @Test(expected = InvalidOperationException.class) @@ -1084,7 +1084,7 @@ public void testAlterTableNoSuchDatabase() throws Exception { Table originalTable = testTables[2]; Table newTable = originalTable.deepCopy(); - client.alter_table("no_such_database", originalTable.getTableName(), newTable); + client.alter_table("no_such_database", originalTable.getTableName(), newTable, null); } @Test(expected = InvalidOperationException.class) @@ -1092,7 +1092,7 @@ public void testAlterTableNoSuchTable() throws Exception { Table originalTable = testTables[2]; Table newTable = originalTable.deepCopy(); - client.alter_table(originalTable.getDbName(), "no_such_table_name", newTable); + client.alter_table(originalTable.getDbName(), "no_such_table_name", newTable, null); } @Test(expected = InvalidOperationException.class) @@ -1100,7 +1100,7 @@ public void testAlterTableNoSuchTableInThisDatabase() throws Exception { Table originalTable = testTables[2]; Table newTable = originalTable.deepCopy(); - client.alter_table(OTHER_DATABASE, originalTable.getTableName(), newTable); + client.alter_table(OTHER_DATABASE, originalTable.getTableName(), newTable, null); } @Test @@ -1111,7 +1111,7 @@ public void testAlterTableAlreadyExists() throws Exception { newTable.setTableName(testTables[2].getTableName()); try { // Already existing table - client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable); + client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable, null); // TODO: Maybe throw AlreadyExistsException. Assert.fail("Expected an InvalidOperationException to be thrown"); } catch (InvalidOperationException exception) { @@ -1153,7 +1153,7 @@ public void tablesInOtherCatalogs() throws TException, URISyntaxException { .setRewriteEnabled(true) .addMaterializedViewReferencedTable(dbName + "." 
+ tableNames[0]); } - client.createTable(builder.build(metaStore.getConf())); + client.createTable(builder.build(metaStore.getConf()), null); } // Add partitions for the partitioned table @@ -1222,13 +1222,13 @@ public void tablesInOtherCatalogs() throws TException, URISyntaxException { // Test altering the table Table t = client.getTable(catName, dbName, tableNames[0]).deepCopy(); t.getParameters().put("test", "test"); - client.alter_table(catName, dbName, tableNames[0], t); + client.alter_table(catName, dbName, tableNames[0], t, null); t = client.getTable(catName, dbName, tableNames[0]).deepCopy(); Assert.assertEquals("test", t.getParameters().get("test")); // Alter a table in the wrong catalog try { - client.alter_table(DEFAULT_DATABASE_NAME, tableNames[0], t); + client.alter_table(DEFAULT_DATABASE_NAME, tableNames[0], t, null); Assert.fail(); } catch (InvalidOperationException e) { // NOP @@ -1288,7 +1288,7 @@ public void createTableInBogusCatalog() throws TException { .setTableName("doomed") .addCol("col1", ColumnType.STRING_TYPE_NAME) .addCol("col2", ColumnType.INT_TYPE_NAME) - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); } @Test(expected = NoSuchObjectException.class) @@ -1318,7 +1318,7 @@ public void getMaterializedViewsInBogusCatalog() throws TException { public void alterTableBogusCatalog() throws TException { Table t = testTables[0].deepCopy(); t.getParameters().put("a", "b"); - client.alter_table("nosuch", t.getDbName(), t.getTableName(), t); + client.alter_table("nosuch", t.getDbName(), t.getTableName(), t, null); } @Test(expected = InvalidOperationException.class) @@ -1343,10 +1343,10 @@ public void moveTablesBetweenCatalogsOnAlter() throws TException { .setTableName(tableName) .addCol("col1", ColumnType.STRING_TYPE_NAME) .addCol("col2", ColumnType.INT_TYPE_NAME) - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); Table after = before.deepCopy(); after.setCatName(DEFAULT_CATALOG_NAME); - client.alter_table(catName, dbName, tableName, after); + client.alter_table(catName, dbName, tableName, after, null); } diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesGetExists.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesGetExists.java index e885c0a..70ec0ec 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesGetExists.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesGetExists.java @@ -88,7 +88,7 @@ public void setUp() throws Exception { .setDbName(DEFAULT_DATABASE) .setTableName("test_table") .addCol("test_col", "int") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); testTables[1] = new TableBuilder() @@ -96,14 +96,14 @@ public void setUp() throws Exception { .setTableName("test_view") .addCol("test_col", "int") .setType("VIEW") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); testTables[2] = new TableBuilder() .setDbName(DEFAULT_DATABASE) .setTableName("test_table_to_find_1") .addCol("test_col", "int") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); testTables[3] = new TableBuilder() @@ -111,14 +111,14 @@ public void setUp() throws Exception { .setTableName("test_table_to_find_2") .addCol("test_col", "int") .setType("VIEW") - .create(client, 
metaStore.getConf()); + .create(client, metaStore.getConf(), null); testTables[4] = new TableBuilder() .setDbName(DEFAULT_DATABASE) .setTableName("test_table_hidden_1") .addCol("test_col", "int") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); new DatabaseBuilder().setName(OTHER_DATABASE).create(client, metaStore.getConf()); @@ -127,14 +127,14 @@ public void setUp() throws Exception { .setDbName(OTHER_DATABASE) .setTableName("test_table") .addCol("test_col", "int") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); testTables[6] = new TableBuilder() .setDbName(OTHER_DATABASE) .setTableName("test_table_to_find_3") .addCol("test_col", "int") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); // Reload tables from the MetaStore for(int i=0; i < testTables.length; i++) { @@ -486,7 +486,7 @@ public void otherCatalog() throws TException { .setTableName(tableNames[i]) .addCol("col1_" + i, ColumnType.STRING_TYPE_NAME) .addCol("col2_" + i, ColumnType.INT_TYPE_NAME) - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); } Set tables = new HashSet<>(client.getTables(catName, dbName, "*e_in_other_*")); diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesList.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesList.java index 20c3af0..72a15eb 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesList.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesList.java @@ -86,7 +86,7 @@ public void setUp() throws Exception { .setOwner("Owner1") .setLastAccessTime(1000) .addTableParam("param1", "value1") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); testTables[1] = new TableBuilder() @@ -96,7 +96,7 @@ public void setUp() throws Exception { .setOwner("Owner1") .setLastAccessTime(2000) .addTableParam("param1", "value2") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); testTables[2] = new TableBuilder() @@ -106,7 +106,7 @@ public void setUp() throws Exception { .setOwner("Owner2") .setLastAccessTime(1000) .addTableParam("param1", "value2") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); testTables[3] = new TableBuilder() @@ -116,7 +116,7 @@ public void setUp() throws Exception { .setOwner("Owner3") .setLastAccessTime(3000) .addTableParam("param1", "value2") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); testTables[4] = new TableBuilder() @@ -126,14 +126,14 @@ public void setUp() throws Exception { .setOwner("Tester") .setLastAccessTime(2500) .addTableParam("param1", "value4") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); testTables[5] = new TableBuilder() .setDbName(DEFAULT_DATABASE) .setTableName("filter_test_table_5") .addCol("test_col", "int") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); new DatabaseBuilder().setName(OTHER_DATABASE).create(client, metaStore.getConf()); @@ -145,7 +145,7 @@ public void setUp() throws Exception { .setOwner("Owner1") .setLastAccessTime(1000) .addTableParam("param1", "value1") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); // Reload tables 
from the MetaStore for(int i=0; i < testTables.length; i++) { @@ -302,7 +302,7 @@ public void otherCatalogs() throws TException { .addCol("col1_" + i, ColumnType.STRING_TYPE_NAME) .addCol("col2_" + i, ColumnType.INT_TYPE_NAME); if (i == 0) builder.addTableParam("the_key", "the_value"); - builder.create(client, metaStore.getConf()); + builder.create(client, metaStore.getConf(), null); } String filter = hive_metastoreConstants.HIVE_FILTER_FIELD_PARAMS + "the_key=\"the_value\""; diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestUniqueConstraint.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestUniqueConstraint.java index 5842ec5..5f42557 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestUniqueConstraint.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestUniqueConstraint.java @@ -103,7 +103,7 @@ public void setUp() throws Exception { .setTableName("test_table_1") .addCol("col1", "int") .addCol("col2", "varchar(32)") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); testTables[1] = new TableBuilder() @@ -111,7 +111,7 @@ public void setUp() throws Exception { .setTableName("test_table_2") .addCol("col1", "int") .addCol("col2", "varchar(32)") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); testTables[2] = new TableBuilder() @@ -119,7 +119,7 @@ public void setUp() throws Exception { .setTableName("test_table_3") .addCol("col1", "int") .addCol("col2", "varchar(32)") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); // Reload tables from the MetaStore for(int i=0; i < testTables.length; i++) { @@ -232,7 +232,7 @@ public void createTableWithConstraintsPk() throws TException { .setConstraintName(constraintName) .build(metaStore.getConf()); - client.createTableWithConstraints(table, null, null, uc, null, null, null); + client.createTableWithConstraints(table, null, null, uc, null, null, null, null); UniqueConstraintsRequest rqst = new UniqueConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); List fetched = client.getUniqueConstraints(rqst); Assert.assertEquals(1, fetched.size()); @@ -267,7 +267,7 @@ public void createTableWithConstraintsPkInOtherCatalog() throws TException { .addColumn("col1") .build(metaStore.getConf()); - client.createTableWithConstraints(table, null, null, uc, null, null, null); + client.createTableWithConstraints(table, null, null, uc, null, null, null, null); UniqueConstraintsRequest rqst = new UniqueConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); List fetched = client.getUniqueConstraints(rqst); Assert.assertEquals(1, fetched.size()); diff --git a/standalone-metastore/metastore-tools/tools-common/src/main/java/org/apache/hadoop/hive/metastore/tools/HMSClient.java b/standalone-metastore/metastore-tools/tools-common/src/main/java/org/apache/hadoop/hive/metastore/tools/HMSClient.java index 7cc1e42..793829b 100644 --- a/standalone-metastore/metastore-tools/tools-common/src/main/java/org/apache/hadoop/hive/metastore/tools/HMSClient.java +++ b/standalone-metastore/metastore-tools/tools-common/src/main/java/org/apache/hadoop/hive/metastore/tools/HMSClient.java @@ -227,7 +227,8 @@ boolean dropDatabase(@NotNull String dbName) throws TException { } boolean createTable(Table table) throws TException 
{ - client.create_table(table); + // TODO =====to be reworked in HIVE-21637====== + client.create_table(table, null); return true; } @@ -241,15 +242,18 @@ Table getTable(@NotNull String dbName, @NotNull String tableName) throws TExcept } Partition createPartition(@NotNull Table table, @NotNull List<String> values) throws TException { - return client.add_partition(new Util.PartitionBuilder(table).withValues(values).build()); + // TODO =====to be reworked in HIVE-21637====== + return client.add_partition(new Util.PartitionBuilder(table).withValues(values).build(), null); } Partition addPartition(@NotNull Partition partition) throws TException { - return client.add_partition(partition); + // TODO =====to be reworked in HIVE-21637====== + return client.add_partition(partition, null); } void addPartitions(List<Partition> partitions) throws TException { - client.add_partitions(partitions); + // TODO =====to be reworked in HIVE-21637====== + client.add_partitions(partitions, null); } @@ -300,7 +304,8 @@ DropPartitionsResult dropPartitions(@NotNull String dbName, @NotNull String tabl boolean alterTable(@NotNull String dbName, @NotNull String tableName, @NotNull Table newTable) throws TException { - client.alter_table(dbName, tableName, newTable); + // TODO =====to be reworked in HIVE-21637====== + client.alter_table(dbName, tableName, newTable, null); return true; } @@ -316,7 +321,8 @@ void alterPartitions(@NotNull String dbName, @NotNull String tableName, void appendPartition(@NotNull String dbName, @NotNull String tableName, @NotNull List<String> partitionValues) throws TException { - client.append_partition_with_environment_context(dbName, tableName, partitionValues, null); + // TODO =====to be reworked in HIVE-21637====== + client.append_partition_with_environment_context(dbName, tableName, partitionValues, null, null); } private TTransport open(Configuration conf, @NotNull URI uri) throws diff --git a/storage-api/src/java/org/apache/hadoop/hive/common/ValidReaderWriteIdList.java b/storage-api/src/java/org/apache/hadoop/hive/common/ValidReaderWriteIdList.java index bc8ac0d..982f156 100644 --- a/storage-api/src/java/org/apache/hadoop/hive/common/ValidReaderWriteIdList.java +++ b/storage-api/src/java/org/apache/hadoop/hive/common/ValidReaderWriteIdList.java @@ -260,5 +260,18 @@ public RangeResponse isWriteIdRangeAborted(long minWriteId, long maxWriteId) { public ValidReaderWriteIdList updateHighWatermark(long value) { return new ValidReaderWriteIdList(tableName, exceptions, abortedBits, value, minOpenWriteId); } + + public void commitWriteId(long writeId) { + if (writeId > highWatermark) { + highWatermark = writeId; + } else { + int pos = Arrays.binarySearch(exceptions, writeId); + if (pos >= 0) { + long[] newExceptions = new long[exceptions.length-1]; + System.arraycopy(exceptions, 0, newExceptions, 0, pos); + System.arraycopy(exceptions, pos+1, newExceptions, pos, exceptions.length-pos-1); + exceptions = newExceptions; + } + } + } } diff --git a/storage-api/src/java/org/apache/hadoop/hive/common/ValidTxnWriteIdList.java b/storage-api/src/java/org/apache/hadoop/hive/common/ValidTxnWriteIdList.java index cfe01fe..acda189 100644 --- a/storage-api/src/java/org/apache/hadoop/hive/common/ValidTxnWriteIdList.java +++ b/storage-api/src/java/org/apache/hadoop/hive/common/ValidTxnWriteIdList.java @@ -20,6 +20,7 @@ import java.util.HashMap; import java.util.Map; +import java.util.Set; /** * An implementation to store and manage list of ValidWriteIds for each tables read by current @@ -102,4 +103,8 @@ private String writeToString() { } return buf.toString(); }
+ + public Set<String> getTableNames() { + return tablesValidWriteIdList.keySet(); + } } diff --git a/storage-api/src/java/org/apache/hadoop/hive/common/ValidWriteIdList.java b/storage-api/src/java/org/apache/hadoop/hive/common/ValidWriteIdList.java index b3d6402..93de2df 100644 --- a/storage-api/src/java/org/apache/hadoop/hive/common/ValidWriteIdList.java +++ b/storage-api/src/java/org/apache/hadoop/hive/common/ValidWriteIdList.java @@ -118,4 +118,9 @@ * @return smallest Open write Id in this set, {@code null} if there is none. */ Long getMinOpenWriteId(); + + /** + * Mark the writeId as committed. + */ + void commitWriteId(long writeId); } diff --git a/storage-api/src/java/org/apache/hive/common/util/TxnIdUtils.java b/storage-api/src/java/org/apache/hive/common/util/TxnIdUtils.java index 4b3cb7d..bd972d4 100644 --- a/storage-api/src/java/org/apache/hive/common/util/TxnIdUtils.java +++ b/storage-api/src/java/org/apache/hive/common/util/TxnIdUtils.java @@ -19,62 +19,69 @@ import org.apache.hadoop.hive.common.ValidWriteIdList; -import java.util.*; - public class TxnIdUtils { /** * Check if 2 ValidWriteIdLists are at an equivalent commit point. */ public static boolean checkEquivalentWriteIds(ValidWriteIdList a, ValidWriteIdList b) { + return compare(a, b) == 0; + } + + /*** Compare the freshness of two ValidWriteIdList * @param a * @param b * @return 0, if a and b are equivalent * 1, if a is more recent * -1, if b is more recent ***/ + public static int compare(ValidWriteIdList a, ValidWriteIdList b) { if (!a.getTableName().equalsIgnoreCase(b.getTableName())) { - return false; + return a.getTableName().toLowerCase().compareTo(b.getTableName().toLowerCase()); } - ValidWriteIdList newer = a; - ValidWriteIdList older = b; - if (a.getHighWatermark() < b.getHighWatermark()) { - newer = b; - older = a; + // The algorithm assumes invalidWriteIds are sorted and the values are less than or equal to the hwm; here is how + // the algorithm works: + // 1. Compare the two invalidWriteIds lists until one of them ends; a difference means the mismatched writeId is + // committed in one ValidWriteIdList but not the other, and the comparison ends + // 2. Every writeId from the last writeId in the shorter invalidWriteIds list up to its hwm should be committed + // in the other ValidWriteIdList, otherwise the comparison ends + // 3. Every writeId from the lower hwm to the higher hwm should be invalid, otherwise the comparison ends + int minLen = Math.min(a.getInvalidWriteIds().length, b.getInvalidWriteIds().length); + for (int i=0;i<minLen;i++) { + if (a.getInvalidWriteIds()[i] != b.getInvalidWriteIds()[i]) { + return a.getInvalidWriteIds()[i] > b.getInvalidWriteIds()[i]?1:-1; + } + } - - return checkEquivalentCommittedIds( - older.getHighWatermark(), older.getInvalidWriteIds(), - newer.getHighWatermark(), newer.getInvalidWriteIds()); - } - - /** - * Check the min open ID/highwater mark/exceptions list to see if 2 ID lists are at the same commit point. - * This can also be used for ValidTxnList as well as ValidWriteIdList. - */ - private static boolean checkEquivalentCommittedIds( - long oldHWM, long[] oldInvalidIds, - long newHWM, long[] newInvalidIds) { - - // There should be no valid txns in newer list that are not also in older. - // - All values in oldInvalidIds should also be in newInvalidIds. - // - if oldHWM < newHWM, then all IDs between oldHWM .. newHWM should exist in newInvalidTxns.
- // A Gap in the sequence means a committed txn in newer list (lists are not equivalent) - - if (newInvalidIds.length < oldInvalidIds.length) { - return false; + if (a.getInvalidWriteIds().length == b.getInvalidWriteIds().length) { + return Long.signum(a.getHighWatermark() - b.getHighWatermark()); } - - // Check that the values in the older list are also in newer. Lists should already be sorted. - for (int idx = 0; idx < oldInvalidIds.length; ++idx) { - if (oldInvalidIds[idx] != newInvalidIds[idx]) { - return false; + if (a.getInvalidWriteIds().length == minLen) { + if (a.getHighWatermark() != b.getInvalidWriteIds()[minLen] -1) { + return Long.signum(a.getHighWatermark() - (b.getInvalidWriteIds()[minLen] -1)); + } + if (allInvalidFrom(b.getInvalidWriteIds(), minLen, b.getHighWatermark())) { + return 0; + } else { + return -1; + } + } else { + if (b.getHighWatermark() != a.getInvalidWriteIds()[minLen] -1) { + return Long.signum(b.getHighWatermark() - (a.getInvalidWriteIds()[minLen] -1)); + } + if (allInvalidFrom(a.getInvalidWriteIds(), minLen, a.getHighWatermark())) { + return 0; + } else { + return 1; } } - - // If older committed state is equivalent to newer state, then there should be no committed IDs - // between oldHWM and newHWM, and newInvalidIds should have exactly (newHWM - oldHWM) - // more entries than oldInvalidIds. - long oldNewListSizeDifference = newInvalidIds.length - oldInvalidIds.length; - long oldNewHWMDifference = newHWM - oldHWM; - if (oldNewHWMDifference != oldNewListSizeDifference) { - return false; + } + private static boolean allInvalidFrom(long[] invalidIds, int start, long hwm) { + for (int i=start+1;i<invalidIds.length;i++) { + if (invalidIds[i] != (invalidIds[i-1]+1)) { + return false; + } + } + return (invalidIds[invalidIds.length-1] == hwm); + } } diff --git a/streaming/src/java/org/apache/hive/streaming/HiveStreamingConnection.java b/streaming/src/java/org/apache/hive/streaming/HiveStreamingConnection.java --- a/streaming/src/java/org/apache/hive/streaming/HiveStreamingConnection.java +++ b/streaming/src/java/org/apache/hive/streaming/HiveStreamingConnection.java @@ ... @@ private PartitionInfo createPartitionIfNotExists(List<String> partitionValu return new PartitionInfo(partName, partLocation, false); } - getMSC().add_partition(partition); + // TODO =====to be reworked in HIVE-21637====== + getMSC().add_partition(partition, null); if (LOG.isDebugEnabled()) { LOG.debug("Created partition {} for table {}", partName, tableObject.getFullyQualifiedName()); diff --git a/streaming/src/java/org/apache/hive/streaming/TransactionBatch.java b/streaming/src/java/org/apache/hive/streaming/TransactionBatch.java index a625759..2b41822 100644 --- a/streaming/src/java/org/apache/hive/streaming/TransactionBatch.java +++ b/streaming/src/java/org/apache/hive/streaming/TransactionBatch.java @@ -291,7 +291,8 @@ private void commitImpl(Set<String> partitions, String key, String value) conn.getMSC().commitTxnWithKeyValue(txnToWriteId.getTxnId(), tableId, key, value); } else { - conn.getMSC().commitTxn(txnToWriteId.getTxnId()); + // TODO =====to be reworked in HIVE-21637====== + conn.getMSC().commitTxn(txnToWriteId.getTxnId(), null); } // increment the min txn id so that heartbeat thread will heartbeat only from the next open transaction. // the current transaction is going to committed or fail, so don't need heartbeat for current transaction.
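For reference, the following is a minimal usage sketch of the storage-api changes above (TxnIdUtils.compare/checkEquivalentWriteIds and ValidWriteIdList.commitWriteId). It is not part of the patch; the class name, table name and writeId values are made up for illustration, and the last line assumes the commitWriteId fix noted above (storing the shrunken exceptions array).

import java.util.BitSet;
import org.apache.hadoop.hive.common.ValidReaderWriteIdList;
import org.apache.hive.common.util.TxnIdUtils;

public class WriteIdCompareExample {
  public static void main(String[] args) {
    // "a" has seen writes up to 10 on db.tbl; writeIds 7 and 9 are still open (invalid).
    ValidReaderWriteIdList a =
        new ValidReaderWriteIdList("db.tbl", new long[] {7, 9}, new BitSet(), 10);
    // "b" has seen one more committed write (high watermark 11) with the same invalid ids.
    ValidReaderWriteIdList b =
        new ValidReaderWriteIdList("db.tbl", new long[] {7, 9}, new BitSet(), 11);

    System.out.println(TxnIdUtils.compare(a, b));                 // -1: b is more recent
    System.out.println(TxnIdUtils.checkEquivalentWriteIds(a, a)); // true: same commit point

    // commitWriteId marks a previously open writeId as committed by dropping it from the
    // exceptions list (or by advancing the high watermark when the id is beyond it).
    a.commitWriteId(9);
    System.out.println(a.isWriteIdValid(9));                      // true once 9 is no longer an exception
  }
}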