diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java index 3cf172b..b4be219 100644 --- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java +++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java @@ -114,7 +114,7 @@ public void testCustomPerms() throws Exception { // Lets first test for default permissions, this is the case when user specified nothing. Table tbl = getTable(dbName, tblName, typeName); - msc.createTable(tbl); + msc.createTable(tbl, null); Database db = Hive.get(hcatConf).getDatabase(dbName); Path dfsPath = clientWH.getDefaultTablePath(db, tblName); cleanupTbl(dbName, tblName, typeName); diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/common/TestHiveClientCache.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/common/TestHiveClientCache.java index fe1d8af..e340699 100644 --- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/common/TestHiveClientCache.java +++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/common/TestHiveClientCache.java @@ -208,7 +208,7 @@ public void testHMSCBreakability() throws IOException, MetaException, LoginExcep // Break the client try { - client.createTable(tbl); + client.createTable(tbl, null); fail("Exception was expected while creating table with long name"); } catch (Exception e) { } diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/HCatMapReduceTest.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/HCatMapReduceTest.java index 983a66a..ebca35a 100644 --- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/HCatMapReduceTest.java +++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/HCatMapReduceTest.java @@ -220,7 +220,7 @@ public void createTable() throws Exception { tableParams.put(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, "false"); tbl.setParameters(tableParams); - client.createTable(tbl); + client.createTable(tbl, null); } /* diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMultiOutputFormat.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMultiOutputFormat.java index 8a8a326..d706ab8 100644 --- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMultiOutputFormat.java +++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMultiOutputFormat.java @@ -243,7 +243,7 @@ private static void createTable(String tableName, String tablePerm) throws Excep org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe.class.getName()); tbl.setPartitionKeys(ColumnHolder.partitionCols); - hmsc.createTable(tbl); + hmsc.createTable(tbl, null); Path path = new Path(warehousedir, tableName); FileSystem fs = path.getFileSystem(hiveConf); fs.setPermission(path, new FsPermission(tablePerm)); diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatOutputFormat.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatOutputFormat.java index 4ac01df..4aafedf 100644 --- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatOutputFormat.java +++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatOutputFormat.java @@ -130,7 +130,7 @@ private void initTable() throws Exception { tbl.setParameters(tableParams); - client.createTable(tbl); + client.createTable(tbl, null); Path tblPath = new 
Path(client.getTable(dbName, tblName).getSd().getLocation()); assertTrue(tblPath.getFileSystem(hiveConf).mkdirs(new Path(tblPath, "colname=p1"))); diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java index 22a0d3f..28edff4 100644 --- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java +++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java @@ -261,7 +261,7 @@ private void createTable(String dbName, String tableName) throws Exception { Map tableParams = new HashMap(); tbl.setParameters(tableParams); - msc.createTable(tbl); + msc.createTable(tbl, null); } protected List getPartitionKeys() { diff --git a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java index e611394..821469f 100644 --- a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java +++ b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java @@ -206,7 +206,7 @@ public void onCreateTable(CreateTableEvent tableEvent) throws MetaException { FileIterator fileIter = MetaStoreUtils.isExternalTable(t) ? null : new FileIterator(t.getSd().getLocation()); CreateTableMessage msg = - MessageBuilder.getInstance().buildCreateTableMessage(t, fileIter); + MessageBuilder.getInstance().buildCreateTableMessage(t, fileIter, tableEvent.getValidWriteIdList()); NotificationEvent event = new NotificationEvent(0, now(), EventType.CREATE_TABLE.toString(), msgEncoder.getSerializer().serialize(msg)); @@ -571,7 +571,7 @@ public void onOpenTxn(OpenTxnEvent openTxnEvent, Connection dbConn, SQLGenerator public void onCommitTxn(CommitTxnEvent commitTxnEvent, Connection dbConn, SQLGenerator sqlGenerator) throws MetaException { CommitTxnMessage msg = - MessageBuilder.getInstance().buildCommitTxnMessage(commitTxnEvent.getTxnId()); + MessageBuilder.getInstance().buildCommitTxnMessage(commitTxnEvent.getTxnId(), commitTxnEvent.getTxnWriteIds()); NotificationEvent event = new NotificationEvent(0, now(), EventType.COMMIT_TXN.toString(), diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java index 66a1737..867b640 100644 --- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java +++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java @@ -898,7 +898,8 @@ public Void run() throws StreamingException { private void commitImpl() throws TransactionError, StreamingException { try { recordWriter.flush(); - msClient.commitTxn(txnToWriteIds.get(currentTxnIndex).getTxnId()); + // TODO =====to be reworked in HIVE-21637====== + msClient.commitTxn(txnToWriteIds.get(currentTxnIndex).getTxnId(), null); state = TxnState.COMMITTED; txnStatus[currentTxnIndex] = TxnState.COMMITTED; } catch (NoSuchTxnException e) { diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/Transaction.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/Transaction.java index e1c6735..6ddffdba 100644 --- 
a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/Transaction.java +++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/Transaction.java @@ -83,7 +83,8 @@ public void commit() throws TransactionException { throw new TransactionException("Unable to release lock: " + lock + " for transaction: " + transactionId, e); } try { - metaStoreClient.commitTxn(transactionId); + // TODO =====to be reworked in HIVE-21637====== + metaStoreClient.commitTxn(transactionId, null); state = TxnState.COMMITTED; } catch (NoSuchTxnException e) { throw new TransactionException("Invalid transaction id: " + transactionId, e); diff --git a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/StreamingTestUtils.java b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/StreamingTestUtils.java index afda7d5..e459b14 100644 --- a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/StreamingTestUtils.java +++ b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/StreamingTestUtils.java @@ -264,7 +264,7 @@ private Table internalCreate(IMetaStoreClient metaStoreClient) throws Exception table.setPartitionKeys(partitionFields); } if (metaStoreClient != null) { - metaStoreClient.createTable(table); + metaStoreClient.createTable(table, null); } for (List partitionValues : partitions) { diff --git a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/client/TestTransaction.java b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/client/TestTransaction.java index c47cf4d..51f94ea 100644 --- a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/client/TestTransaction.java +++ b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/client/TestTransaction.java @@ -84,7 +84,7 @@ public void testCommit() throws Exception { transaction.commit(); verify(mockLock).release(); - verify(mockMetaStoreClient).commitTxn(TRANSACTION_ID); + verify(mockMetaStoreClient).commitTxn(TRANSACTION_ID, null); assertThat(transaction.getState(), is(TransactionBatch.TxnState.COMMITTED)); } diff --git a/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java b/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java index a06191d..f734e07 100644 --- a/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java +++ b/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java @@ -209,7 +209,8 @@ public HCatTable getTable(String dbName, String tableName) public void createTable(HCatCreateTableDesc createTableDesc) throws HCatException { try { - hmsClient.createTable(createTableDesc.getHCatTable().toHiveTable()); + // TODO =====to be reworked in HIVE-21637====== + hmsClient.createTable(createTableDesc.getHCatTable().toHiveTable(), null); } catch (AlreadyExistsException e) { if (!createTableDesc.getIfNotExists()) { throw new HCatException( @@ -286,7 +287,8 @@ public void createTableLike(String dbName, String existingTblName, newTableName, ifNotExists, location); if (hiveTable != null) { try { - hmsClient.createTable(hiveTable); + // TODO =====to be reworked in HIVE-21637====== + hmsClient.createTable(hiveTable, null); } catch (AlreadyExistsException e) { if (!ifNotExists) { throw new HCatException( diff --git a/ql/src/java/org/apache/hadoop/hive/metastore/SynchronizedMetaStoreClient.java 
b/ql/src/java/org/apache/hadoop/hive/metastore/SynchronizedMetaStoreClient.java index 93e9d48..6aab262 100644 --- a/ql/src/java/org/apache/hadoop/hive/metastore/SynchronizedMetaStoreClient.java +++ b/ql/src/java/org/apache/hadoop/hive/metastore/SynchronizedMetaStoreClient.java @@ -56,8 +56,8 @@ public synchronized long openTxn(String user) throws TException { return client.openTxn(user); } - public synchronized void commitTxn(long txnid) throws TException { - client.commitTxn(txnid); + public synchronized void commitTxn(long txnid, String writeIds) throws TException { + client.commitTxn(txnid, writeIds); } public synchronized void rollbackTxn(long txnid) throws TException { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java index 4f14fa5..c256f7c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java @@ -1729,10 +1729,6 @@ public void releaseLocksAndCommitOrRollback(boolean commit, HiveTxnManager txnMa } else { txnMgr = txnManager; } - // If we've opened a transaction we need to commit or rollback rather than explicitly - // releasing the locks. - conf.unset(ValidTxnList.VALID_TXNS_KEY); - conf.unset(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY); if(!checkConcurrency()) { return; } @@ -1759,6 +1755,11 @@ public void releaseLocksAndCommitOrRollback(boolean commit, HiveTxnManager txnMa ctx.setHiveLocks(null); } + // If we've opened a transaction we need to commit or rollback rather than explicitly + // releasing the locks. + conf.unset(ValidTxnList.VALID_TXNS_KEY); + conf.unset(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY); + perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.RELEASE_LOCKS); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLOperation.java index d556d55..d84d5ff 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLOperation.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLOperation.java @@ -18,6 +18,17 @@ package org.apache.hadoop.hive.ql.ddl; +import java.util.ArrayList; +import java.util.List; + +import org.apache.hadoop.hive.common.TableName; +import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidTxnWriteIdList; +import org.apache.hadoop.hive.common.ValidWriteIdList; +import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager; +import org.apache.hadoop.hive.ql.lockmgr.LockException; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.session.SessionState; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -34,4 +45,29 @@ public DDLOperation(DDLOperationContext context) { } public abstract int execute() throws Exception; + + protected ValidWriteIdList advanceWriteId(Table tbl) throws LockException { + HiveTxnManager txnMgr = SessionState.get().getTxnMgr(); + long writeId = txnMgr.getTableWriteId(tbl.getDbName(), tbl.getTableName()); + List txnTables = new ArrayList<>(); + String fullTableName = TableName.getDbTable(tbl.getDbName(), tbl.getTableName()); + txnTables.add(fullTableName); + ValidTxnWriteIdList txnWriteIds; + if (context.getConf().get(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY) != null) { + txnWriteIds = new ValidTxnWriteIdList(context.getConf().get(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY)); + } else { + String txnString; + if (context.getConf().get(ValidTxnList.VALID_TXNS_KEY) != null) { + txnString = context.getConf().get(ValidTxnList.VALID_TXNS_KEY); + } else { + 
ValidTxnList txnIds = txnMgr.getValidTxns(); + txnString = txnIds.toString(); + } + txnWriteIds = txnMgr.getValidWriteIds(txnTables, txnString); + } + ValidWriteIdList writeIds = txnWriteIds.getTableValidWriteIdList(fullTableName); + writeIds.commitWriteId(writeId); + context.getConf().set(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY, txnWriteIds.toString()); + return writeIds; + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableLikeOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableLikeOperation.java index 7cec1e3..1a7e288 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableLikeOperation.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableLikeOperation.java @@ -79,7 +79,8 @@ public int execute() throws HiveException { } // create the table - context.getDb().createTable(tbl, desc.getIfNotExists()); + // TODO =====to be reworked in HIVE-21637====== + context.getDb().createTable(tbl, desc.getIfNotExists(), null); DDLUtils.addIfAbsentByName(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK), context); return 0; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableOperation.java index 7da3d26..30cd770 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableOperation.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateTableOperation.java @@ -21,6 +21,7 @@ import org.apache.commons.collections.CollectionUtils; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.StatsSetupConst; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.conf.Constants; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.TableType; @@ -79,11 +80,13 @@ public int execute() throws HiveException { } } + ValidWriteIdList writeIds = advanceWriteId(tbl); + // create the table if (desc.getReplaceMode()) { createTableReplaceMode(tbl, replDataLocationChanged); } else { - createTableNonReplaceMode(tbl); + createTableNonReplaceMode(tbl, writeIds); } DDLUtils.addIfAbsentByName(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK), context); @@ -122,7 +125,7 @@ private void createTableReplaceMode(Table tbl, boolean replDataLocationChanged) true, writeId); } - private void createTableNonReplaceMode(Table tbl) throws HiveException { + private void createTableNonReplaceMode(Table tbl, ValidWriteIdList writeIds) throws HiveException { if (CollectionUtils.isNotEmpty(desc.getPrimaryKeys()) || CollectionUtils.isNotEmpty(desc.getForeignKeys()) || CollectionUtils.isNotEmpty(desc.getUniqueConstraints()) || @@ -131,9 +134,9 @@ private void createTableNonReplaceMode(Table tbl) throws HiveException { CollectionUtils.isNotEmpty(desc.getCheckConstraints())) { context.getDb().createTable(tbl, desc.getIfNotExists(), desc.getPrimaryKeys(), desc.getForeignKeys(), desc.getUniqueConstraints(), desc.getNotNullConstraints(), desc.getDefaultConstraints(), - desc.getCheckConstraints()); + desc.getCheckConstraints(), writeIds); } else { - context.getDb().createTable(tbl, desc.getIfNotExists()); + context.getDb().createTable(tbl, desc.getIfNotExists(), writeIds); } if (desc.isCTAS()) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateViewOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateViewOperation.java index bb9b189..2b92b2c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateViewOperation.java +++ 
b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/CreateViewOperation.java @@ -104,7 +104,8 @@ public int execute() throws HiveException { cm.setValidTxnList(context.getConf().get(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY)); tbl.getTTable().setCreationMetadata(cm); } - context.getDb().createTable(tbl, desc.getIfNotExists()); + // TODO =====to be reworked in HIVE-21637====== + context.getDb().createTable(tbl, desc.getIfNotExists(), null); DDLUtils.addIfAbsentByName(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK), context.getWork().getOutputs()); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DescTableOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DescTableOperation.java index 1d94ff3..ec37ce1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DescTableOperation.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/DescTableOperation.java @@ -85,7 +85,8 @@ public int execute() throws Exception { String tableName = desc.getTableName(); // describe the table - populate the output stream - Table tbl = context.getDb().getTable(tableName, false); + String[] names = Utilities.getDbTableName(tableName); + Table tbl = context.getDb().getTable(names[0], names[1], true, true, false); if (tbl == null) { throw new HiveException(ErrorMsg.INVALID_TABLE, tableName); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java index 800d80a..acc7f2b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java @@ -492,7 +492,8 @@ public void commitTxn() throws LockException { // do all new clear in clearLocksAndHB method to make sure that same code is there for replCommitTxn flow. 
clearLocksAndHB(); LOG.debug("Committing txn " + JavaUtils.txnIdToString(txnId)); - getMS().commitTxn(txnId); + String txnWriteIdString = conf.get(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY); + getMS().commitTxn(txnId, txnWriteIdString); } catch (NoSuchTxnException e) { LOG.error("Metastore could not find " + JavaUtils.txnIdToString(txnId)); throw new LockException(e, ErrorMsg.TXN_NO_SUCH_TRANSACTION, JavaUtils.txnIdToString(txnId)); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index 40c6cf6..4506b3c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -554,9 +554,9 @@ public void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownD */ public void createTable(String tableName, List columns, List partCols, Class fileInputFormat, - Class fileOutputFormat) throws HiveException { + Class fileOutputFormat, ValidWriteIdList writeIds) throws HiveException { this.createTable(tableName, columns, partCols, fileInputFormat, - fileOutputFormat, -1, null); + fileOutputFormat, -1, null, writeIds); } /** @@ -581,10 +581,10 @@ public void createTable(String tableName, List columns, */ public void createTable(String tableName, List columns, List partCols, Class fileInputFormat, - Class fileOutputFormat, int bucketCount, List bucketCols) + Class fileOutputFormat, int bucketCount, List bucketCols, ValidWriteIdList writeIds) throws HiveException { createTable(tableName, columns, partCols, fileInputFormat, fileOutputFormat, bucketCount, - bucketCols, null); + bucketCols, null, writeIds); } /** @@ -603,7 +603,7 @@ public void createTable(String tableName, List columns, public void createTable(String tableName, List columns, List partCols, Class fileInputFormat, Class fileOutputFormat, int bucketCount, List bucketCols, - Map parameters) throws HiveException { + Map parameters, ValidWriteIdList writeIds) throws HiveException { if (columns == null) { throw new HiveException("columns not specified for table " + tableName); } @@ -631,7 +631,7 @@ public void createTable(String tableName, List columns, List par if (parameters != null) { tbl.setParameters(parameters); } - createTable(tbl); + createTable(tbl, writeIds); } @@ -957,8 +957,8 @@ public void alterDatabase(String dbName, Database db) * a table object * @throws HiveException */ - public void createTable(Table tbl) throws HiveException { - createTable(tbl, false); + public void createTable(Table tbl, ValidWriteIdList txnWriteIds) throws HiveException { + createTable(tbl, false, txnWriteIds); } // TODO: from here down dozens of methods do not support catalog. I got tired marking them. 
@@ -991,7 +991,8 @@ public void createTable(Table tbl, boolean ifNotExists, List uniqueConstraints, List notNullConstraints, List defaultConstraints, - List checkConstraints) + List checkConstraints, + ValidWriteIdList writeIds) throws HiveException { try { if (tbl.getDbName() == null || "".equals(tbl.getDbName().trim())) { @@ -1030,7 +1031,7 @@ public void createTable(Table tbl, boolean ifNotExists, if (primaryKeys == null && foreignKeys == null && uniqueConstraints == null && notNullConstraints == null && defaultConstraints == null && checkConstraints == null) { - getMSC().createTable(tTbl); + getMSC().createTable(tTbl, writeIds.toString()); } else { getMSC().createTableWithConstraints(tTbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints); @@ -1045,9 +1046,9 @@ public void createTable(Table tbl, boolean ifNotExists, } } - public void createTable(Table tbl, boolean ifNotExists) throws HiveException { + public void createTable(Table tbl, boolean ifNotExists, ValidWriteIdList writeIds) throws HiveException { createTable(tbl, ifNotExists, null, null, null, null, - null, null); + null, null, writeIds); } public static List getFieldsFromDeserializerForMsStorage( diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java index 410868c..5166df1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java @@ -111,7 +111,7 @@ private Warehouse getWh() throws MetaException { @Override protected void create_table_with_environment_context( - org.apache.hadoop.hive.metastore.api.Table tbl, EnvironmentContext envContext) + org.apache.hadoop.hive.metastore.api.Table tbl, EnvironmentContext envContext, String validWriteIdList) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, TException { @@ -120,7 +120,7 @@ protected void create_table_with_environment_context( return; } // non-temp tables should use underlying client. 
- super.create_table_with_environment_context(tbl, envContext); + super.create_table_with_environment_context(tbl, envContext, validWriteIdList); } @Override diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/MergeSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/MergeSemanticAnalyzer.java index db6d551..7db181e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/MergeSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/MergeSemanticAnalyzer.java @@ -384,7 +384,8 @@ private boolean handleCardinalityViolation(StringBuilder rewrittenQueryStr, ASTN table.setStoredAsSubDirectories(false); table.setInputFormatClass(format.getInputFormat()); table.setOutputFormatClass(format.getOutputFormat()); - db.createTable(table, true); + // TODO =====to be reworked in HIVE-21637====== + db.createTable(table, true, null); } } catch(HiveException|MetaException e) { throw new SemanticException(e.getMessage(), e); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java index 4fccfff..5c5dc94 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java @@ -213,7 +213,8 @@ public Object run() throws Exception { } heartbeater.cancel(); msc.markCompacted(CompactionInfo.compactionInfoToStruct(ci)); - msc.commitTxn(compactorTxnId); + // TODO =====to be reworked in HIVE-21637====== + msc.commitTxn(compactorTxnId, null); if (conf.getBoolVar(HiveConf.ConfVars.HIVE_IN_TEST)) { mrJob = mr.getMrJob(); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/util/HiveStrictManagedMigration.java b/ql/src/java/org/apache/hadoop/hive/ql/util/HiveStrictManagedMigration.java index 80025b7..3baa46b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/util/HiveStrictManagedMigration.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/util/HiveStrictManagedMigration.java @@ -1058,7 +1058,8 @@ private void closeTxnCtx(TxnCtx txnCtx, IMetaStoreClient msc, boolean isOk) if (txnCtx == null) return; try { if (isOk) { - msc.commitTxn(txnCtx.txnId); + // TODO =====to be reworked in HIVE-21637====== + msc.commitTxn(txnCtx.txnId, null); } else { msc.abortTxns(Lists.newArrayList(txnCtx.txnId)); } diff --git a/ql/src/test/org/apache/hadoop/hive/metastore/TestMetastoreExpr.java b/ql/src/test/org/apache/hadoop/hive/metastore/TestMetastoreExpr.java index 1becbb8..3a09e82 100644 --- a/ql/src/test/org/apache/hadoop/hive/metastore/TestMetastoreExpr.java +++ b/ql/src/test/org/apache/hadoop/hive/metastore/TestMetastoreExpr.java @@ -120,7 +120,7 @@ public void testPartitionExpr() throws Exception { addSd(cols, tbl); tbl.setPartitionKeys(partCols); - client.createTable(tbl); + client.createTable(tbl, null); tbl = client.getTable(dbName, tblName); addPartition(client, tbl, Lists.newArrayList("p11", "32"), "part1"); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java index 78f2585..cf10ce1 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java @@ -140,7 +140,7 @@ for (String src : srctables) { db.dropTable(Warehouse.DEFAULT_DATABASE_NAME, src, true, true); db.createTable(src, cols, null, TextInputFormat.class, - HiveIgnoreKeyTextOutputFormat.class); + HiveIgnoreKeyTextOutputFormat.class, null); db.loadTable(hadoopDataFile[i], src, LoadFileType.KEEP_EXISTING, true, 
false, false, true, null, 0, false); i++; diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestMsckCreatePartitionsInBatches.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestMsckCreatePartitionsInBatches.java index 3e45016..9076ded 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestMsckCreatePartitionsInBatches.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestMsckCreatePartitionsInBatches.java @@ -109,7 +109,7 @@ private Table createPartitionedTable(String catName, String dbName, String table sd.setCols(Arrays.asList(col1, col2)); table.setPartitionKeys(Arrays.asList(col3)); table.setSd(sd); - db.createTable(table); + db.createTable(table, null); return db.getTable(catName, dbName, tableName); } catch (Exception exception) { fail("Unable to drop and create table " + StatsUtils.getFullyQualifiedTableName(dbName, tableName) + " because " diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestMsckDropPartitionsInBatches.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestMsckDropPartitionsInBatches.java index 1ec4636..ffcf264 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestMsckDropPartitionsInBatches.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestMsckDropPartitionsInBatches.java @@ -106,7 +106,7 @@ private Table createPartitionedTable(String catName, String dbName, String table sd.setCols(Arrays.asList(col1, col2)); table.setPartitionKeys(Arrays.asList(col3)); table.setSd(sd); - db.createTable(table); + db.createTable(table, null); return db.getTable(catName, dbName, tableName); } catch (Exception exception) { fail("Unable to drop and create table " + StatsUtils diff --git a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java index 8d55fec..44a5fc2 100755 --- a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java @@ -182,7 +182,7 @@ public void testTable() throws Throwable { // create table setNullCreateTableGrants(); try { - hm.createTable(tbl); + hm.createTable(tbl, null); } catch (HiveException e) { e.printStackTrace(); assertTrue("Unable to create table: " + tableName, false); @@ -242,7 +242,7 @@ public void testThriftTable() throws Throwable { setNullCreateTableGrants(); try { - hm.createTable(tbl); + hm.createTable(tbl, null); } catch (HiveException e) { System.err.println(StringUtils.stringifyException(e)); assertTrue("Unable to create table: " + tableName, false); @@ -394,10 +394,10 @@ public void testGetAndDropTables() throws Throwable { ts.add(table1Name); ts.add("table2"); Table tbl1 = createTestTable(dbName, ts.get(0)); - hm.createTable(tbl1); + hm.createTable(tbl1, null); Table tbl2 = createTestTable(dbName, ts.get(1)); - hm.createTable(tbl2); + hm.createTable(tbl2, null); List fts = hm.getTablesForDb(dbName, ".*"); assertEquals(ts, fts); @@ -495,9 +495,9 @@ public void testDropTableTrash() throws Throwable { ts.add(tableBaseName + "1"); ts.add(tableBaseName + "2"); Table tbl1 = createTestTable(dbName, ts.get(0)); - hm.createTable(tbl1); + hm.createTable(tbl1, null); Table tbl2 = createTestTable(dbName, ts.get(1)); - hm.createTable(tbl2); + hm.createTable(tbl2, null); // test dropping tables and trash behavior Table table1 = hm.getTable(dbName, ts.get(0)); assertNotNull(table1); @@ -565,7 +565,7 @@ private Table createPartitionedTable(String dbName, String tableName) throws Exc Arrays.asList("key", "value"), // Data columns. Arrays.asList("ds", "hr"), // Partition columns. 
TextInputFormat.class, - HiveIgnoreKeyTextOutputFormat.class); + HiveIgnoreKeyTextOutputFormat.class, null); return hm.getTable(dbName, tableName); } catch (Exception exception) { @@ -722,7 +722,7 @@ public void testPartition() throws Throwable { part_cols.add("hr"); try { hm.createTable(tableName, cols, part_cols, TextInputFormat.class, - HiveIgnoreKeyTextOutputFormat.class); + HiveIgnoreKeyTextOutputFormat.class, null); } catch (HiveException e) { System.err.println(StringUtils.stringifyException(e)); assertTrue("Unable to create table: " + tableName, false); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveMetaStoreChecker.java b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveMetaStoreChecker.java index 520eb1b..e1244f3 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveMetaStoreChecker.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveMetaStoreChecker.java @@ -142,7 +142,7 @@ public void testTableCheck() throws HiveException, IOException, TException, Meta table.setInputFormatClass(TextInputFormat.class); table.setOutputFormatClass(HiveIgnoreKeyTextOutputFormat.class); - hive.createTable(table); + hive.createTable(table, null); Assert.assertTrue(table.getTTable().isSetId()); table.getTTable().unsetId(); // now we've got a table, check that it works @@ -194,7 +194,7 @@ public void testTableCheck() throws HiveException, IOException, TException, Meta // create a new external table hive.dropTable(dbName, tableName); table.setProperty("EXTERNAL", "TRUE"); - hive.createTable(table); + hive.createTable(table, null); // should return all ok result = new CheckResult(); @@ -284,7 +284,7 @@ private Table createTestTable() throws HiveException, AlreadyExistsException { table.setOutputFormatClass(HiveIgnoreKeyTextOutputFormat.class); table.setPartCols(partCols); - hive.createTable(table); + hive.createTable(table, null); table = hive.getTable(dbName, tableName); Assert.assertTrue(table.getTTable().isSetId()); table.getTTable().unsetId(); @@ -345,7 +345,7 @@ public void testPartitionsCheck() throws HiveException, // cleanup hive.dropTable(dbName, tableName, true, true); - hive.createTable(table); + hive.createTable(table, null); result = new CheckResult(); checker.checkMetastore(catName, dbName, null, null, result); assertEquals(Collections.emptySet(), result.getTablesNotInMs()); @@ -369,7 +369,7 @@ public void testDataDeletion() throws HiveException, table.setOutputFormatClass(HiveIgnoreKeyTextOutputFormat.class); table.setPartCols(partCols); - hive.createTable(table); + hive.createTable(table, null); table = hive.getTable(dbName, tableName); Path fakeTable = table.getPath().getParent().suffix( @@ -618,7 +618,7 @@ private Table createPartitionedTestTable(String dbName, String tableName, int nu } table.setPartCols(partKeys); // create table - hive.createTable(table, true); + hive.createTable(table, true, null); table = hive.getTable(dbName, tableName); if (valuesPerPartition == 0) { return table; diff --git a/ql/src/test/org/apache/hadoop/hive/ql/parse/TestQBCompact.java b/ql/src/test/org/apache/hadoop/hive/ql/parse/TestQBCompact.java index 90df85c..9c61c3e 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/parse/TestQBCompact.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/parse/TestQBCompact.java @@ -62,7 +62,7 @@ public static void init() throws Exception { cols.add("a"); List partCols = new ArrayList(); partCols.add("ds"); - h.createTable("foo", cols, partCols, OrcInputFormat.class, OrcOutputFormat.class); + h.createTable("foo", cols, 
partCols, OrcInputFormat.class, OrcOutputFormat.class, null); Table t = h.getTable("foo"); Map partSpec = new HashMap(); partSpec.put("ds", "today"); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/parse/TestUpdateDeleteSemanticAnalyzer.java b/ql/src/test/org/apache/hadoop/hive/ql/parse/TestUpdateDeleteSemanticAnalyzer.java index 9b6827e..3c64842 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/parse/TestUpdateDeleteSemanticAnalyzer.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/parse/TestUpdateDeleteSemanticAnalyzer.java @@ -271,9 +271,9 @@ private ReturnInfo parseAndAnalyze(String query, String testName) Map params = new HashMap(1); params.put(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, "true"); db.createTable("T", Arrays.asList("a", "b"), null, OrcInputFormat.class, - OrcOutputFormat.class, 2, Arrays.asList("a"), params); + OrcOutputFormat.class, 2, Arrays.asList("a"), params, null); db.createTable("U", Arrays.asList("a", "b"), Arrays.asList("ds"), OrcInputFormat.class, - OrcOutputFormat.class, 2, Arrays.asList("a"), params); + OrcOutputFormat.class, 2, Arrays.asList("a"), params, null); Table u = db.getTable("U"); Map partVals = new HashMap(2); partVals.put("ds", "yesterday"); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/stats/TestStatsUpdaterThread.java b/ql/src/test/org/apache/hadoop/hive/ql/stats/TestStatsUpdaterThread.java index a2f8bab..d920d64 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/stats/TestStatsUpdaterThread.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/stats/TestStatsUpdaterThread.java @@ -211,7 +211,7 @@ public void testTxnTable() throws Exception { currentWriteIds = msClient.getValidWriteIds(fqName).toString(); verifyStatsUpToDate(tblName, cols, msClient, currentWriteIds, false); - msClient.commitTxn(badTxnId); + msClient.commitTxn(badTxnId, null); // Analyze should be able to override stats of an committed txn. 
assertTrue(su.runOneIteration()); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java index cfd7290..d298a95 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java @@ -174,7 +174,7 @@ protected Table newTable(String dbName, String tableName, boolean partitioned, // drop the table first, in case some previous test created it ms.dropTable(dbName, tableName); - ms.createTable(table); + ms.createTable(table, null); return table; } diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CommitTxnRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CommitTxnRequest.java index bbefc3d..7fd96ff 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CommitTxnRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CommitTxnRequest.java @@ -43,6 +43,7 @@ private static final org.apache.thrift.protocol.TField WRITE_EVENT_INFOS_FIELD_DESC = new org.apache.thrift.protocol.TField("writeEventInfos", org.apache.thrift.protocol.TType.LIST, (short)3); private static final org.apache.thrift.protocol.TField KEY_VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("keyValue", org.apache.thrift.protocol.TType.STRUCT, (short)4); private static final org.apache.thrift.protocol.TField REPL_LAST_ID_INFO_FIELD_DESC = new org.apache.thrift.protocol.TField("replLastIdInfo", org.apache.thrift.protocol.TType.STRUCT, (short)5); + private static final org.apache.thrift.protocol.TField TXN_WRITE_IDS_FIELD_DESC = new org.apache.thrift.protocol.TField("txnWriteIds", org.apache.thrift.protocol.TType.STRING, (short)6); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -55,6 +56,7 @@ private List writeEventInfos; // optional private CommitTxnKeyValue keyValue; // optional private ReplLastIdInfo replLastIdInfo; // optional + private String txnWriteIds; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -62,7 +64,8 @@ REPL_POLICY((short)2, "replPolicy"), WRITE_EVENT_INFOS((short)3, "writeEventInfos"), KEY_VALUE((short)4, "keyValue"), - REPL_LAST_ID_INFO((short)5, "replLastIdInfo"); + REPL_LAST_ID_INFO((short)5, "replLastIdInfo"), + TXN_WRITE_IDS((short)6, "txnWriteIds"); private static final Map byName = new HashMap(); @@ -87,6 +90,8 @@ public static _Fields findByThriftId(int fieldId) { return KEY_VALUE; case 5: // REPL_LAST_ID_INFO return REPL_LAST_ID_INFO; + case 6: // TXN_WRITE_IDS + return TXN_WRITE_IDS; default: return null; } @@ -129,7 +134,7 @@ public String getFieldName() { // isset id assignments private static final int __TXNID_ISSET_ID = 0; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.REPL_POLICY,_Fields.WRITE_EVENT_INFOS,_Fields.KEY_VALUE,_Fields.REPL_LAST_ID_INFO}; + private static final _Fields optionals[] = {_Fields.REPL_POLICY,_Fields.WRITE_EVENT_INFOS,_Fields.KEY_VALUE,_Fields.REPL_LAST_ID_INFO,_Fields.TXN_WRITE_IDS}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -144,6 +149,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, CommitTxnKeyValue.class))); tmpMap.put(_Fields.REPL_LAST_ID_INFO, new org.apache.thrift.meta_data.FieldMetaData("replLastIdInfo", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ReplLastIdInfo.class))); + tmpMap.put(_Fields.TXN_WRITE_IDS, new org.apache.thrift.meta_data.FieldMetaData("txnWriteIds", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CommitTxnRequest.class, metaDataMap); } @@ -181,6 +188,9 @@ public CommitTxnRequest(CommitTxnRequest other) { if (other.isSetReplLastIdInfo()) { this.replLastIdInfo = new ReplLastIdInfo(other.replLastIdInfo); } + if (other.isSetTxnWriteIds()) { + this.txnWriteIds = other.txnWriteIds; + } } public CommitTxnRequest deepCopy() { @@ -195,6 +205,7 @@ public void clear() { this.writeEventInfos = null; this.keyValue = null; this.replLastIdInfo = null; + this.txnWriteIds = null; } public long getTxnid() { @@ -326,6 +337,29 @@ public void setReplLastIdInfoIsSet(boolean value) { } } + public String getTxnWriteIds() { + return this.txnWriteIds; + } + + public void setTxnWriteIds(String txnWriteIds) { + this.txnWriteIds = txnWriteIds; + } + + public void unsetTxnWriteIds() { + this.txnWriteIds = null; + } + + /** Returns true if field txnWriteIds is set (has been assigned a value) and false otherwise */ + public boolean isSetTxnWriteIds() { + return this.txnWriteIds != null; + } + + public void setTxnWriteIdsIsSet(boolean value) { + if (!value) { + this.txnWriteIds = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case TXNID: @@ -368,6 +402,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case TXN_WRITE_IDS: + if (value == null) { + unsetTxnWriteIds(); + } else { + setTxnWriteIds((String)value); + } + break; + } } @@ -388,6 +430,9 @@ public Object getFieldValue(_Fields field) { 
case REPL_LAST_ID_INFO: return getReplLastIdInfo(); + case TXN_WRITE_IDS: + return getTxnWriteIds(); + } throw new IllegalStateException(); } @@ -409,6 +454,8 @@ public boolean isSet(_Fields field) { return isSetKeyValue(); case REPL_LAST_ID_INFO: return isSetReplLastIdInfo(); + case TXN_WRITE_IDS: + return isSetTxnWriteIds(); } throw new IllegalStateException(); } @@ -471,6 +518,15 @@ public boolean equals(CommitTxnRequest that) { return false; } + boolean this_present_txnWriteIds = true && this.isSetTxnWriteIds(); + boolean that_present_txnWriteIds = true && that.isSetTxnWriteIds(); + if (this_present_txnWriteIds || that_present_txnWriteIds) { + if (!(this_present_txnWriteIds && that_present_txnWriteIds)) + return false; + if (!this.txnWriteIds.equals(that.txnWriteIds)) + return false; + } + return true; } @@ -503,6 +559,11 @@ public int hashCode() { if (present_replLastIdInfo) list.add(replLastIdInfo); + boolean present_txnWriteIds = true && (isSetTxnWriteIds()); + list.add(present_txnWriteIds); + if (present_txnWriteIds) + list.add(txnWriteIds); + return list.hashCode(); } @@ -564,6 +625,16 @@ public int compareTo(CommitTxnRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetTxnWriteIds()).compareTo(other.isSetTxnWriteIds()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTxnWriteIds()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnWriteIds, other.txnWriteIds); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -627,6 +698,16 @@ public String toString() { } first = false; } + if (isSetTxnWriteIds()) { + if (!first) sb.append(", "); + sb.append("txnWriteIds:"); + if (this.txnWriteIds == null) { + sb.append("null"); + } else { + sb.append(this.txnWriteIds); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -735,6 +816,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, CommitTxnRequest st org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 6: // TXN_WRITE_IDS + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.txnWriteIds = iprot.readString(); + struct.setTxnWriteIdsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -786,6 +875,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, CommitTxnRequest s oprot.writeFieldEnd(); } } + if (struct.txnWriteIds != null) { + if (struct.isSetTxnWriteIds()) { + oprot.writeFieldBegin(TXN_WRITE_IDS_FIELD_DESC); + oprot.writeString(struct.txnWriteIds); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -817,7 +913,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, CommitTxnRequest st if (struct.isSetReplLastIdInfo()) { optionals.set(3); } - oprot.writeBitSet(optionals, 4); + if (struct.isSetTxnWriteIds()) { + optionals.set(4); + } + oprot.writeBitSet(optionals, 5); if (struct.isSetReplPolicy()) { oprot.writeString(struct.replPolicy); } @@ -836,6 +935,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, CommitTxnRequest st if (struct.isSetReplLastIdInfo()) { struct.replLastIdInfo.write(oprot); } + if (struct.isSetTxnWriteIds()) { + oprot.writeString(struct.txnWriteIds); + } } @Override @@ -843,7 +945,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, CommitTxnRequest str TTupleProtocol iprot = (TTupleProtocol) prot; struct.txnid 
= iprot.readI64(); struct.setTxnidIsSet(true); - BitSet incoming = iprot.readBitSet(4); + BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { struct.replPolicy = iprot.readString(); struct.setReplPolicyIsSet(true); @@ -872,6 +974,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, CommitTxnRequest str struct.replLastIdInfo.read(iprot); struct.setReplLastIdInfoIsSet(true); } + if (incoming.get(4)) { + struct.txnWriteIds = iprot.readString(); + struct.setTxnWriteIdsIsSet(true); + } } } diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java index e0431e5..1d0e690 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java @@ -86,7 +86,7 @@ public void create_table(Table tbl) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, org.apache.thrift.TException; - public void create_table_with_environment_context(Table tbl, EnvironmentContext environment_context) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, org.apache.thrift.TException; + public void create_table_with_environment_context(Table tbl, EnvironmentContext environment_context, String validWriteIdList) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, org.apache.thrift.TException; public void create_table_with_constraints(Table tbl, List primaryKeys, List foreignKeys, List uniqueConstraints, List notNullConstraints, List defaultConstraints, List checkConstraints) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, org.apache.thrift.TException; @@ -536,7 +536,7 @@ public void create_table(Table tbl, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - public void create_table_with_environment_context(Table tbl, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void create_table_with_environment_context(Table tbl, EnvironmentContext environment_context, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; public void create_table_with_constraints(Table tbl, List primaryKeys, List foreignKeys, List uniqueConstraints, List notNullConstraints, List defaultConstraints, List checkConstraints, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; @@ -1600,17 +1600,18 @@ public void recv_create_table() throws AlreadyExistsException, InvalidObjectExce return; } - public void create_table_with_environment_context(Table tbl, EnvironmentContext environment_context) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, org.apache.thrift.TException + public void create_table_with_environment_context(Table tbl, EnvironmentContext environment_context, String validWriteIdList) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, org.apache.thrift.TException { - send_create_table_with_environment_context(tbl, 
environment_context); + send_create_table_with_environment_context(tbl, environment_context, validWriteIdList); recv_create_table_with_environment_context(); } - public void send_create_table_with_environment_context(Table tbl, EnvironmentContext environment_context) throws org.apache.thrift.TException + public void send_create_table_with_environment_context(Table tbl, EnvironmentContext environment_context, String validWriteIdList) throws org.apache.thrift.TException { create_table_with_environment_context_args args = new create_table_with_environment_context_args(); args.setTbl(tbl); args.setEnvironment_context(environment_context); + args.setValidWriteIdList(validWriteIdList); sendBase("create_table_with_environment_context", args); } @@ -8069,9 +8070,9 @@ public void getResult() throws AlreadyExistsException, InvalidObjectException, M } } - public void create_table_with_environment_context(Table tbl, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void create_table_with_environment_context(Table tbl, EnvironmentContext environment_context, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - create_table_with_environment_context_call method_call = new create_table_with_environment_context_call(tbl, environment_context, resultHandler, this, ___protocolFactory, ___transport); + create_table_with_environment_context_call method_call = new create_table_with_environment_context_call(tbl, environment_context, validWriteIdList, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -8079,10 +8080,12 @@ public void create_table_with_environment_context(Table tbl, EnvironmentContext @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class create_table_with_environment_context_call extends org.apache.thrift.async.TAsyncMethodCall { private Table tbl; private EnvironmentContext environment_context; - public create_table_with_environment_context_call(Table tbl, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private String validWriteIdList; + public create_table_with_environment_context_call(Table tbl, EnvironmentContext environment_context, String validWriteIdList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.tbl = tbl; this.environment_context = environment_context; + this.validWriteIdList = validWriteIdList; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { @@ -8090,6 +8093,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa create_table_with_environment_context_args args = new create_table_with_environment_context_args(); args.setTbl(tbl); args.setEnvironment_context(environment_context); + args.setValidWriteIdList(validWriteIdList); 
args.write(prot); prot.writeMessageEnd(); } @@ -15816,7 +15820,7 @@ protected boolean isOneway() { public create_table_with_environment_context_result getResult(I iface, create_table_with_environment_context_args args) throws org.apache.thrift.TException { create_table_with_environment_context_result result = new create_table_with_environment_context_result(); try { - iface.create_table_with_environment_context(args.tbl, args.environment_context); + iface.create_table_with_environment_context(args.tbl, args.environment_context, args.validWriteIdList); } catch (AlreadyExistsException o1) { result.o1 = o1; } catch (InvalidObjectException o2) { @@ -22606,7 +22610,7 @@ protected boolean isOneway() { } public void start(I iface, create_table_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.create_table_with_environment_context(args.tbl, args.environment_context,resultHandler); + iface.create_table_with_environment_context(args.tbl, args.environment_context, args.validWriteIdList,resultHandler); } } @@ -56239,6 +56243,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, create_table_result private static final org.apache.thrift.protocol.TField TBL_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final org.apache.thrift.protocol.TField ENVIRONMENT_CONTEXT_FIELD_DESC = new org.apache.thrift.protocol.TField("environment_context", org.apache.thrift.protocol.TType.STRUCT, (short)2); + private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)3); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -56248,11 +56253,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, create_table_result private Table tbl; // required private EnvironmentContext environment_context; // required + private String validWriteIdList; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { TBL((short)1, "tbl"), - ENVIRONMENT_CONTEXT((short)2, "environment_context"); + ENVIRONMENT_CONTEXT((short)2, "environment_context"), + VALID_WRITE_ID_LIST((short)3, "validWriteIdList"); private static final Map byName = new HashMap(); @@ -56271,6 +56278,8 @@ public static _Fields findByThriftId(int fieldId) { return TBL; case 2: // ENVIRONMENT_CONTEXT return ENVIRONMENT_CONTEXT; + case 3: // VALID_WRITE_ID_LIST + return VALID_WRITE_ID_LIST; default: return null; } @@ -56318,6 +56327,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Table.class))); tmpMap.put(_Fields.ENVIRONMENT_CONTEXT, new org.apache.thrift.meta_data.FieldMetaData("environment_context", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, EnvironmentContext.class))); + tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(create_table_with_environment_context_args.class, metaDataMap); } @@ -56327,11 +56338,13 @@ public create_table_with_environment_context_args() { public create_table_with_environment_context_args( Table tbl, - EnvironmentContext environment_context) + EnvironmentContext environment_context, + String validWriteIdList) { this(); this.tbl = tbl; this.environment_context = environment_context; + this.validWriteIdList = validWriteIdList; } /** @@ -56344,6 +56357,9 @@ public create_table_with_environment_context_args(create_table_with_environment_ if (other.isSetEnvironment_context()) { this.environment_context = new EnvironmentContext(other.environment_context); } + if (other.isSetValidWriteIdList()) { + this.validWriteIdList = other.validWriteIdList; + } } public create_table_with_environment_context_args deepCopy() { @@ -56354,6 +56370,7 @@ public create_table_with_environment_context_args deepCopy() { public void clear() { this.tbl = null; this.environment_context = null; + this.validWriteIdList = null; } public Table getTbl() { @@ -56402,6 +56419,29 @@ public void setEnvironment_contextIsSet(boolean value) { } } + public String getValidWriteIdList() { + return this.validWriteIdList; + } + + public void setValidWriteIdList(String validWriteIdList) { + this.validWriteIdList = validWriteIdList; + } + + public void unsetValidWriteIdList() { + this.validWriteIdList = null; + } + + /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */ + public boolean isSetValidWriteIdList() { + return this.validWriteIdList != null; + } + + public void setValidWriteIdListIsSet(boolean value) { + if (!value) { + this.validWriteIdList = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case TBL: @@ -56420,6 +56460,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case VALID_WRITE_ID_LIST: + if (value == null) { + unsetValidWriteIdList(); + } else { + setValidWriteIdList((String)value); + } + break; + } } @@ -56431,6 +56479,9 @@ public Object getFieldValue(_Fields field) { case ENVIRONMENT_CONTEXT: return getEnvironment_context(); + case VALID_WRITE_ID_LIST: + return getValidWriteIdList(); + } 
throw new IllegalStateException(); } @@ -56446,6 +56497,8 @@ public boolean isSet(_Fields field) { return isSetTbl(); case ENVIRONMENT_CONTEXT: return isSetEnvironment_context(); + case VALID_WRITE_ID_LIST: + return isSetValidWriteIdList(); } throw new IllegalStateException(); } @@ -56481,6 +56534,15 @@ public boolean equals(create_table_with_environment_context_args that) { return false; } + boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList(); + boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList(); + if (this_present_validWriteIdList || that_present_validWriteIdList) { + if (!(this_present_validWriteIdList && that_present_validWriteIdList)) + return false; + if (!this.validWriteIdList.equals(that.validWriteIdList)) + return false; + } + return true; } @@ -56498,6 +56560,11 @@ public int hashCode() { if (present_environment_context) list.add(environment_context); + boolean present_validWriteIdList = true && (isSetValidWriteIdList()); + list.add(present_validWriteIdList); + if (present_validWriteIdList) + list.add(validWriteIdList); + return list.hashCode(); } @@ -56529,6 +56596,16 @@ public int compareTo(create_table_with_environment_context_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidWriteIdList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -56564,6 +56641,14 @@ public String toString() { sb.append(this.environment_context); } first = false; + if (!first) sb.append(", "); + sb.append("validWriteIdList:"); + if (this.validWriteIdList == null) { + sb.append("null"); + } else { + sb.append(this.validWriteIdList); + } + first = false; sb.append(")"); return sb.toString(); } @@ -56631,6 +56716,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_e org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 3: // VALID_WRITE_ID_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -56654,6 +56747,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ struct.environment_context.write(oprot); oprot.writeFieldEnd(); } + if (struct.validWriteIdList != null) { + oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC); + oprot.writeString(struct.validWriteIdList); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -56678,19 +56776,25 @@ public void write(org.apache.thrift.protocol.TProtocol prot, create_table_with_e if (struct.isSetEnvironment_context()) { optionals.set(1); } - oprot.writeBitSet(optionals, 2); + if (struct.isSetValidWriteIdList()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); if (struct.isSetTbl()) { struct.tbl.write(oprot); } if (struct.isSetEnvironment_context()) { struct.environment_context.write(oprot); } + if (struct.isSetValidWriteIdList()) { + oprot.writeString(struct.validWriteIdList); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, 
create_table_with_environment_context_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(2); + BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { struct.tbl = new Table(); struct.tbl.read(iprot); @@ -56701,6 +56805,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, create_table_with_en struct.environment_context.read(iprot); struct.setEnvironment_contextIsSet(true); } + if (incoming.get(2)) { + struct.validWriteIdList = iprot.readString(); + struct.setValidWriteIdListIsSet(true); + } } } diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php index 333a2d9..3d9fef6 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php @@ -183,12 +183,13 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf { /** * @param \metastore\Table $tbl * @param \metastore\EnvironmentContext $environment_context + * @param string $validWriteIdList * @throws \metastore\AlreadyExistsException * @throws \metastore\InvalidObjectException * @throws \metastore\MetaException * @throws \metastore\NoSuchObjectException */ - public function create_table_with_environment_context(\metastore\Table $tbl, \metastore\EnvironmentContext $environment_context); + public function create_table_with_environment_context(\metastore\Table $tbl, \metastore\EnvironmentContext $environment_context, $validWriteIdList); /** * @param \metastore\Table $tbl * @param \metastore\SQLPrimaryKey[] $primaryKeys @@ -2920,17 +2921,18 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas return; } - public function create_table_with_environment_context(\metastore\Table $tbl, \metastore\EnvironmentContext $environment_context) + public function create_table_with_environment_context(\metastore\Table $tbl, \metastore\EnvironmentContext $environment_context, $validWriteIdList) { - $this->send_create_table_with_environment_context($tbl, $environment_context); + $this->send_create_table_with_environment_context($tbl, $environment_context, $validWriteIdList); $this->recv_create_table_with_environment_context(); } - public function send_create_table_with_environment_context(\metastore\Table $tbl, \metastore\EnvironmentContext $environment_context) + public function send_create_table_with_environment_context(\metastore\Table $tbl, \metastore\EnvironmentContext $environment_context, $validWriteIdList) { $args = new \metastore\ThriftHiveMetastore_create_table_with_environment_context_args(); $args->tbl = $tbl; $args->environment_context = $environment_context; + $args->validWriteIdList = $validWriteIdList; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -19102,6 +19104,10 @@ class ThriftHiveMetastore_create_table_with_environment_context_args { * @var \metastore\EnvironmentContext */ public $environment_context = null; + /** + * @var string + */ + public $validWriteIdList = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -19116,6 +19122,10 @@ class ThriftHiveMetastore_create_table_with_environment_context_args { 'type' => TType::STRUCT, 'class' => '\metastore\EnvironmentContext', ), + 3 => array( + 'var' 
=> 'validWriteIdList', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -19125,6 +19135,9 @@ class ThriftHiveMetastore_create_table_with_environment_context_args { if (isset($vals['environment_context'])) { $this->environment_context = $vals['environment_context']; } + if (isset($vals['validWriteIdList'])) { + $this->validWriteIdList = $vals['validWriteIdList']; + } } } @@ -19163,6 +19176,13 @@ class ThriftHiveMetastore_create_table_with_environment_context_args { $xfer += $input->skip($ftype); } break; + case 3: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->validWriteIdList); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -19192,6 +19212,11 @@ class ThriftHiveMetastore_create_table_with_environment_context_args { $xfer += $this->environment_context->write($output); $xfer += $output->writeFieldEnd(); } + if ($this->validWriteIdList !== null) { + $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 3); + $xfer += $output->writeString($this->validWriteIdList); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php index 89af97f..5ebdb94 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php @@ -18475,6 +18475,10 @@ class CommitTxnRequest { * @var \metastore\ReplLastIdInfo */ public $replLastIdInfo = null; + /** + * @var string + */ + public $txnWriteIds = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -18506,6 +18510,10 @@ class CommitTxnRequest { 'type' => TType::STRUCT, 'class' => '\metastore\ReplLastIdInfo', ), + 6 => array( + 'var' => 'txnWriteIds', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -18524,6 +18532,9 @@ class CommitTxnRequest { if (isset($vals['replLastIdInfo'])) { $this->replLastIdInfo = $vals['replLastIdInfo']; } + if (isset($vals['txnWriteIds'])) { + $this->txnWriteIds = $vals['txnWriteIds']; + } } } @@ -18594,6 +18605,13 @@ class CommitTxnRequest { $xfer += $input->skip($ftype); } break; + case 6: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->txnWriteIds); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -18650,6 +18668,11 @@ class CommitTxnRequest { $xfer += $this->replLastIdInfo->write($output); $xfer += $output->writeFieldEnd(); } + if ($this->txnWriteIds !== null) { + $xfer += $output->writeFieldBegin('txnWriteIds', TType::STRING, 6); + $xfer += $output->writeString($this->txnWriteIds); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote index 9aeae9f..f6b5392 100755 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote @@ -46,7 +46,7 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help': print(' get_schema(string 
db_name, string table_name)') print(' get_schema_with_environment_context(string db_name, string table_name, EnvironmentContext environment_context)') print(' void create_table(Table tbl)') - print(' void create_table_with_environment_context(Table tbl, EnvironmentContext environment_context)') + print(' void create_table_with_environment_context(Table tbl, EnvironmentContext environment_context, string validWriteIdList)') print(' void create_table_with_constraints(Table tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints)') print(' void drop_constraint(DropConstraintRequest req)') print(' void add_primary_key(AddPrimaryKeyRequest req)') @@ -449,10 +449,10 @@ elif cmd == 'create_table': pp.pprint(client.create_table(eval(args[0]),)) elif cmd == 'create_table_with_environment_context': - if len(args) != 2: - print('create_table_with_environment_context requires 2 args') + if len(args) != 3: + print('create_table_with_environment_context requires 3 args') sys.exit(1) - pp.pprint(client.create_table_with_environment_context(eval(args[0]),eval(args[1]),)) + pp.pprint(client.create_table_with_environment_context(eval(args[0]),eval(args[1]),args[2],)) elif cmd == 'create_table_with_constraints': if len(args) != 7: diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py index eadf300..d473c63 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py @@ -179,11 +179,12 @@ def create_table(self, tbl): """ pass - def create_table_with_environment_context(self, tbl, environment_context): + def create_table_with_environment_context(self, tbl, environment_context, validWriteIdList): """ Parameters: - tbl - environment_context + - validWriteIdList """ pass @@ -2501,20 +2502,22 @@ def recv_create_table(self): raise result.o4 return - def create_table_with_environment_context(self, tbl, environment_context): + def create_table_with_environment_context(self, tbl, environment_context, validWriteIdList): """ Parameters: - tbl - environment_context + - validWriteIdList """ - self.send_create_table_with_environment_context(tbl, environment_context) + self.send_create_table_with_environment_context(tbl, environment_context, validWriteIdList) self.recv_create_table_with_environment_context() - def send_create_table_with_environment_context(self, tbl, environment_context): + def send_create_table_with_environment_context(self, tbl, environment_context, validWriteIdList): self._oprot.writeMessageBegin('create_table_with_environment_context', TMessageType.CALL, self._seqid) args = create_table_with_environment_context_args() args.tbl = tbl args.environment_context = environment_context + args.validWriteIdList = validWriteIdList args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -10466,7 +10469,7 @@ def process_create_table_with_environment_context(self, seqid, iprot, oprot): iprot.readMessageEnd() result = create_table_with_environment_context_result() try: - self._handler.create_table_with_environment_context(args.tbl, args.environment_context) + self._handler.create_table_with_environment_context(args.tbl, args.environment_context, args.validWriteIdList) msg_type = TMessageType.REPLY except 
(TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -18990,17 +18993,20 @@ class create_table_with_environment_context_args: Attributes: - tbl - environment_context + - validWriteIdList """ thrift_spec = ( None, # 0 (1, TType.STRUCT, 'tbl', (Table, Table.thrift_spec), None, ), # 1 (2, TType.STRUCT, 'environment_context', (EnvironmentContext, EnvironmentContext.thrift_spec), None, ), # 2 + (3, TType.STRING, 'validWriteIdList', None, None, ), # 3 ) - def __init__(self, tbl=None, environment_context=None,): + def __init__(self, tbl=None, environment_context=None, validWriteIdList=None,): self.tbl = tbl self.environment_context = environment_context + self.validWriteIdList = validWriteIdList def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -19023,6 +19029,11 @@ def read(self, iprot): self.environment_context.read(iprot) else: iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.validWriteIdList = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -19041,6 +19052,10 @@ def write(self, oprot): oprot.writeFieldBegin('environment_context', TType.STRUCT, 2) self.environment_context.write(oprot) oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin('validWriteIdList', TType.STRING, 3) + oprot.writeString(self.validWriteIdList) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -19052,6 +19067,7 @@ def __hash__(self): value = 17 value = (value * 31) ^ hash(self.tbl) value = (value * 31) ^ hash(self.environment_context) + value = (value * 31) ^ hash(self.validWriteIdList) return value def __repr__(self): diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py index 5f0a0a3..fc35326 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py @@ -12860,6 +12860,7 @@ class CommitTxnRequest: - writeEventInfos - keyValue - replLastIdInfo + - txnWriteIds """ thrift_spec = ( @@ -12869,14 +12870,16 @@ class CommitTxnRequest: (3, TType.LIST, 'writeEventInfos', (TType.STRUCT,(WriteEventInfo, WriteEventInfo.thrift_spec)), None, ), # 3 (4, TType.STRUCT, 'keyValue', (CommitTxnKeyValue, CommitTxnKeyValue.thrift_spec), None, ), # 4 (5, TType.STRUCT, 'replLastIdInfo', (ReplLastIdInfo, ReplLastIdInfo.thrift_spec), None, ), # 5 + (6, TType.STRING, 'txnWriteIds', None, None, ), # 6 ) - def __init__(self, txnid=None, replPolicy=None, writeEventInfos=None, keyValue=None, replLastIdInfo=None,): + def __init__(self, txnid=None, replPolicy=None, writeEventInfos=None, keyValue=None, replLastIdInfo=None, txnWriteIds=None,): self.txnid = txnid self.replPolicy = replPolicy self.writeEventInfos = writeEventInfos self.keyValue = keyValue self.replLastIdInfo = replLastIdInfo + self.txnWriteIds = txnWriteIds def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -12920,6 +12923,11 @@ def read(self, iprot): self.replLastIdInfo.read(iprot) else: iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.txnWriteIds = 
iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -12953,6 +12961,10 @@ def write(self, oprot): oprot.writeFieldBegin('replLastIdInfo', TType.STRUCT, 5) self.replLastIdInfo.write(oprot) oprot.writeFieldEnd() + if self.txnWriteIds is not None: + oprot.writeFieldBegin('txnWriteIds', TType.STRING, 6) + oprot.writeString(self.txnWriteIds) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -12969,6 +12981,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.writeEventInfos) value = (value * 31) ^ hash(self.keyValue) value = (value * 31) ^ hash(self.replLastIdInfo) + value = (value * 31) ^ hash(self.txnWriteIds) return value def __repr__(self): diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb index 8e4b990..9e51d89 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb @@ -2861,13 +2861,15 @@ class CommitTxnRequest WRITEEVENTINFOS = 3 KEYVALUE = 4 REPLLASTIDINFO = 5 + TXNWRITEIDS = 6 FIELDS = { TXNID => {:type => ::Thrift::Types::I64, :name => 'txnid'}, REPLPOLICY => {:type => ::Thrift::Types::STRING, :name => 'replPolicy', :optional => true}, WRITEEVENTINFOS => {:type => ::Thrift::Types::LIST, :name => 'writeEventInfos', :element => {:type => ::Thrift::Types::STRUCT, :class => ::WriteEventInfo}, :optional => true}, KEYVALUE => {:type => ::Thrift::Types::STRUCT, :name => 'keyValue', :class => ::CommitTxnKeyValue, :optional => true}, - REPLLASTIDINFO => {:type => ::Thrift::Types::STRUCT, :name => 'replLastIdInfo', :class => ::ReplLastIdInfo, :optional => true} + REPLLASTIDINFO => {:type => ::Thrift::Types::STRUCT, :name => 'replLastIdInfo', :class => ::ReplLastIdInfo, :optional => true}, + TXNWRITEIDS => {:type => ::Thrift::Types::STRING, :name => 'txnWriteIds', :optional => true} } def struct_fields; FIELDS; end diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb index 39c671a..1518f68 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb @@ -384,13 +384,13 @@ module ThriftHiveMetastore return end - def create_table_with_environment_context(tbl, environment_context) - send_create_table_with_environment_context(tbl, environment_context) + def create_table_with_environment_context(tbl, environment_context, validWriteIdList) + send_create_table_with_environment_context(tbl, environment_context, validWriteIdList) recv_create_table_with_environment_context() end - def send_create_table_with_environment_context(tbl, environment_context) - send_message('create_table_with_environment_context', Create_table_with_environment_context_args, :tbl => tbl, :environment_context => environment_context) + def send_create_table_with_environment_context(tbl, environment_context, validWriteIdList) + send_message('create_table_with_environment_context', Create_table_with_environment_context_args, :tbl => tbl, :environment_context => environment_context, :validWriteIdList => validWriteIdList) end def recv_create_table_with_environment_context() @@ -4020,7 +4020,7 @@ module ThriftHiveMetastore args = read_args(iprot, 
Create_table_with_environment_context_args) result = Create_table_with_environment_context_result.new() begin - @handler.create_table_with_environment_context(args.tbl, args.environment_context) + @handler.create_table_with_environment_context(args.tbl, args.environment_context, args.validWriteIdList) rescue ::AlreadyExistsException => o1 result.o1 = o1 rescue ::InvalidObjectException => o2 @@ -7303,10 +7303,12 @@ module ThriftHiveMetastore include ::Thrift::Struct, ::Thrift::Struct_Union TBL = 1 ENVIRONMENT_CONTEXT = 2 + VALIDWRITEIDLIST = 3 FIELDS = { TBL => {:type => ::Thrift::Types::STRUCT, :name => 'tbl', :class => ::Table}, - ENVIRONMENT_CONTEXT => {:type => ::Thrift::Types::STRUCT, :name => 'environment_context', :class => ::EnvironmentContext} + ENVIRONMENT_CONTEXT => {:type => ::Thrift::Types::STRUCT, :name => 'environment_context', :class => ::EnvironmentContext}, + VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList'} } def struct_fields; FIELDS; end diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java index 6357d06..e38ebb1 100644 --- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java +++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java @@ -59,6 +59,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidTxnWriteIdList; import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.metastore.api.*; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; @@ -991,12 +992,12 @@ public void createDatabase(Database db) * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_table(org.apache.hadoop.hive.metastore.api.Table) */ @Override - public void createTable(Table tbl) throws AlreadyExistsException, + public void createTable(Table tbl, String txnWriteIds) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, TException { - createTable(tbl, null); + createTable(tbl, null, txnWriteIds); } - public void createTable(Table tbl, EnvironmentContext envContext) throws AlreadyExistsException, + public void createTable(Table tbl, EnvironmentContext envContext, String txnWriteIds) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, TException { if (!tbl.isSetCatName()) { tbl.setCatName(getDefaultCatalog(conf)); @@ -1008,7 +1009,7 @@ public void createTable(Table tbl, EnvironmentContext envContext) throws Already boolean success = false; try { // Subclasses can override this step (for example, for temporary tables) - create_table_with_environment_context(tbl, envContext); + create_table_with_environment_context(tbl, envContext, txnWriteIds); if (hook != null) { hook.commitCreateTable(tbl); } @@ -2949,9 +2950,11 @@ public void replRollbackTxn(long srcTxnId, String replPolicy) throws NoSuchTxnEx } @Override - public void commitTxn(long txnid) + public void commitTxn(long txnid, String txnWriteIds) throws NoSuchTxnException, TxnAbortedException, TException { - client.commit_txn(new CommitTxnRequest(txnid)); + CommitTxnRequest rqst = new CommitTxnRequest(txnid); + rqst.setTxnWriteIds(txnWriteIds); + 
client.commit_txn(rqst); } @Override @@ -3345,10 +3348,10 @@ public GetAllFunctionsResponse getAllFunctions() throws TException { return client.get_all_functions(); } - protected void create_table_with_environment_context(Table tbl, EnvironmentContext envContext) + protected void create_table_with_environment_context(Table tbl, EnvironmentContext envContext, String validWriteIdList) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, TException { - client.create_table_with_environment_context(tbl, envContext); + client.create_table_with_environment_context(tbl, envContext, validWriteIdList); } protected void drop_table_with_environment_context(String catName, String dbname, String name, diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java index 7af9245..1346d03 100644 --- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java +++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java @@ -1647,7 +1647,7 @@ boolean isPartitionMarkedForEvent(String catName, String db_name, String tbl_nam * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_table(org.apache.hadoop.hive.metastore.api.Table) */ - void createTable(Table tbl) throws AlreadyExistsException, + void createTable(Table tbl, String validWriteIdList) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, TException; /** @@ -3009,7 +3009,7 @@ Function getFunction(String catName, String dbName, String funcName) * aborted. This can result from the transaction timing out. 
* @throws TException */ - void commitTxn(long txnid) + void commitTxn(long txnid, String writeIds) throws NoSuchTxnException, TxnAbortedException, TException; /** diff --git a/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift b/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift index 6b25b42..85bd3a8 100644 --- a/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift +++ b/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift @@ -966,6 +966,8 @@ struct CommitTxnRequest { // Information to update the last repl id of table/partition along with commit txn (replication from 2.6 to 3.0) 5: optional ReplLastIdInfo replLastIdInfo, + // snapshot of table writeIds of the transaction + 6: optional string txnWriteIds, } struct ReplTblWriteIdStateRequest { @@ -1901,7 +1903,7 @@ service ThriftHiveMetastore extends fb303.FacebookService // * See notes on DDL_TIME void create_table(1:Table tbl) throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3, 4:NoSuchObjectException o4) void create_table_with_environment_context(1:Table tbl, - 2:EnvironmentContext environment_context) + 2:EnvironmentContext environment_context, 3:string validWriteIdList) throws (1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3, 4:NoSuchObjectException o4) diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java index f1983c5..09c88ea 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java @@ -1884,17 +1884,17 @@ public boolean drop_type(final String name) throws MetaException, NoSuchObjectEx } private void create_table_core(final RawStore ms, final Table tbl, - final EnvironmentContext envContext) + final EnvironmentContext envContext, String validWriteIdList) throws AlreadyExistsException, MetaException, InvalidObjectException, NoSuchObjectException, InvalidInputException { - create_table_core(ms, tbl, envContext, null, null, null, null, null, null); + create_table_core(ms, tbl, envContext, null, null, null, null, null, null, validWriteIdList); } private void create_table_core(final RawStore ms, final Table tbl, final EnvironmentContext envContext, List primaryKeys, List foreignKeys, List uniqueConstraints, List notNullConstraints, List defaultConstraints, - List checkConstraints) + List checkConstraints, String validWriteIdListString) throws AlreadyExistsException, MetaException, InvalidObjectException, NoSuchObjectException, InvalidInputException { @@ -2003,7 +2003,7 @@ private void create_table_core(final RawStore ms, final Table tbl, if (primaryKeys == null && foreignKeys == null && uniqueConstraints == null && notNullConstraints == null && defaultConstraints == null && checkConstraints == null) { - ms.createTable(tbl); + ms.createTable(tbl, validWriteIdListString); } else { // Check that constraints have catalog name properly set first if (primaryKeys != null && !primaryKeys.isEmpty() && !primaryKeys.get(0).isSetCatName()) { @@ -2091,7 +2091,7 @@ private void create_table_core(final RawStore ms, final Table tbl, if (!transactionalListeners.isEmpty()) { transactionalListenerResponses = MetaStoreListenerNotifier.notifyEvent(transactionalListeners, 
- EventType.CREATE_TABLE, new CreateTableEvent(tbl, true, this), envContext); + EventType.CREATE_TABLE, new CreateTableEvent(tbl, validWriteIdListString, true, this), envContext); if (primaryKeys != null && !primaryKeys.isEmpty()) { MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventType.ADD_PRIMARYKEY, new AddPrimaryKeyEvent(primaryKeys, true, this), envContext); @@ -2121,7 +2121,7 @@ private void create_table_core(final RawStore ms, final Table tbl, if (!listeners.isEmpty()) { MetaStoreListenerNotifier.notifyEvent(listeners, EventType.CREATE_TABLE, - new CreateTableEvent(tbl, success, this), envContext, transactionalListenerResponses, ms); + new CreateTableEvent(tbl, validWriteIdListString, success, this), envContext, transactionalListenerResponses, ms); if (primaryKeys != null && !primaryKeys.isEmpty()) { MetaStoreListenerNotifier.notifyEvent(listeners, EventType.ADD_PRIMARYKEY, new AddPrimaryKeyEvent(primaryKeys, success, this), envContext); @@ -2164,19 +2164,20 @@ private void create_table_core(final RawStore ms, final Table tbl, @Override public void create_table(final Table tbl) throws AlreadyExistsException, MetaException, InvalidObjectException, InvalidInputException { - create_table_with_environment_context(tbl, null); + // TODO =====to be reworked in HIVE-21637====== + create_table_with_environment_context(tbl, null, null); } @Override public void create_table_with_environment_context(final Table tbl, - final EnvironmentContext envContext) + final EnvironmentContext envContext, String validWriteIdList) throws AlreadyExistsException, MetaException, InvalidObjectException, InvalidInputException { startFunction("create_table", ": " + tbl.toString()); boolean success = false; Exception ex = null; try { - create_table_core(getMS(), tbl, envContext); + create_table_core(getMS(), tbl, envContext, validWriteIdList); success = true; } catch (NoSuchObjectException e) { LOG.warn("create_table_with_environment_context got ", e); @@ -2206,8 +2207,9 @@ public void create_table_with_constraints(final Table tbl, boolean success = false; Exception ex = null; try { + // TODO =====to be reworked in HIVE-21637====== create_table_core(getMS(), tbl, null, primaryKeys, foreignKeys, - uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints); + uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints, null); success = true; } catch (NoSuchObjectException e) { ex = e; @@ -7728,7 +7730,7 @@ public void commit_txn(CommitTxnRequest rqst) throws TException { getTxnHandler().commitTxn(rqst); if (listeners != null && !listeners.isEmpty()) { MetaStoreListenerNotifier.notifyEvent(listeners, EventType.COMMIT_TXN, - new CommitTxnEvent(rqst.getTxnid(), this)); + new CommitTxnEvent(rqst.getTxnid(), rqst.getTxnWriteIds(), this)); } } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/Msck.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/Msck.java index c2ba3b0..8cc96de 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/Msck.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/Msck.java @@ -288,7 +288,8 @@ public int repair(MsckInfo msckInfo) { if (success) { try { LOG.info("txnId: {} succeeded. 
Committing..", txnId); - getMsc().commitTxn(txnId); + // TODO =====to be reworked in HIVE-21637====== + getMsc().commitTxn(txnId, null); } catch (Exception e) { LOG.warn("Error while committing txnId: {} for table: {}", txnId, qualifiedTableName, e); ret = 1; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java index 391915a..f4e3305 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java @@ -991,7 +991,8 @@ public boolean dropType(String typeName) { boolean success = false; try { openTransaction(); - createTable(tbl); + // TODO =====to be reworked in HIVE-21637====== + createTable(tbl, null); // Add constraints. // We need not do a deep retrieval of the Table Column Descriptor while persisting the // constraints since this transaction involving create table is not yet committed. @@ -1024,7 +1025,7 @@ public boolean dropType(String typeName) { } @Override - public void createTable(Table tbl) throws InvalidObjectException, MetaException { + public void createTable(Table tbl, String validWriteIdList) throws InvalidObjectException, MetaException { boolean commited = false; MTable mtbl = null; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java index 8c1ab73..b1a8e28 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java @@ -181,7 +181,7 @@ boolean alterDatabase(String catalogName, String dbname, Database db) boolean dropType(String typeName); - void createTable(Table tbl) throws InvalidObjectException, + void createTable(Table tbl, String validWriteIdList) throws InvalidObjectException, MetaException; /** diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java index 6ef9a19..f4973b9 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java @@ -42,6 +42,10 @@ import org.apache.hadoop.hive.common.DatabaseName; import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.common.TableName; +import org.apache.hadoop.hive.common.ValidReaderWriteIdList; +import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidTxnWriteIdList; +import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.metastore.Deadline; import org.apache.hadoop.hive.metastore.FileMetadataHandler; import org.apache.hadoop.hive.metastore.ObjectStore; @@ -63,6 +67,7 @@ import org.apache.hadoop.hive.metastore.messaging.CreateTableMessage; import org.apache.hadoop.hive.metastore.messaging.DropTableMessage; import org.apache.hadoop.hive.metastore.messaging.AlterTableMessage; +import org.apache.hadoop.hive.metastore.messaging.CommitTxnMessage; import 
org.apache.hadoop.hive.metastore.messaging.AddPartitionMessage; import org.apache.hadoop.hive.metastore.messaging.AlterPartitionMessage; import org.apache.hadoop.hive.metastore.messaging.DropPartitionMessage; @@ -74,6 +79,8 @@ import org.apache.hadoop.hive.metastore.messaging.MessageDeserializer; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; import org.apache.hadoop.hive.metastore.messaging.MessageFactory; +import org.apache.hadoop.hive.metastore.txn.TxnCommonUtils; +import org.apache.hadoop.hive.metastore.txn.TxnStore; import org.apache.hadoop.hive.metastore.txn.TxnUtils; import org.apache.hadoop.hive.metastore.utils.FileUtils; import org.apache.hadoop.hive.metastore.utils.JavaUtils; @@ -139,7 +146,7 @@ void setConfForTest(Configuration conf) { initSharedCache(conf); } - synchronized private static void triggerUpdateUsingEvent(RawStore rawStore) { + synchronized private static void triggerUpdateUsingEvent(RawStore rawStore, Configuration conf) { if (!isCachePrewarmed.get()) { LOG.error("cache update should be done only after prewarm"); throw new RuntimeException("cache update should be done only after prewarm"); @@ -147,7 +154,7 @@ synchronized private static void triggerUpdateUsingEvent(RawStore rawStore) { long startTime = System.nanoTime(); long preEventId = lastEventId; try { - lastEventId = updateUsingNotificationEvents(rawStore, lastEventId); + lastEventId = updateUsingNotificationEvents(rawStore, lastEventId, conf); } catch (Exception e) { LOG.error(" cache update failed for start event id " + lastEventId + " with error ", e); throw new RuntimeException(e.getMessage()); @@ -158,9 +165,9 @@ synchronized private static void triggerUpdateUsingEvent(RawStore rawStore) { } } - synchronized private static void triggerPreWarm(RawStore rawStore) { + synchronized private static void triggerPreWarm(RawStore rawStore, Configuration conf) { lastEventId = rawStore.getCurrentNotificationEventId().getEventId(); - prewarm(rawStore); + prewarm(rawStore, conf); } private void setConfInternal(Configuration conf) { @@ -245,14 +252,13 @@ static private void updateStatsForAlterTable(RawStore rawStore, Table tblBefore, } @VisibleForTesting - public static long updateUsingNotificationEvents(RawStore rawStore, long lastEventId) throws Exception { + public static long updateUsingNotificationEvents(RawStore rawStore, long lastEventId, Configuration conf) throws Exception { LOG.debug("updating cache using notification events starting from event id " + lastEventId); NotificationEventRequest rqst = new NotificationEventRequest(lastEventId); //Add the events which are not related to metadata update rqst.addToEventTypeSkipList(MessageBuilder.INSERT_EVENT); rqst.addToEventTypeSkipList(MessageBuilder.OPEN_TXN_EVENT); - rqst.addToEventTypeSkipList(MessageBuilder.COMMIT_TXN_EVENT); rqst.addToEventTypeSkipList(MessageBuilder.ABORT_TXN_EVENT); rqst.addToEventTypeSkipList(MessageBuilder.ALLOC_WRITE_ID_EVENT); rqst.addToEventTypeSkipList(MessageBuilder.ACID_WRITE_EVENT); @@ -270,6 +276,8 @@ public static long updateUsingNotificationEvents(RawStore rawStore, long lastEve rqst.addToEventTypeSkipList(MessageBuilder.ALTER_SCHEMA_VERSION_EVENT); rqst.addToEventTypeSkipList(MessageBuilder.DROP_SCHEMA_VERSION_EVENT); + String defaultCat = getDefaultCatalog(conf); + Deadline.startTimer("getNextNotification"); NotificationEventResponse resp = rawStore.getNextNotification(rqst); Deadline.stopTimer(); @@ -321,7 +329,7 @@ public static long updateUsingNotificationEvents(RawStore rawStore, long lastEve 
case MessageBuilder.CREATE_TABLE_EVENT: CreateTableMessage createTableMessage = deserializer.getCreateTableMessage(message); sharedCache.addTableToCache(catalogName, dbName, - tableName, createTableMessage.getTableObj()); + tableName, createTableMessage.getTableObj(), new ValidReaderWriteIdList(createTableMessage.getWriteIds()), false); break; case MessageBuilder.ALTER_TABLE_EVENT: AlterTableMessage alterTableMessage = deserializer.getAlterTableMessage(message); @@ -387,6 +395,16 @@ public static long updateUsingNotificationEvents(RawStore rawStore, long lastEve sharedCache.removePartitionColStatsFromCache(catalogName, dbName, tableName, msgPart.getPartValues(), msgPart.getColName()); break; + case MessageBuilder.COMMIT_TXN_EVENT: + CommitTxnMessage msgCommit = deserializer.getCommitTxnMessage(message); + String txnWriteIdsString = msgCommit.getTxnWriteIds(); + ValidTxnWriteIdList txnWriteIds = new ValidTxnWriteIdList(txnWriteIdsString); + for (String tblName : txnWriteIds.getTableNames()) { + ValidWriteIdList writeIds = txnWriteIds.getTableValidWriteIdList(tblName); + String[] names = writeIds.getTableName().split("\\."); + sharedCache.markTableCommitted(defaultCat, names[0], names[1], writeIds); + } + break; default: LOG.error("Event is not supported for cache invalidation : " + event.getEventType()); } @@ -399,13 +417,14 @@ public static long updateUsingNotificationEvents(RawStore rawStore, long lastEve * This initializes the caches in SharedCache by getting the objects from Metastore DB via * ObjectStore and populating the respective caches */ - static void prewarm(RawStore rawStore) { + static void prewarm(RawStore rawStore, Configuration conf) { if (isCachePrewarmed.get()) { return; } long startTime = System.nanoTime(); LOG.info("Prewarming CachedStore"); long sleepTime = 100; + TxnStore txn = TxnUtils.getTxnStore(conf); while (!isCachePrewarmed.get()) { // Prevents throwing exceptions in our raw store calls since we're not using RawStoreProxy Deadline.registerIfNot(1000000); @@ -476,12 +495,20 @@ static void prewarm(RawStore rawStore) { continue; } Table table; + ValidWriteIdList writeIds; try { + ValidTxnList currentTxnList = TxnCommonUtils.createValidReadTxnList(txn.getOpenTxns(), 0); + GetValidWriteIdsRequest rqst = new GetValidWriteIdsRequest(Arrays.asList(TableName.getDbTable(dbName, tblName))); + rqst.setValidTxnList(currentTxnList.toString()); + writeIds = TxnCommonUtils.createValidReaderWriteIdList(txn.getValidWriteIds(rqst).getTblValidWriteIds().get(0)); table = rawStore.getTable(catName, dbName, tblName); } catch (MetaException e) { // It is possible the table is deleted during fetching tables of the database, // in that case, continue with the next table continue; + } catch (NoSuchTxnException e) { + LOG.warn("Cannot find transaction", e); + continue; } List colNames = MetaStoreUtils.getColumnNamesForTable(table); try { @@ -536,7 +563,7 @@ static void prewarm(RawStore rawStore) { } // If the table could not cached due to memory limit, stop prewarm boolean isSuccess = sharedCache.populateTableInCache(table, tableColStats, partitions, - partitionColStats, aggrStatsAllPartitions, aggrStatsAllButDefaultPartition); + partitionColStats, aggrStatsAllPartitions, aggrStatsAllButDefaultPartition, writeIds, false); if (isSuccess) { LOG.trace("Cached Database: {}'s Table: {}.", dbName, tblName); } else { @@ -688,9 +715,11 @@ static void setCacheRefreshPeriod(long time) { static class CacheUpdateMasterWork implements Runnable { private boolean shouldRunPrewarm = true; private final 
RawStore rawStore; + private Configuration conf; CacheUpdateMasterWork(Configuration conf, boolean shouldRunPrewarm) { + this.conf = conf; this.shouldRunPrewarm = shouldRunPrewarm; String rawStoreClassName = MetastoreConf.getVar(conf, ConfVars.CACHED_RAW_STORE_IMPL, ObjectStore.class.getName()); @@ -709,7 +738,7 @@ public void run() { if (!shouldRunPrewarm) { if (canUseEvents) { try { - triggerUpdateUsingEvent(rawStore); + triggerUpdateUsingEvent(rawStore, conf); } catch (Exception e) { LOG.error("failed to update cache using events ", e); } @@ -723,7 +752,7 @@ public void run() { } } else { try { - triggerPreWarm(rawStore); + triggerPreWarm(rawStore, conf); shouldRunPrewarm = false; } catch (Exception e) { LOG.error("Prewarm failure", e); @@ -959,7 +988,7 @@ public boolean commitTransaction() { // consistency in case there is only one metastore. if (canUseEvents) { try { - triggerUpdateUsingEvent(rawStore); + triggerUpdateUsingEvent(rawStore, conf); } catch (Exception e) { //TODO : Not sure how to handle it as the commit is already done in the object store. LOG.error("Failed to update cache", e); @@ -1129,8 +1158,8 @@ private void validateTableType(Table tbl) { } @Override - public void createTable(Table tbl) throws InvalidObjectException, MetaException { - rawStore.createTable(tbl); + public void createTable(Table tbl, String validWriteIdList) throws InvalidObjectException, MetaException { + rawStore.createTable(tbl, validWriteIdList); // in case of event based cache update, cache will be updated during commit. if (canUseEvents) { return; @@ -1142,7 +1171,8 @@ public void createTable(Table tbl) throws InvalidObjectException, MetaException return; } validateTableType(tbl); - sharedCache.addTableToCache(catName, dbName, tblName, tbl); + ValidWriteIdList writeIds = validWriteIdList!=null?new ValidReaderWriteIdList(validWriteIdList):null; + sharedCache.addTableToCache(catName, dbName, tblName, tbl, writeIds, true); } @Override @@ -1175,7 +1205,8 @@ public Table getTable(String catName, String dbName, String tblName, String vali if (!shouldCacheTable(catName, dbName, tblName) || (canUseEvents && rawStore.isActiveTransaction())) { return rawStore.getTable(catName, dbName, tblName, validWriteIds); } - Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName); + ValidWriteIdList writeIds = validWriteIds!=null?new ValidReaderWriteIdList(validWriteIds):null; + Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName, writeIds); if (tbl == null) { // This table is not yet loaded in cache @@ -1289,7 +1320,8 @@ public Partition getPartition(String catName, String dbName, String tblName, catName, dbName, tblName, part_vals, validWriteIds); } if (validWriteIds != null) { - Table table = sharedCache.getTableFromCache(catName, dbName, tblName); + // TODO =====to be reworked in HIVE-21637====== + Table table = sharedCache.getTableFromCache(catName, dbName, tblName, null); if (table == null) { // The table containing the partition is not yet loaded in cache return rawStore.getPartition( @@ -1312,7 +1344,8 @@ public boolean doesPartitionExist(String catName, String dbName, String tblName, if (!shouldCacheTable(catName, dbName, tblName) || (canUseEvents && rawStore.isActiveTransaction())) { return rawStore.doesPartitionExist(catName, dbName, tblName, partKeys, part_vals); } - Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName); + // TODO =====to be reworked in HIVE-21637====== + Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName, null); if (tbl == null) { 
// The table containing the partition is not yet loaded in cache return rawStore.doesPartitionExist(catName, dbName, tblName, partKeys, part_vals); @@ -1367,7 +1400,8 @@ public void dropPartitions(String catName, String dbName, String tblName, List tables = new ArrayList<>(); for (String tblName : tblNames) { tblName = normalizeIdentifier(tblName); - Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName); + // TODO =====to be reworked in HIVE-21637====== + Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName, null); if (tbl == null) { tbl = rawStore.getTable(catName, dbName, tblName); } @@ -1526,7 +1563,8 @@ public void updateCreationMetadata(String catName, String dbname, String tablena if (!shouldCacheTable(catName, dbName, tblName) || (canUseEvents && rawStore.isActiveTransaction())) { return rawStore.listPartitionNames(catName, dbName, tblName, max_parts); } - Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName); + // TODO =====to be reworked in HIVE-21637====== + Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName, null); if (tbl == null) { // The table is not yet loaded in cache return rawStore.listPartitionNames(catName, dbName, tblName, max_parts); @@ -1633,7 +1671,8 @@ public boolean getPartitionsByExpr(String catName, String dbName, String tblName return rawStore.getPartitionsByExpr(catName, dbName, tblName, expr, defaultPartitionName, maxParts, result); } List partNames = new LinkedList<>(); - Table table = sharedCache.getTableFromCache(catName, dbName, tblName); + // TODO =====to be reworked in HIVE-21637====== + Table table = sharedCache.getTableFromCache(catName, dbName, tblName, null); if (table == null) { // The table is not yet loaded in cache return rawStore.getPartitionsByExpr(catName, dbName, tblName, expr, defaultPartitionName, maxParts, result); @@ -1665,7 +1704,8 @@ public int getNumPartitionsByExpr(String catName, String dbName, String tblName, } String defaultPartName = MetastoreConf.getVar(getConf(), ConfVars.DEFAULTPARTITIONNAME); List partNames = new LinkedList<>(); - Table table = sharedCache.getTableFromCache(catName, dbName, tblName); + // TODO =====to be reworked in HIVE-21637====== + Table table = sharedCache.getTableFromCache(catName, dbName, tblName, null); if (table == null) { // The table is not yet loaded in cache return rawStore.getNumPartitionsByExpr(catName, dbName, tblName, expr); @@ -1697,7 +1737,8 @@ public int getNumPartitionsByExpr(String catName, String dbName, String tblName, if (!shouldCacheTable(catName, dbName, tblName) || (canUseEvents && rawStore.isActiveTransaction())) { return rawStore.getPartitionsByNames(catName, dbName, tblName, partNames); } - Table table = sharedCache.getTableFromCache(catName, dbName, tblName); + // TODO =====to be reworked in HIVE-21637====== + Table table = sharedCache.getTableFromCache(catName, dbName, tblName, null); if (table == null) { // The table is not yet loaded in cache return rawStore.getPartitionsByNames(catName, dbName, tblName, partNames); @@ -1884,7 +1925,8 @@ public Partition getPartitionWithAuth(String catName, String dbName, String tblN if (!shouldCacheTable(catName, dbName, tblName) || (canUseEvents && rawStore.isActiveTransaction())) { return rawStore.getPartitionWithAuth(catName, dbName, tblName, partVals, userName, groupNames); } - Table table = sharedCache.getTableFromCache(catName, dbName, tblName); + // TODO =====to be reworked in HIVE-21637====== + Table table = sharedCache.getTableFromCache(catName, dbName, tblName, null); if 
(table == null) { // The table is not yet loaded in cache return rawStore.getPartitionWithAuth(catName, dbName, tblName, partVals, userName, groupNames); @@ -1911,7 +1953,8 @@ public Partition getPartitionWithAuth(String catName, String dbName, String tblN if (!shouldCacheTable(catName, dbName, tblName) || (canUseEvents && rawStore.isActiveTransaction())) { return rawStore.getPartitionsWithAuth(catName, dbName, tblName, maxParts, userName, groupNames); } - Table table = sharedCache.getTableFromCache(catName, dbName, tblName); + // TODO =====to be reworked in HIVE-21637====== + Table table = sharedCache.getTableFromCache(catName, dbName, tblName, null); if (table == null) { // The table is not yet loaded in cache return rawStore.getPartitionsWithAuth(catName, dbName, tblName, maxParts, userName, groupNames); @@ -1940,7 +1983,8 @@ public Partition getPartitionWithAuth(String catName, String dbName, String tblN if (!shouldCacheTable(catName, dbName, tblName) || (canUseEvents && rawStore.isActiveTransaction())) { return rawStore.listPartitionNamesPs(catName, dbName, tblName, partSpecs, maxParts); } - Table table = sharedCache.getTableFromCache(catName, dbName, tblName); + // TODO =====to be reworked in HIVE-21637====== + Table table = sharedCache.getTableFromCache(catName, dbName, tblName, null); if (table == null) { // The table is not yet loaded in cache return rawStore.listPartitionNamesPs(catName, dbName, tblName, partSpecs, maxParts); @@ -1969,7 +2013,8 @@ public Partition getPartitionWithAuth(String catName, String dbName, String tblN if (!shouldCacheTable(catName, dbName, tblName) || (canUseEvents && rawStore.isActiveTransaction())) { return rawStore.listPartitionsPsWithAuth(catName, dbName, tblName, partSpecs, maxParts, userName, groupNames); } - Table table = sharedCache.getTableFromCache(catName, dbName, tblName); + // TODO =====to be reworked in HIVE-21637====== + Table table = sharedCache.getTableFromCache(catName, dbName, tblName, null); if (table == null) { // The table is not yet loaded in cache return rawStore.listPartitionsPsWithAuth(catName, dbName, tblName, partSpecs, maxParts, userName, groupNames); @@ -2058,7 +2103,8 @@ private static void updateTableColumnsStatsInternal(Configuration conf, ColumnSt if (!shouldCacheTable(catName, dbName, tblName)) { return; } - Table table = sharedCache.getTableFromCache(catName, dbName, tblName); + // TODO =====to be reworked in HIVE-21637====== + Table table = sharedCache.getTableFromCache(catName, dbName, tblName, null); if (table == null) { // The table is not yet loaded in cache return; @@ -2121,7 +2167,8 @@ public ColumnStatistics getTableColumnStatistics( return rawStore.getTableColumnStatistics( catName, dbName, tblName, colNames, validWriteIds); } - Table table = sharedCache.getTableFromCache(catName, dbName, tblName); + // TODO =====to be reworked in HIVE-21637====== + Table table = sharedCache.getTableFromCache(catName, dbName, tblName, null); if (table == null) { // The table is not yet loaded in cache return rawStore.getTableColumnStatistics( @@ -2243,7 +2290,8 @@ public AggrStats get_aggr_stats_for(String catName, String dbName, String tblNam return rawStore.get_aggr_stats_for( catName, dbName, tblName, partNames, colNames, writeIdList); } - Table table = sharedCache.getTableFromCache(catName, dbName, tblName); + // TODO =====to be reworked in HIVE-21637====== + Table table = sharedCache.getTableFromCache(catName, dbName, tblName, null); if (table == null) { // The table is not yet loaded in cache return 
rawStore.get_aggr_stats_for( @@ -2685,9 +2733,10 @@ public int getDatabaseCount() throws MetaException { if (!shouldCacheTable(catName, dbName, tblName)) { return constraintNames; } + // TODO =====to be reworked in HIVE-21637====== sharedCache.addTableToCache(StringUtils.normalizeIdentifier(tbl.getCatName()), StringUtils.normalizeIdentifier(tbl.getDbName()), - StringUtils.normalizeIdentifier(tbl.getTableName()), tbl); + StringUtils.normalizeIdentifier(tbl.getTableName()), tbl, null, true); return constraintNames; } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java index 60862d4..e60f2ed 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java @@ -34,6 +34,7 @@ import java.util.TreeMap; import org.apache.hadoop.hive.common.ValidReaderWriteIdList; +import org.apache.hadoop.hive.common.ValidTxnWriteIdList; import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.Warehouse; @@ -57,6 +58,7 @@ import org.apache.hadoop.hive.metastore.utils.StringUtils; import org.apache.hadoop.hive.ql.util.IncrementalObjectSizeEstimator; import org.apache.hadoop.hive.ql.util.IncrementalObjectSizeEstimator.ObjectEstimator; +import org.apache.hive.common.util.TxnIdUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -158,12 +160,25 @@ private static ObjectEstimator getMemorySizeEstimator(Class clazz) { private Map> aggrColStatsCache = new ConcurrentHashMap>(); private AtomicBoolean isAggrPartitionColStatsCacheDirty = new AtomicBoolean(false); + private ValidWriteIdList txnWriteIds; + private boolean committed; - TableWrapper(Table t, byte[] sdHash, String location, Map parameters) { + TableWrapper(Table t, byte[] sdHash, String location, Map parameters, ValidWriteIdList txnWriteIds) { this.t = t; this.sdHash = sdHash; this.location = location; this.parameters = parameters; + this.txnWriteIds = txnWriteIds; + this.committed = true; + } + + TableWrapper(Table t, byte[] sdHash, String location, Map parameters, ValidWriteIdList txnWriteIds, boolean committed) { + this.t = t; + this.sdHash = sdHash; + this.location = location; + this.parameters = parameters; + this.txnWriteIds = txnWriteIds; + this.committed = committed; } public Table getTable() { @@ -866,6 +881,18 @@ private PartitionWrapper makePartitionWrapper(Partition part, SharedCache shared } return wrapper; } + + ValidWriteIdList getWriteIds() { + return txnWriteIds; + } + + boolean isCommitted() { + return committed; + } + + void setCommitted(boolean committed) { + this.committed = committed; + } } static class PartitionWrapper { @@ -1172,7 +1199,8 @@ public int getCachedDatabaseCount() { public boolean populateTableInCache(Table table, ColumnStatistics tableColStats, List partitions, List partitionColStats, - AggrStats aggrStatsAllPartitions, AggrStats aggrStatsAllButDefaultPartition) { + AggrStats aggrStatsAllPartitions, AggrStats aggrStatsAllButDefaultPartition, + ValidWriteIdList writeIds, boolean committed) { String catName = StringUtils.normalizeIdentifier(table.getCatName()); String dbName = StringUtils.normalizeIdentifier(table.getDbName()); String tableName = 
StringUtils.normalizeIdentifier(table.getTableName()); @@ -1181,7 +1209,7 @@ public boolean populateTableInCache(Table table, ColumnStatistics tableColStats, if (tablesDeletedDuringPrewarm.contains(CacheUtils.buildTableKey(catName, dbName, tableName))) { return false; } - TableWrapper tblWrapper = createTableWrapper(catName, dbName, tableName, table); + TableWrapper tblWrapper = createTableWrapper(catName, dbName, tableName, table, writeIds, committed); if (maxCacheSizeInBytes > 0) { ObjectEstimator tblWrapperSizeEstimator = getMemorySizeEstimator(TableWrapper.class); long estimatedMemUsage = tblWrapperSizeEstimator.estimate(tblWrapper, sizeEstimators); @@ -1252,13 +1280,20 @@ public void completeTableCachePrewarm() { } } - public Table getTableFromCache(String catName, String dbName, String tableName) { + public Table getTableFromCache(String catName, String dbName, String tableName, ValidWriteIdList writeIds) { Table t = null; try { cacheLock.readLock().lock(); TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tableName)); if (tblWrapper != null) { + if (writeIds!=null) { + // If the request writeIds is newer than the cached version + if (tblWrapper.getWriteIds()==null || !tblWrapper.isCommitted() || + tblWrapper.getWriteIds()!=null && TxnIdUtils.compare(writeIds, tblWrapper.getWriteIds()) > 0) { + return null; + } + } t = CacheUtils.assemble(tblWrapper, this); } } finally { @@ -1267,20 +1302,37 @@ public Table getTableFromCache(String catName, String dbName, String tableName) return t; } - public TableWrapper addTableToCache(String catName, String dbName, String tblName, Table tbl) { + public void addTableToCache(String catName, String dbName, String tblName, Table tbl, ValidWriteIdList writeIds, boolean committed) { try { cacheLock.writeLock().lock(); - TableWrapper wrapper = createTableWrapper(catName, dbName, tblName, tbl); - tableCache.put(CacheUtils.buildTableKey(catName, dbName, tblName), wrapper); + String key = CacheUtils.buildTableKey(catName, dbName, tblName); + if (writeIds != null) { + TableWrapper wrapper = tableCache.get(key); + // skip if cached writeId is newer + if (wrapper!=null && wrapper.getWriteIds()!=null && TxnIdUtils.compare(writeIds, wrapper.getWriteIds()) < 0) { + return; + } + } + TableWrapper wrapper = createTableWrapper(catName, dbName, tblName, tbl, writeIds, committed); + tableCache.put(key, wrapper); isTableCacheDirty.set(true); - return wrapper; } finally { cacheLock.writeLock().unlock(); } } + public void markTableCommitted(String catName, String dbName, String tblName, ValidWriteIdList writeIds) { + String key = CacheUtils.buildTableKey(catName, dbName, tblName); + if (tableCache.containsKey(key)) { + TableWrapper wrapper = tableCache.get(key); + if (writeIds!=null && wrapper.getWriteIds()!=null && TxnIdUtils.compare(writeIds, wrapper.getWriteIds())==0) { + wrapper.setCommitted(true); + } + } + } + private TableWrapper createTableWrapper(String catName, String dbName, String tblName, - Table tbl) { + Table tbl, ValidWriteIdList txnWriteIds, boolean committed) { TableWrapper wrapper; Table tblCopy = tbl.deepCopy(); tblCopy.setCatName(normalizeIdentifier(catName)); @@ -1296,9 +1348,9 @@ private TableWrapper createTableWrapper(String catName, String dbName, String tb StorageDescriptor sd = tbl.getSd(); increSd(sd, sdHash); tblCopy.setSd(null); - wrapper = new TableWrapper(tblCopy, sdHash, sd.getLocation(), sd.getParameters()); + wrapper = new TableWrapper(tblCopy, sdHash, sd.getLocation(), sd.getParameters(), txnWriteIds, 
committed); } else { - wrapper = new TableWrapper(tblCopy, null, null, null); + wrapper = new TableWrapper(tblCopy, null, null, null, txnWriteIds, committed); } return wrapper; } @@ -1451,7 +1503,8 @@ public void refreshTablesInCache(String catName, String dbName, List tabl if (tblWrapper != null) { tblWrapper.updateTableObj(tbl, this); } else { - tblWrapper = createTableWrapper(catName, dbName, tblName, tbl); + // TODO =====to be reworked in HIVE-21637====== + tblWrapper = createTableWrapper(catName, dbName, tblName, tbl, null, true); } newCacheForDB.put(CacheUtils.buildTableKey(catName, dbName, tblName), tblWrapper); } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/TableBuilder.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/TableBuilder.java index fed3dda..df94a70 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/TableBuilder.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/TableBuilder.java @@ -19,6 +19,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidTxnWriteIdList; import org.apache.hadoop.hive.metastore.IMetaStoreClient; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.Warehouse; @@ -215,9 +216,9 @@ public Table build(Configuration conf) throws MetaException { return t; } - public Table create(IMetaStoreClient client, Configuration conf) throws TException { + public Table create(IMetaStoreClient client, Configuration conf, String txnWriteIds) throws TException { Table t = build(conf); - client.createTable(t); + client.createTable(t, txnWriteIds); return t; } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/CommitTxnEvent.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/CommitTxnEvent.java index ba382cd..291801e 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/CommitTxnEvent.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/CommitTxnEvent.java @@ -31,15 +31,17 @@ public class CommitTxnEvent extends ListenerEvent { private final Long txnId; + private final String txnWriteIds; /** * * @param transactionId Unique identification for the transaction just got committed. 
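For the SharedCache changes earlier in this diff, the following is a minimal sketch (not part of the patch) of how the new write-id-aware entry points are meant to interact: addTableToCache(catName, dbName, tblName, tbl, writeIds, committed), markTableCommitted(...) and the four-argument getTableFromCache(...), whose null return is what makes CachedStore fall back to the raw store. Only those signatures are taken from the hunks above; the catalog, database, table and write-id values are hypothetical.

    import org.apache.hadoop.hive.common.ValidReaderWriteIdList;
    import org.apache.hadoop.hive.common.ValidWriteIdList;
    import org.apache.hadoop.hive.metastore.api.Table;
    import org.apache.hadoop.hive.metastore.cache.SharedCache;

    class SharedCacheWriteIdSketch {
      static Table cacheAndRead(SharedCache sharedCache, Table tbl) {
        // Hypothetical write-id list for db1.tbl1: high watermark 5, nothing open or aborted.
        ValidWriteIdList writeIds =
            new ValidReaderWriteIdList("db1.tbl1", new long[0], new java.util.BitSet(), 5);

        // Cache the table together with the write ids it was read under; committed=false
        // keeps the entry provisional until the surrounding transaction commits.
        sharedCache.addTableToCache("hive", "db1", "tbl1", tbl, writeIds, false);

        // While uncommitted, a write-id-aware read misses (returns null), so the caller
        // falls back to the raw store instead of serving possibly stale data.
        Table shouldBeNull = sharedCache.getTableFromCache("hive", "db1", "tbl1", writeIds);
        assert shouldBeNull == null;

        // After commit, the entry is marked committed and the same read is served from cache.
        sharedCache.markTableCommitted("hive", "db1", "tbl1", writeIds);
        return sharedCache.getTableFromCache("hive", "db1", "tbl1", writeIds);
      }
    }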
* @param handler handler that is firing the event */ - public CommitTxnEvent(Long transactionId, IHMSHandler handler) { + public CommitTxnEvent(Long transactionId, String txnWriteIds, IHMSHandler handler) { super(true, handler); this.txnId = transactionId; + this.txnWriteIds = txnWriteIds; } /** @@ -48,4 +50,8 @@ public CommitTxnEvent(Long transactionId, IHMSHandler handler) { public Long getTxnId() { return txnId; } + + public String getTxnWriteIds() { + return txnWriteIds; + } } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/CreateTableEvent.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/CreateTableEvent.java index 4f5e887..2febb64 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/CreateTableEvent.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/CreateTableEvent.java @@ -28,10 +28,12 @@ public class CreateTableEvent extends ListenerEvent { private final Table table; + private final String writeIds; - public CreateTableEvent (Table table, boolean status, IHMSHandler handler) { + public CreateTableEvent (Table table, String writeIds, boolean status, IHMSHandler handler) { super (status, handler); this.table = table; + this.writeIds = writeIds; } /** @@ -40,4 +42,11 @@ public CreateTableEvent (Table table, boolean status, IHMSHandler handler) { public Table getTable () { return table; } + + /*** + * @return writeIds as string + */ + public String getValidWriteIdList() { + return writeIds; + } } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/CommitTxnMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/CommitTxnMessage.java index 9733039..af00b82 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/CommitTxnMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/CommitTxnMessage.java @@ -39,6 +39,8 @@ protected CommitTxnMessage() { */ public abstract Long getTxnId(); + public abstract String getTxnWriteIds(); + public abstract List getWriteIds(); public abstract List getDatabases(); diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/CreateTableMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/CreateTableMessage.java index 49732ff..b19af92 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/CreateTableMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/CreateTableMessage.java @@ -37,6 +37,8 @@ protected CreateTableMessage() { public abstract Table getTableObj() throws Exception; + public abstract String getWriteIds(); + /** * Get list of files created as a result of this DML operation * diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/MessageBuilder.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/MessageBuilder.java index aa83da4..b93bac1 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/MessageBuilder.java +++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/MessageBuilder.java @@ -178,8 +178,8 @@ public DropDatabaseMessage buildDropDatabaseMessage(Database db) { return new JSONDropDatabaseMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, db, now()); } - public CreateTableMessage buildCreateTableMessage(Table table, Iterator fileIter) { - return new JSONCreateTableMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, table, fileIter, now()); + public CreateTableMessage buildCreateTableMessage(Table table, Iterator fileIter, String writeIds) { + return new JSONCreateTableMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, table, fileIter, now(), writeIds); } public AlterTableMessage buildAlterTableMessage(Table before, Table after, boolean isTruncateOp, @@ -266,8 +266,8 @@ public OpenTxnMessage buildOpenTxnMessage(Long fromTxnId, Long toTxnId) { return new JSONOpenTxnMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, fromTxnId, toTxnId, now()); } - public CommitTxnMessage buildCommitTxnMessage(Long txnId) { - return new JSONCommitTxnMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, txnId, now()); + public CommitTxnMessage buildCommitTxnMessage(Long txnId, String txnWriteIds) { + return new JSONCommitTxnMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, txnId, txnWriteIds, now()); } public AbortTxnMessage buildAbortTxnMessage(Long txnId) { diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCommitTxnMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCommitTxnMessage.java index 482fc8e..ceea78d 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCommitTxnMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCommitTxnMessage.java @@ -50,6 +50,9 @@ private List writeIds; @JsonProperty + private String txnWriteIds; + + @JsonProperty private List databases, tables, partitions, tableObjs, partitionObjs, files; /** @@ -58,9 +61,10 @@ public JSONCommitTxnMessage() { } - public JSONCommitTxnMessage(String server, String servicePrincipal, Long txnid, Long timestamp) { + public JSONCommitTxnMessage(String server, String servicePrincipal, Long txnid, String txnWriteIds, Long timestamp) { this.timestamp = timestamp; this.txnid = txnid; + this.txnWriteIds = txnWriteIds; this.server = server; this.servicePrincipal = servicePrincipal; this.databases = null; @@ -78,6 +82,11 @@ public Long getTxnId() { } @Override + public String getTxnWriteIds() { + return txnWriteIds; + } + + @Override public Long getTimestamp() { return timestamp; } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCreateTableMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCreateTableMessage.java index 145ee4b..7b2e930 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCreateTableMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCreateTableMessage.java @@ -41,6 +41,8 @@ Long timestamp; @JsonProperty List files; + @JsonProperty + String writeIds; /** * Default constructor, needed for Jackson. 
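A minimal sketch (not part of the patch) of how the write-id strings added to these events and messages travel: the event carries the client-supplied ValidWriteIdList string, MessageBuilder copies it into the JSON message, and a consumer reads it back through the new getters. Only the accessors and builder signatures from the hunks above are assumed; the surrounding method and the logging are illustrative.

    import org.apache.hadoop.hive.metastore.events.CommitTxnEvent;
    import org.apache.hadoop.hive.metastore.events.CreateTableEvent;
    import org.apache.hadoop.hive.metastore.messaging.CommitTxnMessage;
    import org.apache.hadoop.hive.metastore.messaging.CreateTableMessage;
    import org.apache.hadoop.hive.metastore.messaging.MessageBuilder;

    class WriteIdMessageSketch {
      static void buildMessages(CreateTableEvent createEvent, CommitTxnEvent commitEvent) {
        // CreateTableEvent.getValidWriteIdList() is the new accessor; a null file iterator
        // keeps the sketch independent of the table's storage location.
        CreateTableMessage createMsg = MessageBuilder.getInstance()
            .buildCreateTableMessage(createEvent.getTable(), null, createEvent.getValidWriteIdList());

        // CommitTxnEvent now exposes the committed transaction's write ids as a string.
        CommitTxnMessage commitMsg = MessageBuilder.getInstance()
            .buildCommitTxnMessage(commitEvent.getTxnId(), commitEvent.getTxnWriteIds());

        // Both JSON messages round-trip the write-id payload via the new getters.
        System.out.println(createMsg.getWriteIds() + " / " + commitMsg.getTxnWriteIds());
      }
    }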
@@ -49,25 +51,26 @@ public JSONCreateTableMessage() { } public JSONCreateTableMessage(String server, String servicePrincipal, String db, String table, - String tableType, Long timestamp) { + String tableType, Long timestamp, String writeIds) { this.server = server; this.servicePrincipal = servicePrincipal; this.db = db; this.table = table; this.tableType = tableType; this.timestamp = timestamp; + this.writeIds = writeIds; checkValid(); } public JSONCreateTableMessage(String server, String servicePrincipal, String db, String table, - Long timestamp) { - this(server, servicePrincipal, db, table, null, timestamp); + Long timestamp, String writeIds) { + this(server, servicePrincipal, db, table, null, timestamp, writeIds); } public JSONCreateTableMessage(String server, String servicePrincipal, Table tableObj, - Iterator fileIter, Long timestamp) { + Iterator fileIter, Long timestamp, String writeIds) { this(server, servicePrincipal, tableObj.getDbName(), tableObj.getTableName(), - tableObj.getTableType(), timestamp); + tableObj.getTableType(), timestamp, writeIds); try { this.tableObjJson = MessageBuilder.createTableObjJson(tableObj); } catch (TException e) { @@ -115,6 +118,11 @@ public Table getTableObj() throws Exception { return (Table) MessageBuilder.getTObj(tableObjJson,Table.class); } + @Override + public String getWriteIds() { + return writeIds; + } + public String getTableObjJson() { return tableObjJson; } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/SmokeTest.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/SmokeTest.java index 9cdf271..d20ad16 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/SmokeTest.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/SmokeTest.java @@ -68,13 +68,14 @@ private void runTest(IMetaStoreClient client) throws TException { .create(client, conf); LOG.info("Going to create table " + tableName); + // TODO =====to be reworked in HIVE-21637====== Table table = new TableBuilder() .inDb(db) .setTableName(tableName) .addCol("col1", ColumnType.INT_TYPE_NAME) .addCol("col2", ColumnType.TIMESTAMP_TYPE_NAME) .addPartCol("pcol1", ColumnType.STRING_TYPE_NAME) - .create(client, conf); + .create(client, conf, null); LOG.info("Going to create partition with value " + partValue); Partition part = new PartitionBuilder() diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java index fd85af9..4032889 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java @@ -1320,7 +1320,7 @@ public void commitTxn(CommitTxnRequest rqst) if (transactionalListeners != null) { MetaStoreListenerNotifier.notifyEventWithDirectSql(transactionalListeners, - EventMessage.EventType.COMMIT_TXN, new CommitTxnEvent(txnid, null), dbConn, sqlGenerator); + EventMessage.EventType.COMMIT_TXN, new CommitTxnEvent(txnid, rqst.getTxnWriteIds(), null), dbConn, sqlGenerator); } LOG.debug("Going to commit"); diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java 
b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java index c13e538..048cebb 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java @@ -245,8 +245,8 @@ public boolean dropType(String typeName) { } @Override - public void createTable(Table tbl) throws InvalidObjectException, MetaException { - objectStore.createTable(tbl); + public void createTable(Table tbl, String validWriteIdList) throws InvalidObjectException, MetaException { + objectStore.createTable(tbl, validWriteIdList); } @Override diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java index e943f17..21e0a0a 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java @@ -228,7 +228,7 @@ public boolean dropType(String typeName) { } @Override - public void createTable(Table tbl) throws InvalidObjectException, MetaException { + public void createTable(Table tbl, String validWriteIdList) throws InvalidObjectException, MetaException { } diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java index e1450a5..7f50afa 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java @@ -57,6 +57,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidTxnWriteIdList; import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.metastore.api.*; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; @@ -767,12 +768,12 @@ public void createDatabase(Database db) * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_table(org.apache.hadoop.hive.metastore.api.Table) */ @Override - public void createTable(Table tbl) throws AlreadyExistsException, + public void createTable(Table tbl, String txnWriteIds) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, TException { - createTable(tbl, null); + createTable(tbl, null, txnWriteIds); } - public void createTable(Table tbl, EnvironmentContext envContext) throws AlreadyExistsException, + public void createTable(Table tbl, EnvironmentContext envContext, String txnWriteIds) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, TException { HiveMetaHook hook = getHook(tbl); if (hook != null) { @@ -2293,7 +2294,7 @@ public void replRollbackTxn(long srcTxnId, String replPolicy) throws NoSuchTxnEx } @Override - public void commitTxn(long txnid) + public void commitTxn(long txnid, String txnWriteIds) 
throws NoSuchTxnException, TxnAbortedException, TException { client.commit_txn(new CommitTxnRequest(txnid)); } @@ -2657,7 +2658,7 @@ public GetAllFunctionsResponse getAllFunctions() protected void create_table_with_environment_context(Table tbl, EnvironmentContext envContext) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, TException { - client.create_table_with_environment_context(tbl, envContext); + client.create_table_with_environment_context(tbl, envContext, null); } protected void drop_table_with_environment_context(String dbname, String name, diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java index 6c7fe11..0f82a8b 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java @@ -208,7 +208,7 @@ public void addNotificationEvent(NotificationEvent entry) throws MetaException { } @Override - public void createTable(Table tbl) throws InvalidObjectException, MetaException { + public void createTable(Table tbl, String validWriteIdList) throws InvalidObjectException, MetaException { if (callerVerifier != null) { CallerArguments args = new CallerArguments(tbl.getDbName()); args.tblName = tbl.getTableName(); @@ -218,7 +218,7 @@ public void createTable(Table tbl) throws InvalidObjectException, MetaException + args.dbName + " table: " + args.tblName); } } - super.createTable(tbl); + super.createTable(tbl, validWriteIdList); } @Override diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/NonCatCallsWithCatalog.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/NonCatCallsWithCatalog.java index 377a550..0a4964c 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/NonCatCallsWithCatalog.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/NonCatCallsWithCatalog.java @@ -168,7 +168,7 @@ public void setUp() throws Exception { for (Table t : testTables) { t.unsetCatName(); - client.createTable(t); + client.createTable(t, null); } // Create partitions for the partitioned table @@ -282,7 +282,7 @@ public void tablesCreateDropAlterTruncate() throws TException, URISyntaxExceptio */ Table t = builder.build(conf); t.unsetCatName(); - client.createTable(t); + client.createTable(t, null); } // Add partitions for the partitioned table @@ -416,7 +416,7 @@ public void tablesGetExists() throws TException { .addCol("col2_" + i, ColumnType.INT_TYPE_NAME) .build(conf); table.unsetCatName(); - client.createTable(table); + client.createTable(table, null); } Set tables = new HashSet<>(client.getTables(dbName, "*e_in_other_*")); @@ -452,7 +452,7 @@ public void tablesList() throws TException { if (i == 0) builder.addTableParam("the_key", "the_value"); Table table = builder.build(conf); table.unsetCatName(); - client.createTable(table); + client.createTable(table, null); } String filter = hive_metastoreConstants.HIVE_FILTER_FIELD_PARAMS + "the_key=\"the_value\""; @@ -481,7 +481,7 @@ public void getTableMeta() throws TException { .addCol("name", "string") .build(conf); table.unsetCatName(); - 
client.createTable(table); + client.createTable(table, null); TableMeta tableMeta = new TableMeta(dbName, tableNames[i], TableType.MANAGED_TABLE.name()); tableMeta.setCatName(expectedCatalog()); expected.add(tableMeta); @@ -514,7 +514,7 @@ public void addPartitions() throws TException { .addPartCol("partcol", "string") .build(conf); table.unsetCatName(); - client.createTable(table); + client.createTable(table, null); Partition[] parts = new Partition[5]; for (int i = 0; i < parts.length; i++) { @@ -558,7 +558,7 @@ public void getPartitions() throws TException { .addTableParam("PARTITION_LEVEL_PRIVILEGE", "true") .build(conf); table.unsetCatName(); - client.createTable(table); + client.createTable(table, null); Partition[] parts = new Partition[5]; for (int i = 0; i < parts.length; i++) { @@ -607,7 +607,7 @@ public void listPartitions() throws TException { .addPartCol("partcol", "string") .build(conf); table.unsetCatName(); - client.createTable(table); + client.createTable(table, null); Partition[] parts = new Partition[5]; for (int i = 0; i < parts.length; i++) { @@ -674,7 +674,7 @@ public void alterPartitions() throws TException { .addPartCol("partcol", "string") .build(conf); table.unsetCatName(); - client.createTable(table); + client.createTable(table, null); Partition[] parts = new Partition[5]; for (int i = 0; i < 5; i++) { @@ -742,7 +742,7 @@ public void dropPartitions() throws TException { .addPartCol("partcol", "string") .build(conf); table.unsetCatName(); - client.createTable(table); + client.createTable(table, null); Partition[] parts = new Partition[2]; for (int i = 0; i < parts.length; i++) { diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java index 23faa74..81c866b 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java @@ -220,13 +220,13 @@ protected void creatEnv(Configuration conf) throws Exception { .setTableName(TAB1) .addCol("id", "int") .addCol("name", "string") - .create(client, conf); + .create(client, conf, null); Table tab2 = new TableBuilder() .setDbName(DBNAME1) .setTableName(TAB2) .addCol("id", "int") .addPartCol("name", "string") - .create(client, conf); + .create(client, conf, null); new PartitionBuilder() .inTable(tab2) .addValue("value1") diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestGetPartitionsUsingProjectionAndFilterSpecs.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestGetPartitionsUsingProjectionAndFilterSpecs.java index bc43f3d..be59b3e 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestGetPartitionsUsingProjectionAndFilterSpecs.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestGetPartitionsUsingProjectionAndFilterSpecs.java @@ -151,7 +151,7 @@ private void createTestTables() throws TException { .setSerdeLib("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe") .setInputFormat("org.apache.hadoop.hive.ql.io.HiveInputFormat") .setOutputFormat("org.apache.hadoop.hive.ql.io.HiveOutputFormat") - .create(client, conf); + .create(client, conf, null); Table table = client.getTable(dbName, tblName); 
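The test updates in this stretch all follow one calling convention, sketched here (not part of the patch): callers outside a transaction pass null for the new ValidWriteIdList argument, both to TableBuilder.create and to IMetaStoreClient.createTable, while an ACID-aware caller would pass the serialized write-id list it obtained for the table. The database and table names and the example write-id string are hypothetical.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.Table;
    import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;

    class CreateTableWriteIdSketch {
      static Table createPlain(IMetaStoreClient client, Configuration conf) throws Exception {
        // Non-transactional callers, like the tests above, simply pass null.
        return new TableBuilder()
            .setDbName("demo_db")
            .setTableName("demo_tbl")
            .addCol("id", "int")
            .create(client, conf, null);
      }

      static void createTransactional(IMetaStoreClient client, Table tbl) throws Exception {
        // A transactional caller would pass its serialized ValidWriteIdList instead of null;
        // the string below is only a plausible example of that serialized form.
        client.createTable(tbl, "demo_db.demo_tbl:5:9223372036854775807::");
      }
    }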
Assert.assertTrue("Table " + dbName + "." + tblName + " does not exist", @@ -551,7 +551,7 @@ public void testNonStandardPartitions() throws TException { .addBucketCol("ns_c1") .addSortCol("ns_c2", 1) .addTableParam("tblparamKey", "Partitions of this table are not located within table directory") - .create(client, conf); + .create(client, conf, null); Table table = client.getTable(dbName, testTblName); Assert.assertNotNull("Unable to create a test table ", table); diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java index 0c4c84c..195cb4e 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java @@ -225,7 +225,7 @@ private static void partitionTester(HiveMetaStoreClient client, Configuration co .setSkewedColValueLocationMaps(Collections.singletonMap(skewedColValue, "location1")) .addPartCol("ds", ColumnType.STRING_TYPE_NAME) .addPartCol("hr", ColumnType.STRING_TYPE_NAME) - .create(client, conf); + .create(client, conf, null); if (isThriftClient) { // the createTable() above does not update the location in the 'tbl' @@ -450,7 +450,7 @@ private static void partitionTester(HiveMetaStoreClient client, Configuration co // still exist tbl.setParameters(new HashMap<>()); tbl.getParameters().put("EXTERNAL", "TRUE"); - client.createTable(tbl); + client.createTable(tbl, null); retp = client.add_partition(part); assertTrue(fs.exists(partPath)); client.dropPartition(dbName, tblName, part.getValues(), true); @@ -849,7 +849,7 @@ public void testAlterViewParititon() throws Throwable { .setTableName(tblName) .addCol("name", ColumnType.STRING_TYPE_NAME) .addCol("income", ColumnType.INT_TYPE_NAME) - .create(client, conf); + .create(client, conf, null); if (isThriftClient) { // the createTable() above does not update the location in the 'tbl' @@ -882,7 +882,7 @@ public void testAlterViewParititon() throws Throwable { viewSd.setSerdeInfo(new SerDeInfo()); viewSd.getSerdeInfo().setParameters(new HashMap<>()); - client.createTable(view); + client.createTable(view, null); if (isThriftClient) { // the createTable() above does not update the location in the 'tbl' @@ -947,7 +947,7 @@ public void testAlterPartition() throws Throwable { .addSerdeParam(ColumnType.SERIALIZATION_FORMAT, "1") .addPartCol("ds", ColumnType.STRING_TYPE_NAME) .addPartCol("hr", ColumnType.INT_TYPE_NAME) - .create(client, conf); + .create(client, conf, null); if (isThriftClient) { // the createTable() above does not update the location in the 'tbl' @@ -1022,7 +1022,7 @@ public void testRenamePartition() throws Throwable { .addCol("income", ColumnType.INT_TYPE_NAME) .addPartCol("ds", ColumnType.STRING_TYPE_NAME) .addPartCol("hr", ColumnType.INT_TYPE_NAME) - .create(client, conf); + .create(client, conf, null); if (isThriftClient) { // the createTable() above does not update the location in the 'tbl' @@ -1395,7 +1395,7 @@ public void testSimpleTable() throws Exception { .setNumBuckets(1) .addBucketCol("name") .addStorageDescriptorParam("test_param_1", "Use this for comments etc") - .create(client, conf); + .create(client, conf, null); if (isThriftClient) { // the createTable() above does not update the location in the 'tbl' @@ -1442,7 +1442,7 @@ public void testSimpleTable() throws Exception { } 
tbl2.unsetId(); - client.createTable(tbl2); + client.createTable(tbl2, null); if (isThriftClient) { tbl2 = client.getTable(tbl2.getDbName(), tbl2.getTableName()); } @@ -1795,7 +1795,7 @@ public void testGetSchemaWithNoClassDefFoundError() throws TException { .setTableName(tblName) .addCol("name", ColumnType.STRING_TYPE_NAME, "") .setSerdeLib("no.such.class") - .create(client, conf); + .create(client, conf, null); client.getSchema(dbName, tblName); } @@ -1841,7 +1841,7 @@ public void testCreateTableSettingId() throws Exception { .build(conf); table.setId(1); try { - client.createTable(table); + client.createTable(table, null); Assert.fail("An error should happen when setting the id" + " to create a table"); } catch (InvalidObjectException e) { @@ -1876,7 +1876,7 @@ public void testAlterTable() throws Exception { boolean failed = false; try { - client.createTable(tbl); + client.createTable(tbl, null); } catch (InvalidObjectException ex) { failed = true; } @@ -1893,7 +1893,7 @@ public void testAlterTable() throws Exception { tbl.getSd().setCols(invColsInvType); boolean failChecker = false; try { - client.createTable(tbl); + client.createTable(tbl, null); } catch (InvalidObjectException ex) { failChecker = true; } @@ -1909,7 +1909,7 @@ public void testAlterTable() throws Exception { // create a valid table tbl.setTableName(tblName); tbl.getSd().setCols(cols); - client.createTable(tbl); + client.createTable(tbl, null); if (isThriftClient) { tbl = client.getTable(tbl.getDbName(), tbl.getTableName()); @@ -2034,7 +2034,7 @@ public void testComplexTable() throws Exception { .setNumBuckets(1) .addBucketCol("name") .addStorageDescriptorParam("test_param_1","Use this for comments etc") - .create(client, conf); + .create(client, conf, null); Table tbl2 = client.getTable(dbName, tblName); assertEquals(tbl2.getDbName(), dbName); @@ -2108,7 +2108,7 @@ public void testTableDatabase() throws Exception { .setTableName(tblName_1) .addCol("name", ColumnType.STRING_TYPE_NAME) .addCol("income", ColumnType.INT_TYPE_NAME) - .create(client, conf); + .create(client, conf, null); tbl = client.getTable(dbName, tblName_1); @@ -2205,7 +2205,7 @@ public void testPartitionFilter() throws Exception { .addPartCol("p1", ColumnType.STRING_TYPE_NAME) .addPartCol("p2", ColumnType.STRING_TYPE_NAME) .addPartCol("p3", ColumnType.INT_TYPE_NAME) - .create(client, conf); + .create(client, conf, null); tbl = client.getTable(dbName, tblName); @@ -2376,7 +2376,7 @@ public void testFilterSinglePartition() throws Exception { .addCol("c1", ColumnType.STRING_TYPE_NAME) .addCol("c2", ColumnType.INT_TYPE_NAME) .addPartCol("p1", ColumnType.STRING_TYPE_NAME) - .create(client, conf); + .create(client, conf, null); tbl = client.getTable(dbName, tblName); @@ -2426,7 +2426,7 @@ public void testFilterLastPartition() throws Exception { .addCol("c2", ColumnType.INT_TYPE_NAME) .addPartCol("p1", ColumnType.STRING_TYPE_NAME) .addPartCol("p2", ColumnType.STRING_TYPE_NAME) - .create(client, conf); + .create(client, conf, null); tbl = client.getTable(dbName, tblName); @@ -2651,7 +2651,7 @@ private Table createTableForTestFilter(String dbName, String tableName, String o .setTableParams(tableParams) .setOwner(owner) .setLastAccessTime(lastAccessTime) - .create(client, conf); + .create(client, conf, null); if (isThriftClient) { // the createTable() above does not update the location in the 'tbl' @@ -2683,7 +2683,7 @@ public void testConcurrentMetastores() throws Exception { .setTableName(tblName) .addCol("c1", ColumnType.STRING_TYPE_NAME) .addCol("c2", 
ColumnType.INT_TYPE_NAME) - .create(client, conf); + .create(client, conf, null); // get the table from the client, verify the name is correct Table tbl2 = client.getTable(dbName, tblName); @@ -2895,7 +2895,7 @@ private void createTable(String dbName, String tableName) throws TException { .setTableName(tableName) .addCol("foo", "string") .addCol("bar", "string") - .create(client, conf); + .create(client, conf, null); } private void createMaterializedView(String dbName, String tableName, Set tablesUsed) @@ -2907,7 +2907,7 @@ private void createMaterializedView(String dbName, String tableName, Set .addMaterializedViewReferencedTables(tablesUsed) .addCol("foo", "string") .addCol("bar", "string") - .create(client, conf); + .create(client, conf, null); } private List createPartitions(String dbName, Table tbl, @@ -2954,7 +2954,7 @@ private void createMaterializedView(String dbName, String tableName, Set .addCol("income", ColumnType.INT_TYPE_NAME) .addPartCol("ds", ColumnType.STRING_TYPE_NAME) .addPartCol("hr", ColumnType.STRING_TYPE_NAME) - .create(client, conf); + .create(client, conf, null); if (isThriftClient) { // the createTable() above does not update the location in the 'tbl' @@ -3218,7 +3218,7 @@ public void testValidateTableCols() throws Throwable { .setTableName(tblName) .addCol("name", ColumnType.STRING_TYPE_NAME) .addCol("income", ColumnType.INT_TYPE_NAME) - .create(client, conf); + .create(client, conf, null); if (isThriftClient) { tbl = client.getTable(dbName, tblName); diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java index ebbd1c7..383aca0 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java @@ -109,7 +109,7 @@ private static void createTable(HiveMetaStoreClient hmsc, boolean enablePartitio tableParameters.put("hive.hcatalog.partition.spec.grouping.enabled", enablePartitionGrouping? "true":"false"); Table table = new Table(tableName, dbName, "", 0, 0, 0, storageDescriptor, partColumns, tableParameters, "", "", ""); - hmsc.createTable(table); + hmsc.createTable(table, null); Assert.assertTrue("Table " + dbName + "." + tableName + " does not exist", hmsc.tableExists(dbName, tableName)); @@ -233,7 +233,7 @@ public void testAddPartitions() { StorageDescriptor targetTableSd = new StorageDescriptor(targetTable.getSd()); targetTableSd.setLocation( targetTableSd.getLocation().replace( tableName, targetTableName)); - hmsc.createTable(targetTable); + hmsc.createTable(targetTable, null); // Get partition-list from source. 
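The next file in this diff, TestHiveMetaStoreTxns, routes every commit through the two-argument commitTxn. A minimal sketch of that call (not part of the patch); the user string is arbitrary and, as the TODO markers elsewhere in the patch note, wiring the real write-id string through is deferred to HIVE-21637.

    import org.apache.hadoop.hive.metastore.IMetaStoreClient;

    class CommitTxnSketch {
      static void commitWithoutWriteIds(IMetaStoreClient client) throws Exception {
        long txnId = client.openTxn("me");
        // ... perform writes under txnId ...
        // The updated tests pass null here; a transactional writer would instead pass the
        // serialized write-id list covering the tables it wrote in this transaction.
        client.commitTxn(txnId, null);
      }
    }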
PartitionSpecProxy partitionsForAddition diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java index 4cdc035..5b3df7f 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java @@ -73,7 +73,7 @@ public void testTxns() throws Exception { Assert.assertEquals(2L, (long) tids.get(1)); Assert.assertEquals(3L, (long) tids.get(2)); client.rollbackTxn(1); - client.commitTxn(2); + client.commitTxn(2, null); ValidTxnList validTxns = client.getValidTxns(); Assert.assertFalse(validTxns.isTxnValid(1)); Assert.assertTrue(validTxns.isTxnValid(2)); @@ -88,7 +88,7 @@ public void testOpenTxnNotExcluded() throws Exception { Assert.assertEquals(2L, (long) tids.get(1)); Assert.assertEquals(3L, (long) tids.get(2)); client.rollbackTxn(1); - client.commitTxn(2); + client.commitTxn(2, null); ValidTxnList validTxns = client.getValidTxns(3); Assert.assertFalse(validTxns.isTxnValid(1)); Assert.assertTrue(validTxns.isTxnValid(2)); @@ -111,7 +111,7 @@ public void testTxNWithKeyValue() throws Exception { try { client.createDatabase(db); - client.createTable(tbl); + client.createTable(tbl, null); tbl = client.getTable(dbName, tblName); stm.executeUpdate( @@ -169,7 +169,7 @@ public void testTxNWithKeyWrongPrefix() throws Exception { Table tbl = new TableBuilder().setDbName(dbName).setTableName(tblName) .addCol("id", "int").addCol("name", "string") .setType(TableType.MANAGED_TABLE.name()).build(conf); - client.createTable(tbl); + client.createTable(tbl, null); tbl = client.getTable(dbName, tblName); client.commitTxnWithKeyValue(1, tbl.getId(), "mykey", @@ -259,7 +259,7 @@ public void testLocksWithTxn() throws Exception { client.heartbeat(txnid, 1); - client.commitTxn(txnid); + client.commitTxn(txnid, null); } @Test @@ -302,7 +302,7 @@ public void stringifyValidTxns() throws Exception { @Test public void testOpenTxnWithType() throws Exception { long txnId = client.openTxn("me", TxnType.DEFAULT); - client.commitTxn(txnId); + client.commitTxn(txnId, null); ValidTxnList validTxns = client.getValidTxns(); Assert.assertTrue(validTxns.isTxnValid(txnId)); } diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java index 38b3f6e..c0b01bc 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java @@ -118,7 +118,7 @@ public void testEnvironmentContext() throws Exception { CreateDatabaseEvent dbEvent = (CreateDatabaseEvent)(notifyList.get(listSize - 1)); assert dbEvent.getStatus(); - msc.createTable(table, envContext); + msc.createTable(table, envContext, null); listSize++; assertEquals(notifyList.size(), listSize); CreateTableEvent tblEvent = (CreateTableEvent)(notifyList.get(listSize - 1)); diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHmsServerAuthorization.java 
b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHmsServerAuthorization.java index 19fd634..13a6b32 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHmsServerAuthorization.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHmsServerAuthorization.java @@ -152,13 +152,13 @@ protected void creatEnv(Configuration conf) throws Exception { .setTableName(TAB1) .addCol("id", "int") .addCol("name", "string") - .create(client, conf); + .create(client, conf, null); Table tab2 = new TableBuilder() .setDbName(dbName1) .setTableName(TAB2) .addCol("id", "int") .addPartCol("name", "string") - .create(client, conf); + .create(client, conf, null); new PartitionBuilder() .inTable(tab2) .addValue("value1") diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartition.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartition.java index 00fae25..cfe20f0 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartition.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartition.java @@ -72,7 +72,7 @@ public void testMarkingPartitionSet() throws TException, InterruptedException { .setTableName(tableName) .addCol("a", "string") .addPartCol("b", "string") - .create(msc, conf); + .create(msc, conf, null); Partition part = new PartitionBuilder() .inTable(table) diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java index b919eef..d8cee63 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java @@ -96,7 +96,7 @@ public void testEndFunctionListener() throws Exception { .setTableName(tblName) .addCol("a", "string") .addPartCol("b", "string") - .create(msc, conf); + .create(msc, conf, null); try { msc.getTable(dbName, unknownTable); } catch (Exception e1) { diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java index fe64a91..ea37744 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java @@ -220,7 +220,7 @@ public void testListener() throws Exception { .setTableName(tblName) .addCol("a", "string") .addPartCol("b", "string") - .create(msc, conf); + .create(msc, conf, null); PreCreateTableEvent preTblEvent = (PreCreateTableEvent)(preNotifyList.get(preNotifyList.size() - 1)); listSize++; Table tbl = msc.getTable(dbName, tblName); diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java 
b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java index 546422d..c9e1edd 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java @@ -91,7 +91,7 @@ public void testEventStatus() throws Exception { .setTableName(tableName) .addCol("id", "int") .addPartCol("ds", "string") - .create(msc, conf); + .create(msc, conf, null); listSize += 1; notifyList = DummyListener.notifyList; assertEquals(notifyList.size(), listSize); diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java index 0e814bc..2064dda 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java @@ -245,7 +245,7 @@ public void testTableOps() throws MetaException, InvalidObjectException, NoSuchO params.put("EXTERNAL", "false"); Table tbl1 = new Table(TABLE1, DB1, "owner", 1, 2, 3, sd1, null, params, null, null, "MANAGED_TABLE"); - objectStore.createTable(tbl1); + objectStore.createTable(tbl1, null); List tables = objectStore.getAllTables(DEFAULT_CATALOG_NAME, DB1); Assert.assertEquals(1, tables.size()); @@ -272,7 +272,7 @@ public void testTableOps() throws MetaException, InvalidObjectException, NoSuchO Assert.assertEquals("Owner of table was not altered", newTbl1.getOwner(), alteredTable.getOwner()); Assert.assertEquals("Owner type of table was not altered", newTbl1.getOwnerType(), alteredTable.getOwnerType()); - objectStore.createTable(tbl1); + objectStore.createTable(tbl1, null); tables = objectStore.getAllTables(DEFAULT_CATALOG_NAME, DB1); Assert.assertEquals(2, tables.size()); @@ -343,7 +343,7 @@ public void testPartitionOps() throws MetaException, InvalidObjectException, Table tbl1 = new Table(TABLE1, DB1, "owner", 1, 2, 3, sd, Arrays.asList(partitionKey1, partitionKey2), tableParams, null, null, "MANAGED_TABLE"); - objectStore.createTable(tbl1); + objectStore.createTable(tbl1, null); HashMap partitionParams = new HashMap<>(); partitionParams.put("PARTITION_LEVEL_PRIVILEGE", "true"); List value1 = Arrays.asList("US", "CA"); @@ -401,7 +401,7 @@ public void testConcurrentDropPartitions() throws MetaException, InvalidObjectEx Table tbl1 = new Table(TABLE1, DB1, "owner", 1, 2, 3, sd, Arrays.asList(partitionKey1, partitionKey2), tableParams, null, null, "MANAGED_TABLE"); - objectStore.createTable(tbl1); + objectStore.createTable(tbl1, null); HashMap partitionParams = new HashMap<>(); partitionParams.put("PARTITION_LEVEL_PRIVILEGE", "true"); @@ -581,7 +581,7 @@ private void createPartitionedTable(boolean withPrivileges, boolean withStatisti .addCol("test_skewed_col", "int", "test skewed col comment") .addCol("test_sort_col", "int", "test sort col comment") .build(conf); - objectStore.createTable(tbl1); + objectStore.createTable(tbl1, null); PrivilegeBag privilegeBag = new PrivilegeBag(); // Create partitions for the partitioned table diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStoreStatementVerify.java 
b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStoreStatementVerify.java index 0d9b1bc..f9326b7 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStoreStatementVerify.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStoreStatementVerify.java @@ -130,7 +130,7 @@ public void testGetTableMetaFetchGroup() throws MetaException, InvalidObjectExce db.setCatalogName("hive"); objectStore.createDatabase(db); - objectStore.createTable(makeTable(DB1, TBL1)); + objectStore.createTable(makeTable(DB1, TBL1), null); List tableMeta = objectStore.getTableMeta("hive", "*", "*", Collections.emptyList()); Assert.assertEquals("Number of items for tableMeta is incorrect", 1, tableMeta.size()); diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java index 27c5bba..22477df 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java @@ -145,7 +145,7 @@ public void testPartitionOps() throws Exception { partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, Collections.emptyMap(), null, null, null); - store.createTable(table); + store.createTable(table, null); Deadline.startTimer("getPartition"); for (int i = 0; i < 10; i++) { diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestPartitionManagement.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestPartitionManagement.java index 059c166..a538c27 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestPartitionManagement.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestPartitionManagement.java @@ -167,7 +167,7 @@ public void tearDown() throws Exception { tb.addPartCol(partKeys.get(i), partKeyTypes.get(i)); } } - Table table = tb.create(client, conf); + Table table = tb.create(client, conf, null); if (partKeys != null) { for (List partVal : partVals) { diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestRetryingHMSHandler.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestRetryingHMSHandler.java index b9fa89e..45da635 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestRetryingHMSHandler.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestRetryingHMSHandler.java @@ -74,7 +74,7 @@ public void testRetryingHMSHandler() throws Exception { .setDbName(dbName) .setTableName(tblName) .addCol("c1", ColumnType.STRING_TYPE_NAME) - .create(msc, conf); + .create(msc, conf, null); Assert.assertEquals(4, AlternateFailurePreListener.getCallCount()); } diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestStats.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestStats.java index 03378ba..27b81a5 100644 --- 
a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestStats.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestStats.java @@ -154,7 +154,7 @@ public void tearDown() throws TException { "Must provide partition values for partitioned table"; tb.addPartCol(partKey, ColumnType.STRING_TYPE_NAME); } - Table table = tb.create(client, conf); + Table table = tb.create(client, conf, null); if (partKey != null) { for (String partVal : partVals) { diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java index 8696c2f..5f6389a 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java @@ -106,7 +106,7 @@ public void testDatabaseOps() throws Exception { db = objectStore.getDatabase(DEFAULT_CATALOG_NAME, dbName); // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); - CachedStore.prewarm(objectStore); + CachedStore.prewarm(objectStore, conf); // Read database via CachedStore Database dbRead = cachedStore.getDatabase(DEFAULT_CATALOG_NAME, dbName); @@ -197,12 +197,12 @@ public void testTableOps() throws Exception { cols.add(col2); List ptnCols = new ArrayList(); Table tbl = createTestTbl(dbName, tblName, tblOwner, cols, ptnCols); - objectStore.createTable(tbl); + objectStore.createTable(tbl, null); tbl = objectStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName); // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); - CachedStore.prewarm(objectStore); + CachedStore.prewarm(objectStore, conf); // Read database, table via CachedStore Database dbRead= cachedStore.getDatabase(DEFAULT_CATALOG_NAME, dbName); @@ -214,7 +214,7 @@ public void testTableOps() throws Exception { String tblName1 = "tbl1"; Table tbl1 = new Table(tbl); tbl1.setTableName(tblName1); - cachedStore.createTable(tbl1); + cachedStore.createTable(tbl1, null); tbl1 = cachedStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName1); // Read via object store @@ -225,7 +225,7 @@ public void testTableOps() throws Exception { String tblName2 = "tbl2"; Table tbl2 = new Table(tbl); tbl2.setTableName(tblName2); - objectStore.createTable(tbl2); + objectStore.createTable(tbl2, null); tbl2 = objectStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName2); // Alter table "tbl" via ObjectStore @@ -293,7 +293,7 @@ public void testPartitionOps() throws Exception { List ptnCols = new ArrayList(); ptnCols.add(ptnCol1); Table tbl = createTestTbl(dbName, tblName, tblOwner, cols, ptnCols); - objectStore.createTable(tbl); + objectStore.createTable(tbl, null); tbl = objectStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName); final String ptnColVal1 = "aaa"; @@ -313,7 +313,7 @@ public void testPartitionOps() throws Exception { // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); - CachedStore.prewarm(objectStore); + CachedStore.prewarm(objectStore, conf); // Read database, table, partition via CachedStore Database dbRead = cachedStore.getDatabase(DEFAULT_CATALOG_NAME, dbName); @@ -411,7 +411,7 @@ public void testTableColStatsOps() throws Exception { List ptnCols = new ArrayList(); ptnCols.add(ptnCol1); Table tbl = createTestTbl(dbName, tblName, tblOwner, cols, ptnCols); - 
objectStore.createTable(tbl); + objectStore.createTable(tbl, null); tbl = objectStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName); // Add ColumnStatistics for tbl to metastore DB via ObjectStore @@ -459,7 +459,7 @@ public void testTableColStatsOps() throws Exception { // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); - CachedStore.prewarm(objectStore); + CachedStore.prewarm(objectStore, conf); // Read table stats via CachedStore ColumnStatistics newStats = @@ -551,15 +551,15 @@ public void testSharedStoreTable() { newTbl1.setSd(newSd1); newTbl1.setPartitionKeys(new ArrayList<>()); - sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db1", "tbl1", tbl1); - sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db1", "tbl2", tbl2); - sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db1", "tbl3", tbl3); - sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db2", "tbl1", tbl1); + sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db1", "tbl1", tbl1, null, true); + sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db1", "tbl2", tbl2, null, true); + sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db1", "tbl3", tbl3, null, true); + sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db2", "tbl1", tbl1, null, true); Assert.assertEquals(sharedCache.getCachedTableCount(), 4); Assert.assertEquals(sharedCache.getSdCache().size(), 2); - Table t = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, "db1", "tbl1"); + Table t = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, "db1", "tbl1", null); Assert.assertEquals(t.getSd().getLocation(), "loc1"); sharedCache.removeTableFromCache(DEFAULT_CATALOG_NAME, "db1", "tbl1"); @@ -591,9 +591,9 @@ public void testSharedStorePartition() { cols.add(col2); List ptnCols = new ArrayList(); Table tbl1 = createTestTbl(dbName, tbl1Name, owner, cols, ptnCols); - sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, tbl1); + sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, tbl1, null, true); Table tbl2 = createTestTbl(dbName, tbl2Name, owner, cols, ptnCols); - sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, dbName, tbl2Name, tbl2); + sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, dbName, tbl2Name, tbl2, null, true); Partition part1 = new Partition(); StorageDescriptor sd1 = new StorageDescriptor(); @@ -686,7 +686,7 @@ public void testAggrStatsRepeatedRead() throws Exception { new Table(tblName, dbName, null, 0, 0, 0, sd, partCols, new HashMap<>(), null, null, TableType.MANAGED_TABLE.toString()); tbl.setCatName(DEFAULT_CATALOG_NAME); - cachedStore.createTable(tbl); + cachedStore.createTable(tbl, null); List partVals1 = new ArrayList<>(); partVals1.add("1"); @@ -756,7 +756,7 @@ public void testPartitionAggrStats() throws Exception { new Table(tblName, dbName, null, 0, 0, 0, sd, partCols, new HashMap<>(), null, null, TableType.MANAGED_TABLE.toString()); tbl.setCatName(DEFAULT_CATALOG_NAME); - cachedStore.createTable(tbl); + cachedStore.createTable(tbl, null); List partVals1 = new ArrayList<>(); partVals1.add("1"); @@ -830,7 +830,7 @@ public void testPartitionAggrStatsBitVector() throws Exception { new Table(tblName, dbName, null, 0, 0, 0, sd, partCols, new HashMap<>(), null, null, TableType.MANAGED_TABLE.toString()); tbl.setCatName(DEFAULT_CATALOG_NAME); - cachedStore.createTable(tbl); + cachedStore.createTable(tbl, null); List partVals1 = new ArrayList<>(); partVals1.add("1"); @@ -943,7 +943,7 @@ public Object call() { Callable c = new Callable() { public Object call() { Table tbl = createTestTbl(dbNames.get(0), 
tblName, "user1", cols, ptnCols); - sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, tbl); + sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, tbl, null, true); return null; } }; @@ -951,7 +951,7 @@ public Object call() { } executor.invokeAll(tasks); for (String tblName : tblNames) { - Table tbl = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName); + Table tbl = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, null); Assert.assertNotNull(tbl); Assert.assertEquals(tblName, tbl.getTableName()); } @@ -960,7 +960,7 @@ public Object call() { List ptnVals = new ArrayList(Arrays.asList("aaa", "bbb", "ccc", "ddd", "eee")); tasks.clear(); for (String tblName : tblNames) { - Table tbl = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName); + Table tbl = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, null); for (String ptnVal : ptnVals) { Map partParams = new HashMap(); Callable c = new Callable() { @@ -1002,7 +1002,7 @@ public Object call() { } } for (String tblName : addPtnTblNames) { - Table tbl = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName); + Table tbl = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, null); for (String ptnVal : newPtnVals) { Map partParams = new HashMap(); Callable c = new Callable() { diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCatalogCaching.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCatalogCaching.java index 423dce8..583af42 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCatalogCaching.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCatalogCaching.java @@ -87,7 +87,7 @@ public void defaultHiveOnly() throws Exception { CachedStore.stopCacheUpdateService(1); cachedStore.resetCatalogCache(); - CachedStore.prewarm(objectStore); + CachedStore.prewarm(objectStore, conf); // Only the hive catalog should be cached List cachedCatalogs = cachedStore.getCatalogs(); @@ -107,7 +107,7 @@ public void cacheAll() throws Exception { // prewarm gets the conf object cachedStore.resetCatalogCache(); - CachedStore.prewarm(objectStore); + CachedStore.prewarm(objectStore, conf); // All the catalogs should be cached List cachedCatalogs = cachedStore.getCatalogs(); @@ -130,7 +130,7 @@ public void cacheSome() throws Exception { // prewarm gets the conf object cachedStore.resetCatalogCache(); - CachedStore.prewarm(objectStore); + CachedStore.prewarm(objectStore, conf); // All the catalogs should be cached List cachedCatalogs = cachedStore.getCatalogs(); diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAddPartitions.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAddPartitions.java index a15f5ea..1cf9ea4 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAddPartitions.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAddPartitions.java @@ -151,7 +151,7 @@ public void addPartitionOtherCatalog() throws TException { .addCol("id", "int") .addCol("name", "string") .addPartCol("partcol", "string") - .create(client, 
metaStore.getConf()); + .create(client, metaStore.getConf(), null); Partition[] parts = new Partition[5]; for (int i = 0; i < parts.length; i++) { @@ -184,7 +184,7 @@ public void noSuchCatalog() throws TException { .addCol("id", "int") .addCol("name", "string") .addPartCol("partcol", "string") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); Partition part = new PartitionBuilder() .inTable(table) @@ -545,7 +545,7 @@ public void testAddPartitionNoPartColOnTable() throws Exception { .setTableName(TABLE_NAME) .addCol("test_id", "int", "test col id") .addCol("test_value", "string", "test col value") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); Partition partition = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE); client.add_partition(partition); } @@ -1531,7 +1531,7 @@ private Table createTable(String dbName, String tableName, List par .setStoredAsSubDirectories(false) .addSerdeParam("partTestSerdeParamKey", "partTestSerdeParamValue") .setLocation(location) - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); return client.getTable(dbName, tableName); } @@ -1544,7 +1544,7 @@ private void createExternalTable(String tableName, String location) throws Excep .addPartCol(YEAR_COL_NAME, DEFAULT_COL_TYPE) .addTableParam("EXTERNAL", "TRUE") .setLocation(location) - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); } private Partition buildPartition(String dbName, String tableName, String value) @@ -1731,6 +1731,6 @@ private void createView(String tableName) throws Exception { .addCol("test_value", "string", "test col value") .addPartCol(YEAR_COL_NAME, DEFAULT_COL_TYPE) .setLocation(null) - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); } } diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAddPartitionsFromPartSpec.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAddPartitionsFromPartSpec.java index 2564349..78111ee 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAddPartitionsFromPartSpec.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAddPartitionsFromPartSpec.java @@ -1019,7 +1019,7 @@ private Table createTable(String dbName, String tableName, List par .setStoredAsSubDirectories(false) .addSerdeParam("partTestSerdeParamKey", "partTestSerdeParamValue") .setLocation(location) - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); return client.getTable(dbName, tableName); } @@ -1262,6 +1262,6 @@ private void createView(String tableName) throws Exception { .addCol("test_value", "string", "test col value") .addPartCol(YEAR_COL_NAME, DEFAULT_COL_TYPE) .setLocation(null) - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); } } diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java index 4fc3688..9b315f6 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java +++ 
b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java @@ -130,7 +130,7 @@ private Table createTestTable(IMetaStoreClient client, String dbName, String tab table.putToParameters("PARTITION_LEVEL_PRIVILEGE", "true"); } - client.createTable(table); + client.createTable(table, null); return table; } @@ -251,7 +251,7 @@ public void otherCatalog() throws TException { .addCol("id", "int") .addCol("name", "string") .addPartCol("partcol", "string") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); Partition[] parts = new Partition[5]; for (int i = 0; i < 5; i++) { @@ -314,7 +314,7 @@ public void deprecatedCalls() throws TException { .addCol("id", "int") .addCol("name", "string") .addPartCol("partcol", "string") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); Partition[] parts = new Partition[5]; for (int i = 0; i < 5; i++) { diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAppendPartitions.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAppendPartitions.java index 462584a..6d4d13a 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAppendPartitions.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAppendPartitions.java @@ -471,7 +471,7 @@ public void otherCatalog() throws TException { .addCol("id", "int") .addCol("name", "string") .addPartCol("partcol", "string") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); Partition created = client.appendPartition(catName, dbName, tableName, Collections.singletonList("a1")); @@ -546,7 +546,7 @@ private Table createTable(String tableName, List partCols, Map partCols, .setPartCols(partCols) .setLocation(metaStore.getWarehouseRoot() + "/" + tableName) .setTableParams(tableParams) - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); return table; } diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestExchangePartitions.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestExchangePartitions.java index 1a2b7e4..dc1652b 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestExchangePartitions.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestExchangePartitions.java @@ -1194,7 +1194,7 @@ private Table createTable(String dbName, String tableName, List par .setCols(cols) .setPartCols(partCols) .setLocation(location) - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); return client.getTable(dbName, tableName); } diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestForeignKey.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestForeignKey.java index b058dd2..a999466 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestForeignKey.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestForeignKey.java @@ -105,7 +105,7 @@ public void setUp() throws Exception { .setTableName("test_table_1") 
.addCol("col1", "int") .addCol("col2", "varchar(32)") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); testTables[1] = new TableBuilder() @@ -113,7 +113,7 @@ public void setUp() throws Exception { .setTableName("test_table_2") .addCol("col1", "int") .addCol("col2", "varchar(32)") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); testTables[2] = new TableBuilder() @@ -121,7 +121,7 @@ public void setUp() throws Exception { .setTableName("test_table_3") .addCol("col1", "int") .addCol("col2", "varchar(32)") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); testTables[3] = new TableBuilder() @@ -129,7 +129,7 @@ public void setUp() throws Exception { .setTableName("test_table_4") .addCol("col1", "int") .addCol("col2", "varchar(32)") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); // Reload tables from the MetaStore for(int i=0; i < testTables.length; i++) { diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestGetPartitions.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestGetPartitions.java index 4d7f7c1..c26eddb 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestGetPartitions.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestGetPartitions.java @@ -121,7 +121,7 @@ private Table createTestTable(IMetaStoreClient client, String dbName, String tab table.putToParameters("PARTITION_LEVEL_PRIVILEGE", "true"); } - client.createTable(table); + client.createTable(table, null); return table; } @@ -547,7 +547,7 @@ public void otherCatalog() throws TException { .addCol("name", "string") .addPartCol("partcol", "string") .addTableParam("PARTITION_LEVEL_PRIVILEGE", "true") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); Partition[] parts = new Partition[5]; for (int i = 0; i < parts.length; i++) { diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestGetTableMeta.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestGetTableMeta.java index 7720aa2..1054e71 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestGetTableMeta.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestGetTableMeta.java @@ -152,7 +152,7 @@ private TableMeta createTestTable(String dbName, String tableName, TableType typ throws Exception { Table table = createTable(dbName, tableName, type); table.getParameters().put("comment", comment); - client.createTable(table); + client.createTable(table, null); TableMeta tableMeta = new TableMeta(dbName, tableName, type.name()); tableMeta.setComments(comment); tableMeta.setCatName("hive"); @@ -162,7 +162,7 @@ private TableMeta createTestTable(String dbName, String tableName, TableType typ private TableMeta createTestTable(String dbName, String tableName, TableType type) throws Exception { Table table = createTable(dbName, tableName, type); - client.createTable(table); + client.createTable(table, null); TableMeta tableMeta = new TableMeta(dbName, tableName, type.name()); tableMeta.setCatName("hive"); return tableMeta; @@ -305,7 +305,7 @@ public void tablesInDifferentCatalog() throws TException 
{ .setTableName(tableNames[i]) .addCol("id", "int") .addCol("name", "string") - .build(metaStore.getConf())); + .build(metaStore.getConf()), null); TableMeta tableMeta = new TableMeta(dbName, tableNames[i], TableType.MANAGED_TABLE.name()); tableMeta.setCatName(catName); expected.add(tableMeta); diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestListPartitions.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestListPartitions.java index 34ceb34..375c901 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestListPartitions.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestListPartitions.java @@ -135,7 +135,7 @@ private Table createTestTable(IMetaStoreClient client, String dbName, String tab table.putToParameters("PARTITION_LEVEL_PRIVILEGE", "true"); } - client.createTable(table); + client.createTable(table, null); return table; } @@ -1414,7 +1414,7 @@ public void otherCatalog() throws TException { .addCol("id", "int") .addCol("name", "string") .addPartCol("partcol", "string") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); Partition[] parts = new Partition[5]; for (int i = 0; i < parts.length; i++) { diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestNotNullConstraint.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestNotNullConstraint.java index b32eeda..ec882a7 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestNotNullConstraint.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestNotNullConstraint.java @@ -103,7 +103,7 @@ public void setUp() throws Exception { .setTableName("test_table_1") .addCol("col1", "int") .addCol("col2", "varchar(32)") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); testTables[1] = new TableBuilder() @@ -111,7 +111,7 @@ public void setUp() throws Exception { .setTableName("test_table_2") .addCol("col1", "int") .addCol("col2", "varchar(32)") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); testTables[2] = new TableBuilder() @@ -119,7 +119,7 @@ public void setUp() throws Exception { .setTableName("test_table_3") .addCol("col1", "int") .addCol("col2", "varchar(32)") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); // Reload tables from the MetaStore for(int i=0; i < testTables.length; i++) { diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestPrimaryKey.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestPrimaryKey.java index c33572b..92616d8 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestPrimaryKey.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestPrimaryKey.java @@ -103,7 +103,7 @@ public void setUp() throws Exception { .setTableName("test_table_1") .addCol("col1", "int") .addCol("col2", "varchar(32)") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); testTables[1] = new TableBuilder() @@ -111,7 +111,7 @@ public void setUp() 
throws Exception { .setTableName("test_table_2") .addCol("col1", "int") .addCol("col2", "varchar(32)") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); testTables[2] = new TableBuilder() @@ -119,7 +119,7 @@ public void setUp() throws Exception { .setTableName("test_table_3") .addCol("col1", "int") .addCol("col2", "varchar(32)") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); // Reload tables from the MetaStore for(int i=0; i < testTables.length; i++) { diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java index 6c8c943..f7a36a9 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java @@ -124,20 +124,20 @@ public void setUp() throws Exception { new TableBuilder() .setTableName("test_table") .addCol("test_col", "int") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); testTables[1] = new TableBuilder() .setTableName("test_view") .addCol("test_col", "int") .setType("VIRTUAL_VIEW") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); testTables[2] = new TableBuilder() .setTableName("test_table_to_find_1") .addCol("test_col", "int") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); testTables[3] = new TableBuilder() @@ -145,7 +145,7 @@ public void setUp() throws Exception { .addCol("test_col1", "int") .addCol("test_col2", "int") .addPartCol("test_part_col", "int") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); testTables[4] = new TableBuilder() @@ -154,7 +154,7 @@ public void setUp() throws Exception { .setLocation(metaStore.getWarehouseRoot() + "/external/table_dir") .addTableParam("EXTERNAL", "TRUE") .setType("EXTERNAL_TABLE") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); new DatabaseBuilder().setName(OTHER_DATABASE).create(client, metaStore.getConf()); @@ -164,7 +164,7 @@ public void setUp() throws Exception { .setDbName(OTHER_DATABASE) .setTableName("test_table") .addCol("test_col", "int") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); // Create partitions for the partitioned table for(int i=0; i < 2; i++) { @@ -224,7 +224,7 @@ public void tearDown() throws Exception { public void testCreateGetDeleteTable() throws Exception { // Try to create a table with all of the parameters set Table table = getTableWithAllParametersSet(); - client.createTable(table); + client.createTable(table, null); table.unsetId(); Table createdTable = client.getTable(table.getDbName(), table.getTableName()); @@ -271,7 +271,7 @@ public void testCreateTableDefaultValues() throws Exception { sd.setSerdeInfo(new SerDeInfo()); table.setSd(sd); - client.createTable(table); + client.createTable(table, null); Table createdTable = client.getTable(table.getDbName(), table.getTableName()); Assert.assertEquals("Comparing OwnerType", PrincipalType.USER, createdTable.getOwnerType()); @@ -344,7 +344,7 @@ public void testCreateTableDefaultLocationInSpecificDatabase() throws Exception 
sd.setSerdeInfo(new SerDeInfo()); table.setSd(sd); - client.createTable(table); + client.createTable(table, null); Table createdTable = client.getTable(table.getDbName(), table.getTableName()); Assert.assertEquals("Storage descriptor location", metaStore.getWarehouseRoot() + "/" + table.getDbName() + ".db/" + table.getTableName(), @@ -365,7 +365,7 @@ public void testCreateTableDefaultValuesView() throws Exception { sd.setSerdeInfo(new SerDeInfo()); table.setSd(sd); - client.createTable(table); + client.createTable(table, null); Table createdTable = client.getTable(table.getDbName(), table.getTableName()); // No location should be created for views @@ -378,7 +378,7 @@ public void testCreateTableNullDatabase() throws Exception { Table table = testTables[0]; table.setDbName(null); - client.createTable(table); + client.createTable(table, null); } @Test(expected = MetaException.class) @@ -386,7 +386,7 @@ public void testCreateTableNullTableName() throws Exception { Table table = testTables[0]; table.setTableName(null); - client.createTable(table); + client.createTable(table, null); } @Test(expected = InvalidObjectException.class) @@ -394,7 +394,7 @@ public void testCreateTableInvalidTableName() throws Exception { Table table = testTables[0]; table.setTableName("test_table;"); - client.createTable(table); + client.createTable(table, null); } @Test(expected = InvalidObjectException.class) @@ -402,7 +402,7 @@ public void testCreateTableEmptyName() throws Exception { Table table = testTables[0]; table.setTableName(""); - client.createTable(table); + client.createTable(table, null); } @Test(expected = MetaException.class) @@ -410,7 +410,7 @@ public void testCreateTableNullStorageDescriptor() throws Exception { Table table = testTables[0]; table.setSd(null); - client.createTable(table); + client.createTable(table, null); } private Table getNewTable() throws MetaException { @@ -424,7 +424,7 @@ private Table getNewTable() throws MetaException { public void testCreateTableInvalidStorageDescriptorNullColumns() throws Exception { Table table = getNewTable(); table.getSd().setCols(null); - client.createTable(table); + client.createTable(table, null); } @Test(expected = MetaException.class) @@ -432,7 +432,7 @@ public void testCreateTableInvalidStorageDescriptorNullSerdeInfo() throws Except Table table = getNewTable(); table.getSd().setSerdeInfo(null); - client.createTable(table); + client.createTable(table, null); } @Test(expected = MetaException.class) @@ -440,7 +440,7 @@ public void testCreateTableInvalidStorageDescriptorNullColumnType() throws Excep Table table = getNewTable(); table.getSd().getCols().get(0).setType(null); - client.createTable(table); + client.createTable(table, null); } @Test(expected = InvalidObjectException.class) @@ -448,7 +448,7 @@ public void testCreateTableInvalidStorageDescriptorInvalidColumnType() throws Ex Table table = getNewTable(); table.getSd().getCols().get(0).setType("xyz"); - client.createTable(table); + client.createTable(table, null); } @Test(expected = InvalidObjectException.class) @@ -456,7 +456,7 @@ public void testCreateTableNoSuchDatabase() throws Exception { Table table = testTables[0]; table.setDbName("no_such_database"); - client.createTable(table); + client.createTable(table, null); } @Test(expected = AlreadyExistsException.class) @@ -464,7 +464,7 @@ public void testCreateTableAlreadyExists() throws Exception { Table table = testTables[0]; table.unsetId(); - client.createTable(table); + client.createTable(table, null); } @Test(expected = 
NoSuchObjectException.class) @@ -532,7 +532,7 @@ public void testDropTableCaseInsensitive() throws Exception { table.unsetId(); // Test in mixed case - client.createTable(table); + client.createTable(table, null); client.dropTable("DeFaUlt", "TeST_tAbLE"); try { client.getTable(table.getDbName(), table.getTableName()); @@ -554,7 +554,7 @@ public void testDropTableDeleteDir() throws Exception { metaStore.isPathExists(new Path(table.getSd().getLocation()))); table.unsetId(); - client.createTable(table); + client.createTable(table, null); client.dropTable(table.getDbName(), table.getTableName(), false, false); Assert.assertTrue("Table path should be kept", @@ -1153,7 +1153,7 @@ public void tablesInOtherCatalogs() throws TException, URISyntaxException { .setRewriteEnabled(true) .addMaterializedViewReferencedTable(dbName + "." + tableNames[0]); } - client.createTable(builder.build(metaStore.getConf())); + client.createTable(builder.build(metaStore.getConf()), null); } // Add partitions for the partitioned table @@ -1288,7 +1288,7 @@ public void createTableInBogusCatalog() throws TException { .setTableName("doomed") .addCol("col1", ColumnType.STRING_TYPE_NAME) .addCol("col2", ColumnType.INT_TYPE_NAME) - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); } @Test(expected = NoSuchObjectException.class) @@ -1343,7 +1343,7 @@ public void moveTablesBetweenCatalogsOnAlter() throws TException { .setTableName(tableName) .addCol("col1", ColumnType.STRING_TYPE_NAME) .addCol("col2", ColumnType.INT_TYPE_NAME) - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); Table after = before.deepCopy(); after.setCatName(DEFAULT_CATALOG_NAME); client.alter_table(catName, dbName, tableName, after); diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesGetExists.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesGetExists.java index e885c0a..70ec0ec 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesGetExists.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesGetExists.java @@ -88,7 +88,7 @@ public void setUp() throws Exception { .setDbName(DEFAULT_DATABASE) .setTableName("test_table") .addCol("test_col", "int") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); testTables[1] = new TableBuilder() @@ -96,14 +96,14 @@ public void setUp() throws Exception { .setTableName("test_view") .addCol("test_col", "int") .setType("VIEW") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); testTables[2] = new TableBuilder() .setDbName(DEFAULT_DATABASE) .setTableName("test_table_to_find_1") .addCol("test_col", "int") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); testTables[3] = new TableBuilder() @@ -111,14 +111,14 @@ public void setUp() throws Exception { .setTableName("test_table_to_find_2") .addCol("test_col", "int") .setType("VIEW") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); testTables[4] = new TableBuilder() .setDbName(DEFAULT_DATABASE) .setTableName("test_table_hidden_1") .addCol("test_col", "int") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); new DatabaseBuilder().setName(OTHER_DATABASE).create(client, metaStore.getConf()); @@ -127,14 
+127,14 @@ public void setUp() throws Exception { .setDbName(OTHER_DATABASE) .setTableName("test_table") .addCol("test_col", "int") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); testTables[6] = new TableBuilder() .setDbName(OTHER_DATABASE) .setTableName("test_table_to_find_3") .addCol("test_col", "int") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); // Reload tables from the MetaStore for(int i=0; i < testTables.length; i++) { @@ -486,7 +486,7 @@ public void otherCatalog() throws TException { .setTableName(tableNames[i]) .addCol("col1_" + i, ColumnType.STRING_TYPE_NAME) .addCol("col2_" + i, ColumnType.INT_TYPE_NAME) - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); } Set tables = new HashSet<>(client.getTables(catName, dbName, "*e_in_other_*")); diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesList.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesList.java index 20c3af0..72a15eb 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesList.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesList.java @@ -86,7 +86,7 @@ public void setUp() throws Exception { .setOwner("Owner1") .setLastAccessTime(1000) .addTableParam("param1", "value1") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); testTables[1] = new TableBuilder() @@ -96,7 +96,7 @@ public void setUp() throws Exception { .setOwner("Owner1") .setLastAccessTime(2000) .addTableParam("param1", "value2") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); testTables[2] = new TableBuilder() @@ -106,7 +106,7 @@ public void setUp() throws Exception { .setOwner("Owner2") .setLastAccessTime(1000) .addTableParam("param1", "value2") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); testTables[3] = new TableBuilder() @@ -116,7 +116,7 @@ public void setUp() throws Exception { .setOwner("Owner3") .setLastAccessTime(3000) .addTableParam("param1", "value2") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); testTables[4] = new TableBuilder() @@ -126,14 +126,14 @@ public void setUp() throws Exception { .setOwner("Tester") .setLastAccessTime(2500) .addTableParam("param1", "value4") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); testTables[5] = new TableBuilder() .setDbName(DEFAULT_DATABASE) .setTableName("filter_test_table_5") .addCol("test_col", "int") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); new DatabaseBuilder().setName(OTHER_DATABASE).create(client, metaStore.getConf()); @@ -145,7 +145,7 @@ public void setUp() throws Exception { .setOwner("Owner1") .setLastAccessTime(1000) .addTableParam("param1", "value1") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); // Reload tables from the MetaStore for(int i=0; i < testTables.length; i++) { @@ -302,7 +302,7 @@ public void otherCatalogs() throws TException { .addCol("col1_" + i, ColumnType.STRING_TYPE_NAME) .addCol("col2_" + i, ColumnType.INT_TYPE_NAME); if (i == 0) builder.addTableParam("the_key", "the_value"); - builder.create(client, metaStore.getConf()); + builder.create(client, 
metaStore.getConf(), null); } String filter = hive_metastoreConstants.HIVE_FILTER_FIELD_PARAMS + "the_key=\"the_value\""; diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestUniqueConstraint.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestUniqueConstraint.java index 5842ec5..a9d275d 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestUniqueConstraint.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestUniqueConstraint.java @@ -103,7 +103,7 @@ public void setUp() throws Exception { .setTableName("test_table_1") .addCol("col1", "int") .addCol("col2", "varchar(32)") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); testTables[1] = new TableBuilder() @@ -111,7 +111,7 @@ public void setUp() throws Exception { .setTableName("test_table_2") .addCol("col1", "int") .addCol("col2", "varchar(32)") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); testTables[2] = new TableBuilder() @@ -119,7 +119,7 @@ public void setUp() throws Exception { .setTableName("test_table_3") .addCol("col1", "int") .addCol("col2", "varchar(32)") - .create(client, metaStore.getConf()); + .create(client, metaStore.getConf(), null); // Reload tables from the MetaStore for(int i=0; i < testTables.length; i++) { diff --git a/storage-api/src/java/org/apache/hadoop/hive/common/ValidReaderWriteIdList.java b/storage-api/src/java/org/apache/hadoop/hive/common/ValidReaderWriteIdList.java index bc8ac0d..982f156 100644 --- a/storage-api/src/java/org/apache/hadoop/hive/common/ValidReaderWriteIdList.java +++ b/storage-api/src/java/org/apache/hadoop/hive/common/ValidReaderWriteIdList.java @@ -260,5 +260,19 @@ public RangeResponse isWriteIdRangeAborted(long minWriteId, long maxWriteId) { public ValidReaderWriteIdList updateHighWatermark(long value) { return new ValidReaderWriteIdList(tableName, exceptions, abortedBits, value, minOpenWriteId); } + + public void commitWriteId(long writeId) { + if (writeId > highWatermark) { + highWatermark = writeId; + } else { + int pos = Arrays.binarySearch(exceptions, writeId); + if (pos >= 0) { + long[] newExceptions = new long[exceptions.length-1]; + System.arraycopy(exceptions, 0, newExceptions, 0, pos); + System.arraycopy(exceptions, pos+1, newExceptions, pos, exceptions.length-pos-1); + exceptions = newExceptions; + } + } + } } diff --git a/storage-api/src/java/org/apache/hadoop/hive/common/ValidTxnWriteIdList.java b/storage-api/src/java/org/apache/hadoop/hive/common/ValidTxnWriteIdList.java index cfe01fe..acda189 100644 --- a/storage-api/src/java/org/apache/hadoop/hive/common/ValidTxnWriteIdList.java +++ b/storage-api/src/java/org/apache/hadoop/hive/common/ValidTxnWriteIdList.java @@ -20,6 +20,7 @@ import java.util.HashMap; import java.util.Map; +import java.util.Set; /** * An implementation to store and manage list of ValidWriteIds for each tables read by current @@ -102,4 +103,8 @@ private String writeToString() { } return buf.toString(); } + + public Set<String> getTableNames() { + return tablesValidWriteIdList.keySet(); + } } diff --git a/storage-api/src/java/org/apache/hadoop/hive/common/ValidWriteIdList.java b/storage-api/src/java/org/apache/hadoop/hive/common/ValidWriteIdList.java index b3d6402..93de2df 100644 --- a/storage-api/src/java/org/apache/hadoop/hive/common/ValidWriteIdList.java +++ 
b/storage-api/src/java/org/apache/hadoop/hive/common/ValidWriteIdList.java @@ -118,4 +118,9 @@ * @return smallest Open write Id in this set, {@code null} if there is none. */ Long getMinOpenWriteId(); + + /** + * Mark the given writeId as committed. + */ + void commitWriteId(long writeId); } diff --git a/storage-api/src/java/org/apache/hive/common/util/TxnIdUtils.java b/storage-api/src/java/org/apache/hive/common/util/TxnIdUtils.java index 4b3cb7d..bd972d4 100644 --- a/storage-api/src/java/org/apache/hive/common/util/TxnIdUtils.java +++ b/storage-api/src/java/org/apache/hive/common/util/TxnIdUtils.java @@ -19,62 +19,69 @@ import org.apache.hadoop.hive.common.ValidWriteIdList; -import java.util.*; - public class TxnIdUtils { /** * Check if 2 ValidWriteIdLists are at an equivalent commit point. */ public static boolean checkEquivalentWriteIds(ValidWriteIdList a, ValidWriteIdList b) { + return compare(a, b) == 0; + } + + /*** Compare the freshness of two ValidWriteIdLists. * @param a * @param b * @return 0, if a and b are equivalent * 1, if a is more recent * -1, if b is more recent ***/ + public static int compare(ValidWriteIdList a, ValidWriteIdList b) { if (!a.getTableName().equalsIgnoreCase(b.getTableName())) { - return false; + return a.getTableName().toLowerCase().compareTo(b.getTableName().toLowerCase()); } - ValidWriteIdList newer = a; - ValidWriteIdList older = b; - if (a.getHighWatermark() < b.getHighWatermark()) { - newer = b; - older = a; + // The algorithm assumes invalidWriteIds are sorted and all values are less than or equal to hwm; here is how + // the algorithm works: + // 1. Compare the two invalidWriteIds lists element by element until one of them ends; a difference means the mismatched writeid is + // committed in one ValidWriteIdList but not the other, and the comparison ends + // 2. Every writeid from the last writeid in the shorter invalidWriteIds list up to its hwm should also be committed + // in the other ValidWriteIdList, otherwise the comparison ends + // 3. Every writeid from the lower hwm to the higher hwm should be invalid, otherwise the comparison ends + int minLen = Math.min(a.getInvalidWriteIds().length, b.getInvalidWriteIds().length); + for (int i=0;i<minLen;i++) { if (a.getInvalidWriteIds()[i] != b.getInvalidWriteIds()[i]) return a.getInvalidWriteIds()[i] > b.getInvalidWriteIds()[i]?1:-1; } - - return checkEquivalentCommittedIds( - older.getHighWatermark(), older.getInvalidWriteIds(), - newer.getHighWatermark(), newer.getInvalidWriteIds()); - } - - /** - * Check the min open ID/highwater mark/exceptions list to see if 2 ID lists are at the same commit point. - * This can also be used for ValidTxnList as well as ValidWriteIdList. - */ - private static boolean checkEquivalentCommittedIds( - long oldHWM, long[] oldInvalidIds, - long newHWM, long[] newInvalidIds) { - - // There should be no valid txns in newer list that are not also in older. - // - All values in oldInvalidIds should also be in newInvalidIds. - // - if oldHWM < newHWM, then all IDs between oldHWM .. newHWM should exist in newInvalidTxns. - // A Gap in the sequence means a committed txn in newer list (lists are not equivalent) - - if (newInvalidIds.length < oldInvalidIds.length) { - return false; + if (a.getInvalidWriteIds().length == b.getInvalidWriteIds().length) { return Long.signum(a.getHighWatermark() - b.getHighWatermark()); } - - // Check that the values in the older list are also in newer. Lists should already be sorted. 
- for (int idx = 0; idx < oldInvalidIds.length; ++idx) { if (oldInvalidIds[idx] != newInvalidIds[idx]) { return false; + if (a.getInvalidWriteIds().length == minLen) { if (a.getHighWatermark() != b.getInvalidWriteIds()[minLen] -1) { return Long.signum(a.getHighWatermark() - (b.getInvalidWriteIds()[minLen] -1)); } if (allInvalidFrom(b.getInvalidWriteIds(), minLen, b.getHighWatermark())) { return 0; } else { return -1; } } else { if (b.getHighWatermark() != a.getInvalidWriteIds()[minLen] -1) { return Long.signum((a.getInvalidWriteIds()[minLen] -1) - b.getHighWatermark()); } if (allInvalidFrom(a.getInvalidWriteIds(), minLen, a.getHighWatermark())) { return 0; } else { return 1; } } } - - // If older committed state is equivalent to newer state, then there should be no committed IDs - // between oldHWM and newHWM, and newInvalidIds should have exactly (newHWM - oldHWM) - // more entries than oldInvalidIds. - long oldNewListSizeDifference = newInvalidIds.length - oldInvalidIds.length; - long oldNewHWMDifference = newHWM - oldHWM; - if (oldNewHWMDifference != oldNewListSizeDifference) { - return false; + } + private static boolean allInvalidFrom(long[] invalidIds, int start, long hwm) { + for (int i=start+1;i