diff --git hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
index f4ee208..6016425 100644
--- hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
+++ hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
@@ -51,6 +51,7 @@
 import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.ShowLocksRequest;
 import org.apache.hadoop.hive.metastore.api.ShowLocksResponse;
 import org.apache.hadoop.hive.metastore.api.ShowLocksResponseElement;
 import org.apache.hadoop.hive.metastore.api.TxnAbortedException;
@@ -647,13 +648,16 @@ public void testHeartbeat() throws Exception {
     //todo: this should ideally check Transaction heartbeat as well, but heartbeat
     //timestamp is not reported yet
     //GetOpenTxnsInfoResponse txnresp = msClient.showTxns();
-    ShowLocksResponse response = msClient.showLocks();
+    ShowLocksRequest request = new ShowLocksRequest();
+    request.setDbname(dbName2);
+    request.setTablename(tblName2);
+    ShowLocksResponse response = msClient.showLocks(request);
     Assert.assertEquals("Wrong nubmer of locks: " + response, 1, response.getLocks().size());
     ShowLocksResponseElement lock = response.getLocks().get(0);
     long acquiredAt = lock.getAcquiredat();
     long heartbeatAt = lock.getLastheartbeat();
     txnBatch.heartbeat();
-    response = msClient.showLocks();
+    response = msClient.showLocks(request);
     Assert.assertEquals("Wrong number of locks2: " + response, 1, response.getLocks().size());
     lock = response.getLocks().get(0);
     Assert.assertEquals("Acquired timestamp didn't match", acquiredAt, lock.getAcquiredat());
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index 75fea5b..b82bf6f 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -2094,11 +2094,17 @@ public void unlock(long lockid)
   }
 
   @Override
+  @Deprecated
   public ShowLocksResponse showLocks() throws TException {
     return client.show_locks(new ShowLocksRequest());
   }
 
   @Override
+  public ShowLocksResponse showLocks(ShowLocksRequest request) throws TException {
+    return client.show_locks(request);
+  }
+
+  @Override
   public void heartbeat(long txnid, long lockid)
     throws NoSuchLockException, NoSuchTxnException, TxnAbortedException,
       TException {
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
index 3965475..fc0f5d3 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
@@ -74,6 +74,7 @@
 import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
 import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
 import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
+import org.apache.hadoop.hive.metastore.api.ShowLocksRequest;
 import org.apache.hadoop.hive.metastore.api.ShowLocksResponse;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.api.TableMeta;
@@ -1369,9 +1370,18 @@ void unlock(long lockid)
    * @return List of currently held and waiting locks.
    * @throws TException
    */
+  @Deprecated
   ShowLocksResponse showLocks() throws TException;
 
   /**
+   * Show all currently held and waiting locks.
+   * @param showLocksRequest SHOW LOCK request
+   * @return List of currently held and waiting locks.
+   * @throws TException
+   */
+  ShowLocksResponse showLocks(ShowLocksRequest showLocksRequest) throws TException;
+
+  /**
    * Send a heartbeat to indicate that the client holding these locks (if
    * any) and that opened this transaction (if one exists) is still alive.
    * The default timeout for transactions and locks is 300 seconds,
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
index 06cd4aa..ff98b82 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
@@ -1064,6 +1064,34 @@ public ShowLocksResponse showLocks(ShowLocksRequest rqst) throws MetaException {
       String s = "select hl_lock_ext_id, hl_txnid, hl_db, hl_table, hl_partition, hl_lock_state, " +
         "hl_lock_type, hl_last_heartbeat, hl_acquired_at, hl_user, hl_host, hl_lock_int_id," +
         "hl_blockedby_ext_id, hl_blockedby_int_id from HIVE_LOCKS";
+
+      // Some filters may have been specified in the SHOW LOCKS statement. Add them to the query.
+      String dbName = rqst.getDbname();
+      String tableName = rqst.getTablename();
+      String partName = rqst.getPartname();
+
+      StringBuilder filter = new StringBuilder();
+      if (dbName != null && !dbName.isEmpty()) {
+        filter.append("hl_db=").append(quoteString(dbName));
+      }
+      if (tableName != null && !tableName.isEmpty()) {
+        if (filter.length() > 0) {
+          filter.append(" and ");
+        }
+        filter.append("hl_table=").append(quoteString(tableName));
+      }
+      if (partName != null && !partName.isEmpty()) {
+        if (filter.length() > 0) {
+          filter.append(" and ");
+        }
+        filter.append("hl_partition=").append(quoteString(partName));
+      }
+      String whereClause = filter.toString();
+
+      if (!whereClause.isEmpty()) {
+        s = s + " where " + whereClause;
+      }
+
       LOG.debug("Doing to execute query <" + s + ">");
       ResultSet rs = stmt.executeQuery(s);
       while (rs.next()) {
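Note (illustration, not part of the patch): with the filters above, a statement such as SHOW LOCKS db1.t14 PARTITION (ds='today') would, assuming quoteString emits ordinary single-quoted SQL literals, extend the base query to roughly:

  select hl_lock_ext_id, hl_txnid, hl_db, hl_table, hl_partition, hl_lock_state,
         hl_lock_type, hl_last_heartbeat, hl_acquired_at, hl_user, hl_host, hl_lock_int_id,
         hl_blockedby_ext_id, hl_blockedby_int_id
  from HIVE_LOCKS
  where hl_db='db1' and hl_table='t14' and hl_partition='ds=today'

When no filter fields are set on the request, no where clause is appended and the behavior matches the old no-argument showLocks() path.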
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index c4d3bfb..6b7cb5f 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -84,6 +84,7 @@
 import org.apache.hadoop.hive.metastore.api.SerDeInfo;
 import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
 import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement;
+import org.apache.hadoop.hive.metastore.api.ShowLocksRequest;
 import org.apache.hadoop.hive.metastore.api.ShowLocksResponse;
 import org.apache.hadoop.hive.metastore.api.ShowLocksResponseElement;
 import org.apache.hadoop.hive.metastore.api.SkewedInfo;
@@ -2618,7 +2619,29 @@ private int showLocksNewFormat(ShowLocksDesc showLocks, HiveLockManager lm)
     }
     lockMgr = (DbLockManager)lm;
 
-    ShowLocksResponse rsp = lockMgr.getLocks();
+    String dbName = showLocks.getDbName();
+    String tblName = showLocks.getTableName();
+    Map<String, String> partSpec = showLocks.getPartSpec();
+    if (dbName == null && tblName != null) {
+      dbName = SessionState.get().getCurrentDatabase();
+    }
+
+    ShowLocksRequest rqst = new ShowLocksRequest();
+    rqst.setDbname(dbName);
+    rqst.setTablename(tblName);
+    if (partSpec != null) {
+      List<String> keyList = new ArrayList<String>();
+      List<String> valList = new ArrayList<String>();
+      for (String partKey : partSpec.keySet()) {
+        String partVal = partSpec.remove(partKey);
+        keyList.add(partKey);
+        valList.add(partVal);
+      }
+      String partName = FileUtils.makePartName(keyList, valList);
+      rqst.setPartname(partName);
+    }
+
+    ShowLocksResponse rsp = lockMgr.getLocks(rqst);
 
     // write the results in the file
     DataOutputStream os = getOutputStream(showLocks.getResFile());
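Note (illustration, not part of the patch): the partition filter works because FileUtils.makePartName renders a partition spec in the same key=value form that the metastore stores in HIVE_LOCKS.hl_partition, which is what the filtered query above compares against. A minimal sketch with hypothetical values (the class name is invented for the example):

import java.util.Arrays;
import org.apache.hadoop.hive.common.FileUtils;

public class PartNameExample {
  public static void main(String[] args) {
    // For PARTITION (ds='today') the name sent to the metastore should be "ds=today".
    String partName = FileUtils.makePartName(Arrays.asList("ds"), Arrays.asList("today"));
    System.out.println(partName);
  }
}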
driver.run("drop table if exists db1.t14"); + checkCmdOnDriver(cpr); + cpr = driver.run("drop table if exists db2.t14"); // Note that db1 and db2 have a table with common name + checkCmdOnDriver(cpr); + cpr = driver.run("drop table if exists db2.t15"); + checkCmdOnDriver(cpr); + cpr = driver.run("drop table if exists db2.t16"); + checkCmdOnDriver(cpr); + cpr = driver.run("drop database if exists db1"); + checkCmdOnDriver(cpr); + cpr = driver.run("drop database if exists db2"); + checkCmdOnDriver(cpr); + + cpr = driver.run("create database if not exists db1"); + checkCmdOnDriver(cpr); + cpr = driver.run("create database if not exists db2"); + checkCmdOnDriver(cpr); + cpr = driver.run("create table if not exists db1.t14 (a int, b int) partitioned by (ds string) clustered by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); + checkCmdOnDriver(cpr); + cpr = driver.run("create table if not exists db2.t14 (a int, b int) clustered by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); + checkCmdOnDriver(cpr); + cpr = driver.run("create table if not exists db2.t15 (a int, b int) clustered by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); + checkCmdOnDriver(cpr); + cpr = driver.run("create table if not exists db2.t16 (a int, b int) clustered by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); + checkCmdOnDriver(cpr); + + // Acquire different locks at different levels + + cpr = driver.compileAndRespond("insert into table db1.t14 partition (ds='today') values (1, 2)"); + checkCmdOnDriver(cpr); + txnMgr.acquireLocks(driver.getPlan(), ctx, "Tom"); + + HiveTxnManager txnMgr2 = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf); + cpr = driver.compileAndRespond("insert into table db1.t14 partition (ds='tomorrow') values (3, 4)"); + checkCmdOnDriver(cpr); + txnMgr2.acquireLocks(driver.getPlan(), ctx, "Jerry"); + + HiveTxnManager txnMgr3 = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf); + cpr = driver.compileAndRespond("select * from db2.t15"); + checkCmdOnDriver(cpr); + txnMgr3.acquireLocks(driver.getPlan(), ctx, "Donald"); + + HiveTxnManager txnMgr4 = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf); + cpr = driver.compileAndRespond("select * from db2.t16"); + checkCmdOnDriver(cpr); + txnMgr4.acquireLocks(driver.getPlan(), ctx, "Hillary"); + + HiveTxnManager txnMgr5 = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf); + cpr = driver.compileAndRespond("select * from db2.t14"); + checkCmdOnDriver(cpr); + txnMgr5.acquireLocks(driver.getPlan(), ctx, "Obama"); + + // Simulate SHOW LOCKS with different filter options + + // SHOW LOCKS (no filter) + List locks = getLocks(); + Assert.assertEquals("Unexpected lock count", 7, locks.size()); + // locks.get(0) is a lock on tmp table in default database used for insert + checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db1", "t14", "ds=today", locks.get(1)); + // locks.get(2) is a lock on tmp table in default database used for insert + checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db1", "t14", "ds=tomorrow", locks.get(3)); + checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db2", "t15", null, locks.get(4)); + checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db2", "t16", null, locks.get(5)); + checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db2", "t14", null, locks.get(6)); + + // SHOW LOCKS db2 + locks = getLocksWithFilterOptions(txnMgr3, "db2", null, null); + Assert.assertEquals("Unexpected lock 
count", 3, locks.size()); + checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db2", "t15", null, locks.get(0)); + checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db2", "t16", null, locks.get(1)); + checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db2", "t14", null, locks.get(2)); + + // SHOW LOCKS t14 + cpr = driver.run("use db1"); + checkCmdOnDriver(cpr); + locks = getLocksWithFilterOptions(txnMgr, null, "t14", null); + Assert.assertEquals("Unexpected lock count", 2, locks.size()); + checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db1", "t14", "ds=today", locks.get(0)); + checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db1", "t14", "ds=tomorrow", locks.get(1)); + // Note that it shouldn't show t14 from db2 + + // SHOW LOCKS t14 PARTITION ds='today' + Map partSpec = new HashMap(); + partSpec.put("ds", "today"); + locks = getLocksWithFilterOptions(txnMgr, null, "t14", partSpec); + Assert.assertEquals("Unexpected lock count", 1, locks.size()); + checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db1", "t14", "ds=today", locks.get(0)); + + // SHOW LOCKS t15 + cpr = driver.run("use db2"); + checkCmdOnDriver(cpr); + locks = getLocksWithFilterOptions(txnMgr3, null, "t15", null); + Assert.assertEquals("Unexpected lock count", 1, locks.size()); + checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db2", "t15", null, locks.get(0)); + } + private void checkCmdOnDriver(CommandProcessorResponse cpr) { Assert.assertTrue(cpr.toString(), cpr.getResponseCode() == 0); } @@ -1191,4 +1294,27 @@ public void testWriteSetTracking11() throws Exception { Assert.assertEquals("COMPLETED_TXN_COMPONENTS mismatch: " + TxnDbUtil.queryToString("select * from COMPLETED_TXN_COMPONENTS"), 4, TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_TXN_COMPONENTS where ctc_table='tab1' and ctc_partition is not null")); } + + private List getLocksWithFilterOptions(HiveTxnManager txnMgr, + String dbName, String tblName, Map partSpec) throws Exception { + if (dbName == null && tblName != null) { + dbName = SessionState.get().getCurrentDatabase(); + } + ShowLocksRequest rqst = new ShowLocksRequest(); + rqst.setDbname(dbName); + rqst.setTablename(tblName); + if (partSpec != null) { + List keyList = new ArrayList(); + List valList = new ArrayList(); + for (String partKey : partSpec.keySet()) { + String partVal = partSpec.remove(partKey); + keyList.add(partKey); + valList.add(partVal); + } + String partName = FileUtils.makePartName(keyList, valList); + rqst.setPartname(partName); + } + ShowLocksResponse rsp = ((DbLockManager)txnMgr.getLockManager()).getLocks(rqst); + return rsp.getLocks(); + } } diff --git ql/src/test/queries/clientpositive/dbtxnmgr_showlocks.q ql/src/test/queries/clientpositive/dbtxnmgr_showlocks.q index 30b26f4..da8e448 100644 --- ql/src/test/queries/clientpositive/dbtxnmgr_showlocks.q +++ ql/src/test/queries/clientpositive/dbtxnmgr_showlocks.q @@ -8,3 +8,17 @@ show locks extended; show locks default; show transactions; + +create table partitioned_acid_table (a int, b int) partitioned by (p string) clustered by (a) into 2 buckets stored as orc tblproperties ('transactional'='true'); + +show locks database default; + +show locks partitioned_acid_table; + +show locks partitioned_acid_table extended; + +show locks partitioned_acid_table partition (p='abc'); + +show locks partitioned_acid_table partition (p='abc') extended; + +drop table partitioned_acid_table; diff --git ql/src/test/results/clientpositive/dbtxnmgr_showlocks.q.out ql/src/test/results/clientpositive/dbtxnmgr_showlocks.q.out 
index 46d8ea1..c1adeb3 100644
--- ql/src/test/results/clientpositive/dbtxnmgr_showlocks.q.out
+++ ql/src/test/results/clientpositive/dbtxnmgr_showlocks.q.out
@@ -2,19 +2,60 @@ PREHOOK: query: show locks
 PREHOOK: type: SHOWLOCKS
 POSTHOOK: query: show locks
 POSTHOOK: type: SHOWLOCKS
-Lock ID Database Table Partition State Blocked By Type Transaction ID Last Hearbeat Acquired At User
+Lock ID Database Table Partition State Blocked By Type Transaction ID Last Hearbeat Acquired At User Hostname
 PREHOOK: query: show locks extended
 PREHOOK: type: SHOWLOCKS
 POSTHOOK: query: show locks extended
 POSTHOOK: type: SHOWLOCKS
-Lock ID Database Table Partition State Blocked By Type Transaction ID Last Hearbeat Acquired At User
+Lock ID Database Table Partition State Blocked By Type Transaction ID Last Hearbeat Acquired At User Hostname
 PREHOOK: query: show locks default
 PREHOOK: type: SHOWLOCKS
 POSTHOOK: query: show locks default
 POSTHOOK: type: SHOWLOCKS
-Lock ID Database Table Partition State Blocked By Type Transaction ID Last Hearbeat Acquired At User
+Lock ID Database Table Partition State Blocked By Type Transaction ID Last Hearbeat Acquired At User Hostname
 PREHOOK: query: show transactions
 PREHOOK: type: SHOW TRANSACTIONS
 POSTHOOK: query: show transactions
 POSTHOOK: type: SHOW TRANSACTIONS
 Transaction ID Transaction State User Hostname
+PREHOOK: query: create table partitioned_acid_table (a int, b int) partitioned by (p string) clustered by (a) into 2 buckets stored as orc tblproperties ('transactional'='true')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@partitioned_acid_table
+POSTHOOK: query: create table partitioned_acid_table (a int, b int) partitioned by (p string) clustered by (a) into 2 buckets stored as orc tblproperties ('transactional'='true')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@partitioned_acid_table
+PREHOOK: query: show locks database default
+PREHOOK: type: SHOWLOCKS
+POSTHOOK: query: show locks database default
+POSTHOOK: type: SHOWLOCKS
+Lock ID Database Table Partition State Blocked By Type Transaction ID Last Hearbeat Acquired At User Hostname
+PREHOOK: query: show locks partitioned_acid_table
+PREHOOK: type: SHOWLOCKS
+POSTHOOK: query: show locks partitioned_acid_table
+POSTHOOK: type: SHOWLOCKS
+Lock ID Database Table Partition State Blocked By Type Transaction ID Last Hearbeat Acquired At User Hostname
+PREHOOK: query: show locks partitioned_acid_table extended
+PREHOOK: type: SHOWLOCKS
+POSTHOOK: query: show locks partitioned_acid_table extended
+POSTHOOK: type: SHOWLOCKS
+Lock ID Database Table Partition State Blocked By Type Transaction ID Last Hearbeat Acquired At User Hostname
+PREHOOK: query: show locks partitioned_acid_table partition (p='abc')
+PREHOOK: type: SHOWLOCKS
+POSTHOOK: query: show locks partitioned_acid_table partition (p='abc')
+POSTHOOK: type: SHOWLOCKS
+Lock ID Database Table Partition State Blocked By Type Transaction ID Last Hearbeat Acquired At User Hostname
+PREHOOK: query: show locks partitioned_acid_table partition (p='abc') extended
+PREHOOK: type: SHOWLOCKS
+POSTHOOK: query: show locks partitioned_acid_table partition (p='abc') extended
+POSTHOOK: type: SHOWLOCKS
+Lock ID Database Table Partition State Blocked By Type Transaction ID Last Hearbeat Acquired At User Hostname
+PREHOOK: query: drop table partitioned_acid_table
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@partitioned_acid_table
+PREHOOK: Output: default@partitioned_acid_table
+POSTHOOK: query: drop table partitioned_acid_table
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@partitioned_acid_table
+POSTHOOK: Output: default@partitioned_acid_table
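Note (illustration, not part of the patch): a sketch of how a caller could use the new IMetaStoreClient.showLocks(ShowLocksRequest) overload once this change is in. The helper class, the msClient parameter, and the database/table names are hypothetical; only the request/response API comes from the patch.

import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.ShowLocksRequest;
import org.apache.hadoop.hive.metastore.api.ShowLocksResponse;
import org.apache.hadoop.hive.metastore.api.ShowLocksResponseElement;
import org.apache.thrift.TException;

public class ShowLocksExample {
  // Print the locks currently held or requested on db2.t14 (hypothetical names).
  static void printLocks(IMetaStoreClient msClient) throws TException {
    ShowLocksRequest request = new ShowLocksRequest();
    request.setDbname("db2");
    request.setTablename("t14");
    ShowLocksResponse response = msClient.showLocks(request);
    for (ShowLocksResponseElement lock : response.getLocks()) {
      System.out.println(lock.getLockid() + "\t" + lock.getDbname() + "."
          + lock.getTablename() + "\t" + lock.getState() + "\t" + lock.getType());
    }
  }
}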