diff --git common/src/java/org/apache/hadoop/hive/common/metrics/common/MetricsConstant.java common/src/java/org/apache/hadoop/hive/common/metrics/common/MetricsConstant.java index af0f87bac3..10ce9db02f 100644 --- common/src/java/org/apache/hadoop/hive/common/metrics/common/MetricsConstant.java +++ common/src/java/org/apache/hadoop/hive/common/metrics/common/MetricsConstant.java @@ -34,9 +34,9 @@ public static final String CUMULATIVE_CONNECTION_COUNT = "cumulative_connection_count"; public static final String METASTORE_HIVE_LOCKS = "metastore_hive_locks"; - public static final String ZOOKEEPER_HIVE_SHAREDLOCKS = "zookeeper_hive_sharedlocks"; - public static final String ZOOKEEPER_HIVE_EXCLUSIVELOCKS = "zookeeper_hive_exclusivelocks"; - public static final String ZOOKEEPER_HIVE_SEMISHAREDLOCKS = "zookeeper_hive_semisharedlocks"; + public static final String ZOOKEEPER_HIVE_EXCLUSIVE_LOCKS = "zookeeper_hive_exclusive_locks"; + public static final String ZOOKEEPER_HIVE_SHARED_WRITE_LOCKS = "zookeeper_hive_shared_write_locks"; + public static final String ZOOKEEPER_HIVE_SHARED_READ_LOCKS = "zookeeper_hive_shared_read_locks"; public static final String EXEC_ASYNC_QUEUE_SIZE = "exec_async_queue_size"; public static final String EXEC_ASYNC_POOL_SIZE = "exec_async_pool_size"; diff --git common/src/java/org/apache/hadoop/hive/conf/HiveConf.java common/src/java/org/apache/hadoop/hive/conf/HiveConf.java index 73f185a1f3..20a0031d05 100644 --- common/src/java/org/apache/hadoop/hive/conf/HiveConf.java +++ common/src/java/org/apache/hadoop/hive/conf/HiveConf.java @@ -2703,7 +2703,7 @@ private static void populateLlapDaemonVarsSet(Set llapDaemonVarsSetLocal "no transactions."), HIVE_TXN_STRICT_LOCKING_MODE("hive.txn.strict.locking.mode", true, "In strict mode non-ACID\n" + "resources use standard R/W lock semantics, e.g. 
INSERT will acquire exclusive lock.\n" + - "In nonstrict mode, for non-ACID resources, INSERT will only acquire shared lock, which\n" + + "In nonstrict mode, for non-ACID resources, INSERT will only acquire shared_read lock, which\n" + "allows two concurrent writes to the same partition but still lets lock manager prevent\n" + "DROP TABLE etc. when the table is being written to"), HIVE_TXN_NONACID_READ_LOCKS("hive.txn.nonacid.read.locks", true, diff --git hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java index e249b7775e..dc8c6636bc 100644 --- hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java +++ hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java @@ -1014,7 +1014,7 @@ private static LockRequest createLockRequest(final HiveEndPoint hiveEndPoint, LockComponentBuilder lockCompBuilder = new LockComponentBuilder() .setDbName(hiveEndPoint.database) .setTableName(hiveEndPoint.table) - .setShared() + .setSharedRead() .setOperationType(DataOperationType.INSERT); if (partNameForLock!=null && !partNameForLock.isEmpty() ) { lockCompBuilder.setPartitionName(partNameForLock); diff --git hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/lock/Lock.java hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/lock/Lock.java index 52eb6133e7..21e462e1a9 100644 --- hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/lock/Lock.java +++ hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/lock/Lock.java @@ -184,9 +184,9 @@ private LockRequest buildLockRequest(Long transactionId) { //todo: DataOperationType is set conservatively here, we'd really want to distinguish update/delete //and insert/select and if resource (that is written to) is ACID or not if (sinks.contains(table)) { - 
componentBuilder.setSemiShared().setOperationType(DataOperationType.UPDATE).setIsTransactional(true); + componentBuilder.setSharedWrite().setOperationType(DataOperationType.UPDATE).setIsTransactional(true); } else { - componentBuilder.setShared().setOperationType(DataOperationType.INSERT).setIsTransactional(true); + componentBuilder.setSharedRead().setOperationType(DataOperationType.INSERT).setIsTransactional(true); } LockComponent component = componentBuilder.build(); requestBuilder.addLockComponent(component); @@ -252,13 +252,13 @@ static String join(Iterable values) { String user; HiveConf hiveConf; - /** Adds a table for which a shared lock will be requested. */ + /** Adds a table for which a shared_read lock will be requested. */ public Options addSourceTable(String databaseName, String tableName) { addTable(databaseName, tableName, sources); return this; } - /** Adds a table for which a semi-shared lock will be requested. */ + /** Adds a table for which a shared_write lock will be requested. 
*/ public Options addSinkTable(String databaseName, String tableName) { addTable(databaseName, tableName, sinks); return this; diff --git metastore/scripts/upgrade/hive/hive-schema-4.0.0.hive.sql metastore/scripts/upgrade/hive/hive-schema-4.0.0.hive.sql index 03540bba4d..e56ce4cc5f 100644 --- metastore/scripts/upgrade/hive/hive-schema-4.0.0.hive.sql +++ metastore/scripts/upgrade/hive/hive-schema-4.0.0.hive.sql @@ -1418,7 +1418,7 @@ SELECT DISTINCT HL.`HL_TABLE`, HL.`HL_PARTITION`, CASE WHEN HL.`HL_LOCK_STATE` = 'a' THEN 'acquired' WHEN HL.`HL_LOCK_STATE` = 'w' THEN 'waiting' END AS LOCK_STATE, - CASE WHEN HL.`HL_LOCK_TYPE` = 'e' THEN 'exclusive' WHEN HL.`HL_LOCK_TYPE` = 'r' THEN 'shared' WHEN HL.`HL_LOCK_TYPE` = 'w' THEN 'semi-shared' END AS LOCK_TYPE, + CASE WHEN HL.`HL_LOCK_TYPE` = 'e' THEN 'exclusive' WHEN HL.`HL_LOCK_TYPE` = 'r' THEN 'shared_read' WHEN HL.`HL_LOCK_TYPE` = 'w' THEN 'shared_write' END AS LOCK_TYPE, FROM_UNIXTIME(HL.`HL_LAST_HEARTBEAT`), FROM_UNIXTIME(HL.`HL_ACQUIRED_AT`), HL.`HL_USER`, diff --git metastore/scripts/upgrade/hive/upgrade-3.1.0-to-4.0.0.hive.sql metastore/scripts/upgrade/hive/upgrade-3.1.0-to-4.0.0.hive.sql index fa518747de..ab67583302 100644 --- metastore/scripts/upgrade/hive/upgrade-3.1.0-to-4.0.0.hive.sql +++ metastore/scripts/upgrade/hive/upgrade-3.1.0-to-4.0.0.hive.sql @@ -479,7 +479,7 @@ SELECT DISTINCT HL.`HL_TABLE`, HL.`HL_PARTITION`, CASE WHEN HL.`HL_LOCK_STATE` = 'a' THEN 'acquired' WHEN HL.`HL_LOCK_STATE` = 'w' THEN 'waiting' END AS LOCK_STATE, - CASE WHEN HL.`HL_LOCK_TYPE` = 'e' THEN 'exclusive' WHEN HL.`HL_LOCK_TYPE` = 'r' THEN 'shared' WHEN HL.`HL_LOCK_TYPE` = 'w' THEN 'semi-shared' END AS LOCK_TYPE, + CASE WHEN HL.`HL_LOCK_TYPE` = 'e' THEN 'exclusive' WHEN HL.`HL_LOCK_TYPE` = 'r' THEN 'shared_read' WHEN HL.`HL_LOCK_TYPE` = 'w' THEN 'shared_write' END AS LOCK_TYPE, FROM_UNIXTIME(HL.`HL_LAST_HEARTBEAT`), FROM_UNIXTIME(HL.`HL_ACQUIRED_AT`), HL.`HL_USER`, diff --git 
ql/src/java/org/apache/hadoop/hive/ql/ValidTxnManager.java ql/src/java/org/apache/hadoop/hive/ql/ValidTxnManager.java index 4885e437aa..bb453f6df9 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ValidTxnManager.java +++ ql/src/java/org/apache/hadoop/hive/ql/ValidTxnManager.java @@ -66,7 +66,7 @@ /** * Checks whether txn list has been invalidated while planning the query. - * This would happen if query requires exclusive/semi-shared lock, and there has been a committed transaction + * This would happen if query requires exclusive/shared_write lock, and there has been a committed transaction * on the table over which the lock is required. */ boolean isValidTxnListState() throws LockException { @@ -83,7 +83,7 @@ boolean isValidTxnListState() throws LockException { // 2) Get locks that are relevant: // - Exclusive for INSERT OVERWRITE. - // - Semi-shared for UPDATE/DELETE. + // - Shared_write for UPDATE/DELETE. Set nonSharedLockedTables = getNonSharedLockedTables(); if (nonSharedLockedTables == null) { return true; // Nothing to check @@ -117,7 +117,7 @@ boolean isValidTxnListState() throws LockException { } else { // The lock has a single components, e.g., SimpleHiveLock or ZooKeeperHiveLock. 
// Pos 0 of lock paths array contains dbname, pos 1 contains tblname - if ((lock.getHiveLockMode() == HiveLockMode.EXCLUSIVE || lock.getHiveLockMode() == HiveLockMode.SEMI_SHARED) && + if ((lock.getHiveLockMode() == HiveLockMode.EXCLUSIVE || lock.getHiveLockMode() == HiveLockMode.SHARED_WRITE) && lock.getHiveLockObject().getPaths().length == 2) { nonSharedLockedTables.add( TableName.getDbTable(lock.getHiveLockObject().getPaths()[0], lock.getHiveLockObject().getPaths()[1])); diff --git ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java index 17e6cdf162..4461e9cc41 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java @@ -2901,7 +2901,7 @@ private static boolean isLockableTable(Table t) { boolean skipReadLock = !conf.getBoolVar(ConfVars.HIVE_TXN_READ_LOCKS); boolean skipNonAcidReadLock = !conf.getBoolVar(ConfVars.HIVE_TXN_NONACID_READ_LOCKS); - // For each source to read, get a shared lock + // For each source to read, get a shared_read lock for (ReadEntity input : inputs) { if (input.isDummy() || !input.needsLock() @@ -2913,7 +2913,7 @@ private static boolean isLockableTable(Table t) { continue; } LockComponentBuilder compBuilder = new LockComponentBuilder(); - compBuilder.setShared(); + compBuilder.setSharedRead(); compBuilder.setOperationType(DataOperationType.SELECT); Table t = null; @@ -2955,7 +2955,7 @@ private static boolean isLockableTable(Table t) { // For each source to write to, get the appropriate lock type. If it's // an OVERWRITE, we need to get an exclusive lock. If it's an insert (no // overwrite) than we need a shared. If it's update or delete then we - // need a SEMI-SHARED. + // need a SHARED_WRITE. 
for (WriteEntity output : outputs) { LOG.debug("output is null " + (output == null)); if (output.getType() == Entity.Type.DFS_DIR || output.getType() == Entity.Type.LOCAL_DIR || !AcidUtils @@ -3002,7 +3002,7 @@ Seems much cleaner if each stmt is identified as a particular HiveOperation (whi if (conf.getBoolVar(HiveConf.ConfVars.TXN_OVERWRITE_X_LOCK)) { compBuilder.setExclusive(); } else { - compBuilder.setSemiShared(); + compBuilder.setSharedWrite(); } compBuilder.setOperationType(DataOperationType.UPDATE); } else { @@ -3013,7 +3013,7 @@ Seems much cleaner if each stmt is identified as a particular HiveOperation (whi case INSERT: assert t != null; if (AcidUtils.isTransactionalTable(t)) { - compBuilder.setShared(); + compBuilder.setSharedRead(); } else if (MetaStoreUtils.isNonNativeTable(t.getTTable())) { final HiveStorageHandler storageHandler = Preconditions.checkNotNull(t.getStorageHandler(), "Thought all the non native tables have an instance of storage handler"); @@ -3028,13 +3028,13 @@ Seems much cleaner if each stmt is identified as a particular HiveOperation (whi if (conf.getBoolVar(HiveConf.ConfVars.HIVE_TXN_STRICT_LOCKING_MODE)) { compBuilder.setExclusive(); } else { // this is backward compatible for non-ACID resources, w/o ACID semantics - compBuilder.setShared(); + compBuilder.setSharedRead(); } } compBuilder.setOperationType(DataOperationType.INSERT); break; case DDL_SHARED: - compBuilder.setShared(); + compBuilder.setSharedRead(); if (!output.isTxnAnalyze()) { // Analyze needs txn components to be present, otherwise an aborted analyze write ID // might be rolled under the watermark by compactor while stats written by it are @@ -3044,11 +3044,11 @@ Seems much cleaner if each stmt is identified as a particular HiveOperation (whi break; case UPDATE: - compBuilder.setSemiShared(); + compBuilder.setSharedWrite(); compBuilder.setOperationType(DataOperationType.UPDATE); break; case DELETE: - compBuilder.setSemiShared(); + compBuilder.setSharedWrite(); 
compBuilder.setOperationType(DataOperationType.DELETE); break; diff --git ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java index 7820013ab0..8e7a2873bc 100644 --- ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java +++ ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java @@ -25,7 +25,6 @@ import org.slf4j.LoggerFactory; import org.apache.hadoop.hive.common.ValidTxnList; import org.apache.hadoop.hive.common.ValidReadTxnList; -import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.ql.Context; @@ -173,14 +172,14 @@ public void acquireLocks(QueryPlan plan, Context ctx, String username, DriverSta LOG.debug("Adding " + input.getName() + " to list of lock inputs"); if (input.getType() == ReadEntity.Type.DATABASE) { lockObjects.addAll(getLockObjects(plan, input.getDatabase(), null, - null, HiveLockMode.SHARED)); + null, HiveLockMode.SHARED_READ)); } else if (input.getType() == ReadEntity.Type.TABLE) { lockObjects.addAll(getLockObjects(plan, null, input.getTable(), null, - HiveLockMode.SHARED)); + HiveLockMode.SHARED_READ)); } else { lockObjects.addAll(getLockObjects(plan, null, null, input.getPartition(), - HiveLockMode.SHARED)); + HiveLockMode.SHARED_READ)); } } @@ -201,7 +200,7 @@ public void acquireLocks(QueryPlan plan, Context ctx, String username, DriverSta // In case of dynamic queries, it is possible to have incomplete dummy partitions else if (output.getTyp() == WriteEntity.Type.DUMMYPARTITION) { lockObj = getLockObjects(plan, null, null, output.getPartition(), - HiveLockMode.SHARED); + HiveLockMode.SHARED_READ); } if(lockObj != null) { @@ -332,7 +331,7 @@ static void dedupLockObjects(List lockObjects) { } private HiveLockMode getWriteEntityLockMode (WriteEntity we) { - HiveLockMode lockMode = we.isComplete() ? 
HiveLockMode.EXCLUSIVE : HiveLockMode.SHARED; + HiveLockMode lockMode = we.isComplete() ? HiveLockMode.EXCLUSIVE : HiveLockMode.SHARED_READ; //but the writeEntity is complete in DDL operations, instead DDL sets the writeType, so //we use it to determine its lockMode, and first we check if the writeType was set WriteEntity.WriteType writeType = we.getWriteType(); @@ -343,7 +342,7 @@ private HiveLockMode getWriteEntityLockMode (WriteEntity we) { case DDL_EXCLUSIVE: return HiveLockMode.EXCLUSIVE; case DDL_SHARED: - return HiveLockMode.SHARED; + return HiveLockMode.SHARED_READ; case DDL_NO_LOCK: return null; default: //other writeTypes related to DMLs @@ -372,7 +371,7 @@ private HiveLockMode getWriteEntityLockMode (WriteEntity we) { if (t != null) { locks.add(new HiveLockObj(new HiveLockObject(t, lockData), mode)); - mode = HiveLockMode.SHARED; + mode = HiveLockMode.SHARED_READ; locks.add(new HiveLockObj(new HiveLockObject(t.getDbName(), lockData), mode)); return locks; } @@ -383,7 +382,7 @@ private HiveLockMode getWriteEntityLockMode (WriteEntity we) { } // All the parents are locked in shared mode - mode = HiveLockMode.SHARED; + mode = HiveLockMode.SHARED_READ; // For dummy partitions, only partition name is needed String name = p.getName(); diff --git ql/src/java/org/apache/hadoop/hive/ql/lockmgr/EmbeddedLockManager.java ql/src/java/org/apache/hadoop/hive/ql/lockmgr/EmbeddedLockManager.java index 177cec749a..a50c5e716d 100644 --- ql/src/java/org/apache/hadoop/hive/ql/lockmgr/EmbeddedLockManager.java +++ ql/src/java/org/apache/hadoop/hive/ql/lockmgr/EmbeddedLockManager.java @@ -436,7 +436,7 @@ private void getLocks(String[] paths, boolean verify, boolean fetchData, List children = curatorFramework.getChildren().forPath(lastName); String exLock = getLockName(lastName, HiveLockMode.EXCLUSIVE); - String shLock = getLockName(lastName, HiveLockMode.SHARED); + String shLock = getLockName(lastName, HiveLockMode.SHARED_READ); for (String child : children) { child = lastName 
+ "/" + child; @@ -454,13 +454,13 @@ private ZooKeeperHiveLock lockPrimitive(HiveLockObject key, try { switch(mode) { case EXCLUSIVE: - metrics.incrementCounter(MetricsConstant.ZOOKEEPER_HIVE_EXCLUSIVELOCKS); + metrics.incrementCounter(MetricsConstant.ZOOKEEPER_HIVE_EXCLUSIVE_LOCKS); break; - case SEMI_SHARED: - metrics.incrementCounter(MetricsConstant.ZOOKEEPER_HIVE_SEMISHAREDLOCKS); + case SHARED_WRITE: + metrics.incrementCounter(MetricsConstant.ZOOKEEPER_HIVE_SHARED_WRITE_LOCKS); break; default: - metrics.incrementCounter(MetricsConstant.ZOOKEEPER_HIVE_SHAREDLOCKS); + metrics.incrementCounter(MetricsConstant.ZOOKEEPER_HIVE_SHARED_READ_LOCKS); break; } @@ -534,13 +534,13 @@ static void unlockPrimitive(HiveLock hiveLock, String parent, CuratorFramework c try { switch(lMode) { case EXCLUSIVE: - metrics.decrementCounter(MetricsConstant.ZOOKEEPER_HIVE_EXCLUSIVELOCKS); + metrics.decrementCounter(MetricsConstant.ZOOKEEPER_HIVE_EXCLUSIVE_LOCKS); break; - case SEMI_SHARED: - metrics.decrementCounter(MetricsConstant.ZOOKEEPER_HIVE_SEMISHAREDLOCKS); + case SHARED_WRITE: + metrics.decrementCounter(MetricsConstant.ZOOKEEPER_HIVE_SHARED_WRITE_LOCKS); break; default: - metrics.decrementCounter(MetricsConstant.ZOOKEEPER_HIVE_SHAREDLOCKS); + metrics.decrementCounter(MetricsConstant.ZOOKEEPER_HIVE_SHARED_READ_LOCKS); break; } } catch (Exception e) { @@ -813,7 +813,7 @@ private static HiveLockMode getLockMode(String path) { Matcher exMatcher = exMode.matcher(path); if (shMatcher.matches()) { - return HiveLockMode.SHARED; + return HiveLockMode.SHARED_READ; } if (exMatcher.matches()) { diff --git ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java index 72f095d264..21d0dee11c 100644 --- ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java +++ ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java @@ -88,6 +88,7 @@ import static junit.framework.Assert.assertNull; import 
static junit.framework.Assert.assertTrue; import static junit.framework.Assert.fail; +import static org.apache.hadoop.hive.metastore.txn.LockTypeEntity.EXCLUSIVE; /** * Tests for TxnHandler. @@ -1394,7 +1395,7 @@ public void deadlockDetected() throws Exception { stmt.executeUpdate("INSERT INTO \"HIVE_LOCKS\" (\"HL_LOCK_EXT_ID\", \"HL_LOCK_INT_ID\", \"HL_TXNID\", " + "\"HL_DB\", \"HL_TABLE\", \"HL_PARTITION\", \"HL_LOCK_STATE\", \"HL_LOCK_TYPE\", \"HL_LAST_HEARTBEAT\", " + "\"HL_USER\", \"HL_HOST\") VALUES (1, 1, 1, 'MYDB', 'MYTABLE', 'MYPARTITION', '" + - tHndlr.LOCK_WAITING + "', '" + tHndlr.LOCK_EXCLUSIVE + "', " + now + ", 'fred', " + + tHndlr.LOCK_WAITING + "', '" + EXCLUSIVE.getEncoding() + "', " + now + ", 'fred', " + "'scooby.com')"); conn.commit(); tHndlr.closeDbConn(conn); diff --git ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java index 80fb1aff78..73d3b91585 100644 --- ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java +++ ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java @@ -126,7 +126,7 @@ public void tearDown() throws Exception { @Test public void testMetadataOperationLocks() throws Exception { boolean isStrict = conf.getBoolVar(HiveConf.ConfVars.HIVE_TXN_STRICT_LOCKING_MODE); - //to make insert into non-acid take shared lock + //to make insert into non-acid take shared_read lock conf.setBoolVar(HiveConf.ConfVars.HIVE_TXN_STRICT_LOCKING_MODE, false); dropTable(new String[] {"T"}); driver.run("create table if not exists T (a int, b int)"); @@ -134,7 +134,7 @@ public void testMetadataOperationLocks() throws Exception { txnMgr.acquireLocks(driver.getPlan(), ctx, "Fifer"); List locks = getLocks(); Assert.assertEquals("Unexpected lock count", 1, locks.size()); - //since LM is using non strict mode we get shared lock + //since LM is using non strict mode we get shared_read lock checkLock(LockType.SHARED_READ, LockState.ACQUIRED, 
"default", "T", null, locks); //simulate concurrent session diff --git ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDummyTxnManager.java ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDummyTxnManager.java index 7a3ba3e5ca..ba3c297d07 100644 --- ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDummyTxnManager.java +++ ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDummyTxnManager.java @@ -106,8 +106,8 @@ public void testSingleReadTable() throws Exception { SessionState.get().setCurrentDatabase("db1"); List expectedLocks = new ArrayList(); - expectedLocks.add(new ZooKeeperHiveLock("default", new HiveLockObject(), HiveLockMode.SHARED)); - expectedLocks.add(new ZooKeeperHiveLock("default.table1", new HiveLockObject(), HiveLockMode.SHARED)); + expectedLocks.add(new ZooKeeperHiveLock("default", new HiveLockObject(), HiveLockMode.SHARED_READ)); + expectedLocks.add(new ZooKeeperHiveLock("default.table1", new HiveLockObject(), HiveLockMode.SHARED_READ)); DriverState driverState = new DriverState(); DriverState driverInterrupted = new DriverState(); driverInterrupted.abort(); @@ -137,9 +137,9 @@ public void testSingleReadTable() throws Exception { List lockObjs = lockObjsCaptor.getValue(); Assert.assertEquals(2, lockObjs.size()); Assert.assertEquals("default", lockObjs.get(0).getName()); - Assert.assertEquals(HiveLockMode.SHARED, lockObjs.get(0).mode); + Assert.assertEquals(HiveLockMode.SHARED_READ, lockObjs.get(0).mode); Assert.assertEquals("default/table1", lockObjs.get(1).getName()); - Assert.assertEquals(HiveLockMode.SHARED, lockObjs.get(1).mode); + Assert.assertEquals(HiveLockMode.SHARED_READ, lockObjs.get(1).mode); // Execute try { @@ -167,13 +167,13 @@ public void testDedupLockObjects() { // [path2, shared] // [path2, shared] // [path2, shared] - lockObjs.add(new HiveLockObj(new HiveLockObject(path1, lockData1), HiveLockMode.SHARED)); + lockObjs.add(new HiveLockObj(new HiveLockObject(path1, lockData1), HiveLockMode.SHARED_READ)); String name1 = 
lockObjs.get(lockObjs.size() - 1).getName(); lockObjs.add(new HiveLockObj(new HiveLockObject(path1, lockData1), HiveLockMode.EXCLUSIVE)); - lockObjs.add(new HiveLockObj(new HiveLockObject(path2, lockData2), HiveLockMode.SHARED)); + lockObjs.add(new HiveLockObj(new HiveLockObject(path2, lockData2), HiveLockMode.SHARED_READ)); String name2 = lockObjs.get(lockObjs.size() - 1).getName(); - lockObjs.add(new HiveLockObj(new HiveLockObject(path2, lockData2), HiveLockMode.SHARED)); - lockObjs.add(new HiveLockObj(new HiveLockObject(path2, lockData2), HiveLockMode.SHARED)); + lockObjs.add(new HiveLockObj(new HiveLockObject(path2, lockData2), HiveLockMode.SHARED_READ)); + lockObjs.add(new HiveLockObj(new HiveLockObject(path2, lockData2), HiveLockMode.SHARED_READ)); DummyTxnManager.dedupLockObjects(lockObjs); @@ -196,7 +196,7 @@ public int compare(HiveLockObj lock1, HiveLockObj lock2) { lockObj = lockObjs.get(1); Assert.assertEquals(name2, lockObj.getName()); - Assert.assertEquals(HiveLockMode.SHARED, lockObj.getMode()); + Assert.assertEquals(HiveLockMode.SHARED_READ, lockObj.getMode()); } private HashSet createReadEntities() { diff --git ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestEmbeddedLockManager.java ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestEmbeddedLockManager.java index 4a88551871..2a1be8b629 100644 --- ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestEmbeddedLockManager.java +++ ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestEmbeddedLockManager.java @@ -48,7 +48,7 @@ public void testLocking() throws LockException { String path4 = "database2"; HiveLockObject path1sel1 = lockObj(path1, "select"); - HiveLock path1sel1Lock = manager.lock(path1sel1, HiveLockMode.SHARED, false); + HiveLock path1sel1Lock = manager.lock(path1sel1, HiveLockMode.SHARED_READ, false); Assert.assertNotNull(path1sel1Lock); Assert.assertEquals(1, manager.getLocks(path1sel1, false, false).size()); Assert.assertEquals(1, manager.getLocks(path1sel1, false, true).size()); @@ -58,7 
+58,7 @@ public void testLocking() throws LockException { Assert.assertNull(manager.lock(path1up1, HiveLockMode.EXCLUSIVE, false)); HiveLockObject path1sel2 = lockObj(path1, "select"); - HiveLock path1sel2Lock = manager.lock(path1sel2, HiveLockMode.SHARED, false); + HiveLock path1sel2Lock = manager.lock(path1sel2, HiveLockMode.SHARED_READ, false); Assert.assertNotNull(path1sel2Lock); Assert.assertEquals(1, manager.getLocks(path1sel1, false, false).size()); Assert.assertEquals(2, manager.getLocks(path1sel1, false, true).size()); @@ -69,7 +69,7 @@ public void testLocking() throws LockException { Assert.assertNull(manager.lock(path1up2, HiveLockMode.EXCLUSIVE, false)); HiveLockObject path2sel1 = lockObj(path2, "select"); - HiveLock path2sel1Lock = manager.lock(path2sel1, HiveLockMode.SHARED, false); + HiveLock path2sel1Lock = manager.lock(path2sel1, HiveLockMode.SHARED_READ, false); Assert.assertNotNull(path2sel1Lock); Assert.assertEquals(1, manager.getLocks(path1sel1, false, false).size()); Assert.assertEquals(2, manager.getLocks(path1sel1, false, true).size()); @@ -78,7 +78,7 @@ public void testLocking() throws LockException { Assert.assertEquals(3, manager.getLocks(false, true).size()); HiveLockObject path3sel = lockObj(path3, "select"); - HiveLock path3selLock = manager.lock(path3sel, HiveLockMode.SHARED, false); + HiveLock path3selLock = manager.lock(path3sel, HiveLockMode.SHARED_READ, false); Assert.assertNotNull(path3selLock); Assert.assertEquals(1, manager.getLocks(path1sel1, false, false).size()); Assert.assertEquals(2, manager.getLocks(path1sel1, false, true).size()); @@ -117,8 +117,8 @@ public void testLocking() throws LockException { HiveLockObject path1sel3 = lockObj(path1, "select"); HiveLockObject path2sel2 = lockObj(path2, "select"); - Assert.assertNotNull(manager.lock(path1sel3, HiveLockMode.SHARED, false)); - Assert.assertNull(manager.lock(path2sel2, HiveLockMode.SHARED, false)); + Assert.assertNotNull(manager.lock(path1sel3, 
HiveLockMode.SHARED_READ, false)); + Assert.assertNull(manager.lock(path2sel2, HiveLockMode.SHARED_READ, false)); Assert.assertEquals(1, manager.getLocks(path2up1, false, true).size()); Assert.assertEquals(1, manager.getLocks(path1sel3, false, true).size()); diff --git ql/src/test/org/apache/hadoop/hive/ql/lockmgr/zookeeper/TestZookeeperLockManager.java ql/src/test/org/apache/hadoop/hive/ql/lockmgr/zookeeper/TestZookeeperLockManager.java index 4482f86dc0..51c6be7090 100644 --- ql/src/test/org/apache/hadoop/hive/ql/lockmgr/zookeeper/TestZookeeperLockManager.java +++ ql/src/test/org/apache/hadoop/hive/ql/lockmgr/zookeeper/TestZookeeperLockManager.java @@ -18,10 +18,6 @@ package org.apache.hadoop.hive.ql.lockmgr.zookeeper; -import java.io.File; -import java.nio.file.Files; -import java.nio.file.Paths; - import org.apache.hadoop.hive.common.metrics.MetricsTestUtils; import org.apache.hadoop.hive.common.metrics.common.MetricsConstant; import org.apache.hadoop.hive.common.metrics.common.MetricsFactory; @@ -42,9 +38,6 @@ import org.junit.After; import org.junit.Test; -import com.fasterxml.jackson.databind.JsonNode; -import com.fasterxml.jackson.databind.ObjectMapper; - public class TestZookeeperLockManager { private HiveConf conf; @@ -63,7 +56,7 @@ public void setup() { conf = new HiveConf(); lockObjData = new HiveLockObjectData("1", "10", "SHARED", "show tables", conf); hiveLock = new HiveLockObject(TABLE, lockObjData); - zLock = new ZooKeeperHiveLock(TABLE_LOCK_PATH, hiveLock, HiveLockMode.SHARED); + zLock = new ZooKeeperHiveLock(TABLE_LOCK_PATH, hiveLock, HiveLockMode.SHARED_READ); while (server == null) { @@ -138,13 +131,13 @@ public void testMetrics() throws Exception{ HiveLockManagerCtx ctx = new HiveLockManagerCtx(conf); ZooKeeperHiveLockManager zMgr= new ZooKeeperHiveLockManager(); zMgr.setContext(ctx); - ZooKeeperHiveLock curLock = zMgr.lock(hiveLock, HiveLockMode.SHARED, false); + ZooKeeperHiveLock curLock = zMgr.lock(hiveLock, HiveLockMode.SHARED_READ, false); 
String json = metrics.dumpJson(); - MetricsTestUtils.verifyMetricsJson(json, MetricsTestUtils.COUNTER, MetricsConstant.ZOOKEEPER_HIVE_SHAREDLOCKS, 1); + MetricsTestUtils.verifyMetricsJson(json, MetricsTestUtils.COUNTER, MetricsConstant.ZOOKEEPER_HIVE_SHARED_READ_LOCKS, 1); zMgr.unlock(curLock); json = metrics.dumpJson(); - MetricsTestUtils.verifyMetricsJson(json, MetricsTestUtils.COUNTER, MetricsConstant.ZOOKEEPER_HIVE_SHAREDLOCKS, 0); + MetricsTestUtils.verifyMetricsJson(json, MetricsTestUtils.COUNTER, MetricsConstant.ZOOKEEPER_HIVE_SHARED_READ_LOCKS, 0); zMgr.close(); } diff --git standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/LockComponentBuilder.java standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/LockComponentBuilder.java index c739d4d196..2e1784976f 100644 --- standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/LockComponentBuilder.java +++ standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/LockComponentBuilder.java @@ -45,19 +45,19 @@ public LockComponentBuilder setExclusive() { } /** - * Set the lock to be semi-shared. + * Set the lock to be shared_write. * @return reference to this builder */ - public LockComponentBuilder setSemiShared() { + public LockComponentBuilder setSharedWrite() { component.setType(LockType.SHARED_WRITE); return this; } /** - * Set the lock to be shared. + * Set the lock to be shared_read. 
* @return reference to this builder */ - public LockComponentBuilder setShared() { + public LockComponentBuilder setSharedRead() { component.setType(LockType.SHARED_READ); return this; } diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/LockTypeEntity.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/LockTypeEntity.java new file mode 100644 index 0000000000..2c5df05f56 --- /dev/null +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/LockTypeEntity.java @@ -0,0 +1,54 @@ +package org.apache.hadoop.hive.metastore.txn; + +import org.apache.hadoop.hive.metastore.api.LockType; + +import java.util.Arrays; +import java.util.Optional; + +/** + * This entity class provides the database representation + * of {@link org.apache.hadoop.hive.metastore.api.LockType}. + */ +public enum LockTypeEntity { + EXCLUSIVE('e'), + SHARED_WRITE('w'), + SHARED_READ('r'),; + + static final String UNKNOWN = "z"; + + private final char encoding; + + LockTypeEntity(char encoding) { + this.encoding = encoding; + } + + public char getEncoding() { + return encoding; + } + + public String getEncodingAsStr() { + return Character.toString(encoding); + } + + public static Optional from(LockType lockType) { + try { + return Optional.of(valueOf(lockType.name())); + } catch (IllegalArgumentException e) { + return Optional.empty(); + } + } + + public static Optional from(char encoding) { + return Arrays.stream(values()) + .filter(lockTypeEntity -> lockTypeEntity.getEncoding() == encoding) + .findFirst(); + } + + public Optional toLockType() { + try { + return Optional.of(LockType.valueOf(name())); + } catch (IllegalArgumentException e) { + return Optional.empty(); + } + } +} \ No newline at end of file diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java 
standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java index e8a988cca8..cfdf4ab38f 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java @@ -174,11 +174,6 @@ static final protected char LOCK_ACQUIRED = 'a'; static final protected char LOCK_WAITING = 'w'; - // Lock types - static final protected char LOCK_EXCLUSIVE = 'e'; - static final protected char LOCK_SHARED = 'r'; - static final protected char LOCK_SEMI_SHARED = 'w'; - private static final int ALLOWED_REPEATED_DEADLOCKS = 10; private static final Logger LOG = LoggerFactory.getLogger(TxnHandler.class.getName()); private static final Long TEMP_HIVE_LOCK_ID = -1L; @@ -2616,6 +2611,9 @@ private void insertHiveLocksWithTemporaryExtLockId(long txnid, Connection dbConn + lc + " agentInfo=" + rqst.getAgentInfo()); } intLockId++; + String lockType = LockTypeEntity.from(lc.getType()) + .map(LockTypeEntity::getEncodingAsStr) + .orElse(LockTypeEntity.UNKNOWN); pstmt.setLong(1, TEMP_HIVE_LOCK_ID); pstmt.setLong(2, intLockId); @@ -2624,7 +2622,7 @@ private void insertHiveLocksWithTemporaryExtLockId(long txnid, Connection dbConn pstmt.setString(5, normalizeCase(lc.getTablename())); pstmt.setString(6, normalizeCase(lc.getPartitionname())); pstmt.setString(7, Character.toString(LOCK_WAITING)); - pstmt.setString(8, Character.toString(getLockChar(lc.getType()))); + pstmt.setString(8, lockType); pstmt.setString(9, rqst.getUser()); pstmt.setString(10, rqst.getHostname()); pstmt.setString(11, rqst.getAgentInfo()); @@ -2642,19 +2640,6 @@ private void insertHiveLocksWithTemporaryExtLockId(long txnid, Connection dbConn } } - private char getLockChar(LockType lockType) { - switch (lockType) { - case EXCLUSIVE: - return LOCK_EXCLUSIVE; - case SHARED_READ: - return LOCK_SHARED; - case SHARED_WRITE: - return 
LOCK_SEMI_SHARED; - default: - return 'z'; - } - } - private static String normalizeCase(String s) { return s == null ? null : s.toLowerCase(); } @@ -2897,12 +2882,12 @@ public ShowLocksResponse showLocks(ShowLocksRequest rqst) throws MetaException { case LOCK_WAITING: e.setState(LockState.WAITING); break; default: throw new MetaException("Unknown lock state " + rs.getString(6).charAt(0)); } - switch (rs.getString(7).charAt(0)) { - case LOCK_SEMI_SHARED: e.setType(LockType.SHARED_WRITE); break; - case LOCK_EXCLUSIVE: e.setType(LockType.EXCLUSIVE); break; - case LOCK_SHARED: e.setType(LockType.SHARED_READ); break; - default: throw new MetaException("Unknown lock type " + rs.getString(6).charAt(0)); - } + + char lockChar = rs.getString(7).charAt(0); + LockType lockType = getLockTypeFromChar(lockChar) + .orElseThrow(() -> new MetaException("Unknown lock type: " + lockChar)); + e.setType(lockType); + e.setLastheartbeat(rs.getLong(8)); long acquiredAt = rs.getLong(9); if (!rs.wasNull()) e.setAcquiredat(acquiredAt); @@ -4088,13 +4073,9 @@ private void determineDatabaseProduct(Connection conn) { default: throw new MetaException("Unknown lock state " + rs.getString("HL_LOCK_STATE").charAt(0)); } - switch (rs.getString("HL_LOCK_TYPE").charAt(0)) { - case LOCK_EXCLUSIVE: type = LockType.EXCLUSIVE; break; - case LOCK_SHARED: type = LockType.SHARED_READ; break; - case LOCK_SEMI_SHARED: type = LockType.SHARED_WRITE; break; - default: - throw new MetaException("Unknown lock type " + rs.getString("HL_LOCK_TYPE").charAt(0)); - } + char lockChar = rs.getString("HL_LOCK_TYPE").charAt(0); + type = getLockTypeFromChar(lockChar) + .orElseThrow(() -> new MetaException("Unknown lock type: " + lockChar)); txnId = rs.getLong("HL_TXNID");//returns 0 if value is NULL } LockInfo(ShowLocksResponseElement e) { @@ -4796,6 +4777,12 @@ private static void ensureValidTxn(Connection dbConn, long txnid, Statement stmt } } + static Optional getLockTypeFromChar(char lockChar) { + return 
LockTypeEntity.from(lockChar) + .map(LockTypeEntity::toLockType) + .orElse(Optional.empty()); + } + private LockInfo getTxnIdFromLockId(Connection dbConn, long extLockId) throws NoSuchLockException, MetaException, SQLException { Statement stmt = null; diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java index da38a6bbd3..b3a1f826bb 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java @@ -236,15 +236,15 @@ public static String getFullTableName(String dbName, String tableName) { * Note that, in this method, "a composite 'IN' clause" is defined as "a list of multiple 'IN' * clauses in a query". * - * @param queries OUT: Array of query strings - * @param prefix IN: Part of the query that comes before IN list - * @param suffix IN: Part of the query that comes after IN list - * @param inList IN: the list with IN list values - * @param inColumn IN: single column name of IN list operator - * @param addParens IN: add a pair of parenthesis outside the IN lists - * e.g. "(id in (1,2,3) OR id in (4,5,6))" - * @param notIn IN: is this for building a 'NOT IN' composite clause? - * @return OUT: a list of the count of IN list values that are in each of the corresponding queries + * @param queries IN-OUT: Array of query strings + * @param prefix IN: Part of the query that comes before IN list + * @param suffix IN: Part of the query that comes after IN list + * @param inList IN: the list with IN list values + * @param inColumn IN: single column name of IN list operator + * @param addParens IN: add a pair of parenthesis outside the IN lists + * e.g. "(id in (1,2,3) OR id in (4,5,6))" + * @param notIn IN: is this for building a 'NOT IN' composite clause? 
+ * @return OUT: a list of the count of IN list values that are in each of the corresponding queries */ public static List<Integer> buildQueryWithINClauseStrings(Configuration conf, List<String> queries, StringBuilder prefix, StringBuilder suffix, List<Long> inList, String inColumn, boolean addParens, boolean notIn) { diff --git standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java index 1dfc105958..5f3db52c2f 100644 --- standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java +++ standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java @@ -222,13 +222,13 @@ public void testLocks() throws Exception { rqstBuilder.addLockComponent(new LockComponentBuilder() .setDbName("mydb") .setTableName("yourtable") - .setSemiShared() + .setSharedWrite() .setOperationType(DataOperationType.NO_TXN) .build()); rqstBuilder.addLockComponent(new LockComponentBuilder() .setDbName("yourdb") .setOperationType(DataOperationType.NO_TXN) - .setShared() + .setSharedRead() .build()); rqstBuilder.setUser("fred"); @@ -255,18 +255,18 @@ public void testLocksWithTxn() throws Exception { .setDbName("mydb") .setTableName("mytable") .setPartitionName("mypartition") - .setSemiShared() + .setSharedWrite() .setOperationType(DataOperationType.UPDATE) .build()) .addLockComponent(new LockComponentBuilder() .setDbName("mydb") .setTableName("yourtable") - .setSemiShared() + .setSharedWrite() .setOperationType(DataOperationType.UPDATE) .build()) .addLockComponent(new LockComponentBuilder() .setDbName("yourdb") - .setShared() + .setSharedRead() .setOperationType(DataOperationType.SELECT) .build()) .setUser("fred"); diff --git standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/txn/LockTypeEntityTest.java
standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/txn/LockTypeEntityTest.java new file mode 100644 index 0000000000..1636279d27 --- /dev/null +++ standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/txn/LockTypeEntityTest.java @@ -0,0 +1,36 @@ +package org.apache.hadoop.hive.metastore.txn; + +import org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest; +import org.apache.hadoop.hive.metastore.api.LockType; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; + +@Category(MetastoreUnitTest.class) +public class LockTypeEntityTest { + + @Test + public void testFromLockType() { + assertEquals(LockTypeEntity.EXCLUSIVE, LockTypeEntity.from(LockType.EXCLUSIVE).get()); + assertEquals(LockTypeEntity.SHARED_WRITE, LockTypeEntity.from(LockType.SHARED_WRITE).get()); + assertEquals(LockTypeEntity.SHARED_READ, LockTypeEntity.from(LockType.SHARED_READ).get()); + } + + @Test + public void testFromChar() { + assertEquals(LockTypeEntity.EXCLUSIVE, LockTypeEntity.from('e').get()); + assertEquals(LockTypeEntity.SHARED_WRITE, LockTypeEntity.from('w').get()); + assertEquals(LockTypeEntity.SHARED_READ, LockTypeEntity.from('r').get()); + assertFalse(LockTypeEntity.from('y').isPresent()); + } + + @Test + public void testToLockType() { + assertEquals(LockType.EXCLUSIVE, LockTypeEntity.EXCLUSIVE.toLockType().get()); + assertEquals(LockType.SHARED_WRITE, LockTypeEntity.SHARED_WRITE.toLockType().get()); + assertEquals(LockType.SHARED_READ, LockTypeEntity.SHARED_READ.toLockType().get()); + } + +} diff --git streaming/src/java/org/apache/hive/streaming/TransactionBatch.java streaming/src/java/org/apache/hive/streaming/TransactionBatch.java index d44065018f..40239ab53e 100644 --- streaming/src/java/org/apache/hive/streaming/TransactionBatch.java +++ 
streaming/src/java/org/apache/hive/streaming/TransactionBatch.java @@ -439,7 +439,7 @@ private static LockRequest createLockRequest(final HiveStreamingConnection conne LockComponentBuilder lockCompBuilder = new LockComponentBuilder() .setDbName(connection.getDatabase()) .setTableName(connection.getTable().getTableName()) - .setShared() + .setSharedRead() .setOperationType(DataOperationType.INSERT); if (connection.isDynamicPartitioning()) { lockCompBuilder.setIsDynamicPartitionWrite(true);