diff --git common/src/java/org/apache/hadoop/hive/conf/HiveConf.java common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 73f185a1f3..20a0031d05 100644
--- common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -2703,7 +2703,7 @@ private static void populateLlapDaemonVarsSet(Set llapDaemonVarsSetLocal
         "no transactions."),
     HIVE_TXN_STRICT_LOCKING_MODE("hive.txn.strict.locking.mode", true, "In strict mode non-ACID\n" +
         "resources use standard R/W lock semantics, e.g. INSERT will acquire exclusive lock.\n" +
-        "In nonstrict mode, for non-ACID resources, INSERT will only acquire shared lock, which\n" +
+        "In nonstrict mode, for non-ACID resources, INSERT will only acquire shared_read lock, which\n" +
         "allows two concurrent writes to the same partition but still lets lock manager prevent\n" +
         "DROP TABLE etc. when the table is being written to"),
     HIVE_TXN_NONACID_READ_LOCKS("hive.txn.nonacid.read.locks", true,
diff --git hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/lock/Lock.java hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/lock/Lock.java
index 52eb6133e7..aa294482d0 100644
--- hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/lock/Lock.java
+++ hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/lock/Lock.java
@@ -252,13 +252,13 @@ static String join(Iterable values) {
     String user;
     HiveConf hiveConf;
 
-    /** Adds a table for which a shared lock will be requested. */
+    /** Adds a table for which a shared_read lock will be requested. */
     public Options addSourceTable(String databaseName, String tableName) {
       addTable(databaseName, tableName, sources);
       return this;
     }
 
-    /** Adds a table for which a semi-shared lock will be requested. */
+    /** Adds a table for which a shared_write lock will be requested. */
     public Options addSinkTable(String databaseName, String tableName) {
       addTable(databaseName, tableName, sinks);
       return this;
diff --git metastore/scripts/upgrade/hive/hive-schema-4.0.0.hive.sql metastore/scripts/upgrade/hive/hive-schema-4.0.0.hive.sql
index 03540bba4d..e56ce4cc5f 100644
--- metastore/scripts/upgrade/hive/hive-schema-4.0.0.hive.sql
+++ metastore/scripts/upgrade/hive/hive-schema-4.0.0.hive.sql
@@ -1418,7 +1418,7 @@ SELECT DISTINCT
   HL.`HL_TABLE`,
   HL.`HL_PARTITION`,
   CASE WHEN HL.`HL_LOCK_STATE` = 'a' THEN 'acquired' WHEN HL.`HL_LOCK_STATE` = 'w' THEN 'waiting' END AS LOCK_STATE,
-  CASE WHEN HL.`HL_LOCK_TYPE` = 'e' THEN 'exclusive' WHEN HL.`HL_LOCK_TYPE` = 'r' THEN 'shared' WHEN HL.`HL_LOCK_TYPE` = 'w' THEN 'semi-shared' END AS LOCK_TYPE,
+  CASE WHEN HL.`HL_LOCK_TYPE` = 'e' THEN 'exclusive' WHEN HL.`HL_LOCK_TYPE` = 'r' THEN 'shared_read' WHEN HL.`HL_LOCK_TYPE` = 'w' THEN 'shared_write' END AS LOCK_TYPE,
   FROM_UNIXTIME(HL.`HL_LAST_HEARTBEAT`),
   FROM_UNIXTIME(HL.`HL_ACQUIRED_AT`),
   HL.`HL_USER`,
diff --git metastore/scripts/upgrade/hive/upgrade-3.1.0-to-4.0.0.hive.sql metastore/scripts/upgrade/hive/upgrade-3.1.0-to-4.0.0.hive.sql
index fa518747de..ab67583302 100644
--- metastore/scripts/upgrade/hive/upgrade-3.1.0-to-4.0.0.hive.sql
+++ metastore/scripts/upgrade/hive/upgrade-3.1.0-to-4.0.0.hive.sql
@@ -479,7 +479,7 @@ SELECT DISTINCT
   HL.`HL_TABLE`,
   HL.`HL_PARTITION`,
   CASE WHEN HL.`HL_LOCK_STATE` = 'a' THEN 'acquired' WHEN HL.`HL_LOCK_STATE` = 'w' THEN 'waiting' END AS LOCK_STATE,
-  CASE WHEN HL.`HL_LOCK_TYPE` = 'e' THEN 'exclusive' WHEN HL.`HL_LOCK_TYPE` = 'r' THEN 'shared' WHEN HL.`HL_LOCK_TYPE` = 'w' THEN 'semi-shared' END AS LOCK_TYPE,
+  CASE WHEN HL.`HL_LOCK_TYPE` = 'e' THEN 'exclusive' WHEN HL.`HL_LOCK_TYPE` = 'r' THEN 'shared_read' WHEN HL.`HL_LOCK_TYPE` = 'w' THEN 'shared_write' END AS LOCK_TYPE,
   FROM_UNIXTIME(HL.`HL_LAST_HEARTBEAT`),
   FROM_UNIXTIME(HL.`HL_ACQUIRED_AT`),
   HL.`HL_USER`,
diff --git ql/src/java/org/apache/hadoop/hive/ql/ValidTxnManager.java ql/src/java/org/apache/hadoop/hive/ql/ValidTxnManager.java
index 4885e437aa..49eafe8d08 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ValidTxnManager.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ValidTxnManager.java
@@ -66,7 +66,7 @@
 
   /**
    * Checks whether txn list has been invalidated while planning the query.
-   * This would happen if query requires exclusive/semi-shared lock, and there has been a committed transaction
+   * This would happen if query requires exclusive/shared_write lock, and there has been a committed transaction
    * on the table over which the lock is required.
    */
   boolean isValidTxnListState() throws LockException {
@@ -83,7 +83,7 @@ boolean isValidTxnListState() throws LockException {
 
     // 2) Get locks that are relevant:
     // - Exclusive for INSERT OVERWRITE.
-    // - Semi-shared for UPDATE/DELETE.
+    // - Shared_write for UPDATE/DELETE.
     Set nonSharedLockedTables = getNonSharedLockedTables();
     if (nonSharedLockedTables == null) {
       return true; // Nothing to check
diff --git ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
index 17e6cdf162..3944f9c7b9 100644
--- ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
+++ ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
@@ -2901,7 +2901,7 @@ private static boolean isLockableTable(Table t) {
     boolean skipReadLock = !conf.getBoolVar(ConfVars.HIVE_TXN_READ_LOCKS);
     boolean skipNonAcidReadLock = !conf.getBoolVar(ConfVars.HIVE_TXN_NONACID_READ_LOCKS);
 
-    // For each source to read, get a shared lock
+    // For each source to read, get a shared_read lock
     for (ReadEntity input : inputs) {
       if (input.isDummy()
           || !input.needsLock()
@@ -2955,7 +2955,7 @@ private static boolean isLockableTable(Table t) {
     // For each source to write to, get the appropriate lock type. If it's
     // an OVERWRITE, we need to get an exclusive lock. If it's an insert (no
     // overwrite) than we need a shared. If it's update or delete then we
-    // need a SEMI-SHARED.
+    // need a SHARED_WRITE.
     for (WriteEntity output : outputs) {
       LOG.debug("output is null " + (output == null));
       if (output.getType() == Entity.Type.DFS_DIR || output.getType() == Entity.Type.LOCAL_DIR || !AcidUtils
diff --git ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java
index 72f095d264..21d0dee11c 100644
--- ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java
+++ ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java
@@ -88,6 +88,7 @@
 import static junit.framework.Assert.assertNull;
 import static junit.framework.Assert.assertTrue;
 import static junit.framework.Assert.fail;
+import static org.apache.hadoop.hive.metastore.txn.LockTypeEntity.EXCLUSIVE;
 
 /**
  * Tests for TxnHandler.
@@ -1394,7 +1395,7 @@ public void deadlockDetected() throws Exception {
       stmt.executeUpdate("INSERT INTO \"HIVE_LOCKS\" (\"HL_LOCK_EXT_ID\", \"HL_LOCK_INT_ID\", \"HL_TXNID\", " +
           "\"HL_DB\", \"HL_TABLE\", \"HL_PARTITION\", \"HL_LOCK_STATE\", \"HL_LOCK_TYPE\", \"HL_LAST_HEARTBEAT\", " +
           "\"HL_USER\", \"HL_HOST\") VALUES (1, 1, 1, 'MYDB', 'MYTABLE', 'MYPARTITION', '" +
-          tHndlr.LOCK_WAITING + "', '" + tHndlr.LOCK_EXCLUSIVE + "', " + now + ", 'fred', " +
+          tHndlr.LOCK_WAITING + "', '" + EXCLUSIVE.getEncoding() + "', " + now + ", 'fred', " +
          "'scooby.com')");
       conn.commit();
       tHndlr.closeDbConn(conn);
diff --git ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
index 80fb1aff78..73d3b91585 100644
--- ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
+++ ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
@@ -126,7 +126,7 @@ public void tearDown() throws Exception {
   @Test
   public void testMetadataOperationLocks() throws Exception {
     boolean isStrict = conf.getBoolVar(HiveConf.ConfVars.HIVE_TXN_STRICT_LOCKING_MODE);
-    //to make insert into non-acid take shared lock
+    //to make insert into non-acid take shared_read lock
     conf.setBoolVar(HiveConf.ConfVars.HIVE_TXN_STRICT_LOCKING_MODE, false);
     dropTable(new String[] {"T"});
     driver.run("create table if not exists T (a int, b int)");
@@ -134,7 +134,7 @@ public void testMetadataOperationLocks() throws Exception {
     txnMgr.acquireLocks(driver.getPlan(), ctx, "Fifer");
     List locks = getLocks();
     Assert.assertEquals("Unexpected lock count", 1, locks.size());
-    //since LM is using non strict mode we get shared lock
+    //since LM is using non strict mode we get shared_read lock
     checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "T", null, locks);
 
     //simulate concurrent session
diff --git standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/LockComponentBuilder.java standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/LockComponentBuilder.java
index c739d4d196..aa9441d4f8 100644
--- standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/LockComponentBuilder.java
+++ standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/LockComponentBuilder.java
@@ -45,7 +45,7 @@ public LockComponentBuilder setExclusive() {
   }
 
   /**
-   * Set the lock to be semi-shared.
+   * Set the lock to be shared_write.
    * @return reference to this builder
    */
   public LockComponentBuilder setSemiShared() {
diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/LockTypeEntity.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/LockTypeEntity.java
new file mode 100644
index 0000000000..2c5df05f56
--- /dev/null
+++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/LockTypeEntity.java
@@ -0,0 +1,54 @@
+package org.apache.hadoop.hive.metastore.txn;
+
+import org.apache.hadoop.hive.metastore.api.LockType;
+
+import java.util.Arrays;
+import java.util.Optional;
+
+/**
+ * This entity class provides the database representation
+ * of {@link org.apache.hadoop.hive.metastore.api.LockType}.
+ */
+public enum LockTypeEntity {
+  EXCLUSIVE('e'),
+  SHARED_WRITE('w'),
+  SHARED_READ('r');
+
+  static final String UNKNOWN = "z";
+
+  private final char encoding;
+
+  LockTypeEntity(char encoding) {
+    this.encoding = encoding;
+  }
+
+  public char getEncoding() {
+    return encoding;
+  }
+
+  public String getEncodingAsStr() {
+    return Character.toString(encoding);
+  }
+
+  public static Optional<LockTypeEntity> from(LockType lockType) {
+    try {
+      return Optional.of(valueOf(lockType.name()));
+    } catch (IllegalArgumentException e) {
+      return Optional.empty();
+    }
+  }
+
+  public static Optional<LockTypeEntity> from(char encoding) {
+    return Arrays.stream(values())
+        .filter(lockTypeEntity -> lockTypeEntity.getEncoding() == encoding)
+        .findFirst();
+  }
+
+  public Optional<LockType> toLockType() {
+    try {
+      return Optional.of(LockType.valueOf(name()));
+    } catch (IllegalArgumentException e) {
+      return Optional.empty();
+    }
+  }
+}
\ No newline at end of file
diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
index e8a988cca8..cfdf4ab38f 100644
--- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
+++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
@@ -174,11 +174,6 @@
   static final protected char LOCK_ACQUIRED = 'a';
   static final protected char LOCK_WAITING = 'w';
 
-  // Lock types
-  static final protected char LOCK_EXCLUSIVE = 'e';
-  static final protected char LOCK_SHARED = 'r';
-  static final protected char LOCK_SEMI_SHARED = 'w';
-
   private static final int ALLOWED_REPEATED_DEADLOCKS = 10;
   private static final Logger LOG = LoggerFactory.getLogger(TxnHandler.class.getName());
   private static final Long TEMP_HIVE_LOCK_ID = -1L;
@@ -2616,6 +2611,9 @@ private void insertHiveLocksWithTemporaryExtLockId(long txnid, Connection dbConn
               + lc + " agentInfo=" + rqst.getAgentInfo());
         }
         intLockId++;
+        String lockType = LockTypeEntity.from(lc.getType())
+            .map(LockTypeEntity::getEncodingAsStr)
+            .orElse(LockTypeEntity.UNKNOWN);
 
         pstmt.setLong(1, TEMP_HIVE_LOCK_ID);
         pstmt.setLong(2, intLockId);
@@ -2624,7 +2622,7 @@ private void insertHiveLocksWithTemporaryExtLockId(long txnid, Connection dbConn
         pstmt.setString(5, normalizeCase(lc.getTablename()));
         pstmt.setString(6, normalizeCase(lc.getPartitionname()));
         pstmt.setString(7, Character.toString(LOCK_WAITING));
-        pstmt.setString(8, Character.toString(getLockChar(lc.getType())));
+        pstmt.setString(8, lockType);
         pstmt.setString(9, rqst.getUser());
         pstmt.setString(10, rqst.getHostname());
         pstmt.setString(11, rqst.getAgentInfo());
@@ -2642,19 +2640,6 @@ private void insertHiveLocksWithTemporaryExtLockId(long txnid, Connection dbConn
     }
   }
 
-  private char getLockChar(LockType lockType) {
-    switch (lockType) {
-      case EXCLUSIVE:
-        return LOCK_EXCLUSIVE;
-      case SHARED_READ:
-        return LOCK_SHARED;
-      case SHARED_WRITE:
-        return LOCK_SEMI_SHARED;
-      default:
-        return 'z';
-    }
-  }
-
   private static String normalizeCase(String s) {
     return s == null ? null : s.toLowerCase();
   }
@@ -2897,12 +2882,12 @@ public ShowLocksResponse showLocks(ShowLocksRequest rqst) throws MetaException {
           case LOCK_WAITING: e.setState(LockState.WAITING); break;
           default: throw new MetaException("Unknown lock state " + rs.getString(6).charAt(0));
         }
-        switch (rs.getString(7).charAt(0)) {
-          case LOCK_SEMI_SHARED: e.setType(LockType.SHARED_WRITE); break;
-          case LOCK_EXCLUSIVE: e.setType(LockType.EXCLUSIVE); break;
-          case LOCK_SHARED: e.setType(LockType.SHARED_READ); break;
-          default: throw new MetaException("Unknown lock type " + rs.getString(6).charAt(0));
-        }
+
+        char lockChar = rs.getString(7).charAt(0);
+        LockType lockType = getLockTypeFromChar(lockChar)
+            .orElseThrow(() -> new MetaException("Unknown lock type: " + lockChar));
+        e.setType(lockType);
+
         e.setLastheartbeat(rs.getLong(8));
         long acquiredAt = rs.getLong(9);
         if (!rs.wasNull()) e.setAcquiredat(acquiredAt);
@@ -4088,13 +4073,9 @@ private void determineDatabaseProduct(Connection conn) {
         default:
           throw new MetaException("Unknown lock state " + rs.getString("HL_LOCK_STATE").charAt(0));
       }
-      switch (rs.getString("HL_LOCK_TYPE").charAt(0)) {
-        case LOCK_EXCLUSIVE: type = LockType.EXCLUSIVE; break;
-        case LOCK_SHARED: type = LockType.SHARED_READ; break;
-        case LOCK_SEMI_SHARED: type = LockType.SHARED_WRITE; break;
-        default:
-          throw new MetaException("Unknown lock type " + rs.getString("HL_LOCK_TYPE").charAt(0));
-      }
+      char lockChar = rs.getString("HL_LOCK_TYPE").charAt(0);
+      type = getLockTypeFromChar(lockChar)
+          .orElseThrow(() -> new MetaException("Unknown lock type: " + lockChar));
       txnId = rs.getLong("HL_TXNID");//returns 0 if value is NULL
     }
     LockInfo(ShowLocksResponseElement e) {
@@ -4796,6 +4777,12 @@ private static void ensureValidTxn(Connection dbConn, long txnid, Statement stmt
     }
   }
 
+  static Optional<LockType> getLockTypeFromChar(char lockChar) {
+    return LockTypeEntity.from(lockChar)
+        .map(LockTypeEntity::toLockType)
+        .orElse(Optional.empty());
+  }
+
   private LockInfo getTxnIdFromLockId(Connection dbConn, long extLockId)
       throws NoSuchLockException, MetaException, SQLException {
     Statement stmt = null;
diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java
index da38a6bbd3..b3a1f826bb 100644
--- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java
+++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java
@@ -236,15 +236,15 @@ public static String getFullTableName(String dbName, String tableName) {
    * Note that, in this method, "a composite 'IN' clause" is defined as "a list of multiple 'IN'
    * clauses in a query".
    *
-   * @param queries OUT: Array of query strings
-   * @param prefix IN: Part of the query that comes before IN list
-   * @param suffix IN: Part of the query that comes after IN list
-   * @param inList IN: the list with IN list values
-   * @param inColumn IN: single column name of IN list operator
-   * @param addParens IN: add a pair of parenthesis outside the IN lists
-   *                    e.g. "(id in (1,2,3) OR id in (4,5,6))"
-   * @param notIn IN: is this for building a 'NOT IN' composite clause?
-   * @return OUT: a list of the count of IN list values that are in each of the corresponding queries
+   * @param queries   IN-OUT: Array of query strings
+   * @param prefix    IN:     Part of the query that comes before IN list
+   * @param suffix    IN:     Part of the query that comes after IN list
+   * @param inList    IN:     the list with IN list values
+   * @param inColumn  IN:     single column name of IN list operator
+   * @param addParens IN:     add a pair of parenthesis outside the IN lists
+   *                          e.g. "(id in (1,2,3) OR id in (4,5,6))"
+   * @param notIn     IN:     is this for building a 'NOT IN' composite clause?
+   * @return          OUT:    a list of the count of IN list values that are in each of the corresponding queries
    */
   public static List buildQueryWithINClauseStrings(Configuration conf, List queries, StringBuilder prefix,
       StringBuilder suffix, List inList, String inColumn, boolean addParens, boolean notIn) {
diff --git standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/txn/LockTypeEntityTest.java standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/txn/LockTypeEntityTest.java
new file mode 100644
index 0000000000..1636279d27
--- /dev/null
+++ standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/txn/LockTypeEntityTest.java
@@ -0,0 +1,36 @@
+package org.apache.hadoop.hive.metastore.txn;
+
+import org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest;
+import org.apache.hadoop.hive.metastore.api.LockType;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+
+@Category(MetastoreUnitTest.class)
+public class LockTypeEntityTest {
+
+  @Test
+  public void testFromLockType() {
+    assertEquals(LockTypeEntity.EXCLUSIVE, LockTypeEntity.from(LockType.EXCLUSIVE).get());
+    assertEquals(LockTypeEntity.SHARED_WRITE, LockTypeEntity.from(LockType.SHARED_WRITE).get());
+    assertEquals(LockTypeEntity.SHARED_READ, LockTypeEntity.from(LockType.SHARED_READ).get());
+  }
+
+  @Test
+  public void testFromChar() {
+    assertEquals(LockTypeEntity.EXCLUSIVE, LockTypeEntity.from('e').get());
+    assertEquals(LockTypeEntity.SHARED_WRITE, LockTypeEntity.from('w').get());
+    assertEquals(LockTypeEntity.SHARED_READ, LockTypeEntity.from('r').get());
+    assertFalse(LockTypeEntity.from('y').isPresent());
+  }
+
+  @Test
+  public void testToLockType() {
+    assertEquals(LockType.EXCLUSIVE, LockTypeEntity.EXCLUSIVE.toLockType().get());
+    assertEquals(LockType.SHARED_WRITE, LockTypeEntity.SHARED_WRITE.toLockType().get());
+    assertEquals(LockType.SHARED_READ, LockTypeEntity.SHARED_READ.toLockType().get());
+  }
+
+}
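
For reference, a minimal usage sketch of the LockTypeEntity mapping introduced by this patch. The demo class below is illustrative only and is not part of the change set; it is placed in the org.apache.hadoop.hive.metastore.txn package so it can reach the enum, and it relies on the Optional<LockTypeEntity>/Optional<LockType> return types shown in the new file above.

package org.apache.hadoop.hive.metastore.txn;

import org.apache.hadoop.hive.metastore.api.LockType;

/** Hypothetical demo, not part of the patch. */
public class LockTypeEntityDemo {
  public static void main(String[] args) {
    // Thrift-level lock type -> single-character database encoding.
    char write = LockTypeEntity.from(LockType.SHARED_WRITE).get().getEncoding();   // 'w'
    char read = LockTypeEntity.from(LockType.SHARED_READ).get().getEncoding();     // 'r'
    char exclusive = LockTypeEntity.from(LockType.EXCLUSIVE).get().getEncoding();  // 'e'

    // Database encoding -> Thrift-level lock type (round trip).
    LockType roundTrip = LockTypeEntity.from(write)
        .flatMap(LockTypeEntity::toLockType)
        .get();                                                                    // SHARED_WRITE

    // Unknown encodings come back as an empty Optional rather than a magic character.
    boolean known = LockTypeEntity.from('y').isPresent();                          // false

    System.out.println(write + " " + read + " " + exclusive + " " + roundTrip + " " + known);
  }
}

This is the same round trip that TxnHandler now performs when writing HIVE_LOCKS.HL_LOCK_TYPE (via getEncodingAsStr) and when reading it back in showLocks and LockInfo (via getLockTypeFromChar).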