diff --git hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
index e249b7775e..dc8c6636bc 100644
--- hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
+++ hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
@@ -1014,7 +1014,7 @@ private static LockRequest createLockRequest(final HiveEndPoint hiveEndPoint,
       LockComponentBuilder lockCompBuilder = new LockComponentBuilder()
           .setDbName(hiveEndPoint.database)
           .setTableName(hiveEndPoint.table)
-          .setShared()
+          .setSharedRead()
           .setOperationType(DataOperationType.INSERT);
       if (partNameForLock!=null && !partNameForLock.isEmpty() ) {
         lockCompBuilder.setPartitionName(partNameForLock);
diff --git hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/lock/Lock.java hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/lock/Lock.java
index 52eb6133e7..88970da3a5 100644
--- hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/lock/Lock.java
+++ hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/lock/Lock.java
@@ -184,9 +184,9 @@ private LockRequest buildLockRequest(Long transactionId) {
       //todo: DataOperationType is set conservatively here, we'd really want to distinguish update/delete
       //and insert/select and if resource (that is written to) is ACID or not
       if (sinks.contains(table)) {
-        componentBuilder.setSemiShared().setOperationType(DataOperationType.UPDATE).setIsTransactional(true);
+        componentBuilder.setSharedWrite().setOperationType(DataOperationType.UPDATE).setIsTransactional(true);
       } else {
-        componentBuilder.setShared().setOperationType(DataOperationType.INSERT).setIsTransactional(true);
+        componentBuilder.setSharedRead().setOperationType(DataOperationType.INSERT).setIsTransactional(true);
       }
       LockComponent component = componentBuilder.build();
       requestBuilder.addLockComponent(component);
diff --git metastore/scripts/upgrade/hive/hive-schema-4.0.0.hive.sql metastore/scripts/upgrade/hive/hive-schema-4.0.0.hive.sql
index 03540bba4d..e56ce4cc5f 100644
--- metastore/scripts/upgrade/hive/hive-schema-4.0.0.hive.sql
+++ metastore/scripts/upgrade/hive/hive-schema-4.0.0.hive.sql
@@ -1418,7 +1418,7 @@ SELECT DISTINCT
   HL.`HL_TABLE`,
   HL.`HL_PARTITION`,
   CASE WHEN HL.`HL_LOCK_STATE` = 'a' THEN 'acquired' WHEN HL.`HL_LOCK_STATE` = 'w' THEN 'waiting' END AS LOCK_STATE,
-  CASE WHEN HL.`HL_LOCK_TYPE` = 'e' THEN 'exclusive' WHEN HL.`HL_LOCK_TYPE` = 'r' THEN 'shared' WHEN HL.`HL_LOCK_TYPE` = 'w' THEN 'semi-shared' END AS LOCK_TYPE,
+  CASE WHEN HL.`HL_LOCK_TYPE` = 'e' THEN 'exclusive' WHEN HL.`HL_LOCK_TYPE` = 'r' THEN 'shared_read' WHEN HL.`HL_LOCK_TYPE` = 'w' THEN 'shared_write' END AS LOCK_TYPE,
   FROM_UNIXTIME(HL.`HL_LAST_HEARTBEAT`),
   FROM_UNIXTIME(HL.`HL_ACQUIRED_AT`),
   HL.`HL_USER`,
diff --git metastore/scripts/upgrade/hive/upgrade-3.1.0-to-4.0.0.hive.sql metastore/scripts/upgrade/hive/upgrade-3.1.0-to-4.0.0.hive.sql
index fa518747de..ab67583302 100644
--- metastore/scripts/upgrade/hive/upgrade-3.1.0-to-4.0.0.hive.sql
+++ metastore/scripts/upgrade/hive/upgrade-3.1.0-to-4.0.0.hive.sql
@@ -479,7 +479,7 @@ SELECT DISTINCT
   HL.`HL_TABLE`,
   HL.`HL_PARTITION`,
   CASE WHEN HL.`HL_LOCK_STATE` = 'a' THEN 'acquired' WHEN HL.`HL_LOCK_STATE` = 'w' THEN 'waiting' END AS LOCK_STATE,
-  CASE WHEN HL.`HL_LOCK_TYPE` = 'e' THEN 'exclusive' WHEN HL.`HL_LOCK_TYPE` = 'r' THEN 'shared' WHEN HL.`HL_LOCK_TYPE` = 'w' THEN 'semi-shared' END AS LOCK_TYPE,
+  CASE WHEN HL.`HL_LOCK_TYPE` = 'e' THEN 'exclusive' WHEN HL.`HL_LOCK_TYPE` = 'r' THEN 'shared_read' WHEN HL.`HL_LOCK_TYPE` = 'w' THEN 'shared_write' END AS LOCK_TYPE,
   FROM_UNIXTIME(HL.`HL_LAST_HEARTBEAT`),
   FROM_UNIXTIME(HL.`HL_ACQUIRED_AT`),
   HL.`HL_USER`,
diff --git ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
index 17e6cdf162..4461e9cc41 100644
--- ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
+++ ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
@@ -2901,7 +2901,7 @@ private static boolean isLockableTable(Table t) {
     boolean skipReadLock = !conf.getBoolVar(ConfVars.HIVE_TXN_READ_LOCKS);
     boolean skipNonAcidReadLock = !conf.getBoolVar(ConfVars.HIVE_TXN_NONACID_READ_LOCKS);
 
-    // For each source to read, get a shared lock
+    // For each source to read, get a shared_read lock
     for (ReadEntity input : inputs) {
       if (input.isDummy()
           || !input.needsLock()
@@ -2913,7 +2913,7 @@ private static boolean isLockableTable(Table t) {
        continue;
      }
      LockComponentBuilder compBuilder = new LockComponentBuilder();
-      compBuilder.setShared();
+      compBuilder.setSharedRead();
      compBuilder.setOperationType(DataOperationType.SELECT);

      Table t = null;
@@ -2955,7 +2955,7 @@ private static boolean isLockableTable(Table t) {
    // For each source to write to, get the appropriate lock type. If it's
    // an OVERWRITE, we need to get an exclusive lock. If it's an insert (no
    // overwrite) than we need a shared. If it's update or delete then we
-    // need a SEMI-SHARED.
+    // need a SHARED_WRITE.
    for (WriteEntity output : outputs) {
      LOG.debug("output is null " + (output == null));
      if (output.getType() == Entity.Type.DFS_DIR || output.getType() == Entity.Type.LOCAL_DIR || !AcidUtils
@@ -3002,7 +3002,7 @@ Seems much cleaner if each stmt is identified as a particular HiveOperation (whi
          if (conf.getBoolVar(HiveConf.ConfVars.TXN_OVERWRITE_X_LOCK)) {
            compBuilder.setExclusive();
          } else {
-            compBuilder.setSemiShared();
+            compBuilder.setSharedWrite();
          }
          compBuilder.setOperationType(DataOperationType.UPDATE);
        } else {
@@ -3013,7 +3013,7 @@ Seems much cleaner if each stmt is identified as a particular HiveOperation (whi
      case INSERT:
        assert t != null;
        if (AcidUtils.isTransactionalTable(t)) {
-          compBuilder.setShared();
+          compBuilder.setSharedRead();
        } else if (MetaStoreUtils.isNonNativeTable(t.getTTable())) {
          final HiveStorageHandler storageHandler = Preconditions.checkNotNull(t.getStorageHandler(),
              "Thought all the non native tables have an instance of storage handler");
@@ -3028,13 +3028,13 @@ Seems much cleaner if each stmt is identified as a particular HiveOperation (whi
          if (conf.getBoolVar(HiveConf.ConfVars.HIVE_TXN_STRICT_LOCKING_MODE)) {
            compBuilder.setExclusive();
          } else { // this is backward compatible for non-ACID resources, w/o ACID semantics
-            compBuilder.setShared();
+            compBuilder.setSharedRead();
          }
        }
        compBuilder.setOperationType(DataOperationType.INSERT);
        break;
      case DDL_SHARED:
-        compBuilder.setShared();
+        compBuilder.setSharedRead();
        if (!output.isTxnAnalyze()) {
          // Analyze needs txn components to be present, otherwise an aborted analyze write ID
          // might be rolled under the watermark by compactor while stats written by it are
@@ -3044,11 +3044,11 @@ Seems much cleaner if each stmt is identified as a particular HiveOperation (whi
        break;
      case UPDATE:
-        compBuilder.setSemiShared();
+        compBuilder.setSharedWrite();
        compBuilder.setOperationType(DataOperationType.UPDATE);
        break;
      case DELETE:
-        compBuilder.setSemiShared();
+        compBuilder.setSharedWrite();
        compBuilder.setOperationType(DataOperationType.DELETE);
        break;
diff --git ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java
index 72f095d264..1d211857bf 100644
--- ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java
+++ ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java
@@ -88,6 +88,7 @@
 import static junit.framework.Assert.assertNull;
 import static junit.framework.Assert.assertTrue;
 import static junit.framework.Assert.fail;
+import static org.apache.hadoop.hive.metastore.utils.LockTypeUtil.getEncoding;
 
 /**
  * Tests for TxnHandler.
@@ -1394,7 +1395,7 @@ public void deadlockDetected() throws Exception {
      stmt.executeUpdate("INSERT INTO \"HIVE_LOCKS\" (\"HL_LOCK_EXT_ID\", \"HL_LOCK_INT_ID\", \"HL_TXNID\", " +
          "\"HL_DB\", \"HL_TABLE\", \"HL_PARTITION\", \"HL_LOCK_STATE\", \"HL_LOCK_TYPE\", \"HL_LAST_HEARTBEAT\", " +
          "\"HL_USER\", \"HL_HOST\") VALUES (1, 1, 1, 'MYDB', 'MYTABLE', 'MYPARTITION', '" +
-          tHndlr.LOCK_WAITING + "', '" + tHndlr.LOCK_EXCLUSIVE + "', " + now + ", 'fred', " +
+          tHndlr.LOCK_WAITING + "', '" + getEncoding(LockType.EXCLUSIVE) + "', " + now + ", 'fred', " +
          "'scooby.com')");
      conn.commit();
      tHndlr.closeDbConn(conn);
diff --git ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
index 80fb1aff78..73d3b91585 100644
--- ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
+++ ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
@@ -126,7 +126,7 @@ public void tearDown() throws Exception {
  @Test
  public void testMetadataOperationLocks() throws Exception {
    boolean isStrict = conf.getBoolVar(HiveConf.ConfVars.HIVE_TXN_STRICT_LOCKING_MODE);
-    //to make insert into non-acid take shared lock
+    //to make insert into non-acid take shared_read lock
    conf.setBoolVar(HiveConf.ConfVars.HIVE_TXN_STRICT_LOCKING_MODE, false);
    dropTable(new String[] {"T"});
    driver.run("create table if not exists T (a int, b int)");
@@ -134,7 +134,7 @@ public void testMetadataOperationLocks() throws Exception {
    txnMgr.acquireLocks(driver.getPlan(), ctx, "Fifer");
    List<ShowLocksResponseElement> locks = getLocks();
    Assert.assertEquals("Unexpected lock count", 1, locks.size());
-    //since LM is using non strict mode we get shared lock
+    //since LM is using non strict mode we get shared_read lock
    checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "T", null, locks);

    //simulate concurrent session
diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockType.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockType.java
index 8ae4351129..b38ed1cbc3 100644
--- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockType.java
+++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockType.java
@@ -14,7 +14,8 @@
 public enum LockType implements org.apache.thrift.TEnum {
  SHARED_READ(1),
  SHARED_WRITE(2),
-  EXCLUSIVE(3);
+  EXCLUSIVE(3),
+  EXCL_WRITE(4);

  private final int value;

@@ -41,6 +42,8 @@ public static LockType findByValue(int value) {
        return SHARED_WRITE;
      case 3:
        return EXCLUSIVE;
+      case 4:
+        return EXCL_WRITE;
      default:
        return null;
    }
diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php
index db4cfb996a..9fb7ff011a 100644
--- standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php
+++ standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php
@@ -89,10 +89,12 @@ final class LockType {
  const SHARED_READ = 1;
  const SHARED_WRITE = 2;
  const EXCLUSIVE = 3;
+  const EXCL_WRITE = 4;

  static public $__names = array(
    1 => 'SHARED_READ',
    2 => 'SHARED_WRITE',
    3 => 'EXCLUSIVE',
+    4 => 'EXCL_WRITE',
  );
 }
diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
index cf3137928f..4f317b3453 100644
--- standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
+++ standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
@@ -127,17 +127,20 @@ class LockType:
  SHARED_READ = 1
  SHARED_WRITE = 2
  EXCLUSIVE = 3
+  EXCL_WRITE = 4

  _VALUES_TO_NAMES = {
    1: "SHARED_READ",
    2: "SHARED_WRITE",
    3: "EXCLUSIVE",
+    4: "EXCL_WRITE",
  }

  _NAMES_TO_VALUES = {
    "SHARED_READ": 1,
    "SHARED_WRITE": 2,
    "EXCLUSIVE": 3,
+    "EXCL_WRITE": 4,
  }

 class CompactionType:
diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb
index 849970eb56..e64ae0ead2 100644
--- standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb
+++ standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb
@@ -61,8 +61,9 @@ module LockType
  SHARED_READ = 1
  SHARED_WRITE = 2
  EXCLUSIVE = 3
-  VALUE_MAP = {1 => "SHARED_READ", 2 => "SHARED_WRITE", 3 => "EXCLUSIVE"}
-  VALID_VALUES = Set.new([SHARED_READ, SHARED_WRITE, EXCLUSIVE]).freeze
+  EXCL_WRITE = 4
+  VALUE_MAP = {1 => "SHARED_READ", 2 => "SHARED_WRITE", 3 => "EXCLUSIVE", 4 => "EXCL_WRITE"}
+  VALID_VALUES = Set.new([SHARED_READ, SHARED_WRITE, EXCLUSIVE, EXCL_WRITE]).freeze
 end

 module CompactionType
diff --git standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/LockComponentBuilder.java standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/LockComponentBuilder.java
index c739d4d196..2e1784976f 100644
--- standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/LockComponentBuilder.java
+++ standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/LockComponentBuilder.java
@@ -45,19 +45,19 @@ public LockComponentBuilder setExclusive() {
  }

  /**
-   * Set the lock to be semi-shared.
+   * Set the lock to be shared_write.
   * @return reference to this builder
   */
-  public LockComponentBuilder setSemiShared() {
+  public LockComponentBuilder setSharedWrite() {
    component.setType(LockType.SHARED_WRITE);
    return this;
  }
  /**
-   * Set the lock to be shared.
+   * Set the lock to be shared_read.
   * @return reference to this builder
   */
-  public LockComponentBuilder setShared() {
+  public LockComponentBuilder setSharedRead() {
    component.setType(LockType.SHARED_READ);
    return this;
  }
 }
diff --git standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
index 098ddec5dc..1e3d6e9b8b 100644
--- standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
+++ standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
@@ -175,6 +175,7 @@ enum LockType {
    SHARED_READ = 1,
    SHARED_WRITE = 2,
    EXCLUSIVE = 3,
+    EXCL_WRITE = 4,
 }

 enum CompactionType {
diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
index 7d0db0c3a0..56ef390546 100644
--- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
+++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
@@ -33,6 +33,7 @@
 import java.util.BitSet;
 import java.util.Collections;
 import java.util.Comparator;
+import java.util.EnumMap;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -83,12 +84,14 @@
 import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
 import org.apache.hadoop.hive.metastore.tools.SQLGenerator;
 import org.apache.hadoop.hive.metastore.utils.JavaUtils;
+import org.apache.hadoop.hive.metastore.utils.LockTypeUtil;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.utils.StringableMap;
 import org.apache.hadoop.util.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import static org.apache.hadoop.hive.metastore.DatabaseProduct.MYSQL;
 import static org.apache.hadoop.hive.metastore.txn.TxnDbUtil.executeQueriesInBatch;
 import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog;
@@ -175,11 +178,6 @@
  static final protected char LOCK_ACQUIRED = 'a';
  static final protected char LOCK_WAITING = 'w';

-  // Lock types
-  static final protected char LOCK_EXCLUSIVE = 'e';
-  static final protected char LOCK_SHARED = 'r';
-  static final protected char LOCK_SEMI_SHARED = 'w';
-
  private static final int ALLOWED_REPEATED_DEADLOCKS = 10;
  private static final Logger LOG = LoggerFactory.getLogger(TxnHandler.class.getName());
  private static final Long TEMP_HIVE_LOCK_ID = -1L;
@@ -2609,6 +2607,7 @@ private void insertHiveLocksWithTemporaryExtLockId(long txnid, Connection dbConn
            + lc + " agentInfo=" + rqst.getAgentInfo());
      }
      intLockId++;
+      String lockType = LockTypeUtil.getEncodingAsStr(lc.getType());

      pstmt.setLong(1, TEMP_HIVE_LOCK_ID);
      pstmt.setLong(2, intLockId);
@@ -2617,7 +2616,7 @@ private void insertHiveLocksWithTemporaryExtLockId(long txnid, Connection dbConn
      pstmt.setString(5, normalizeCase(lc.getTablename()));
      pstmt.setString(6, normalizeCase(lc.getPartitionname()));
      pstmt.setString(7, Character.toString(LOCK_WAITING));
-      pstmt.setString(8, Character.toString(getLockChar(lc.getType())));
+      pstmt.setString(8, lockType);
      pstmt.setString(9, rqst.getUser());
      pstmt.setString(10, rqst.getHostname());
      pstmt.setString(11, rqst.getAgentInfo());
@@ -2635,19 +2634,6 @@ private void insertHiveLocksWithTemporaryExtLockId(long txnid, Connection dbConn
    }
  }

-  private char getLockChar(LockType lockType) {
-    switch (lockType) {
-      case EXCLUSIVE:
-        return LOCK_EXCLUSIVE;
-      case SHARED_READ:
-        return LOCK_SHARED;
-      case SHARED_WRITE:
-        return LOCK_SEMI_SHARED;
-      default:
-        return 'z';
-    }
-  }
-
  private static String normalizeCase(String s) {
    return s == null ? null : s.toLowerCase();
  }
@@ -2890,12 +2876,12 @@ public ShowLocksResponse showLocks(ShowLocksRequest rqst) throws MetaException {
          case LOCK_WAITING: e.setState(LockState.WAITING); break;
          default: throw new MetaException("Unknown lock state " + rs.getString(6).charAt(0));
        }
-        switch (rs.getString(7).charAt(0)) {
-          case LOCK_SEMI_SHARED: e.setType(LockType.SHARED_WRITE); break;
-          case LOCK_EXCLUSIVE: e.setType(LockType.EXCLUSIVE); break;
-          case LOCK_SHARED: e.setType(LockType.SHARED_READ); break;
-          default: throw new MetaException("Unknown lock type " + rs.getString(6).charAt(0));
-        }
+
+        char lockChar = rs.getString(7).charAt(0);
+        LockType lockType = LockTypeUtil.getLockTypeFromEncoding(lockChar)
+            .orElseThrow(() -> new MetaException("Unknown lock type: " + lockChar));
+        e.setType(lockType);
+
        e.setLastheartbeat(rs.getLong(8));
        long acquiredAt = rs.getLong(9);
        if (!rs.wasNull()) e.setAcquiredat(acquiredAt);
@@ -4081,13 +4067,9 @@ private void determineDatabaseProduct(Connection conn) {
        default:
          throw new MetaException("Unknown lock state " + rs.getString("HL_LOCK_STATE").charAt(0));
      }
-      switch (rs.getString("HL_LOCK_TYPE").charAt(0)) {
-        case LOCK_EXCLUSIVE: type = LockType.EXCLUSIVE; break;
-        case LOCK_SHARED: type = LockType.SHARED_READ; break;
-        case LOCK_SEMI_SHARED: type = LockType.SHARED_WRITE; break;
-        default:
-          throw new MetaException("Unknown lock type " + rs.getString("HL_LOCK_TYPE").charAt(0));
-      }
+      char lockChar = rs.getString("HL_LOCK_TYPE").charAt(0);
+      type = LockTypeUtil.getLockTypeFromEncoding(lockChar)
+          .orElseThrow(() -> new MetaException("Unknown lock type: " + lockChar));
      txnId = rs.getLong("HL_TXNID");//returns 0 if value is NULL
    }
    LockInfo(ShowLocksResponseElement e) {
@@ -4175,37 +4157,32 @@ public int compare(LockInfo info1, LockInfo info2) {
   * be acquired once it sees S(DB). So need to check stricter locks first.
   */
  private final static class LockTypeComparator implements Comparator<LockType> {
+    // the higher the integer value, the more restrictive the lock type is
+    private static final Map<LockType, Integer> orderOfRestrictiveness = new EnumMap<>(LockType.class);
+    static {
+      orderOfRestrictiveness.put(LockType.SHARED_READ, 1);
+      orderOfRestrictiveness.put(LockType.SHARED_WRITE, 2);
+      orderOfRestrictiveness.put(LockType.EXCL_WRITE, 3);
+      orderOfRestrictiveness.put(LockType.EXCLUSIVE, 4);
+    }
+    @Override
    public boolean equals(Object other) {
      return this == other;
    }
+    @Override
    public int compare(LockType t1, LockType t2) {
-      switch (t1) {
-        case EXCLUSIVE:
-          if(t2 == LockType.EXCLUSIVE) {
-            return 0;
-          }
-          return 1;
-        case SHARED_WRITE:
-          switch (t2) {
-            case EXCLUSIVE:
-              return -1;
-            case SHARED_WRITE:
-              return 0;
-            case SHARED_READ:
-              return 1;
-            default:
-              throw new RuntimeException("Unexpected LockType: " + t2);
-          }
-        case SHARED_READ:
-          if(t2 == LockType.SHARED_READ) {
-            return 0;
-          }
-          return -1;
-        default:
-          throw new RuntimeException("Unexpected LockType: " + t1);
+      Integer t1Restrictiveness = orderOfRestrictiveness.get(t1);
+      Integer t2Restrictiveness = orderOfRestrictiveness.get(t2);
+      if (t1Restrictiveness == null) {
+        throw new RuntimeException("Unexpected LockType: " + t1);
      }
+      if (t2Restrictiveness == null) {
+        throw new RuntimeException("Unexpected LockType: " + t2);
+      }
+      return Integer.compare(t1Restrictiveness, t2Restrictiveness);
    }
  }
+
  private enum LockAction {ACQUIRE, WAIT, KEEP_LOOKING}

  // A jump table to figure out whether to wait, acquire,
diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java
index da38a6bbd3..b3a1f826bb 100644
--- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java
+++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java
@@ -236,15 +236,15 @@ public static String getFullTableName(String dbName, String tableName) {
   * Note that, in this method, "a composite 'IN' clause" is defined as "a list of multiple 'IN'
   * clauses in a query".
   *
-   * @param queries OUT: Array of query strings
-   * @param prefix IN: Part of the query that comes before IN list
-   * @param suffix IN: Part of the query that comes after IN list
-   * @param inList IN: the list with IN list values
-   * @param inColumn IN: single column name of IN list operator
-   * @param addParens IN: add a pair of parenthesis outside the IN lists
-   *                  e.g. "(id in (1,2,3) OR id in (4,5,6))"
-   * @param notIn IN: is this for building a 'NOT IN' composite clause?
-   * @return OUT: a list of the count of IN list values that are in each of the corresponding queries
+   * @param queries IN-OUT: Array of query strings
+   * @param prefix IN: Part of the query that comes before IN list
+   * @param suffix IN: Part of the query that comes after IN list
+   * @param inList IN: the list with IN list values
+   * @param inColumn IN: single column name of IN list operator
+   * @param addParens IN: add a pair of parenthesis outside the IN lists
+   *                  e.g. "(id in (1,2,3) OR id in (4,5,6))"
+   * @param notIn IN: is this for building a 'NOT IN' composite clause?
+   * @return OUT: a list of the count of IN list values that are in each of the corresponding queries
   */
  public static List<Integer> buildQueryWithINClauseStrings(Configuration conf, List<String> queries, StringBuilder prefix,
      StringBuilder suffix, List<Long> inList, String inColumn, boolean addParens, boolean notIn) {
diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/utils/LockTypeUtil.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/utils/LockTypeUtil.java
new file mode 100644
index 0000000000..1e87f94d6c
--- /dev/null
+++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/utils/LockTypeUtil.java
@@ -0,0 +1,36 @@
+package org.apache.hadoop.hive.metastore.utils;
+
+import com.google.common.collect.BiMap;
+import com.google.common.collect.EnumHashBiMap;
+import org.apache.hadoop.hive.metastore.api.LockType;
+
+import java.util.Optional;
+
+/**
+ * Provides utility methods for {@link org.apache.hadoop.hive.metastore.api.LockType}.
+ * One use case is to encapsulate each lock type's persistence encoding, since
+ * Thrift-generated enums cannot be extended with character values.
+ */
+public class LockTypeUtil {
+
+  private static final char UNKNOWN_LOCK_TYPE_ENCODING = 'z';
+  private static final BiMap<LockType, Character> persistenceEncodings = EnumHashBiMap.create(LockType.class);
+  static {
+    persistenceEncodings.put(LockType.SHARED_READ, 'r');
+    persistenceEncodings.put(LockType.SHARED_WRITE, 'w');
+    persistenceEncodings.put(LockType.EXCLUSIVE, 'e');
+    persistenceEncodings.put(LockType.EXCL_WRITE, 'x');
+  }
+
+  public static char getEncoding(LockType lockType) {
+    return persistenceEncodings.getOrDefault(lockType, UNKNOWN_LOCK_TYPE_ENCODING);
+  }
+
+  public static String getEncodingAsStr(LockType lockType) {
+    return Character.toString(getEncoding(lockType));
+  }
+
+  public static Optional<LockType> getLockTypeFromEncoding(char encoding) {
+    return Optional.ofNullable(persistenceEncodings.inverse().get(encoding));
+  }
+}
diff --git standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java
index 1dfc105958..5f3db52c2f 100644
--- standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java
+++ standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java
@@ -222,13 +222,13 @@ public void testLocks() throws Exception {
    rqstBuilder.addLockComponent(new LockComponentBuilder()
        .setDbName("mydb")
        .setTableName("yourtable")
-        .setSemiShared()
+        .setSharedWrite()
        .setOperationType(DataOperationType.NO_TXN)
        .build());
    rqstBuilder.addLockComponent(new LockComponentBuilder()
        .setDbName("yourdb")
        .setOperationType(DataOperationType.NO_TXN)
-        .setShared()
+        .setSharedRead()
        .build());

    rqstBuilder.setUser("fred");
@@ -255,18 +255,18 @@ public void testLocksWithTxn() throws Exception {
        .setDbName("mydb")
        .setTableName("mytable")
        .setPartitionName("mypartition")
-        .setSemiShared()
+        .setSharedWrite()
        .setOperationType(DataOperationType.UPDATE)
        .build())
      .addLockComponent(new LockComponentBuilder()
        .setDbName("mydb")
        .setTableName("yourtable")
-        .setSemiShared()
+        .setSharedWrite()
        .setOperationType(DataOperationType.UPDATE)
        .build())
      .addLockComponent(new LockComponentBuilder()
        .setDbName("yourdb")
-        .setShared()
+        .setSharedRead()
        .setOperationType(DataOperationType.SELECT)
        .build())
      .setUser("fred");
diff --git standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/utils/LockTypeUtilTest.java standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/utils/LockTypeUtilTest.java
new file mode 100644
index 0000000000..35fb3809a8
--- /dev/null
+++ standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/utils/LockTypeUtilTest.java
@@ -0,0 +1,30 @@
+package org.apache.hadoop.hive.metastore.utils;
+
+import org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest;
+import org.apache.hadoop.hive.metastore.api.LockType;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+
+@Category(MetastoreUnitTest.class)
+public class LockTypeUtilTest {
+
+  @Test
+  public void testGetEncoding() {
+    assertEquals('r', LockTypeUtil.getEncoding(LockType.SHARED_READ));
+    assertEquals('w', LockTypeUtil.getEncoding(LockType.SHARED_WRITE));
+    assertEquals('e', LockTypeUtil.getEncoding(LockType.EXCLUSIVE));
+    assertEquals('x', LockTypeUtil.getEncoding(LockType.EXCL_WRITE));
+  }
+
+  @Test
+  public void testGetLockType() {
+    assertEquals(LockType.SHARED_READ, LockTypeUtil.getLockTypeFromEncoding('r').get());
+    assertEquals(LockType.SHARED_WRITE, LockTypeUtil.getLockTypeFromEncoding('w').get());
+    assertEquals(LockType.EXCLUSIVE, LockTypeUtil.getLockTypeFromEncoding('e').get());
+    assertEquals(LockType.EXCL_WRITE, LockTypeUtil.getLockTypeFromEncoding('x').get());
+    assertFalse(LockTypeUtil.getLockTypeFromEncoding('y').isPresent());
+  }
+}
diff --git streaming/src/java/org/apache/hive/streaming/TransactionBatch.java streaming/src/java/org/apache/hive/streaming/TransactionBatch.java
index d44065018f..40239ab53e 100644
--- streaming/src/java/org/apache/hive/streaming/TransactionBatch.java
+++ streaming/src/java/org/apache/hive/streaming/TransactionBatch.java
@@ -439,7 +439,7 @@ private static LockRequest createLockRequest(final HiveStreamingConnection conne
    LockComponentBuilder lockCompBuilder = new LockComponentBuilder()
        .setDbName(connection.getDatabase())
        .setTableName(connection.getTable().getTableName())
-        .setShared()
+        .setSharedRead()
        .setOperationType(DataOperationType.INSERT);
    if (connection.isDynamicPartitioning()) {
      lockCompBuilder.setIsDynamicPartitionWrite(true);
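
Reviewer note, not part of the patch: the LockTypeUtil class added above becomes the single owner of the one-character encoding written to the HIVE_LOCKS.HL_LOCK_TYPE column ('r' = SHARED_READ, 'w' = SHARED_WRITE, 'e' = EXCLUSIVE, 'x' = EXCL_WRITE, with 'z' returned for any unmapped type). A minimal round-trip sketch against the patched API follows; the class name LockTypeEncodingDemo is hypothetical, and it assumes the patched metastore classes are on the classpath.

import org.apache.hadoop.hive.metastore.api.LockType;
import org.apache.hadoop.hive.metastore.utils.LockTypeUtil;

public class LockTypeEncodingDemo {
  public static void main(String[] args) {
    for (LockType type : LockType.values()) {
      // Encode the lock type to the character that would be persisted in HIVE_LOCKS.
      char encoding = LockTypeUtil.getEncoding(type);
      // Decode it back; unmapped characters come back as Optional.empty().
      LockType decoded = LockTypeUtil.getLockTypeFromEncoding(encoding)
          .orElseThrow(() -> new IllegalStateException("Unmapped encoding: " + encoding));
      System.out.println(type + " <-> '" + encoding + "' round-trips to " + decoded);
    }
  }
}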