diff --git common/src/java/org/apache/hadoop/hive/conf/HiveConf.java common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index fc53ed397e..0ad223ae7e 100644
--- common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -2644,6 +2644,8 @@ private static void populateLlapDaemonVarsSet(Set<String> llapDaemonVarsSetLocal
         "In nonstrict mode, for non-ACID resources, INSERT will only acquire shared lock, which\n" +
         "allows two concurrent writes to the same partition but still lets lock manager prevent\n" +
         "DROP TABLE etc. when the table is being written to"),
+    HIVE_READ_LOCK("hive.read.lock", true,
+        "Flag to turn off acquiring read locks; when set to false, shared read locks are not acquired for inputs."),
     TXN_OVERWRITE_X_LOCK("hive.txn.xlock.iow", true,
         "Ensures commands with OVERWRITE (such as INSERT OVERWRITE) acquire Exclusive locks for\n" +
         "transactional tables. This ensures that inserts (w/o overwrite) running concurrently\n" +
diff --git ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
index 14d18844b3..dc479aa5ca 100644
--- ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
+++ ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
@@ -2843,8 +2843,13 @@ private static boolean isLockableTable(Table t) {
       HiveConf conf) {
     List<LockComponent> lockComponents = new ArrayList<>();
     // For each source to read, get a shared lock
+    boolean skipReadLock = !conf.getBoolVar(ConfVars.HIVE_READ_LOCK);
     for (ReadEntity input : inputs) {
-      if (input.isDummy() || !input.needsLock() || input.isUpdateOrDelete() || !AcidUtils.needsLock(input)) {
+      if (input.isDummy()
+          || !input.needsLock()
+          || input.isUpdateOrDelete()
+          || !AcidUtils.needsLock(input)
+          || skipReadLock) {
         // We don't want to acquire read locks during update or delete as we'll be acquiring write
         // locks instead. Also, there's no need to lock temp tables since they're session wide
         continue;
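
For reviewers who want to exercise the new flag: a minimal sketch (not part of the patch) of how `hive.read.lock` is read and toggled through `HiveConf`. The class name `ReadLockFlagExample` is invented for illustration; `ConfVars.HIVE_READ_LOCK` is the enum constant added in the hunk above.

```java
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;

// Hypothetical example class, not part of the patch.
public class ReadLockFlagExample {
  public static void main(String[] args) {
    HiveConf conf = new HiveConf();

    // Default: hive.read.lock=true, so the patched loop in AcidUtils still
    // builds a shared-lock component for every lockable ReadEntity.
    System.out.println("hive.read.lock = " + conf.getBoolVar(ConfVars.HIVE_READ_LOCK));

    // Turn the flag off; with this patch, skipReadLock becomes true and every
    // ReadEntity is skipped, so no read LockComponents are produced.
    conf.setBoolVar(ConfVars.HIVE_READ_LOCK, false);
    System.out.println("hive.read.lock = " + conf.getBoolVar(ConfVars.HIVE_READ_LOCK));
  }
}
```

At the session level the same toggle should be reachable with `SET hive.read.lock=false;`, assuming the property is not placed on the restricted config list.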