diff --git ql/src/java/org/apache/hadoop/hive/ql/Driver.java ql/src/java/org/apache/hadoop/hive/ql/Driver.java
index 55c76d4..d14bbcb 100644
--- ql/src/java/org/apache/hadoop/hive/ql/Driver.java
+++ ql/src/java/org/apache/hadoop/hive/ql/Driver.java
@@ -777,6 +777,28 @@ public QueryPlan getPlan() {
   }

   /**
+   * Dedup the list of lock objects so that there is only one lock per table/partition.
+   * If there are both a shared and an exclusive lock for the same object, they are deduped
+   * to a single exclusive lock.
+   * @param lockObjects the list of locks to dedup, modified in place
+   */
+  static void dedupLockObjects(List<HiveLockObj> lockObjects) {
+    Map<String, HiveLockObj> lockMap = new HashMap<String, HiveLockObj>();
+    for (HiveLockObj lockObj : lockObjects) {
+      String lockName = lockObj.getName();
+      HiveLockObj foundLock = lockMap.get(lockName);
+      if (foundLock == null || lockObj.getMode() == HiveLockMode.EXCLUSIVE) {
+        lockMap.put(lockName, lockObj);
+      }
+    }
+    // copy the set of deduped locks back to the original list
+    lockObjects.clear();
+    for (HiveLockObj lockObj : lockMap.values()) {
+      lockObjects.add(lockObj);
+    }
+  }
+
+  /**
    * Acquire read and write locks needed by the statement. The list of objects to be locked is
    * obtained from the inputs and outputs populated by the compiler. The lock acquisition scheme is
    * pretty simple. If all the locks cannot be obtained, error out. Deadlock is avoided by making
@@ -843,6 +865,7 @@ else if (output.getTyp() == WriteEntity.Type.DUMMYPARTITION) {
         )
       );

+      dedupLockObjects(lockObjects);
       List<HiveLock> hiveLocks = ctx.getHiveLockMgr().lock(lockObjects, false);

       if (hiveLocks == null) {
diff --git ql/src/test/org/apache/hadoop/hive/ql/TestDriver.java ql/src/test/org/apache/hadoop/hive/ql/TestDriver.java
new file mode 100644
index 0000000..e00a73d
--- /dev/null
+++ ql/src/test/org/apache/hadoop/hive/ql/TestDriver.java
@@ -0,0 +1,57 @@
+package org.apache.hadoop.hive.ql;
+
+import java.util.*;
+import junit.framework.TestCase;
+
+import org.apache.hadoop.hive.ql.lockmgr.HiveLockMode;
+import org.apache.hadoop.hive.ql.lockmgr.HiveLockObj;
+import org.apache.hadoop.hive.ql.lockmgr.HiveLockObject;
+import org.apache.hadoop.hive.ql.lockmgr.HiveLockObject.HiveLockObjectData;
+
+public class TestDriver extends TestCase {
+  public void testDedupLockObjects() {
+    List<HiveLockObj> lockObjs = new ArrayList<HiveLockObj>();
+    String path1 = "path1";
+    String path2 = "path2";
+    HiveLockObjectData lockData1 = new HiveLockObjectData(
+        "query1", "1", "IMPLICIT", "drop table table1");
+    HiveLockObjectData lockData2 = new HiveLockObjectData(
+        "query1", "1", "IMPLICIT", "drop table table1");
+
+    // Start with the following locks:
+    // [path1, shared]
+    // [path1, exclusive]
+    // [path2, shared]
+    // [path2, shared]
+    // [path2, shared]
+    lockObjs.add(new HiveLockObj(new HiveLockObject(path1, lockData1), HiveLockMode.SHARED));
+    String name1 = lockObjs.get(lockObjs.size() - 1).getName();
+    lockObjs.add(new HiveLockObj(new HiveLockObject(path1, lockData1), HiveLockMode.EXCLUSIVE));
+    lockObjs.add(new HiveLockObj(new HiveLockObject(path2, lockData2), HiveLockMode.SHARED));
+    String name2 = lockObjs.get(lockObjs.size() - 1).getName();
+    lockObjs.add(new HiveLockObj(new HiveLockObject(path2, lockData2), HiveLockMode.SHARED));
+    lockObjs.add(new HiveLockObj(new HiveLockObject(path2, lockData2), HiveLockMode.SHARED));
+
+    Driver.dedupLockObjects(lockObjs);
+
+    // After dedup we should be left with 2 locks:
+    // [path1, exclusive]
+    // [path2, shared]
+    assertEquals("Locks should be deduped", 2, lockObjs.size());
+
+    Comparator<HiveLockObj> cmp = new Comparator<HiveLockObj>() {
+      public int compare(HiveLockObj lock1, HiveLockObj lock2) {
+        return lock1.getName().compareTo(lock2.getName());
+      }
+    };
+    Collections.sort(lockObjs, cmp);
+
+    HiveLockObj lockObj = lockObjs.get(0);
+    assertEquals(name1, lockObj.getName());
+    assertEquals(HiveLockMode.EXCLUSIVE, lockObj.getMode());
+
+    lockObj = lockObjs.get(1);
+    assertEquals(name2, lockObj.getName());
+    assertEquals(HiveLockMode.SHARED, lockObj.getMode());
+  }
+}
diff --git service/src/test/org/apache/hive/service/cli/thrift/ThriftCLIServiceTest.java service/src/test/org/apache/hive/service/cli/thrift/ThriftCLIServiceTest.java
index 0032925..ff7166d 100644
--- service/src/test/org/apache/hive/service/cli/thrift/ThriftCLIServiceTest.java
+++ service/src/test/org/apache/hive/service/cli/thrift/ThriftCLIServiceTest.java
@@ -197,6 +197,9 @@ public void testExecuteStatement() throws Exception {
     assertEquals("Query should be finished", OperationState.FINISHED,
         OperationState.getOperationState(opStatusResp.getOperationState()));

+    queryString = "DROP TABLE TEST_EXEC_THRIFT";
+    executeQuerySync(queryString, sessHandle);
+
     // Close the session; ignore exception if any
     TCloseSessionReq closeReq = new TCloseSessionReq(sessHandle);
     client.CloseSession(closeReq);
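
Note on the patch above: dedupLockObjects() keeps the first lock recorded for a given name and lets any later EXCLUSIVE lock replace it, so a SHARED/EXCLUSIVE pair on the same path collapses to the single EXCLUSIVE lock. The following standalone sketch models that rule outside of Hive; the Lock and Mode types and the class name DedupSketch are illustrative stand-ins for HiveLockObj/HiveLockMode invented for this demo, not Hive classes.

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Illustrative only: a self-contained model of the dedup rule in
// Driver.dedupLockObjects(). Lock and Mode are stand-ins for Hive's
// HiveLockObj and HiveLockMode; they are not real Hive classes.
public class DedupSketch {
  enum Mode { SHARED, EXCLUSIVE }

  static class Lock {
    final String name;
    final Mode mode;
    Lock(String name, Mode mode) { this.name = name; this.mode = mode; }
    @Override
    public String toString() { return "[" + name + ", " + mode + "]"; }
  }

  // Same shape as the patched method: record one lock per name, letting
  // a later EXCLUSIVE lock replace whatever entry was recorded before it.
  static void dedup(List<Lock> locks) {
    Map<String, Lock> byName = new LinkedHashMap<String, Lock>();
    for (Lock lock : locks) {
      Lock found = byName.get(lock.name);
      if (found == null || lock.mode == Mode.EXCLUSIVE) {
        byName.put(lock.name, lock);
      }
    }
    // copy the deduped locks back into the original list
    locks.clear();
    locks.addAll(byName.values());
  }

  public static void main(String[] args) {
    List<Lock> locks = new ArrayList<Lock>();
    locks.add(new Lock("path1", Mode.SHARED));
    locks.add(new Lock("path1", Mode.EXCLUSIVE));
    locks.add(new Lock("path2", Mode.SHARED));
    locks.add(new Lock("path2", Mode.SHARED));
    dedup(locks);
    System.out.println(locks); // [[path1, EXCLUSIVE], [path2, SHARED]]
  }
}

Running main() prints [[path1, EXCLUSIVE], [path2, SHARED]], matching the expectations in testDedupLockObjects(). The LinkedHashMap here is used only to make the demo's output order deterministic; the patched method uses a plain HashMap, so callers should not rely on the order of the deduped list (the test sorts by name before asserting for the same reason).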