diff --git ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java
index fdf6676..11d255f 100644
--- ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java
+++ ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hive.ql.lockmgr;
 
+import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.common.ValidTxnList;
@@ -26,6 +27,7 @@
 import org.apache.hadoop.hive.ql.Context;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.QueryPlan;
+import org.apache.hadoop.hive.ql.hooks.Entity;
 import org.apache.hadoop.hive.ql.hooks.ReadEntity;
 import org.apache.hadoop.hive.ql.hooks.WriteEntity;
 import org.apache.hadoop.hive.ql.metadata.DummyPartition;
@@ -126,26 +128,65 @@ public void acquireLocks(QueryPlan plan, Context ctx, String username) throws Lo
     for (WriteEntity output : plan.getOutputs()) {
       LOG.debug("Adding " + output.getName() + " to list of lock outputs");
-      List lockObj = null;
-      if (output.getType() == WriteEntity.Type.DATABASE) {
-        lockObjects.addAll(getLockObjects(plan, output.getDatabase(), null,
-            null,
-            output.isComplete() ? HiveLockMode.EXCLUSIVE : HiveLockMode.SHARED));
-      } else if (output.getTyp() == WriteEntity.Type.TABLE) {
-        lockObj = getLockObjects(plan, null, output.getTable(), null,
-            output.isComplete() ? HiveLockMode.EXCLUSIVE : HiveLockMode.SHARED);
-      } else if (output.getTyp() == WriteEntity.Type.PARTITION) {
-        lockObj = getLockObjects(plan, null, null, output.getPartition(),
-            HiveLockMode.EXCLUSIVE);
+      if (output.getType() == Entity.Type.DFS_DIR || output.getType() == Entity.Type.LOCAL_DIR) {
+        // We don't lock files or directories.
+        continue;
       }
-      // In case of dynamic queries, it is possible to have incomplete dummy partitions
-      else if (output.getTyp() == WriteEntity.Type.DUMMYPARTITION) {
-        lockObj = getLockObjects(plan, null, null, output.getPartition(),
-            HiveLockMode.SHARED);
+
+      List<HiveLockObj> lockObj;
+      Database lockDb = null;
+      Table lockTable = null;
+      Partition lockPt = null;
+      HiveLockMode mode;
+      boolean isLockDb = false;
+
+      switch (output.getWriteType()) {
+        case DDL_EXCLUSIVE:
+        case INSERT_OVERWRITE:
+          mode = HiveLockMode.EXCLUSIVE;
+          break;
+
+        case INSERT:
+        case DDL_SHARED:
+          mode = HiveLockMode.SHARED;
+          break;
+
+        case UPDATE:
+        case DELETE:
+          mode = HiveLockMode.SEMI_SHARED;
+          break;
+
+        case DDL_NO_LOCK:
+          continue; // No lock required here
+
+        default:
+          throw new RuntimeException("Unknown write type " +
+              output.getWriteType().toString());
+      }
+      switch (output.getType()) {
+        case DATABASE:
+          lockDb = output.getDatabase();
+          isLockDb = true;
+          break;
+
+        case TABLE:
+        case DUMMYPARTITION:
+          lockTable = output.getTable();
+          break;
+
+        case PARTITION:
+          lockPt = output.getPartition();
+          break;
+
+        default:
+          // This is a file or something we don't hold locks for.
+          continue;
       }
-      if(lockObj != null) {
-        lockObjects.addAll(lockObj);
+      lockObj = getLockObjects(plan, lockDb, lockTable, lockPt, mode);
+      lockObjects.addAll(lockObj);
+
+      if (!isLockDb) {
         ctx.getOutputLockObjects().put(output, lockObj);
       }
     }