diff --git common/src/java/org/apache/hadoop/hive/conf/HiveConf.java common/src/java/org/apache/hadoop/hive/conf/HiveConf.java index 604bea7..344f7b4 100644 --- common/src/java/org/apache/hadoop/hive/conf/HiveConf.java +++ common/src/java/org/apache/hadoop/hive/conf/HiveConf.java @@ -726,6 +726,9 @@ HIVE_LOCK_SLEEP_BETWEEN_RETRIES("hive.lock.sleep.between.retries", 60), HIVE_LOCK_MAPRED_ONLY("hive.lock.mapred.only.operation", false), + HIVE_LOCK_TIMEOUT_MSEC("hive.lock.timeout.msec", 2000), + HIVE_UNLOCK_TIMEOUT_MSEC("hive.unlock.timeout.msec", 2000), + HIVE_ZOOKEEPER_QUORUM("hive.zookeeper.quorum", ""), HIVE_ZOOKEEPER_CLIENT_PORT("hive.zookeeper.client.port", "2181"), HIVE_ZOOKEEPER_SESSION_TIMEOUT("hive.zookeeper.session.timeout", 600*1000), diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java index ca51e71..b3d8258 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java @@ -38,13 +38,11 @@ import java.util.Iterator; import java.util.List; import java.util.Map; -import java.util.Properties; import java.util.Map.Entry; import java.util.Set; import java.util.SortedSet; import java.util.TreeSet; -import org.apache.commons.lang.ArrayUtils; import org.apache.commons.lang.StringEscapeUtils; import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; @@ -116,7 +114,6 @@ import org.apache.hadoop.hive.ql.metadata.formatting.MetaDataFormatter; import org.apache.hadoop.hive.ql.parse.AlterTablePartMergeFilesDesc; import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; -import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.AddPartitionDesc; import org.apache.hadoop.hive.ql.plan.AlterDatabaseDesc; import org.apache.hadoop.hive.ql.plan.AlterIndexDesc; @@ -2725,7 +2722,7 @@ public int compare(HiveLock o1, HiveLock o2) { LOG.warn("show function: " + stringifyException(e)); 
return 1; } catch (Exception e) { - throw new HiveException(e.toString()); + throw new HiveException(e.toString(), e); } finally { IOUtils.closeStream(outStream); } @@ -2938,6 +2935,7 @@ private int lockTable(LockTableDesc lockTbl) throws HiveException { if (lockMgr == null) { throw new HiveException("lock Table LockManager not specified"); } + String queryID = HiveConf.getVar(conf, ConfVars.HIVEQUERYID); HiveLockMode mode = HiveLockMode.valueOf(lockTbl.getMode()); String tabName = lockTbl.getTableName(); @@ -2948,10 +2946,10 @@ private int lockTable(LockTableDesc lockTbl) throws HiveException { Map partSpec = lockTbl.getPartSpec(); HiveLockObjectData lockData = - new HiveLockObjectData(lockTbl.getQueryId(), - String.valueOf(System.currentTimeMillis()), - "EXPLICIT", - lockTbl.getQueryStr()); + new HiveLockObjectData(queryID, + String.valueOf(System.currentTimeMillis()), + "EXPLICIT", + lockTbl.getQueryStr()); if (partSpec == null) { HiveLock lck = lockMgr.lock(new HiveLockObject(tbl, lockData), mode, true); diff --git ql/src/java/org/apache/hadoop/hive/ql/lockmgr/AbstractLockManager.java ql/src/java/org/apache/hadoop/hive/ql/lockmgr/AbstractLockManager.java new file mode 100644 index 0000000..a00336b --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/lockmgr/AbstractLockManager.java @@ -0,0 +1,59 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.lockmgr; + +import org.apache.hadoop.hive.conf.HiveConf; + +public abstract class AbstractLockManager implements HiveLockManager { + + public abstract HiveLockManagerCtx getContext(); + + public HiveConf getConf() { + return getContext().getConf(); + } + + @Override + public void prepareRetry() throws LockException { + } + + @Override + public void refresh() { + getContext().refresh(); + } + + protected int lockTimeout() { + return getContext().lockTimeout(); + } + + protected int unlockTimeout() { + return getContext().unlockTimeout(); + } + + protected int numRetriesForLock() { + return getContext().numRetriesForLock(); + } + + protected int numRetriesForUnLock() { + return getContext().numRetriesForUnLock(); + } + + protected int sleepTime() { + return getContext().sleepTime(); + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java index 8354ad9..e443291 100644 --- ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java +++ ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java @@ -33,7 +33,6 @@ import org.apache.hadoop.hive.ql.metadata.Partition; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.session.SessionState; -import org.apache.hadoop.util.ReflectionUtils; import java.util.*; @@ -58,16 +57,8 @@ public HiveLockManager getLockManager() throws LockException { boolean supportConcurrency = conf.getBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY); if 
(supportConcurrency) { - String lockMgrName = - conf.getVar(HiveConf.ConfVars.HIVE_LOCK_MANAGER); - if ((lockMgrName == null) || (lockMgrName.isEmpty())) { - throw new LockException(ErrorMsg.LOCKMGR_NOT_SPECIFIED.getMsg()); - } - try { - LOG.info("Creating lock manager of type " + lockMgrName); - lockMgr = (HiveLockManager)ReflectionUtils.newInstance( - conf.getClassByName(lockMgrName), conf); + lockMgr = LockManagers.getLockManager(new HiveLockManagerCtx(conf)); lockMgr.setContext(new HiveLockManagerCtx(conf)); } catch (Exception e) { // set hiveLockMgr to null just in case this invalid manager got set to diff --git ql/src/java/org/apache/hadoop/hive/ql/lockmgr/EmbeddedLockManager.java ql/src/java/org/apache/hadoop/hive/ql/lockmgr/EmbeddedLockManager.java index 11434a0..eab140a 100644 --- ql/src/java/org/apache/hadoop/hive/ql/lockmgr/EmbeddedLockManager.java +++ ql/src/java/org/apache/hadoop/hive/ql/lockmgr/EmbeddedLockManager.java @@ -18,85 +18,61 @@ package org.apache.hadoop.hive.ql.lockmgr; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Stack; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.locks.ReentrantLock; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.lockmgr.HiveLockObject.HiveLockObjectData; -import org.apache.hadoop.hive.ql.metadata.*; - -import java.util.*; -import java.util.concurrent.locks.ReentrantLock; +import org.apache.hadoop.hive.ql.metadata.DummyPartition; +import org.apache.hadoop.hive.ql.metadata.Hive; +import org.apache.hadoop.hive.ql.metadata.Partition; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.session.SessionState; /** * shared lock manager for dedicated hive server. 
all locks are managed in memory */ -public class EmbeddedLockManager implements HiveLockManager { +public class EmbeddedLockManager extends SharedLockManager { private static final Log LOG = LogFactory.getLog("EmbeddedHiveLockManager"); + private static final SessionState.LogHelper console = new SessionState.LogHelper(LOG); private final Node root = new Node(); - private HiveLockManagerCtx ctx; - - private int sleepTime = 1000; - private int numRetriesForLock = 0; - private int numRetriesForUnLock = 0; - public EmbeddedLockManager() { } - public void setContext(HiveLockManagerCtx ctx) throws LockException { - this.ctx = ctx; - refresh(); - } - - public HiveLock lock(HiveLockObject key, HiveLockMode mode, boolean keepAlive) - throws LockException { - return lock(key, mode, numRetriesForLock, sleepTime); - } - - public List lock(List objs, boolean keepAlive) throws LockException { - return lock(objs, numRetriesForLock, sleepTime); - } - - public void unlock(HiveLock hiveLock) throws LockException { - unlock(hiveLock, numRetriesForUnLock, sleepTime); - } - - public void releaseLocks(List hiveLocks) { - releaseLocks(hiveLocks, numRetriesForUnLock, sleepTime); - } - public List getLocks(boolean verifyTablePartitions, boolean fetchData) throws LockException { - return getLocks(verifyTablePartitions, fetchData, ctx.getConf()); + return getLocks(verifyTablePartitions, fetchData, getConf()); } public List getLocks(HiveLockObject key, boolean verifyTablePartitions, boolean fetchData) throws LockException { - return getLocks(key, verifyTablePartitions, fetchData, ctx.getConf()); + return getLocks(key, verifyTablePartitions, fetchData, getConf()); } - public void prepareRetry() { - } - - public void refresh() { - HiveConf conf = ctx.getConf(); - sleepTime = conf.getIntVar(HiveConf.ConfVars.HIVE_LOCK_SLEEP_BETWEEN_RETRIES) * 1000; - numRetriesForLock = conf.getIntVar(HiveConf.ConfVars.HIVE_LOCK_NUMRETRIES); - numRetriesForUnLock = 
conf.getIntVar(HiveConf.ConfVars.HIVE_UNLOCK_NUMRETRIES); - } - - public HiveLock lock(HiveLockObject key, HiveLockMode mode, int numRetriesForLock, int sleepTime) + public HiveLock lock(HiveLockObject key, HiveLockMode mode, boolean keepAlive) throws LockException { - for (int i = 0; i <= numRetriesForLock; i++) { + for (int i = 0; i <= numRetriesForLock(); i++) { if (i > 0) { - sleep(sleepTime); + sleep(sleepTime()); } - HiveLock lock = lockPrimitive(key, mode); + HiveLock lock = root.lock(key, mode, lockTimeout()); if (lock != null) { return lock; } + console.printError("conflicting lock present for " + key.getDisplayName() + " mode " + mode); } return null; } @@ -109,14 +85,14 @@ private void sleep(int sleepTime) { } } - public List lock(List objs, int numRetriesForLock, int sleepTime) + public List lock(List objs, boolean keepAlive) throws LockException { sortLocks(objs); - for (int i = 0; i <= numRetriesForLock; i++) { + for (int i = 0; i <= numRetriesForLock(); i++) { if (i > 0) { - sleep(sleepTime); + sleep(sleepTime()); } - List locks = lockPrimitive(objs, numRetriesForLock, sleepTime); + List locks = lockPrimitive(objs); if (locks != null) { return locks; } @@ -124,20 +100,14 @@ private void sleep(int sleepTime) { return null; } - private HiveLock lockPrimitive(HiveLockObject key, HiveLockMode mode) throws LockException { - if (root.lock(key.getPaths(), key.getData(), mode == HiveLockMode.EXCLUSIVE)) { - return new SimpleHiveLock(key, mode); - } - return null; - } - - private List lockPrimitive(List objs, int numRetriesForLock, - int sleepTime) throws LockException { + private List lockPrimitive(List objs) throws LockException { List locks = new ArrayList(); for (HiveLockObj obj : objs) { - HiveLock lock = lockPrimitive(obj.getObj(), obj.getMode()); + HiveLock lock = root.lock(obj.getObj(), obj.getMode(), lockTimeout()); if (lock == null) { - releaseLocks(locks, numRetriesForLock, sleepTime); + console.printError("conflicting lock present for " + + 
obj.getObj().getDisplayName() + " mode " + obj.getMode()); + releaseLocks(locks); return null; } locks.add(lock); @@ -164,13 +134,12 @@ public int compare(HiveLockObj o1, HiveLockObj o2) { }); } - public void unlock(HiveLock hiveLock, int numRetriesForUnLock, int sleepTime) - throws LockException { + public void unlock(HiveLock hiveLock) throws LockException { String[] paths = hiveLock.getHiveLockObject().getPaths(); HiveLockObjectData data = hiveLock.getHiveLockObject().getData(); - for (int i = 0; i <= numRetriesForUnLock; i++) { + for (int i = 0; i <= numRetriesForUnLock(); i++) { if (i > 0) { - sleep(sleepTime); + sleep(sleepTime()); } if (root.unlock(paths, data)) { return; @@ -179,10 +148,10 @@ public void unlock(HiveLock hiveLock, int numRetriesForUnLock, int sleepTime) throw new LockException("Failed to release lock " + hiveLock); } - public void releaseLocks(List hiveLocks, int numRetriesForUnLock, int sleepTime) { + public void releaseLocks(List hiveLocks) { for (HiveLock locked : hiveLocks) { try { - unlock(locked, numRetriesForUnLock, sleepTime); + unlock(locked); } catch (LockException e) { LOG.info(e); } @@ -214,26 +183,18 @@ private HiveLockObject verify(boolean verify, String[] names, HiveLockObjectData if (tab == null) { return null; } - if (names.length == 2) { + if (names.length == 2 || !tab.isPartitioned()) { return new HiveLockObject(tab, data); } Map partSpec = new HashMap(); - for (int indx = 2; indx < names.length; indx++) { - String[] partVals = names[indx].split("="); + for (String partial : names[2].split("/")) { + String[] partVals = partial.split("="); partSpec.put(partVals[0], partVals[1]); } - - Partition partn; - try { - partn = db.getPartition(tab, partSpec, false); - } catch (HiveException e) { - partn = null; - } - - if (partn == null) { + if (tab.getPartitionKeys().size() != partSpec.size()) { return new HiveLockObject(new DummyPartition(tab, null, partSpec), data); } - + Partition partn = db.getPartition(tab, partSpec, false); 
return new HiveLockObject(partn, data); } catch (Exception e) { throw new LockException(e); @@ -252,7 +213,7 @@ public void close() { private class Node { - private boolean exclusive; + private HiveLockMode lockMode; private Map children; private Map datas; private final ReentrantLock lock = new ReentrantLock(); @@ -260,16 +221,22 @@ public void close() { public Node() { } - public void set(HiveLockObjectData data, boolean exclusive) { - this.exclusive = exclusive; + private SimpleHiveLock addLock(HiveLockObject lock, HiveLockMode lockMode) { + this.lockMode = lockMode; if (datas == null) { - datas = new HashMap(3); + datas = new LinkedHashMap(3); } - datas.put(data.getQueryId(), data); + datas.put(lock.getQueryId(), lock.getData()); + return new SimpleHiveLock(lock, lockMode); } - public boolean lock(String[] paths, HiveLockObjectData data, boolean exclusive) { - return lock(paths, 0, data, exclusive); + public HiveLock lock(HiveLockObject key, HiveLockMode mode, long timeout) + throws LockException { + try { + return timedLock(key, 0, mode, timeout); + } catch (InterruptedException e) { + throw new LockException(e); + } } public boolean unlock(String[] paths, HiveLockObjectData data) { @@ -296,32 +263,63 @@ public boolean unlock(String[] paths, HiveLockObjectData data) { return locks; } - private boolean lock(String[] paths, int index, HiveLockObjectData data, boolean exclusive) { - if (!lock.tryLock()) { - return false; + private HiveLock timedLock(HiveLockObject key, int index, HiveLockMode mode, long remain) + throws InterruptedException { + if (remain <= 0) { + if (!lock.tryLock()) { + return null; + } + } else { + long start = System.currentTimeMillis(); + if (!lock.tryLock(remain, TimeUnit.MILLISECONDS)) { + return null; + } + remain -= System.currentTimeMillis() - start; + if (remain <= 0 && index < key.pathNames.length) { + return null; + } } try { - if (index == paths.length) { - if (this.exclusive || exclusive && hasLock()) { - return false; - } - 
set(data, exclusive); - return true; + return lock(key, index, mode, remain); + } finally { + lock.unlock(); + } + } + + private HiveLock lock(HiveLockObject key, int index, HiveLockMode mode, long remain) + throws InterruptedException { + if (index == key.pathNames.length) { + if (mode == HiveLockMode.EXCLUSIVE && hasChild()) { + return null; // lock on child + } + if (!hasLock()) { + return addLock(key, mode); } - Node child; - if (children == null) { - children = new HashMap(3); - children.put(paths[index], child = new Node()); - } else { - child = children.get(paths[index]); - if (child == null) { - children.put(paths[index], child = new Node()); + if (lockMode != mode) { + if (isSoleOwner(key.getQueryId())) { + // update lock data & mode + return addLock(key, HiveLockMode.EXCLUSIVE); } + // other query has lock on this, fail + return null; + } + if (mode == HiveLockMode.SHARED || isSoleOwner(key.getQueryId())) { + // update lock data + return addLock(key, mode); + } + return null; + } + Node child; + if (children == null) { + children = new HashMap(3); + children.put(key.pathNames[index], child = new Node()); + } else { + child = children.get(key.pathNames[index]); + if (child == null) { + children.put(key.pathNames[index], child = new Node()); } - return child.lock(paths, index + 1, data, exclusive); - } finally { - lock.unlock(); } + return child.timedLock(key, index + 1, mode, remain); } private boolean unlock(String[] paths, int index, HiveLockObjectData data) { @@ -331,13 +329,18 @@ private boolean unlock(String[] paths, int index, HiveLockObjectData data) { try { if (index == paths.length) { if (hasLock()) { - datas.remove(data.getQueryId()); + if (data == null) { + datas.clear(); + } else { + datas.remove(data.getQueryId()); + } } + lockMode = null; return true; } - Node child = children == null ? null : children.get(paths[index]); + Node child = hasChild() ? 
children.get(paths[index]) : null; if (child == null) { - return true; // should not happen + return true; // can happen } if (child.unlock(paths, index + 1, data)) { if (!child.hasLock() && !child.hasChild()) { @@ -378,7 +381,7 @@ private void getLocks(String[] paths, int index, boolean verify, getLocks(paths, verify, fetchData, locks, conf); return; } - Node child = children.get(paths[index]); + Node child = hasChild() ? children.get(paths[index]) : null; if (child != null) { child.getLocks(paths, index + 1, verify, fetchData, locks, conf); } @@ -388,31 +391,23 @@ private void getLocks(String[] paths, int index, boolean verify, } private void getLocks(String[] paths, boolean verify, boolean fetchData, List locks, - HiveConf conf) throws LockException { - HiveLockMode lockMode = getLockMode(); - if (fetchData) { - for (HiveLockObjectData data : datas.values()) { - HiveLockObject lock = verify(verify, paths, data, conf); - if (lock != null) { - locks.add(new SimpleHiveLock(lock, lockMode)); - } - } - } else { - HiveLockObject lock = verify(verify, paths, null, conf); + HiveConf conf) throws LockException { + for (HiveLockObjectData data : datas.values()) { + HiveLockObject lock = verify(verify, paths, fetchData ? data : null, conf); if (lock != null) { locks.add(new SimpleHiveLock(lock, lockMode)); } } } - private HiveLockMode getLockMode() { - return exclusive ? 
HiveLockMode.EXCLUSIVE : HiveLockMode.SHARED; - } - private boolean hasLock() { return datas != null && !datas.isEmpty(); } + private boolean isSoleOwner(String queryId) { + return hasLock() && datas.size() == 1 && datas.containsKey(queryId); + } + private boolean hasChild() { return children != null && !children.isEmpty(); } @@ -450,8 +445,8 @@ public boolean equals(Object o) { } SimpleHiveLock simpleLock = (SimpleHiveLock) o; - return lockObj.equals(simpleLock.getHiveLockObject()) && - lockMode == simpleLock.getHiveLockMode(); + return lockMode == simpleLock.lockMode && + lockObj.equals(simpleLock.lockObj); } } } diff --git ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveLockManagerCtx.java ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveLockManagerCtx.java index 323e8c0..7b9c8a5 100644 --- ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveLockManagerCtx.java +++ ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveLockManagerCtx.java @@ -21,20 +21,50 @@ import org.apache.hadoop.hive.conf.HiveConf; public class HiveLockManagerCtx { - HiveConf conf; - public HiveLockManagerCtx() { - } + private final HiveConf conf; + + private int sleepTime; + private int numRetriesForLock; + private int numRetriesForUnLock; + + private int lockTimeout; + private int unlockTimeout; public HiveLockManagerCtx(HiveConf conf) { this.conf = conf; + refresh(); } public HiveConf getConf() { return conf; } - public void setConf(HiveConf conf) { - this.conf = conf; + public void refresh() { + sleepTime = conf.getIntVar(HiveConf.ConfVars.HIVE_LOCK_SLEEP_BETWEEN_RETRIES) * 1000; + numRetriesForLock = conf.getIntVar(HiveConf.ConfVars.HIVE_LOCK_NUMRETRIES); + numRetriesForUnLock = conf.getIntVar(HiveConf.ConfVars.HIVE_UNLOCK_NUMRETRIES); + lockTimeout = conf.getIntVar(HiveConf.ConfVars.HIVE_LOCK_TIMEOUT_MSEC); + unlockTimeout = conf.getIntVar(HiveConf.ConfVars.HIVE_UNLOCK_TIMEOUT_MSEC); + } + + public int lockTimeout() { + return lockTimeout; + } + + public int unlockTimeout() { + return 
unlockTimeout; + } + + public int numRetriesForLock() { + return numRetriesForLock; + } + + public int numRetriesForUnLock() { + return numRetriesForUnLock; + } + + public int sleepTime() { + return sleepTime; } } diff --git ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveLockObject.java ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveLockObject.java index 1cc3074..978c4e5 100644 --- ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveLockObject.java +++ ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveLockObject.java @@ -193,6 +193,10 @@ public HiveLockObject(DummyPartition par, HiveLockObjectData lockData) { this(new String[] {par.getName()}, lockData); } + public String getQueryId() { + return data.getQueryId(); + } + public String[] getPaths() { return pathNames; } diff --git ql/src/java/org/apache/hadoop/hive/ql/lockmgr/LockManagers.java ql/src/java/org/apache/hadoop/hive/ql/lockmgr/LockManagers.java new file mode 100644 index 0000000..fd306f9 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/lockmgr/LockManagers.java @@ -0,0 +1,154 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.lockmgr; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.util.ReflectionUtils; + +import java.util.List; + +public class LockManagers { + + private static final Log LOG = LogFactory.getLog(LockManagers.class.getName()); + + private static boolean initialized; + private static Class clazz; + private static SharedLockManager sharedLockMgr; + + private static synchronized boolean initialize(HiveLockManagerCtx context) throws HiveException { + if (!initialized) { + HiveConf conf = context.getConf(); + if (conf.getBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY)) { + clazz = getLockManagerClass(context.getConf()); + if (SharedLockManager.class.isAssignableFrom(clazz)) { + sharedLockMgr = (SharedLockManager) createLockManager(clazz, context); + Runtime.getRuntime().addShutdownHook(new LockShutdownHook()); + } + } + initialized = true; + } + return clazz != null; + } + + public static HiveLockManager getLockManager(final HiveLockManagerCtx context) throws HiveException { + if (!initialize(context)) { + return null; + } + if (sharedLockMgr == null) { + return createLockManager(clazz, context); + } + // return proxy for shared manager, overriding close method + final HiveLockManager shared = sharedLockMgr; + HiveLockManager manager = new SharedLockManager() { + @Override + public HiveLock lock(HiveLockObject key, HiveLockMode mode, boolean keepAlive) + throws LockException { + return shared.lock(key, mode, keepAlive); + } + @Override + public List lock(List objs, boolean keepAlive) throws LockException { + return shared.lock(objs, keepAlive); + } + @Override + public void unlock(HiveLock hiveLock) throws LockException { + shared.unlock(hiveLock); + } + @Override + public void releaseLocks(List hiveLocks) { + 
shared.releaseLocks(hiveLocks); + } + @Override + public List getLocks(boolean verifyTablePartitions, boolean fetchData) + throws LockException { + return shared.getLocks(verifyTablePartitions, fetchData); + } + @Override + public List getLocks(HiveLockObject key, boolean verifyTablePartitions, + boolean fetchData) throws LockException { + return shared.getLocks(key, verifyTablePartitions, fetchData); + } + @Override + public void close() { + CTX.remove(); + } + }; + manager.setContext(context); + return manager; + } + + private static class LockShutdownHook extends Thread { + @Override + public void run() { + shutdown(); + } + } + + public static synchronized void shutdown() { + if (sharedLockMgr != null) { + try { + sharedLockMgr.close(); + } catch (Exception e) { + LOG.error("Failed to close shared lock manager " + sharedLockMgr, e); + } + sharedLockMgr = null; + } + clazz = null; + initialized = false; + } + + private static HiveLockManager createLockManager( + Class clazz, HiveLockManagerCtx context) throws HiveException { + HiveLockManager hiveLockMgr = null; + try { + hiveLockMgr = ReflectionUtils.newInstance(clazz, context.getConf()); + hiveLockMgr.setContext(context); + } catch (Exception e) { + // set hiveLockMgr to null just in case this invalid manager got set to + // next query's ctx. 
+ if (hiveLockMgr != null) { + try { + hiveLockMgr.close(); + } catch (Exception e1) { + //nothing can do here + } + } + LOG.warn("Failed to create lock manager " + clazz, e); + throw new HiveException(ErrorMsg.LOCKMGR_NOT_INITIALIZED.getMsg(), e); + } + return hiveLockMgr; + } + + private static Class getLockManagerClass(HiveConf conf) + throws HiveException { + String lockMgr = conf.getVar(HiveConf.ConfVars.HIVE_LOCK_MANAGER); + if (lockMgr == null || lockMgr.isEmpty()) { + throw new HiveException(ErrorMsg.LOCKMGR_NOT_SPECIFIED.getMsg()); + } + try { + return (Class) conf.getClassByName(lockMgr); + } catch (Exception e) { + LOG.warn("Failed to find lock manager class " + lockMgr, e); + throw new HiveException(e); + } + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/lockmgr/SharedLockManager.java ql/src/java/org/apache/hadoop/hive/ql/lockmgr/SharedLockManager.java new file mode 100644 index 0000000..3118694 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/lockmgr/SharedLockManager.java @@ -0,0 +1,20 @@ +package org.apache.hadoop.hive.ql.lockmgr; + +/** + * common super class for shared lock manager + */ +public abstract class SharedLockManager extends AbstractLockManager { + + protected static final ThreadLocal CTX + = new InheritableThreadLocal(); + + @Override + public void setContext(HiveLockManagerCtx ctx) throws LockException { + CTX.set(ctx); + } + + @Override + public HiveLockManagerCtx getContext() { + return CTX.get(); + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java index 4a0056c..fbf5838 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java @@ -2331,8 +2331,7 @@ private void analyzeLockTable(ASTNode ast) partSpec = partSpecs.get(0); } - LockTableDesc lockTblDesc = new LockTableDesc(tableName, mode, partSpec, - HiveConf.getVar(conf, 
ConfVars.HIVEQUERYID)); + LockTableDesc lockTblDesc = new LockTableDesc(tableName, mode, partSpec); lockTblDesc.setQueryStr(this.ctx.getCmd()); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), lockTblDesc), conf)); diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/LockTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/LockTableDesc.java index c3c4ba4..a77ff97 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/LockTableDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/LockTableDesc.java @@ -21,8 +21,6 @@ import java.io.Serializable; import java.util.Map; -import org.apache.hadoop.fs.Path; - /** * LockTableDesc. * @@ -34,17 +32,15 @@ private String tableName; private String mode; private Map partSpec; - private String queryId; private String queryStr; public LockTableDesc() { } - public LockTableDesc(String tableName, String mode, Map partSpec, String queryId) { + public LockTableDesc(String tableName, String mode, Map partSpec) { this.tableName = tableName; this.mode = mode; this.partSpec = partSpec; - this.queryId = queryId; } public String getTableName() { @@ -71,14 +67,6 @@ public void setPartSpec(Map partSpec) { this.partSpec = partSpec; } - public String getQueryId() { - return queryId; - } - - public void setQueryId(String queryId) { - this.queryId = queryId; - } - public String getQueryStr() { return queryStr; } diff --git ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestEmbeddedLockManager.java ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestEmbeddedLockManager.java index 0afbc1c..ec10d91 100644 --- ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestEmbeddedLockManager.java +++ ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestEmbeddedLockManager.java @@ -19,7 +19,6 @@ package org.apache.hadoop.hive.ql.lockmgr; import junit.framework.TestCase; - import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.lockmgr.HiveLockObject.HiveLockObjectData; import org.junit.Assert; @@ -28,17 +27,13 @@ private 
int counter; - public void testLocking() throws LockException { - HiveConf conf = new HiveConf(); - conf.set("hive.lock.numretries", "0"); - conf.set("hive.unlock.numretries", "0"); - EmbeddedLockManager manager = new EmbeddedLockManager(); - manager.setContext(new HiveLockManagerCtx(conf)); + private String path1 = "database1/table1/x=100"; + private String path2 = "database1/table1/x=200"; + private String path3 = "database1/table2"; + private String path4 = "database2"; - String path1 = "database1/table1/x=100"; - String path2 = "database1/table1/x=200"; - String path3 = "database1/table2"; - String path4 = "database2"; + public void testLocking() throws LockException { + EmbeddedLockManager manager = createLockManager(-1); HiveLockObject path1sel1 = lockObj(path1, "select"); HiveLock path1sel1Lock = manager.lock(path1sel1, HiveLockMode.SHARED, false); @@ -48,12 +43,12 @@ public void testLocking() throws LockException { Assert.assertEquals(1, manager.getLocks(false, true).size()); HiveLockObject path1up1 = lockObj(path1, "update"); - Assert.assertNull(manager.lock(path1up1, HiveLockMode.EXCLUSIVE, false)); + Assert.assertNull(manager.lock(path1up1, HiveLockMode.EXCLUSIVE, false)); // FAIL HiveLockObject path1sel2 = lockObj(path1, "select"); HiveLock path1sel2Lock = manager.lock(path1sel2, HiveLockMode.SHARED, false); Assert.assertNotNull(path1sel2Lock); - Assert.assertEquals(1, manager.getLocks(path1sel1, false, false).size()); + Assert.assertEquals(2, manager.getLocks(path1sel1, false, false).size()); Assert.assertEquals(2, manager.getLocks(path1sel1, false, true).size()); Assert.assertEquals(2, manager.getLocks(path1sel2, false, true).size()); Assert.assertEquals(2, manager.getLocks(false, true).size()); @@ -64,7 +59,7 @@ public void testLocking() throws LockException { HiveLockObject path2sel1 = lockObj(path2, "select"); HiveLock path2sel1Lock = manager.lock(path2sel1, HiveLockMode.SHARED, false); Assert.assertNotNull(path2sel1Lock); - Assert.assertEquals(1, 
manager.getLocks(path1sel1, false, false).size()); + Assert.assertEquals(2, manager.getLocks(path1sel1, false, false).size()); Assert.assertEquals(2, manager.getLocks(path1sel1, false, true).size()); Assert.assertEquals(2, manager.getLocks(path1sel2, false, true).size()); Assert.assertEquals(1, manager.getLocks(path2sel1, false, true).size()); @@ -73,7 +68,7 @@ public void testLocking() throws LockException { HiveLockObject path3sel = lockObj(path3, "select"); HiveLock path3selLock = manager.lock(path3sel, HiveLockMode.SHARED, false); Assert.assertNotNull(path3selLock); - Assert.assertEquals(1, manager.getLocks(path1sel1, false, false).size()); + Assert.assertEquals(2, manager.getLocks(path1sel1, false, false).size()); Assert.assertEquals(2, manager.getLocks(path1sel1, false, true).size()); Assert.assertEquals(2, manager.getLocks(path1sel2, false, true).size()); Assert.assertEquals(1, manager.getLocks(path2sel1, false, true).size()); @@ -106,7 +101,11 @@ public void testLocking() throws LockException { Assert.assertEquals(1, manager.getLocks(path2up1, false, true).size()); Assert.assertEquals(1, manager.getLocks(false, true).size()); - Assert.assertNull(manager.lock(path2up1, HiveLockMode.EXCLUSIVE, false)); + // try lock on owned lock and should be succeeded + HiveLock path2up1Lock1 = manager.lock(path2up1, HiveLockMode.EXCLUSIVE, false); + Assert.assertNotNull(path2up1Lock1); + Assert.assertEquals(1, manager.getLocks(path2up1, false, true).size()); + Assert.assertEquals(1, manager.getLocks(false, true).size()); HiveLockObject path1sel3 = lockObj(path1, "select"); HiveLockObject path2sel2 = lockObj(path2, "select"); @@ -118,6 +117,54 @@ public void testLocking() throws LockException { Assert.assertEquals(2, manager.getLocks(false, true).size()); } + public void testLockOnOwnedLock1() throws LockException { + EmbeddedLockManager manager = createLockManager(-1); + + HiveLockObject obj1 = lockObj(path1, "select"); + HiveLock lock1 = manager.lock(obj1, 
HiveLockMode.SHARED, false); + Assert.assertNotNull(lock1); + Assert.assertEquals(HiveLockMode.SHARED, lock1.getHiveLockMode()); + + // SHARED + EXCLUSIVE = EXCLUSIVE + HiveLock lock2 = manager.lock(obj1, HiveLockMode.EXCLUSIVE, false); + Assert.assertNotNull(lock2); + Assert.assertEquals(HiveLockMode.EXCLUSIVE, lock2.getHiveLockMode()); + + // EXCLUSIVE + EXCLUSIVE = EXCLUSIVE + HiveLock lock3 = manager.lock(obj1, HiveLockMode.EXCLUSIVE, false); + Assert.assertNotNull(lock3); + Assert.assertEquals(HiveLockMode.EXCLUSIVE, lock3.getHiveLockMode()); + + // EXCLUSIVE + SHARED = EXCLUSIVE + HiveLock lock4 = manager.lock(obj1, HiveLockMode.SHARED, false); + Assert.assertNotNull(lock4); + Assert.assertEquals(HiveLockMode.EXCLUSIVE, lock4.getHiveLockMode()); + } + + public void testLockOnOwnedLock2() throws LockException { + EmbeddedLockManager manager = createLockManager(-1); + + HiveLockObject obj1 = lockObj(path1, "select"); + HiveLockObject obj2 = lockObj(path1, "select"); + + HiveLock lock1 = manager.lock(obj1, HiveLockMode.SHARED, false); + Assert.assertNotNull(lock1); + HiveLock lock2 = manager.lock(obj2, HiveLockMode.SHARED, false); + Assert.assertNotNull(lock2); + HiveLock lock3 = manager.lock(obj1, HiveLockMode.EXCLUSIVE, false); + Assert.assertNull(lock3); + } + + private EmbeddedLockManager createLockManager(long timeout) throws LockException { + HiveConf conf = new HiveConf(); + conf.set("hive.lock.numretries", "0"); + conf.set("hive.unlock.numretries", "0"); + conf.set("hive.embedded.lock.timeout", String.valueOf(timeout)); + EmbeddedLockManager manager = new EmbeddedLockManager(); + manager.setContext(new HiveLockManagerCtx(conf)); + return manager; + } + private HiveLockObject lockObj(String path, String query) { HiveLockObjectData data = new HiveLockObjectData(String.valueOf(++counter), null, null, query); return new HiveLockObject(path.split("/"), data); diff --git ql/src/test/queries/clientnegative/insert_into1.q 
ql/src/test/queries/clientnegative/insert_into1.q index 8c19767..9f5207e 100644 --- ql/src/test/queries/clientnegative/insert_into1.q +++ ql/src/test/queries/clientnegative/insert_into1.q @@ -1,5 +1,5 @@ -set hive.lock.numretries=5; -set hive.lock.sleep.between.retries=5; +set hive.lock.numretries=3; +set hive.lock.sleep.between.retries=3; DROP TABLE insert_into1_neg; diff --git ql/src/test/queries/clientnegative/insert_into2.q ql/src/test/queries/clientnegative/insert_into2.q index 73a3b6f..e99893b 100644 --- ql/src/test/queries/clientnegative/insert_into2.q +++ ql/src/test/queries/clientnegative/insert_into2.q @@ -1,10 +1,11 @@ -set hive.lock.numretries=5; -set hive.lock.sleep.between.retries=5; +set hive.lock.numretries=3; +set hive.lock.sleep.between.retries=3; -DROP TABLE insert_into1_neg; -CREATE TABLE insert_into1_neg (key int, value string); +DROP TABLE insert_into2_neg; -LOCK TABLE insert_into1_neg EXCLUSIVE; -INSERT INTO TABLE insert_into1_neg SELECT * FROM src LIMIT 100; +CREATE TABLE insert_into2_neg (key int, value string); -DROP TABLE insert_into1_neg; +LOCK TABLE insert_into2_neg EXCLUSIVE; +INSERT INTO TABLE insert_into2_neg SELECT * FROM src LIMIT 100; + +DROP TABLE insert_into2_neg; diff --git ql/src/test/queries/clientnegative/insert_into3.q ql/src/test/queries/clientnegative/insert_into3.q index 4d048b3..971ea7f 100644 --- ql/src/test/queries/clientnegative/insert_into3.q +++ ql/src/test/queries/clientnegative/insert_into3.q @@ -1,5 +1,5 @@ -set hive.lock.numretries=5; -set hive.lock.sleep.between.retries=5; +set hive.lock.numretries=3; +set hive.lock.sleep.between.retries=3; DROP TABLE insert_into3_neg; diff --git ql/src/test/queries/clientnegative/insert_into4.q ql/src/test/queries/clientnegative/insert_into4.q index b8944e7..90a4455 100644 --- ql/src/test/queries/clientnegative/insert_into4.q +++ ql/src/test/queries/clientnegative/insert_into4.q @@ -1,16 +1,16 @@ -set hive.lock.numretries=5; -set hive.lock.sleep.between.retries=5; +set 
hive.lock.numretries=3; +set hive.lock.sleep.between.retries=3; -DROP TABLE insert_into3_neg; +DROP TABLE insert_into4_neg; -CREATE TABLE insert_into3_neg (key int, value string) +CREATE TABLE insert_into4_neg (key int, value string) PARTITIONED BY (ds string); -INSERT INTO TABLE insert_into3_neg PARTITION (ds='1') +INSERT INTO TABLE insert_into4_neg PARTITION (ds='1') SELECT * FROM src LIMIT 100; -LOCK TABLE insert_into3_neg PARTITION (ds='1') EXCLUSIVE; -INSERT INTO TABLE insert_into3_neg PARTITION (ds='1') +LOCK TABLE insert_into4_neg PARTITION (ds='1') EXCLUSIVE; +INSERT INTO TABLE insert_into4_neg PARTITION (ds='1') SELECT * FROM src LIMIT 100; -DROP TABLE insert_into3_neg; +DROP TABLE insert_into4_neg; diff --git ql/src/test/queries/clientnegative/lockneg_query_tbl_in_locked_db.q ql/src/test/queries/clientnegative/lockneg_query_tbl_in_locked_db.q index 4966f2b..0195f39 100644 --- ql/src/test/queries/clientnegative/lockneg_query_tbl_in_locked_db.q +++ ql/src/test/queries/clientnegative/lockneg_query_tbl_in_locked_db.q @@ -1,3 +1,6 @@ +set hive.lock.numretries=0; +set hive.unlock.numretries=0; + create database lockneg1; use lockneg1; @@ -7,11 +10,14 @@ insert overwrite table tstsrcpart partition (ds='2008-04-08', hr='11') select key, value from default.srcpart where ds='2008-04-08' and hr='11'; lock database lockneg1 shared; -show locks database lockneg1; +show locks; + select count(1) from tstsrcpart where ds='2008-04-08' and hr='11'; unlock database lockneg1; -show locks database lockneg1; +show locks; + lock database lockneg1 exclusive; -show locks database lockneg1; +show locks; + select count(1) from tstsrcpart where ds='2008-04-08' and hr='11'; diff --git ql/src/test/results/clientnegative/insert_into1.q.out ql/src/test/results/clientnegative/insert_into1.q.out index a38b679..bd3b40e 100644 --- ql/src/test/results/clientnegative/insert_into1.q.out +++ ql/src/test/results/clientnegative/insert_into1.q.out @@ -15,6 +15,4 @@ POSTHOOK: query: LOCK TABLE 
insert_into1_neg SHARED POSTHOOK: type: LOCKTABLE conflicting lock present for default@insert_into1_neg mode EXCLUSIVE conflicting lock present for default@insert_into1_neg mode EXCLUSIVE -conflicting lock present for default@insert_into1_neg mode EXCLUSIVE -conflicting lock present for default@insert_into1_neg mode EXCLUSIVE FAILED: Error in acquiring locks: Locks on the underlying objects cannot be acquired. retry after some time diff --git ql/src/test/results/clientnegative/insert_into2.q.out ql/src/test/results/clientnegative/insert_into2.q.out index f21823a..432e5de 100644 --- ql/src/test/results/clientnegative/insert_into2.q.out +++ ql/src/test/results/clientnegative/insert_into2.q.out @@ -1,20 +1,18 @@ -PREHOOK: query: DROP TABLE insert_into1_neg +PREHOOK: query: DROP TABLE insert_into2_neg PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE insert_into1_neg +POSTHOOK: query: DROP TABLE insert_into2_neg POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE insert_into1_neg (key int, value string) +PREHOOK: query: CREATE TABLE insert_into2_neg (key int, value string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -POSTHOOK: query: CREATE TABLE insert_into1_neg (key int, value string) +POSTHOOK: query: CREATE TABLE insert_into2_neg (key int, value string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@insert_into1_neg -PREHOOK: query: LOCK TABLE insert_into1_neg EXCLUSIVE +POSTHOOK: Output: default@insert_into2_neg +PREHOOK: query: LOCK TABLE insert_into2_neg EXCLUSIVE PREHOOK: type: LOCKTABLE -POSTHOOK: query: LOCK TABLE insert_into1_neg EXCLUSIVE +POSTHOOK: query: LOCK TABLE insert_into2_neg EXCLUSIVE POSTHOOK: type: LOCKTABLE -conflicting lock present for default@insert_into1_neg mode EXCLUSIVE -conflicting lock present for default@insert_into1_neg mode EXCLUSIVE -conflicting lock present for default@insert_into1_neg mode EXCLUSIVE -conflicting lock present for default@insert_into1_neg mode 
EXCLUSIVE +conflicting lock present for default@insert_into2_neg mode EXCLUSIVE +conflicting lock present for default@insert_into2_neg mode EXCLUSIVE FAILED: Error in acquiring locks: Locks on the underlying objects cannot be acquired. retry after some time diff --git ql/src/test/results/clientnegative/insert_into3.q.out ql/src/test/results/clientnegative/insert_into3.q.out index ef78c2a..e3fd31f 100644 --- ql/src/test/results/clientnegative/insert_into3.q.out +++ ql/src/test/results/clientnegative/insert_into3.q.out @@ -31,6 +31,4 @@ POSTHOOK: Lineage: insert_into3_neg PARTITION(ds=1).key EXPRESSION [(src)src.Fie POSTHOOK: Lineage: insert_into3_neg PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] conflicting lock present for default@insert_into3_neg@ds=1 mode EXCLUSIVE conflicting lock present for default@insert_into3_neg@ds=1 mode EXCLUSIVE -conflicting lock present for default@insert_into3_neg@ds=1 mode EXCLUSIVE -conflicting lock present for default@insert_into3_neg@ds=1 mode EXCLUSIVE FAILED: Error in acquiring locks: Locks on the underlying objects cannot be acquired. 
retry after some time diff --git ql/src/test/results/clientnegative/insert_into4.q.out ql/src/test/results/clientnegative/insert_into4.q.out index d26e79f..937a0e5 100644 --- ql/src/test/results/clientnegative/insert_into4.q.out +++ ql/src/test/results/clientnegative/insert_into4.q.out @@ -1,36 +1,34 @@ -PREHOOK: query: DROP TABLE insert_into3_neg +PREHOOK: query: DROP TABLE insert_into4_neg PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE insert_into3_neg +POSTHOOK: query: DROP TABLE insert_into4_neg POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE insert_into3_neg (key int, value string) +PREHOOK: query: CREATE TABLE insert_into4_neg (key int, value string) PARTITIONED BY (ds string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -POSTHOOK: query: CREATE TABLE insert_into3_neg (key int, value string) +POSTHOOK: query: CREATE TABLE insert_into4_neg (key int, value string) PARTITIONED BY (ds string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@insert_into3_neg -PREHOOK: query: INSERT INTO TABLE insert_into3_neg PARTITION (ds='1') +POSTHOOK: Output: default@insert_into4_neg +PREHOOK: query: INSERT INTO TABLE insert_into4_neg PARTITION (ds='1') SELECT * FROM src LIMIT 100 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@insert_into3_neg@ds=1 -POSTHOOK: query: INSERT INTO TABLE insert_into3_neg PARTITION (ds='1') +PREHOOK: Output: default@insert_into4_neg@ds=1 +POSTHOOK: query: INSERT INTO TABLE insert_into4_neg PARTITION (ds='1') SELECT * FROM src LIMIT 100 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@insert_into3_neg@ds=1 -POSTHOOK: Lineage: insert_into3_neg PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: insert_into3_neg PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: LOCK TABLE insert_into3_neg PARTITION 
(ds='1') EXCLUSIVE +POSTHOOK: Output: default@insert_into4_neg@ds=1 +POSTHOOK: Lineage: insert_into4_neg PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into4_neg PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: LOCK TABLE insert_into4_neg PARTITION (ds='1') EXCLUSIVE PREHOOK: type: LOCKTABLE -POSTHOOK: query: LOCK TABLE insert_into3_neg PARTITION (ds='1') EXCLUSIVE +POSTHOOK: query: LOCK TABLE insert_into4_neg PARTITION (ds='1') EXCLUSIVE POSTHOOK: type: LOCKTABLE -POSTHOOK: Lineage: insert_into3_neg PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: insert_into3_neg PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -conflicting lock present for default@insert_into3_neg@ds=1 mode EXCLUSIVE -conflicting lock present for default@insert_into3_neg@ds=1 mode EXCLUSIVE -conflicting lock present for default@insert_into3_neg@ds=1 mode EXCLUSIVE -conflicting lock present for default@insert_into3_neg@ds=1 mode EXCLUSIVE +POSTHOOK: Lineage: insert_into4_neg PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: insert_into4_neg PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +conflicting lock present for default@insert_into4_neg@ds=1 mode EXCLUSIVE +conflicting lock present for default@insert_into4_neg@ds=1 mode EXCLUSIVE FAILED: Error in acquiring locks: Locks on the underlying objects cannot be acquired. 
retry after some time diff --git ql/src/test/results/clientnegative/lockneg_query_tbl_in_locked_db.q.out ql/src/test/results/clientnegative/lockneg_query_tbl_in_locked_db.q.out index 157c2d0..1b85f2b 100644 --- ql/src/test/results/clientnegative/lockneg_query_tbl_in_locked_db.q.out +++ ql/src/test/results/clientnegative/lockneg_query_tbl_in_locked_db.q.out @@ -33,9 +33,9 @@ POSTHOOK: query: lock database lockneg1 shared POSTHOOK: type: LOCKDATABASE POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: show locks database lockneg1 +PREHOOK: query: show locks PREHOOK: type: SHOWLOCKS -POSTHOOK: query: show locks database lockneg1 +POSTHOOK: query: show locks POSTHOOK: type: SHOWLOCKS POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]