diff --git common/src/java/org/apache/hadoop/hive/conf/HiveConfUtil.java common/src/java/org/apache/hadoop/hive/conf/HiveConfUtil.java
index 0d3b94c..073a978 100644
--- common/src/java/org/apache/hadoop/hive/conf/HiveConfUtil.java
+++ common/src/java/org/apache/hadoop/hive/conf/HiveConfUtil.java
@@ -18,8 +18,18 @@
 
 package org.apache.hadoop.hive.conf;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.common.classification.InterfaceAudience.Private;
 
+import java.io.File;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.StringTokenizer;
+
 /**
  * Hive Configuration utils
  */
@@ -35,4 +45,44 @@ public static boolean isEmbeddedMetaStore(String msUri) {
     return (msUri == null) ? true : msUri.trim().isEmpty();
   }
+
+  /**
+   * Dumps all HiveConf for debugging.  Convenient for dumping state at process start-up and
+   * logging it so that the values of all variables are known for later analysis.
+   */
+  public static StringBuilder dumpConfig(HiveConf conf) {
+    StringBuilder sb = new StringBuilder("START========\"HiveConf()\"========\n");
+    sb.append("hiveDefaultUrl=").append(conf.getHiveDefaultLocation()).append('\n');
+    sb.append("hiveSiteURL=").append(HiveConf.getHiveSiteLocation()).append('\n');
+    sb.append("hiveServer2SiteUrl=").append(HiveConf.getHiveServer2SiteLocation()).append('\n');
+    sb.append("hivemetastoreSiteUrl=").append(HiveConf.getMetastoreSiteLocation()).append('\n');
+    dumpConfig(conf, sb);
+    return sb.append("END========\"HiveConf()\"========\n");
+  }
+  public static void dumpConfig(Configuration conf, StringBuilder sb) {
+    Iterator<Map.Entry<String, String>> configIter = conf.iterator();
+    List<Map.Entry<String, String>> configVals = new ArrayList<>();
+    while (configIter.hasNext()) {
+      configVals.add(configIter.next());
+    }
+    Collections.sort(configVals, new Comparator<Map.Entry<String, String>>() {
+      @Override
+      public int compare(Map.Entry<String, String> ent, Map.Entry<String, String> ent2) {
+        return ent.getKey().compareTo(ent2.getKey());
+      }
+    });
+    for (Map.Entry<String, String> entry : configVals) {
+      //use get() to make sure variable substitution works
+      if (entry.getKey().toLowerCase().contains("path")) {
+        StringTokenizer st = new StringTokenizer(conf.get(entry.getKey()), File.pathSeparator);
+        sb.append(entry.getKey()).append("=\n");
+        while (st.hasMoreTokens()) {
+          sb.append("    ").append(st.nextToken()).append(File.pathSeparator).append('\n');
+        }
+      } else {
+        sb.append(entry.getKey()).append('=').append(conf.get(entry.getKey())).append('\n');
+      }
+    }
+  }
 }
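Note: for reference, a minimal self-contained sketch of the dump pattern the new utility uses — entries sorted by key, values re-read through Configuration.get() so variable substitution applies, and path-like values split on File.pathSeparator, one element per line. The class name and keys below are illustrative, not part of the patch.

    import java.io.File;
    import java.util.Map;
    import java.util.TreeMap;
    import org.apache.hadoop.conf.Configuration;

    public class ConfigDumpDemo {
      public static void main(String[] args) {
        Configuration conf = new Configuration(false); // don't load default resources
        conf.set("hive.query.string", "select 1");
        conf.set("hive.aux.jars.path", "/opt/a.jar" + File.pathSeparator + "/opt/b.jar");

        // A TreeMap gives the same sorted-by-key ordering as the Collections.sort() in the patch.
        Map<String, String> sorted = new TreeMap<>();
        for (Map.Entry<String, String> e : conf) {
          sorted.put(e.getKey(), conf.get(e.getKey())); // get() applies variable substitution
        }
        StringBuilder sb = new StringBuilder();
        for (Map.Entry<String, String> e : sorted.entrySet()) {
          if (e.getKey().toLowerCase().contains("path")) {
            sb.append(e.getKey()).append("=\n");
            for (String part : e.getValue().split(File.pathSeparator)) {
              sb.append("    ").append(part).append(File.pathSeparator).append('\n');
            }
          } else {
            sb.append(e.getKey()).append('=').append(e.getValue()).append('\n');
          }
        }
        System.out.print(sb);
      }
    }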
sb.append("START========WebHCat AppConfig.iterator()========: \n"); - dumpConfig(this, sb); + HiveConfUtil.dumpConfig(this, sb); sb.append("END========WebHCat AppConfig.iterator()========: \n"); sb.append(TempletonUtils.dumpPropMap("========WebHCat System.getProperties()========", System.getProperties())); - sb.append("START========\"new HiveConf()\"========\n"); - HiveConf c = new HiveConf(); - sb.append("hiveDefaultUrl=").append(c.getHiveDefaultLocation()).append('\n'); - sb.append("hiveSiteURL=").append(HiveConf.getHiveSiteLocation()).append('\n'); - sb.append("hiveServer2SiteUrl=").append(HiveConf.getHiveServer2SiteLocation()).append('\n'); - sb.append("hivemetastoreSiteUrl=").append(HiveConf.getMetastoreSiteLocation()).append('\n'); - dumpConfig(c, sb); - sb.append("END========\"new HiveConf()\"========\n"); + sb.append(HiveConfUtil.dumpConfig(new HiveConf())); return sb.toString(); } - private static void dumpConfig(Configuration conf, StringBuilder sb) { - Iterator> configIter = conf.iterator(); - List>configVals = new ArrayList<>(); - while(configIter.hasNext()) { - configVals.add(configIter.next()); - } - Collections.sort(configVals, new Comparator> () { - @Override - public int compare(Map.Entry ent, Map.Entry ent2) { - return ent.getKey().compareTo(ent2.getKey()); - } - }); - for(Map.Entry entry : configVals) { - //use get() to make sure variable substitution works - if(entry.getKey().toLowerCase().contains("path")) { - StringTokenizer st = new StringTokenizer(conf.get(entry.getKey()), File.pathSeparator); - sb.append(entry.getKey()).append("=\n"); - while(st.hasMoreTokens()) { - sb.append(" ").append(st.nextToken()).append(File.pathSeparator).append('\n'); - } - } - else { - sb.append(entry.getKey()).append('=').append(conf.get(entry.getKey())).append('\n'); - } - } - } public JobsListOrder getListJobsOrder() { String requestedOrder = get(TEMPLETON_JOBSLIST_ORDER); diff --git metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java index 7a89a0c..49be0f7 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hive.common.ServerUtils; import org.apache.hadoop.hive.common.classification.InterfaceAudience; import org.apache.hadoop.hive.common.classification.InterfaceStability; +import org.apache.hadoop.hive.conf.HiveConfUtil; import org.apache.hadoop.hive.metastore.HouseKeeperService; import org.apache.hadoop.hive.metastore.Warehouse; import org.slf4j.Logger; @@ -204,6 +205,7 @@ public static OpertaionType fromDataOperationType(DataOperationType dop) { */ private final static ConcurrentHashMap derbyKey2Lock = new ConcurrentHashMap<>(); private static final String hostname = ServerUtils.hostname(); + private static volatile boolean dumpConfig = true; // Private methods should never catch SQLException and then throw MetaException. The public // methods depend on SQLException coming back so they can detect and handle deadlocks. 
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
index 7a89a0c..49be0f7 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
@@ -28,6 +28,7 @@
 import org.apache.hadoop.hive.common.ServerUtils;
 import org.apache.hadoop.hive.common.classification.InterfaceAudience;
 import org.apache.hadoop.hive.common.classification.InterfaceStability;
+import org.apache.hadoop.hive.conf.HiveConfUtil;
 import org.apache.hadoop.hive.metastore.HouseKeeperService;
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.slf4j.Logger;
@@ -204,6 +205,7 @@ public static OpertaionType fromDataOperationType(DataOperationType dop) {
    */
   private final static ConcurrentHashMap<String, Semaphore> derbyKey2Lock = new ConcurrentHashMap<>();
   private static final String hostname = ServerUtils.hostname();
+  private static volatile boolean dumpConfig = true;
 
   // Private methods should never catch SQLException and then throw MetaException.  The public
   // methods depend on SQLException coming back so they can detect and handle deadlocks.  Private
@@ -250,6 +252,11 @@ public void setConf(HiveConf conf) {
     retryLimit = HiveConf.getIntVar(conf, HiveConf.ConfVars.HMSHANDLERATTEMPTS);
     deadlockRetryInterval = retryInterval / 10;
     maxOpenTxns = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVE_MAX_OPEN_TXNS);
+    if (dumpConfig) {
+      LOG.info(HiveConfUtil.dumpConfig(conf).toString());
+      //only do this once per JVM; useful for support
+      dumpConfig = false;
+    }
   }
 
   public GetOpenTxnsInfoResponse getOpenTxnsInfo() throws MetaException {
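Note: the volatile dumpConfig flag makes the dump happen roughly once per JVM, but check-then-set on a volatile is not atomic — two threads entering setConf() concurrently could both log before either clears the flag. For a diagnostic dump that is presumably fine; if exactly-once mattered, an AtomicBoolean would guarantee it. A sketch under that assumption (names are illustrative):

    import java.util.concurrent.atomic.AtomicBoolean;

    public class DumpOnce {
      // compareAndSet flips the flag atomically, so exactly one caller wins.
      private static final AtomicBoolean shouldDump = new AtomicBoolean(true);

      static void setConf(String conf) {
        if (shouldDump.compareAndSet(true, false)) {
          System.out.println("dumping config once: " + conf);
        }
      }

      public static void main(String[] args) throws InterruptedException {
        Runnable r = () -> setConf("hive.txn.timeout=300");
        Thread t1 = new Thread(r), t2 = new Thread(r);
        t1.start(); t2.start();
        t1.join(); t2.join(); // only one "dumping config once" line is printed
      }
    }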
diff --git ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/HouseKeeperServiceBase.java ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/HouseKeeperServiceBase.java
index caab10d..0b7332c 100644
--- ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/HouseKeeperServiceBase.java
+++ ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/HouseKeeperServiceBase.java
@@ -48,7 +48,7 @@ public void start(HiveConf hiveConf) throws Exception {
       private final AtomicInteger threadCounter = new AtomicInteger();
       @Override
       public Thread newThread(Runnable r) {
-        return new Thread(r, this.getClass().getName() + "-" + threadCounter.getAndIncrement());
+        return new Thread(r, HouseKeeperServiceBase.this.getClass().getName() + "-" + threadCounter.getAndIncrement());
       }
     });
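Note: this one-line fix matters because the ThreadFactory is an anonymous inner class. There, plain this is the anonymous instance, so this.getClass().getName() names every housekeeper thread after something like HouseKeeperServiceBase$1; the qualified HouseKeeperServiceBase.this reaches the enclosing service instance, so threads get named after the concrete subclass (e.g. AcidHouseKeeperService). A standalone illustration with made-up class names:

    public class Outer {
      void show() {
        Runnable anon = new Runnable() {
          @Override
          public void run() {
            // Plain 'this' inside the anonymous class is the Runnable instance,
            // so this prints "Outer$1".
            System.out.println(this.getClass().getName());
            // Outer.this is the enclosing instance; called on a subclass it
            // reports the concrete runtime class, "Outer$Sub" here.
            System.out.println(Outer.this.getClass().getName());
          }
        };
        anon.run();
      }
      static class Sub extends Outer {}
      public static void main(String[] args) {
        new Sub().show();
      }
    }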
diff --git ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
index e4cbd5f..388b7c5 100644
--- ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
+++ ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
@@ -21,8 +21,15 @@
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.api.LockState;
+import org.apache.hadoop.hive.metastore.api.LockType;
+import org.apache.hadoop.hive.metastore.api.ShowLocksRequest;
+import org.apache.hadoop.hive.metastore.api.ShowLocksResponse;
 import org.apache.hadoop.hive.metastore.txn.TxnDbUtil;
+import org.apache.hadoop.hive.metastore.txn.TxnStore;
+import org.apache.hadoop.hive.metastore.txn.TxnUtils;
 import org.apache.hadoop.hive.ql.io.AcidUtils;
+import org.apache.hadoop.hive.ql.lockmgr.TestDbTxnManager2;
 import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.ql.txn.AcidHouseKeeperService;
@@ -415,14 +422,31 @@ public void testTimeOutReaper() throws Exception {
     hiveConf.setTimeVar(HiveConf.ConfVars.HIVE_TXN_TIMEOUT, 2, TimeUnit.MILLISECONDS);
     AcidHouseKeeperService houseKeeperService = new AcidHouseKeeperService();
     //this will abort the txn
-    houseKeeperService.start(hiveConf);
-    while(houseKeeperService.getIsAliveCounter() <= Integer.MIN_VALUE) {
-      Thread.sleep(100);//make sure it has run at least once
-    }
-    houseKeeperService.stop();
+    TestTxnCommands2.runHouseKeeperService(houseKeeperService, hiveConf);
     //this should fail because txn aborted due to timeout
     CommandProcessorResponse cpr = runStatementOnDriverNegative("delete from " + Table.ACIDTBL + " where a = 5");
     Assert.assertTrue("Actual: " + cpr.getErrorMessage(), cpr.getErrorMessage().contains("Transaction manager has aborted the transaction txnid:1"));
+
+    //now test that we don't timeout locks we should not
+    hiveConf.setTimeVar(HiveConf.ConfVars.HIVE_TXN_TIMEOUT, 10, TimeUnit.MINUTES);
+    runStatementOnDriver("start transaction");
+    runStatementOnDriver("select count(*) from " + Table.ACIDTBL + " where a = 17");
+    TxnStore txnHandler = TxnUtils.getTxnStore(hiveConf);
+    ShowLocksResponse slr = txnHandler.showLocks(new ShowLocksRequest());
+    TestDbTxnManager2.checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", Table.ACIDTBL.name, null, slr.getLocks().get(0));
+    TestTxnCommands2.runHouseKeeperService(houseKeeperService, hiveConf);
+    slr = txnHandler.showLocks(new ShowLocksRequest());
+    TestDbTxnManager2.checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", Table.ACIDTBL.name, null, slr.getLocks().get(0));
+    Assert.assertEquals("Unexpected lock count", 1, slr.getLocks().size());
+
+    TestTxnCommands2.runHouseKeeperService(houseKeeperService, hiveConf);
+    slr = txnHandler.showLocks(new ShowLocksRequest());
+    TestDbTxnManager2.checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", Table.ACIDTBL.name, null, slr.getLocks().get(0));
+    Assert.assertEquals("Unexpected lock count", 1, slr.getLocks().size());
+
+    runStatementOnDriver("rollback");
+    slr = txnHandler.showLocks(new ShowLocksRequest());
+    Assert.assertEquals("Unexpected lock count", 0, slr.getLocks().size());
   }
 
   /**
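Note: TestTxnCommands2.runHouseKeeperService(...) is referenced here but its body is not part of this diff. Judging from the inline loop it replaces, it presumably starts the service, polls getIsAliveCounter() until the counter moves (i.e. the service has completed at least one pass), and then stops it. A hypothetical reconstruction of that shape, not the actual helper:

    // Hypothetical sketch; the real helper lives in TestTxnCommands2.
    public static void runHouseKeeperService(HouseKeeperService houseKeeperService, HiveConf conf)
        throws Exception {
      int lastCount = houseKeeperService.getIsAliveCounter();
      houseKeeperService.start(conf);
      while (houseKeeperService.getIsAliveCounter() <= lastCount) {
        Thread.sleep(100); // wait until the service has made at least one pass
      }
      houseKeeperService.stop();
    }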
diff --git ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
index 19cde2f..a4d2290 100644
--- ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
+++ ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
@@ -630,8 +630,63 @@ public void checkExpectedLocks() throws Exception {
     relLocks.add(new DbLockManager.DbHiveLock(locks.get(0).getLockid()));
     txnMgr.getLockManager().releaseLocks(relLocks);
   }
+  /**
+   * Check to make sure we acquire proper locks for queries involving acid and non-acid tables
+   */
+  @Test
+  public void checkExpectedLocks2() throws Exception {
+    checkCmdOnDriver(driver.run("drop table if exists tab_acid"));
+    checkCmdOnDriver(driver.run("drop table if exists tab_not_acid"));
+    checkCmdOnDriver(driver.run("create table if not exists tab_acid (a int, b int) partitioned by (p string) " +
+      "clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')"));
+    checkCmdOnDriver(driver.run("create table if not exists tab_not_acid (na int, nb int) partitioned by (np string) " +
+      "clustered by (na) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='false')"));
+    checkCmdOnDriver(driver.run("insert into tab_acid partition(p) (a,b,p) values(1,2,'foo'),(3,4,'bar')"));
+    checkCmdOnDriver(driver.run("insert into tab_not_acid partition(np) (na,nb,np) values(1,2,'blah'),(3,4,'doh')"));
+    txnMgr.openTxn("T1");
+    checkCmdOnDriver(driver.compileAndRespond("select * from tab_acid inner join tab_not_acid on a = na"));
+    txnMgr.acquireLocks(driver.getPlan(), ctx, "T1");
+    List<ShowLocksResponseElement> locks = getLocks(txnMgr);
+    Assert.assertEquals("Unexpected lock count", 6, locks.size());
+    checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "tab_not_acid", null, locks.get(3));
+    checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "tab_not_acid", "np=blah", locks.get(5));
+    checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "tab_not_acid", "np=doh", locks.get(4));
+    checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "tab_acid", "p=foo", locks.get(2));
+    checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "tab_acid", null, locks.get(0));
+    checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "tab_acid", "p=bar", locks.get(1));
+
+    //jdk 7
+    //checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "tab_not_acid", null, locks.get(0));
+    //checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "tab_not_acid", "np=blah", locks.get(1));
+    //checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "tab_not_acid", "np=doh", locks.get(2));
+    //checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "tab_acid", "p=foo", locks.get(3));
+    //checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "tab_acid", null, locks.get(4));
+    //checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "tab_acid", "p=bar", locks.get(5));
+    HiveTxnManager txnMgr2 = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf);
+    txnMgr2.openTxn("T2");
+    checkCmdOnDriver(driver.compileAndRespond("insert into tab_not_acid partition(np='doh') values(5,6)"));
+    LockState ls = ((DbTxnManager)txnMgr2).acquireLocks(driver.getPlan(), ctx, "T2", false);
+    locks = getLocks(txnMgr2);
+    Assert.assertEquals("Unexpected lock count", 7, locks.size());
+    checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "tab_not_acid", null, locks.get(3));
+    checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "tab_not_acid", "np=blah", locks.get(5));
+    checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "tab_not_acid", "np=doh", locks.get(4));
+    checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "tab_acid", "p=foo", locks.get(2));
+    checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "tab_acid", null, locks.get(0));
+    checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "tab_acid", "p=bar", locks.get(1));
+
+    //jdk 7
+    //checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "tab_not_acid", null, locks.get(0));
+    //checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "tab_not_acid", "np=blah", locks.get(1));
+    //checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "tab_not_acid", "np=doh", locks.get(2));
+    //checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "tab_acid", "p=foo", locks.get(3));
+    //checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "tab_acid", null, locks.get(4));
+    //checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "tab_acid", "p=bar", locks.get(5));
+    //checkLock(LockType.EXCLUSIVE, LockState.WAITING, "default", "tab_not_acid", "np=doh", locks.get(6));
+  }
 
-  private void checkLock(LockType expectedType, LockState expectedState, String expectedDb, String expectedTable, String expectedPartition, ShowLocksResponseElement actual) {
+  public static void checkLock(LockType expectedType, LockState expectedState, String expectedDb, String expectedTable, String expectedPartition, ShowLocksResponseElement actual) {
     Assert.assertEquals(actual.toString(), expectedType, actual.getType());
     Assert.assertEquals(actual.toString(), expectedState, actual.getState());
     Assert.assertEquals(actual.toString(), normalizeCase(expectedDb), normalizeCase(actual.getDbname()));
@@ -738,7 +793,7 @@ public void testShowLocksFilterOptions() throws Exception {
   private void checkCmdOnDriver(CommandProcessorResponse cpr) {
     Assert.assertTrue(cpr.toString(), cpr.getResponseCode() == 0);
   }
-  private String normalizeCase(String s) {
+  private static String normalizeCase(String s) {
     return s == null ? null : s.toLowerCase();
   }
   private List<ShowLocksResponseElement> getLocks() throws Exception {
@@ -1332,8 +1387,14 @@ public void testCompletedTxnComponents() throws Exception {
     Assert.assertEquals(TxnDbUtil.queryToString("select * from COMPLETED_TXN_COMPONENTS"), 1,
       TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_TXN_COMPONENTS where ctc_txnid=1 and ctc_table='tab1'"));
   }
+
+  /**
+   * ToDo: multi-insert into txn table and non-tx table should be prevented
+   */
   @Test
   public void testMultiInsert() throws Exception {
+    checkCmdOnDriver(driver.run("drop table if exists tab1"));
+    checkCmdOnDriver(driver.run("drop table if exists tab_not_acid"));
     CommandProcessorResponse cpr = driver.run("create table if not exists tab1 (a int, b int) partitioned by (p string) " +
       "clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')");
     checkCmdOnDriver(cpr);
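Note: the commented-out "jdk 7" blocks in checkExpectedLocks2 preserve the lock ordering seen on Java 7, while the active assertions encode the Java 8 ordering — index-based checks like these are brittle because the lock list apparently comes back in hash-dependent order, which changed between JDK releases. One way to make such assertions JDK-independent would be to sort the response before checking it; an illustrative helper (not part of the patch, JDK 7-compatible, using the Comparator/Collections and metastore API types the test already imports):

    // Illustrative only: sort locks by (table, partition) so index-based
    // assertions don't depend on the JVM's hash iteration order.
    private static void sortLocks(List<ShowLocksResponseElement> locks) {
      Collections.sort(locks, new Comparator<ShowLocksResponseElement>() {
        @Override
        public int compare(ShowLocksResponseElement a, ShowLocksResponseElement b) {
          int c = safe(a.getTablename()).compareTo(safe(b.getTablename()));
          return c != 0 ? c : safe(a.getPartname()).compareTo(safe(b.getPartname()));
        }
        private String safe(String s) {
          return s == null ? "" : s;
        }
      });
    }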