diff --git hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
index 1e73a4b9ba..005fb67f16 100644
--- hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
+++ hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
@@ -210,8 +210,8 @@ public TestStreaming() throws Exception {
     //1) Start from a clean slate (metastore)
-    TxnDbUtil.cleanDb();
-    TxnDbUtil.prepDb();
+    TxnDbUtil.cleanDb(conf);
+    TxnDbUtil.prepDb(conf);
 
     //2) obtain metastore clients
     msClient = new HiveMetaStoreClient(conf);
diff --git hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/StreamingTestUtils.java hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/StreamingTestUtils.java
index 87a72b5625..63690f9a24 100644
--- hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/StreamingTestUtils.java
+++ hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/StreamingTestUtils.java
@@ -66,8 +66,8 @@ public HiveConf newHiveConf(String metaStoreUri) {
   public void prepareTransactionDatabase(HiveConf conf) throws Exception {
     TxnDbUtil.setConfValues(conf);
-    TxnDbUtil.cleanDb();
-    TxnDbUtil.prepDb();
+    TxnDbUtil.cleanDb(conf);
+    TxnDbUtil.prepDb(conf);
   }
 
   public IMetaStoreClient newMetaStoreClient(HiveConf conf) throws Exception {
diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java
index 1002be7d8a..a19cc86744 100644
--- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java
+++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java
@@ -259,12 +259,12 @@ public void stringifyValidTxns() throws Exception {
   @Before
   public void setUp() throws Exception {
-    TxnDbUtil.prepDb();
+    TxnDbUtil.prepDb(conf);
     client = new HiveMetaStoreClient(conf);
   }
 
   @After
   public void tearDown() throws Exception {
-    TxnDbUtil.cleanDb();
+    TxnDbUtil.cleanDb(conf);
   }
 }
diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java
index 8b4b21fc48..5642f067b0 100644
--- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java
+++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java
@@ -85,7 +85,6 @@ public String toString() {
   @Before
   public void setUp() throws Exception {
-    tearDown();
     hiveConf = new HiveConf(this.getClass());
     hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
     hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
@@ -96,7 +95,7 @@ public void setUp() throws Exception {
       .setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory");
     TxnDbUtil.setConfValues(hiveConf);
-    TxnDbUtil.prepDb();
+    TxnDbUtil.prepDb(hiveConf);
     File f = new File(TEST_WAREHOUSE_DIR);
     if (f.exists()) {
       FileUtil.fullyDelete(f);
@@ -141,7 +140,7 @@ public void tearDown() throws Exception {
       d.close();
       d = null;
     }
-      TxnDbUtil.cleanDb();
+      TxnDbUtil.cleanDb(hiveConf);
     } finally {
       FileUtils.deleteDirectory(new File(TEST_DATA_DIR));
     }
diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
index aea1dfc6f4..707bcd10b7 100644
--- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
+++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
@@ -118,8 +118,8 @@ public void setup() throws Exception {
     //"org.apache.hadoop.hive.ql.io.HiveInputFormat"
     TxnDbUtil.setConfValues(hiveConf);
-    TxnDbUtil.cleanDb();
-    TxnDbUtil.prepDb();
+    TxnDbUtil.cleanDb(hiveConf);
+    TxnDbUtil.prepDb(hiveConf);
     conf = hiveConf;
     msClient = new HiveMetaStoreClient(conf);
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index d5de4f2c7b..a4ec53637e 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -131,6 +131,7 @@
 import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
 import org.apache.hadoop.hive.metastore.security.MetastoreDelegationTokenManager;
 import org.apache.hadoop.hive.metastore.security.TUGIContainingTransport;
+import org.apache.hadoop.hive.metastore.txn.AcidOpenTxnsCounterService;
 import org.apache.hadoop.hive.metastore.txn.TxnStore;
 import org.apache.hadoop.hive.metastore.txn.TxnUtils;
 import org.apache.hadoop.hive.serde2.Deserializer;
@@ -7583,6 +7584,7 @@ public void run() {
                 + e.getMessage(), e);
           }
         }
+        ThreadPool.shutdown();
       }
     });
@@ -7960,6 +7962,14 @@ private static void startHouseKeeperService(HiveConf conf) throws Exception {
     startHouseKeeperService(conf, Class.forName("org.apache.hadoop.hive.ql.txn.AcidHouseKeeperService"));
     startHouseKeeperService(conf, Class.forName("org.apache.hadoop.hive.ql.txn.AcidCompactionHistoryService"));
     startHouseKeeperService(conf, Class.forName("org.apache.hadoop.hive.ql.txn.AcidWriteSetService"));
+
+    ThreadPool.initialize(conf);
+    RunnableConfigurable rc = new AcidOpenTxnsCounterService();
+    rc.setConf(conf);
+    ThreadPool.getPool().scheduleAtFixedRate(rc, 100, MetastoreConf.getTimeVar(conf,
+        MetastoreConf.ConfVars.COUNT_OPEN_TXNS_INTERVAL, TimeUnit.MILLISECONDS),
+        TimeUnit.MILLISECONDS);
+
 }
 private static void startHouseKeeperService(HiveConf conf, Class c) throws Exception {
   //todo: when metastore adds orderly-shutdown logic, houseKeeper.stop()
diff --git ql/src/java/org/apache/hadoop/hive/ql/txn/AcidOpenTxnsCounterService.java ql/src/java/org/apache/hadoop/hive/ql/txn/AcidOpenTxnsCounterService.java
deleted file mode 100644
index 08fcff4b03..0000000000
--- ql/src/java/org/apache/hadoop/hive/ql/txn/AcidOpenTxnsCounterService.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.txn;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.txn.TxnStore;
-import org.apache.hadoop.hive.metastore.txn.TxnUtils;
-import org.apache.hadoop.hive.ql.txn.compactor.HouseKeeperServiceBase;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-/**
- * Background running thread, periodically updating number of open transactions.
- * Runs inside Hive Metastore Service.
- */
-public class AcidOpenTxnsCounterService extends HouseKeeperServiceBase {
-  private static final Logger LOG = LoggerFactory.getLogger(AcidOpenTxnsCounterService.class);
-  @Override
-  protected long getStartDelayMs() {
-    return 100; // in miliseconds
-  }
-  @Override
-  protected long getIntervalMs() {
-    return hiveConf.getTimeVar(HiveConf.ConfVars.HIVE_COUNT_OPEN_TXNS_INTERVAL, TimeUnit.MILLISECONDS);
-  }
-  @Override
-  protected Runnable getScheduedAction(HiveConf hiveConf, AtomicInteger isAliveCounter) {
-    return new OpenTxnsCounter(hiveConf, isAliveCounter);
-  }
-  @Override
-  public String getServiceDescription() {
-    return "Count number of open transactions";
-  }
-  private static final class OpenTxnsCounter implements Runnable {
-    private static volatile long lastLogTime = 0;
-    private final TxnStore txnHandler;
-    private final AtomicInteger isAliveCounter;
-    private OpenTxnsCounter(HiveConf hiveConf, AtomicInteger isAliveCounter) {
-      txnHandler = TxnUtils.getTxnStore(hiveConf);
-      this.isAliveCounter = isAliveCounter;
-    }
-    @Override
-    public void run() {
-      try {
-        long startTime = System.currentTimeMillis();
-        txnHandler.countOpenTxns();
-        int count = isAliveCounter.incrementAndGet();
-        if(System.currentTimeMillis() - lastLogTime > 60*1000) {
-          //don't flood the logs with too many msgs
-          LOG.info("OpenTxnsCounter ran for " + (System.currentTimeMillis() - startTime) / 1000 +
-            "seconds. isAliveCounter=" + count);
-          lastLogTime = System.currentTimeMillis();
-        }
-      }
-      catch(Throwable t) {
-        LOG.error("Serious error in {}", Thread.currentThread().getName(), ": {}" + t.getMessage(), t);
-      }
-    }
-  }
-}
\ No newline at end of file
diff --git ql/src/test/org/apache/hadoop/hive/metastore/txn/TestCompactionTxnHandler.java ql/src/test/org/apache/hadoop/hive/metastore/txn/TestCompactionTxnHandler.java
index f8ae86bea3..96005b4388 100644
--- ql/src/test/org/apache/hadoop/hive/metastore/txn/TestCompactionTxnHandler.java
+++ ql/src/test/org/apache/hadoop/hive/metastore/txn/TestCompactionTxnHandler.java
@@ -450,13 +450,13 @@ public void addDynamicPartitions() throws Exception {
   @Before
   public void setUp() throws Exception {
-    TxnDbUtil.prepDb();
+    TxnDbUtil.prepDb(conf);
     txnHandler = TxnUtils.getTxnStore(conf);
   }
 
   @After
   public void tearDown() throws Exception {
-    TxnDbUtil.cleanDb();
+    TxnDbUtil.cleanDb(conf);
   }
 
   private long openTxn() throws MetaException {
diff --git ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java
index 23efce0d19..6f607bb25f 100644
--- ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java
+++ ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java
@@ -1543,13 +1543,13 @@ private void updateLocks(Connection conn) throws SQLException {
   @Before
   public void setUp() throws Exception {
-    TxnDbUtil.prepDb();
+    TxnDbUtil.prepDb(conf);
     txnHandler = TxnUtils.getTxnStore(conf);
   }
 
   @After
   public void tearDown() throws Exception {
-    TxnDbUtil.cleanDb();
+    TxnDbUtil.cleanDb(conf);
   }
 
   private long openTxn() throws MetaException {
diff --git ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandlerNoConnectionPool.java ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandlerNoConnectionPool.java
index e5f4ddea54..a9cf3df73d 100644
--- ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandlerNoConnectionPool.java
+++ ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandlerNoConnectionPool.java
@@ -50,18 +50,18 @@ public void setUp() throws Exception {
     conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_POOLING_TYPE, "None");
     TxnDbUtil.setConfValues(conf);
     try {
-      TxnDbUtil.prepDb();
+      TxnDbUtil.prepDb(conf);
     } catch (SQLException e) {
       // Usually this means we've already created the tables, so clean them and then try again
       tearDown();
-      TxnDbUtil.prepDb();
+      TxnDbUtil.prepDb(conf);
     }
     txnHandler = TxnUtils.getTxnStore(conf);
   }
 
   @After
   public void tearDown() throws Exception {
-    TxnDbUtil.cleanDb();
+    TxnDbUtil.cleanDb(conf);
   }
 
   @Test
diff --git ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
index 4f1c7d8b1e..2e53155ad7 100644
--- ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
+++ ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
@@ -71,25 +71,6 @@ String getTestDataDir() {
     return TEST_DATA_DIR;
   }
-  private void dropTables() throws Exception {
-    for(Table t : Table.values()) {
-      runStatementOnDriver("drop table if exists " + t);
-    }
-  }
-  @After
-  public void tearDown() throws Exception {
-    try {
-      if (d != null) {
-        dropTables();
-        d.destroy();
-        d.close();
-        d = null;
-      }
-    } finally {
-      TxnDbUtil.cleanDb();
-      FileUtils.deleteDirectory(new File(TEST_DATA_DIR));
-    }
-  }
   @Test//todo: what is this for?
   public void testInsertOverwrite() throws Exception {
     runStatementOnDriver("insert overwrite table " + Table.NONACIDORCTBL + " select a,b from " + Table.NONACIDORCTBL2);
@@ -388,7 +369,7 @@ public void testTimeOutReaper() throws Exception {
     Assert.assertNotNull(txnInfo);
     Assert.assertEquals(14, txnInfo.getId());
     Assert.assertEquals(TxnState.OPEN, txnInfo.getState());
-    String s =TxnDbUtil.queryToString("select TXN_STARTED, TXN_LAST_HEARTBEAT from TXNS where TXN_ID = " + txnInfo.getId(), false);
+    String s =TxnDbUtil.queryToString(hiveConf, "select TXN_STARTED, TXN_LAST_HEARTBEAT from TXNS where TXN_ID = " + txnInfo.getId(), false);
     String[] vals = s.split("\\s+");
     Assert.assertEquals("Didn't get expected timestamps", 2, vals.length);
     long lastHeartbeat = Long.parseLong(vals[1]);
@@ -412,7 +393,7 @@ public void testTimeOutReaper() throws Exception {
     TestDbTxnManager2.checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", Table.ACIDTBL.name, null, slr.getLocks());
     //should've done several heartbeats
-    s =TxnDbUtil.queryToString("select TXN_STARTED, TXN_LAST_HEARTBEAT from TXNS where TXN_ID = " + txnInfo.getId(), false);
+    s =TxnDbUtil.queryToString(hiveConf, "select TXN_STARTED, TXN_LAST_HEARTBEAT from TXNS where TXN_ID = " + txnInfo.getId(), false);
     vals = s.split("\\s+");
     Assert.assertEquals("Didn't get expected timestamps", 2, vals.length);
     Assert.assertTrue("Heartbeat didn't progress: (old,new) (" + lastHeartbeat + "," + vals[1]+ ")",
diff --git ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
index d08371d89c..f620283614 100644
--- ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
+++ ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
@@ -55,7 +55,7 @@
 import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.ql.txn.AcidCompactionHistoryService;
-import org.apache.hadoop.hive.ql.txn.AcidOpenTxnsCounterService;
+import org.apache.hadoop.hive.metastore.txn.AcidOpenTxnsCounterService;
 import org.apache.hadoop.hive.ql.txn.compactor.Cleaner;
 import org.apache.hadoop.hive.ql.txn.compactor.Initiator;
 import org.apache.hadoop.hive.ql.txn.compactor.Worker;
@@ -70,6 +70,10 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+/**
+ * TODO: this should be merged with TestTxnCommands once that is checked in, specifically the tests;
+ * the supporting code here is just a clone of TestTxnCommands.
+ */
 public class TestTxnCommands2 {
   static final private Logger LOG = LoggerFactory.getLogger(TestTxnCommands2.class);
   protected static final String TEST_DATA_DIR = new File(System.getProperty("java.io.tmpdir") +
@@ -118,7 +122,6 @@ public void setUp() throws Exception {
   }
   protected void setUpWithTableProperties(String tableProperties) throws Exception {
-    tearDown();
     hiveConf = new HiveConf(this.getClass());
     hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
     hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
@@ -131,7 +134,7 @@ protected void setUpWithTableProperties(String tableProperties) throws Exception
     hiveConf.setBoolVar(HiveConf.ConfVars.MERGE_CARDINALITY_VIOLATION_CHECK, true);
     TxnDbUtil.setConfValues(hiveConf);
-    TxnDbUtil.prepDb();
+    TxnDbUtil.prepDb(hiveConf);
     File f = new File(TEST_WAREHOUSE_DIR);
     if (f.exists()) {
       FileUtil.fullyDelete(f);
@@ -168,7 +171,7 @@ public void tearDown() throws Exception {
       d.close();
       d = null;
     }
-      TxnDbUtil.cleanDb();
+      TxnDbUtil.cleanDb(hiveConf);
     } finally {
       FileUtils.deleteDirectory(new File(TEST_DATA_DIR));
     }
@@ -1284,7 +1287,8 @@ public void testOpenTxnsCounter() throws Exception {
     OpenTxnsResponse openTxnsResponse = txnHandler.openTxns(new OpenTxnRequest(3, "me", "localhost"));
     AcidOpenTxnsCounterService openTxnsCounterService = new AcidOpenTxnsCounterService();
-    runHouseKeeperService(openTxnsCounterService, hiveConf); // will update current number of open txns to 3
+    openTxnsCounterService.setConf(hiveConf);
+    openTxnsCounterService.run(); // will update current number of open txns to 3
     MetaException exception = null;
     // This should fail once it finds out the threshold has been reached
@@ -1301,7 +1305,7 @@ public void testOpenTxnsCounter() throws Exception {
     for (long txnid : openTxnsResponse.getTxn_ids()) {
       txnHandler.commitTxn(new CommitTxnRequest(txnid));
     }
-    runHouseKeeperService(openTxnsCounterService, hiveConf); // will update current number of open txns back to 0
+    openTxnsCounterService.run(); // will update current number of open txns back to 0
     exception = null;
     try {
       txnHandler.openTxns(new OpenTxnRequest(1, "him", "localhost"));
diff --git ql/src/test/org/apache/hadoop/hive/ql/TxnCommandsBaseForTests.java ql/src/test/org/apache/hadoop/hive/ql/TxnCommandsBaseForTests.java
index ad2aac5f56..e0344c533a 100644
--- ql/src/test/org/apache/hadoop/hive/ql/TxnCommandsBaseForTests.java
+++ ql/src/test/org/apache/hadoop/hive/ql/TxnCommandsBaseForTests.java
@@ -53,7 +53,6 @@ public void setUp() throws Exception {
     setUpInternal();
   }
   void setUpInternal() throws Exception {
-    tearDown();
     hiveConf = new HiveConf(this.getClass());
     hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
     hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
@@ -65,7 +64,7 @@ void setUpInternal() throws Exception {
       "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory");
     hiveConf.setBoolVar(HiveConf.ConfVars.MERGE_CARDINALITY_VIOLATION_CHECK, true);
     TxnDbUtil.setConfValues(hiveConf);
-    TxnDbUtil.prepDb();
+    TxnDbUtil.prepDb(hiveConf);
     File f = new File(getWarehouseDir());
     if (f.exists()) {
       FileUtil.fullyDelete(f);
@@ -99,7 +98,7 @@ public void tearDown() throws Exception {
         d = null;
       }
     } finally {
-      TxnDbUtil.cleanDb();
+      TxnDbUtil.cleanDb(hiveConf);
       FileUtils.deleteDirectory(new File(getTestDataDir()));
     }
   }
diff --git ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager.java ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager.java
index 14ff58eb2b..e46e65be42 100644
--- ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager.java
+++ ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager.java
@@ -87,7 +87,7 @@ public void testSingleReadTable() throws Exception {
     List locks = ctx.getHiveLocks();
     Assert.assertEquals(1, locks.size());
     Assert.assertEquals(1,
-        TxnDbUtil.countLockComponents(((DbLockManager.DbHiveLock) locks.get(0)).lockId));
+        TxnDbUtil.countLockComponents(conf, ((DbLockManager.DbHiveLock) locks.get(0)).lockId));
     txnMgr.commitTxn();
     locks = txnMgr.getLockManager().getLocks(false, false);
     Assert.assertEquals(0, locks.size());
@@ -102,7 +102,7 @@ public void testSingleReadPartition() throws Exception {
     List locks = ctx.getHiveLocks();
     Assert.assertEquals(1, locks.size());
     Assert.assertEquals(1,
-        TxnDbUtil.countLockComponents(((DbLockManager.DbHiveLock) locks.get(0)).lockId));
+        TxnDbUtil.countLockComponents(conf, ((DbLockManager.DbHiveLock) locks.get(0)).lockId));
     txnMgr.commitTxn();
     locks = txnMgr.getLockManager().getLocks(false, false);
     Assert.assertEquals(0, locks.size());
@@ -121,7 +121,7 @@ public void testSingleReadMultiPartition() throws Exception {
     List locks = ctx.getHiveLocks();
     Assert.assertEquals(1, locks.size());
     Assert.assertEquals(3,
-        TxnDbUtil.countLockComponents(((DbLockManager.DbHiveLock) locks.get(0)).lockId));
+        TxnDbUtil.countLockComponents(conf, ((DbLockManager.DbHiveLock) locks.get(0)).lockId));
     txnMgr.commitTxn();
     locks = txnMgr.getLockManager().getLocks(false, false);
     Assert.assertEquals(0, locks.size());
@@ -140,7 +140,7 @@ public void testJoin() throws Exception {
     List locks = ctx.getHiveLocks();
     Assert.assertEquals(1, locks.size());
     Assert.assertEquals(4,
-        TxnDbUtil.countLockComponents(((DbLockManager.DbHiveLock) locks.get(0)).lockId));
+        TxnDbUtil.countLockComponents(conf, ((DbLockManager.DbHiveLock) locks.get(0)).lockId));
     txnMgr.commitTxn();
     locks = txnMgr.getLockManager().getLocks(false, false);
     Assert.assertEquals(0, locks.size());
@@ -155,7 +155,7 @@ public void testSingleWriteTable() throws Exception {
     List locks = ctx.getHiveLocks();
     Assert.assertEquals(1, locks.size());
     Assert.assertEquals(1,
-        TxnDbUtil.countLockComponents(((DbLockManager.DbHiveLock) locks.get(0)).lockId));
+        TxnDbUtil.countLockComponents(conf, ((DbLockManager.DbHiveLock) locks.get(0)).lockId));
     txnMgr.commitTxn();
     locks = txnMgr.getLockManager().getLocks(false, false);
     Assert.assertEquals(0, locks.size());
@@ -171,7 +171,7 @@ public void testSingleWritePartition() throws Exception {
     List locks = ctx.getHiveLocks();
     Assert.assertEquals(1, locks.size());
     Assert.assertEquals(1,
-        TxnDbUtil.countLockComponents(((DbLockManager.DbHiveLock) locks.get(0)).lockId));
+        TxnDbUtil.countLockComponents(conf, ((DbLockManager.DbHiveLock) locks.get(0)).lockId));
     txnMgr.commitTxn();
     locks = txnMgr.getLockManager().getLocks(false, false);
     Assert.assertEquals(0, locks.size());
@@ -307,7 +307,7 @@ public void testReadWrite() throws Exception {
     List locks = ctx.getHiveLocks();
     Assert.assertEquals(1, locks.size());
     Assert.assertEquals(4,
-        TxnDbUtil.countLockComponents(((DbLockManager.DbHiveLock) locks.get(0)).lockId));
+        TxnDbUtil.countLockComponents(conf, ((DbLockManager.DbHiveLock) locks.get(0)).lockId));
     txnMgr.commitTxn();
     locks = txnMgr.getLockManager().getLocks(false, false);
     Assert.assertEquals(0, locks.size());
@@ -322,7 +322,7 @@ public void testUpdate() throws Exception {
     List locks = ctx.getHiveLocks();
     Assert.assertEquals(1, locks.size());
     Assert.assertEquals(1,
-        TxnDbUtil.countLockComponents(((DbLockManager.DbHiveLock) locks.get(0)).lockId));
+        TxnDbUtil.countLockComponents(conf, ((DbLockManager.DbHiveLock) locks.get(0)).lockId));
     txnMgr.commitTxn();
     locks = txnMgr.getLockManager().getLocks(false, false);
     Assert.assertEquals(0, locks.size());
@@ -337,7 +337,7 @@ public void testDelete() throws Exception {
     List locks = ctx.getHiveLocks();
     Assert.assertEquals(1, locks.size());
     Assert.assertEquals(1,
-        TxnDbUtil.countLockComponents(((DbLockManager.DbHiveLock) locks.get(0)).lockId));
+        TxnDbUtil.countLockComponents(conf, ((DbLockManager.DbHiveLock) locks.get(0)).lockId));
     txnMgr.commitTxn();
     locks = txnMgr.getLockManager().getLocks(false, false);
     Assert.assertEquals(0, locks.size());
@@ -352,7 +352,7 @@ public void testRollback() throws Exception {
     List locks = ctx.getHiveLocks();
     Assert.assertEquals(1, locks.size());
     Assert.assertEquals(1,
-        TxnDbUtil.countLockComponents(((DbLockManager.DbHiveLock) locks.get(0)).lockId));
+        TxnDbUtil.countLockComponents(conf, ((DbLockManager.DbHiveLock) locks.get(0)).lockId));
     txnMgr.rollbackTxn();
     locks = txnMgr.getLockManager().getLocks(false, false);
     Assert.assertEquals(0, locks.size());
@@ -367,7 +367,7 @@ public void testDDLExclusive() throws Exception {
     List locks = ctx.getHiveLocks();
     Assert.assertEquals(1, locks.size());
     Assert.assertEquals(1,
-        TxnDbUtil.countLockComponents(((DbLockManager.DbHiveLock) locks.get(0)).lockId));
+        TxnDbUtil.countLockComponents(conf, ((DbLockManager.DbHiveLock) locks.get(0)).lockId));
     txnMgr.rollbackTxn();
     locks = txnMgr.getLockManager().getLocks(false, false);
     Assert.assertEquals(0, locks.size());
@@ -382,7 +382,7 @@ public void testDDLShared() throws Exception {
     List locks = ctx.getHiveLocks();
     Assert.assertEquals(1, locks.size());
     Assert.assertEquals(1,
-        TxnDbUtil.countLockComponents(((DbLockManager.DbHiveLock) locks.get(0)).lockId));
+        TxnDbUtil.countLockComponents(conf, ((DbLockManager.DbHiveLock) locks.get(0)).lockId));
     txnMgr.commitTxn();
     locks = txnMgr.getLockManager().getLocks(false, false);
     Assert.assertEquals(0, locks.size());
@@ -481,7 +481,7 @@ public void testHeartbeater() throws Exception {
   @Before
   public void setUp() throws Exception {
-    TxnDbUtil.prepDb();
+    TxnDbUtil.prepDb(conf);
     txnMgr = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf);
     txnMgr.getLockManager();//init lock manager
     Assert.assertTrue(txnMgr instanceof DbTxnManager);
@@ -497,7 +497,7 @@ public void setUp() throws Exception {
   public void tearDown() throws Exception {
     if(houseKeeperService != null) houseKeeperService.stop();
     if (txnMgr != null) txnMgr.closeTxnManager();
-    TxnDbUtil.cleanDb();
+    TxnDbUtil.cleanDb(conf);
   }
   private static class MockQueryPlan extends QueryPlan {
diff --git ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
index 6d1cdcb433..e9833cb5e9 100644
--- ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
+++ ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
@@ -104,8 +104,8 @@ public void setUp() throws Exception {
     ctx = new Context(conf);
     driver = new Driver(conf);
     driver.init();
-    TxnDbUtil.cleanDb();
-    TxnDbUtil.prepDb();
+    TxnDbUtil.cleanDb(conf);
+    TxnDbUtil.prepDb(conf);
     SessionState ss = SessionState.get();
     ss.initTxnMgr(conf);
     txnMgr = ss.getTxnMgr();
@@ -445,9 +445,9 @@ public void testMetastoreTablesCleanup() throws Exception {
     checkCmdOnDriver(cpr);
     cpr = driver.run("insert into temp.T13p partition (ds='tomorrow', hour='2') values (8, 8)");
     checkCmdOnDriver(cpr);
-    int count = TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_TXN_COMPONENTS where CTC_DATABASE='temp' and CTC_TABLE in ('t10', 't11')");
+    int count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where CTC_DATABASE='temp' and CTC_TABLE in ('t10', 't11')");
     Assert.assertEquals(4, count);
-    count = TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_TXN_COMPONENTS where CTC_DATABASE='temp' and CTC_TABLE in ('t12p', 't13p')");
+    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where CTC_DATABASE='temp' and CTC_TABLE in ('t12p', 't13p')");
     Assert.assertEquals(4, count);
 
     // Fail some inserts, so that we have records in TXN_COMPONENTS
@@ -460,132 +460,132 @@ public void testMetastoreTablesCleanup() throws Exception {
     checkCmdOnDriver(cpr);
     cpr = driver.run("insert into temp.T13p partition (ds='today', hour='1') values (12, 12)");
     checkCmdOnDriver(cpr);
-    count = TxnDbUtil.countQueryAgent("select count(*) from TXN_COMPONENTS where TC_DATABASE='temp' and TC_TABLE in ('t10', 't11', 't12p', 't13p')");
+    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where TC_DATABASE='temp' and TC_TABLE in ('t10', 't11', 't12p', 't13p')");
     Assert.assertEquals(4, count);
     conf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, false);
 
     // Drop a table/partition; corresponding records in TXN_COMPONENTS and COMPLETED_TXN_COMPONENTS should disappear
-    count = TxnDbUtil.countQueryAgent("select count(*) from TXN_COMPONENTS where TC_DATABASE='temp' and TC_TABLE='t10'");
+    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where TC_DATABASE='temp' and TC_TABLE='t10'");
     Assert.assertEquals(1, count);
-    count = TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_TXN_COMPONENTS where CTC_DATABASE='temp' and CTC_TABLE='t10'");
+    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where CTC_DATABASE='temp' and CTC_TABLE='t10'");
     Assert.assertEquals(2, count);
     cpr = driver.run("drop table temp.T10");
     checkCmdOnDriver(cpr);
-    count = TxnDbUtil.countQueryAgent("select count(*) from TXN_COMPONENTS where TC_DATABASE='temp' and TC_TABLE='t10'");
+    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where TC_DATABASE='temp' and TC_TABLE='t10'");
     Assert.assertEquals(0, count);
-    count = TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_TXN_COMPONENTS where CTC_DATABASE='temp' and CTC_TABLE='t10'");
+    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where CTC_DATABASE='temp' and CTC_TABLE='t10'");
     Assert.assertEquals(0, count);
-    count = TxnDbUtil.countQueryAgent("select count(*) from TXN_COMPONENTS where TC_DATABASE='temp' and TC_TABLE='t12p' and TC_PARTITION='ds=today/hour=1'");
+    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where TC_DATABASE='temp' and TC_TABLE='t12p' and TC_PARTITION='ds=today/hour=1'");
     Assert.assertEquals(1, count);
-    count = TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_TXN_COMPONENTS where CTC_DATABASE='temp' and CTC_TABLE='t12p' and CTC_PARTITION='ds=today/hour=1'");
+    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where CTC_DATABASE='temp' and CTC_TABLE='t12p' and CTC_PARTITION='ds=today/hour=1'");
     Assert.assertEquals(1, count);
     cpr = driver.run("alter table temp.T12p drop partition (ds='today', hour='1')");
     checkCmdOnDriver(cpr);
-    count = TxnDbUtil.countQueryAgent("select count(*) from TXN_COMPONENTS where TC_DATABASE='temp' and TC_TABLE='t12p' and TC_PARTITION='ds=today/hour=1'");
+    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where TC_DATABASE='temp' and TC_TABLE='t12p' and TC_PARTITION='ds=today/hour=1'");
     Assert.assertEquals(0, count);
-    count = TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_TXN_COMPONENTS where CTC_DATABASE='temp' and CTC_TABLE='t12p' and CTC_PARTITION='ds=today/hour=1'");
+    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where CTC_DATABASE='temp' and CTC_TABLE='t12p' and CTC_PARTITION='ds=today/hour=1'");
     Assert.assertEquals(0, count);
 
     // Successfully perform compaction on a table/partition, so that we have successful records in COMPLETED_COMPACTIONS
     cpr = driver.run("alter table temp.T11 compact 'minor'");
     checkCmdOnDriver(cpr);
-    count = TxnDbUtil.countQueryAgent("select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t11' and CQ_STATE='i' and CQ_TYPE='i'");
+    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t11' and CQ_STATE='i' and CQ_TYPE='i'");
     Assert.assertEquals(1, count);
     org.apache.hadoop.hive.ql.TestTxnCommands2.runWorker(conf);
-    count = TxnDbUtil.countQueryAgent("select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t11' and CQ_STATE='r' and CQ_TYPE='i'");
+    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t11' and CQ_STATE='r' and CQ_TYPE='i'");
     Assert.assertEquals(1, count);
     org.apache.hadoop.hive.ql.TestTxnCommands2.runCleaner(conf);
-    count = TxnDbUtil.countQueryAgent("select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t11'");
+    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t11'");
     Assert.assertEquals(0, count);
-    count = TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_COMPACTIONS where CC_DATABASE='temp' and CC_TABLE='t11' and CC_STATE='s' and CC_TYPE='i'");
+    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_COMPACTIONS where CC_DATABASE='temp' and CC_TABLE='t11' and CC_STATE='s' and CC_TYPE='i'");
     Assert.assertEquals(1, count);
     cpr = driver.run("alter table temp.T12p partition (ds='tomorrow', hour='2') compact 'minor'");
     checkCmdOnDriver(cpr);
-    count = TxnDbUtil.countQueryAgent("select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t12p' and CQ_PARTITION='ds=tomorrow/hour=2' and CQ_STATE='i' and CQ_TYPE='i'");
+    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t12p' and CQ_PARTITION='ds=tomorrow/hour=2' and CQ_STATE='i' and CQ_TYPE='i'");
     Assert.assertEquals(1, count);
     org.apache.hadoop.hive.ql.TestTxnCommands2.runWorker(conf);
-    count = TxnDbUtil.countQueryAgent("select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t12p' and CQ_PARTITION='ds=tomorrow/hour=2' and CQ_STATE='r' and CQ_TYPE='i'");
+    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t12p' and CQ_PARTITION='ds=tomorrow/hour=2' and CQ_STATE='r' and CQ_TYPE='i'");
     Assert.assertEquals(1, count);
     org.apache.hadoop.hive.ql.TestTxnCommands2.runCleaner(conf);
-    count = TxnDbUtil.countQueryAgent("select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t12p'");
+    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t12p'");
     Assert.assertEquals(0, count);
-    count = TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_COMPACTIONS where CC_DATABASE='temp' and CC_TABLE='t12p' and CC_STATE='s' and CC_TYPE='i'");
+    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_COMPACTIONS where CC_DATABASE='temp' and CC_TABLE='t12p' and CC_STATE='s' and CC_TYPE='i'");
     Assert.assertEquals(1, count);
 
     // Fail compaction, so that we have failed records in COMPLETED_COMPACTIONS
     conf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILCOMPACTION, true);
     cpr = driver.run("alter table temp.T11 compact 'major'");
     checkCmdOnDriver(cpr);
-    count = TxnDbUtil.countQueryAgent("select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t11' and CQ_STATE='i' and CQ_TYPE='a'");
+    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t11' and CQ_STATE='i' and CQ_TYPE='a'");
     Assert.assertEquals(1, count);
     org.apache.hadoop.hive.ql.TestTxnCommands2.runWorker(conf); // will fail
-    count = TxnDbUtil.countQueryAgent("select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t11' and CQ_STATE='i' and CQ_TYPE='a'");
+    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t11' and CQ_STATE='i' and CQ_TYPE='a'");
     Assert.assertEquals(0, count);
-    count = TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_COMPACTIONS where CC_DATABASE='temp' and CC_TABLE='t11' and CC_STATE='f' and CC_TYPE='a'");
+    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_COMPACTIONS where CC_DATABASE='temp' and CC_TABLE='t11' and CC_STATE='f' and CC_TYPE='a'");
     Assert.assertEquals(1, count);
     cpr = driver.run("alter table temp.T12p partition (ds='tomorrow', hour='2') compact 'major'");
     checkCmdOnDriver(cpr);
-    count = TxnDbUtil.countQueryAgent("select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t12p' and CQ_PARTITION='ds=tomorrow/hour=2' and CQ_STATE='i' and CQ_TYPE='a'");
+    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t12p' and CQ_PARTITION='ds=tomorrow/hour=2' and CQ_STATE='i' and CQ_TYPE='a'");
     Assert.assertEquals(1, count);
     org.apache.hadoop.hive.ql.TestTxnCommands2.runWorker(conf); // will fail
-    count = TxnDbUtil.countQueryAgent("select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t12p' and CQ_PARTITION='ds=tomorrow/hour=2' and CQ_STATE='i' and CQ_TYPE='a'");
+    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t12p' and CQ_PARTITION='ds=tomorrow/hour=2' and CQ_STATE='i' and CQ_TYPE='a'");
     Assert.assertEquals(0, count);
-    count = TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_COMPACTIONS where CC_DATABASE='temp' and CC_TABLE='t12p' and CC_STATE='f' and CC_TYPE='a'");
+    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_COMPACTIONS where CC_DATABASE='temp' and CC_TABLE='t12p' and CC_STATE='f' and CC_TYPE='a'");
     Assert.assertEquals(1, count);
     conf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILCOMPACTION, false);
 
     // Put 2 records into COMPACTION_QUEUE and do nothing
     cpr = driver.run("alter table temp.T11 compact 'major'");
     checkCmdOnDriver(cpr);
-    count = TxnDbUtil.countQueryAgent("select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t11' and CQ_STATE='i' and CQ_TYPE='a'");
+    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t11' and CQ_STATE='i' and CQ_TYPE='a'");
     Assert.assertEquals(1, count);
     cpr = driver.run("alter table temp.T12p partition (ds='tomorrow', hour='2') compact 'major'");
     checkCmdOnDriver(cpr);
-    count = TxnDbUtil.countQueryAgent("select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t12p' and CQ_PARTITION='ds=tomorrow/hour=2' and CQ_STATE='i' and CQ_TYPE='a'");
+    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t12p' and CQ_PARTITION='ds=tomorrow/hour=2' and CQ_STATE='i' and CQ_TYPE='a'");
     Assert.assertEquals(1, count);
 
     // Drop a table/partition, corresponding records in COMPACTION_QUEUE and COMPLETED_COMPACTIONS should disappear
     cpr = driver.run("drop table temp.T11");
     checkCmdOnDriver(cpr);
-    count = TxnDbUtil.countQueryAgent("select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t11'");
+    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t11'");
     Assert.assertEquals(0, count);
-    count = TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_COMPACTIONS where CC_DATABASE='temp' and CC_TABLE='t11'");
+    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_COMPACTIONS where CC_DATABASE='temp' and CC_TABLE='t11'");
     Assert.assertEquals(0, count);
     cpr = driver.run("alter table temp.T12p drop partition (ds='tomorrow', hour='2')");
     checkCmdOnDriver(cpr);
-    count = TxnDbUtil.countQueryAgent("select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t12p'");
+    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t12p'");
     Assert.assertEquals(0, count);
-    count = TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_COMPACTIONS where CC_DATABASE='temp' and CC_TABLE='t12p'");
+    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_COMPACTIONS where CC_DATABASE='temp' and CC_TABLE='t12p'");
     Assert.assertEquals(0, count);
 
     // Put 1 record into COMPACTION_QUEUE and do nothing
     cpr = driver.run("alter table temp.T13p partition (ds='today', hour='1') compact 'major'");
     checkCmdOnDriver(cpr);
-    count = TxnDbUtil.countQueryAgent("select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t13p' and CQ_STATE='i' and CQ_TYPE='a'");
+    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t13p' and CQ_STATE='i' and CQ_TYPE='a'");
     Assert.assertEquals(1, count);
 
     // Drop database, everything in all 4 meta tables should disappear
-    count = TxnDbUtil.countQueryAgent("select count(*) from TXN_COMPONENTS where TC_DATABASE='temp' and TC_TABLE in ('t10', 't11', 't12p', 't13p')");
+    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where TC_DATABASE='temp' and TC_TABLE in ('t10', 't11', 't12p', 't13p')");
     Assert.assertEquals(1, count);
-    count = TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_TXN_COMPONENTS where CTC_DATABASE='temp' and CTC_TABLE in ('t10', 't11', 't12p', 't13p')");
+    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where CTC_DATABASE='temp' and CTC_TABLE in ('t10', 't11', 't12p', 't13p')");
     Assert.assertEquals(2, count);
-    count = TxnDbUtil.countQueryAgent("select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE in ('t10', 't11', 't12p', 't13p')");
+    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE in ('t10', 't11', 't12p', 't13p')");
     Assert.assertEquals(1, count);
-    count = TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_COMPACTIONS where CC_DATABASE='temp' and CC_TABLE in ('t10', 't11', 't12p', 't13p')");
+    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_COMPACTIONS where CC_DATABASE='temp' and CC_TABLE in ('t10', 't11', 't12p', 't13p')");
     Assert.assertEquals(0, count);
     cpr = driver.run("drop database if exists temp cascade");
     checkCmdOnDriver(cpr);
-    count = TxnDbUtil.countQueryAgent("select count(*) from TXN_COMPONENTS where TC_DATABASE='temp' and TC_TABLE in ('t10', 't11', 't12p', 't13p')");
+    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where TC_DATABASE='temp' and TC_TABLE in ('t10', 't11', 't12p', 't13p')");
     Assert.assertEquals(0, count);
-    count = TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_TXN_COMPONENTS where CTC_DATABASE='temp' and CTC_TABLE in ('t10', 't11', 't12p', 't13p')");
+    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where CTC_DATABASE='temp' and CTC_TABLE in ('t10', 't11', 't12p', 't13p')");
     Assert.assertEquals(0, count);
-    count = TxnDbUtil.countQueryAgent("select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE in ('t10', 't11', 't12p', 't13p')");
+    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE in ('t10', 't11', 't12p', 't13p')");
     Assert.assertEquals(0, count);
-    count = TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_COMPACTIONS where CC_DATABASE='temp' and CC_TABLE in ('t10', 't11', 't12p', 't13p')");
+    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_COMPACTIONS where CC_DATABASE='temp' and CC_TABLE in ('t10', 't11', 't12p', 't13p')");
     Assert.assertEquals(0, count);
   }
@@ -959,7 +959,7 @@ public void testWriteSetTracking3() throws Exception {
   @Test
   public void testWriteSetTracking4() throws Exception {
     dropTable(new String[] {"TAB_PART", "TAB2"});
-    Assert.assertEquals(0, TxnDbUtil.countQueryAgent("select count(*) from WRITE_SET"));
+    Assert.assertEquals(0, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET"));
     CommandProcessorResponse cpr = driver.run("create table if not exists TAB_PART (a int, b int) " +
       "partitioned by (p string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')");
     checkCmdOnDriver(cpr);
@@ -986,14 +986,14 @@ public void testWriteSetTracking4() throws Exception {
     checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "TAB_PART", null, locks);
     checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "TAB2", null, locks);
     //update stmt has p=blah, thus nothing is actually update and we generate empty dyn part list
-    Assert.assertEquals(0, TxnDbUtil.countQueryAgent("select count(*) from WRITE_SET"));
+    Assert.assertEquals(0, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET"));
     AddDynamicPartitions adp = new AddDynamicPartitions(txnMgr2.getCurrentTxnId(), "default", "tab2",
       Collections.EMPTY_LIST);
     adp.setOperationType(DataOperationType.UPDATE);
     txnHandler.addDynamicPartitions(adp);
     txnMgr2.commitTxn();
     //Short Running updated nothing, so we expect 0 rows in WRITE_SET
-    Assert.assertEquals( 0, TxnDbUtil.countQueryAgent("select count(*) from WRITE_SET"));
+    Assert.assertEquals( 0, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET"));
 
     txnMgr2.openTxn(ctx, "T3");
     checkCmdOnDriver(driver.compileAndRespond("update TAB2 set b = 7 where p = 'two'"));//pretend this partition exists
@@ -1003,19 +1003,19 @@ public void testWriteSetTracking4() throws Exception {
     checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "TAB_PART", null, locks);
     checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "TAB2", null, locks);//since TAB2 is empty
     //update stmt has p=blah, thus nothing is actually update and we generate empty dyn part list
-    Assert.assertEquals(0, TxnDbUtil.countQueryAgent("select count(*) from WRITE_SET"));
+    Assert.assertEquals(0, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET"));
     adp = new AddDynamicPartitions(txnMgr2.getCurrentTxnId(), "default", "tab2",
       Collections.singletonList("p=two"));
     adp.setOperationType(DataOperationType.UPDATE);
     txnHandler.addDynamicPartitions(adp);//simulate partition update
     txnMgr2.commitTxn();
-    Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString("select * from WRITE_SET"),
-      1, TxnDbUtil.countQueryAgent("select count(*) from WRITE_SET"));
+    Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString(conf, "select * from WRITE_SET"),
+      1, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET"));
     AcidWriteSetService houseKeeper = new AcidWriteSetService();
     TestTxnCommands2.runHouseKeeperService(houseKeeper, conf);
     //since T3 overlaps with Long Running (still open) GC does nothing
-    Assert.assertEquals(1, TxnDbUtil.countQueryAgent("select count(*) from WRITE_SET"));
+    Assert.assertEquals(1, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET"));
     checkCmdOnDriver(driver.compileAndRespond("update TAB2 set b = 17 where a = 1"));//no rows match
     txnMgr.acquireLocks(driver.getPlan(), ctx, "Long Running");
 
     //so generate empty Dyn Part call
@@ -1028,7 +1028,7 @@ public void testWriteSetTracking4() throws Exception {
     locks = getLocks(txnMgr);
     Assert.assertEquals("Unexpected lock count", 0, locks.size());
     TestTxnCommands2.runHouseKeeperService(houseKeeper, conf);
-    Assert.assertEquals(0, TxnDbUtil.countQueryAgent("select count(*) from WRITE_SET"));
+    Assert.assertEquals(0, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET"));
   }
   /**
    * overlapping txns updating the same resource but 1st one rolls back; 2nd commits
   */
  @Test
  public void testWriteSetTracking5() throws Exception {
    dropTable(new String[] {"TAB_PART"});
-    Assert.assertEquals(0, TxnDbUtil.countQueryAgent("select count(*) from WRITE_SET"));
+    Assert.assertEquals(0, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET"));
    CommandProcessorResponse cpr = driver.run("create table if not exists TAB_PART (a int, b int) " +
      "partitioned by (p string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')");
    checkCmdOnDriver(cpr);
@@ -1063,9 +1063,9 @@ public void testWriteSetTracking5() throws Exception {
       Arrays.asList("p=blah"));
     adp.setOperationType(DataOperationType.UPDATE);
     txnHandler.addDynamicPartitions(adp);
-    Assert.assertEquals(0, TxnDbUtil.countQueryAgent("select count(*) from WRITE_SET"));
+    Assert.assertEquals(0, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET"));
     txnMgr2.commitTxn();//since conflicting txn rolled back, commit succeeds
-    Assert.assertEquals(1, TxnDbUtil.countQueryAgent("select count(*) from WRITE_SET"));
+    Assert.assertEquals(1, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET"));
   }
   /**
    * check that read query concurrent with txn works ok
   */
  @Test
  public void testWriteSetTracking6() throws Exception {
    dropTable(new String[] {"TAB2"});
-    Assert.assertEquals(0, TxnDbUtil.countQueryAgent("select count(*) from WRITE_SET"));
+    Assert.assertEquals(0, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET"));
    CommandProcessorResponse cpr = driver.run("create table if not exists TAB2(a int, b int) clustered " +
      "by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')");
    checkCmdOnDriver(cpr);
@@ -1086,19 +1086,19 @@ public void testWriteSetTracking6() throws Exception {
     swapTxnManager(txnMgr2);
     checkCmdOnDriver(driver.compileAndRespond("update TAB2 set b = 17 where a = 101"));
     txnMgr2.acquireLocks(driver.getPlan(), ctx, "Horton");
-    Assert.assertEquals(0, TxnDbUtil.countQueryAgent("select count(*) from WRITE_SET"));
+    Assert.assertEquals(0, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET"));
     locks = getLocks(txnMgr);
     Assert.assertEquals("Unexpected lock count", 2, locks.size());
     checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "TAB2", null, locks);
     checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "TAB2", null, locks);
     txnMgr2.commitTxn();//no conflict
-    Assert.assertEquals(1, TxnDbUtil.countQueryAgent("select count(*) from WRITE_SET"));
+    Assert.assertEquals(1, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET"));
     locks = getLocks(txnMgr);
     Assert.assertEquals("Unexpected lock count", 1, locks.size());
     checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "TAB2", null, locks);
     txnMgr.commitTxn();
     TestTxnCommands2.runHouseKeeperService(new AcidWriteSetService(), conf);
-    Assert.assertEquals(0, TxnDbUtil.countQueryAgent("select count(*) from WRITE_SET"));
+    Assert.assertEquals(0, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET"));
   }
 
  /**
@@ -1108,7 +1108,7 @@ public void testWriteSetTracking6() throws Exception {
   @Test
   public void testWriteSetTracking7() throws Exception {
     dropTable(new String[] {"tab2", "TAB2"});
-    Assert.assertEquals(0, TxnDbUtil.countQueryAgent("select count(*) from WRITE_SET"));
+    Assert.assertEquals(0, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET"));
     CommandProcessorResponse cpr = driver.run("create table if not exists tab2 (a int, b int) " +
       "partitioned by (p string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')");
     checkCmdOnDriver(cpr);
@@ -1150,13 +1150,13 @@ public void testWriteSetTracking7() throws Exception {
     txnMgr.commitTxn();//txnid:idTxnUpdate2
     //now both txns concurrently updated TAB2 but different partitions.
-    Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString("select * from WRITE_SET"),
-      1, TxnDbUtil.countQueryAgent("select count(*) from WRITE_SET where ws_partition='p=one' and ws_operation_type='u'"));
-    Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString("select * from WRITE_SET"),
-      1, TxnDbUtil.countQueryAgent("select count(*) from WRITE_SET where ws_partition='p=two' and ws_operation_type='u'"));
+    Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString(conf, "select * from WRITE_SET"),
+      1, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_partition='p=one' and ws_operation_type='u'"));
+    Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString(conf, "select * from WRITE_SET"),
+      1, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_partition='p=two' and ws_operation_type='u'"));
     //2 from txnid:1, 1 from txnid:2, 1 from txnid:3
-    Assert.assertEquals("COMPLETED_TXN_COMPONENTS mismatch: " + TxnDbUtil.queryToString("select * from COMPLETED_TXN_COMPONENTS"),
-      4, TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_TXN_COMPONENTS where ctc_table='tab2' and ctc_partition is not null"));
+    Assert.assertEquals("COMPLETED_TXN_COMPONENTS mismatch: " + TxnDbUtil.queryToString(conf, "select * from COMPLETED_TXN_COMPONENTS"),
+      4, TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where ctc_table='tab2' and ctc_partition is not null"));
 
     //================
     //test with predicates such that partition pruning doesn't kick in
@@ -1204,13 +1204,13 @@ public void testWriteSetTracking7() throws Exception {
     txnHandler.addDynamicPartitions(adp);
     txnMgr.commitTxn();//txnid:idTxnUpdate4
-    Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString("select * from WRITE_SET"),
-      1, TxnDbUtil.countQueryAgent("select count(*) from WRITE_SET where ws_partition='p=one' and ws_operation_type='u' and ws_table='tab1'"));
-    Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString("select * from WRITE_SET"),
-      1, TxnDbUtil.countQueryAgent("select count(*) from WRITE_SET where ws_partition='p=two' and ws_operation_type='u' and ws_table='tab1'"));
+    Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString(conf, "select * from WRITE_SET"),
+      1, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_partition='p=one' and ws_operation_type='u' and ws_table='tab1'"));
+    Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString(conf, "select * from WRITE_SET"),
+      1, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_partition='p=two' and ws_operation_type='u' and ws_table='tab1'"));
     //2 from insert + 1 for each update stmt
-    Assert.assertEquals("COMPLETED_TXN_COMPONENTS mismatch: " + TxnDbUtil.queryToString("select * from COMPLETED_TXN_COMPONENTS"),
-      4, TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_TXN_COMPONENTS where ctc_table='tab1' and ctc_partition is not null"));
+    Assert.assertEquals("COMPLETED_TXN_COMPONENTS mismatch: " + TxnDbUtil.queryToString(conf, "select * from COMPLETED_TXN_COMPONENTS"),
+      4, TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where ctc_table='tab1' and ctc_partition is not null"));
   }
   /**
    * Concurrent updates with partition pruning predicate and w/o one
   */
@@ -1261,12 +1261,12 @@ public void testWriteSetTracking8() throws Exception {
     txnHandler.addDynamicPartitions(adp);
     txnMgr.commitTxn();//txnid:idTxnUpdate2
-    Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString("select * from WRITE_SET"),
-      1, TxnDbUtil.countQueryAgent("select count(*) from WRITE_SET where ws_partition='p=one' and ws_operation_type='u' and ws_table='tab1'"));
-    Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString("select * from WRITE_SET"),
-      1, TxnDbUtil.countQueryAgent("select count(*) from WRITE_SET where ws_partition='p=two' and ws_operation_type='u' and ws_table='tab1'"));
-    Assert.assertEquals("COMPLETED_TXN_COMPONENTS mismatch: " + TxnDbUtil.queryToString("select * from COMPLETED_TXN_COMPONENTS"),
-      4, TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_TXN_COMPONENTS where ctc_table='tab1' and ctc_partition is not null"));
+    Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString(conf, "select * from WRITE_SET"),
+      1, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_partition='p=one' and ws_operation_type='u' and ws_table='tab1'"));
+    Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString(conf, "select * from WRITE_SET"),
+      1, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_partition='p=two' and ws_operation_type='u' and ws_table='tab1'"));
+    Assert.assertEquals("COMPLETED_TXN_COMPONENTS mismatch: " + TxnDbUtil.queryToString(conf, "select * from COMPLETED_TXN_COMPONENTS"),
+      4, TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where ctc_table='tab1' and ctc_partition is not null"));
   }
   /**
    * Concurrent update/delete of different partitions - should pass
   */
@@ -1317,18 +1317,18 @@ public void testWriteSetTracking9() throws Exception {
     txnHandler.addDynamicPartitions(adp);
     txnMgr.commitTxn();//txnid:idTxnUpdate2
-    Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString("select * from COMPLETED_TXN_COMPONENTS"),
-      2, TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_TXN_COMPONENTS where ctc_txnid=" + (idTxnUpdate1 - 1) + " and ctc_table='tab1'"));
-    Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString("select * from COMPLETED_TXN_COMPONENTS"),
-      1, TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_TXN_COMPONENTS where ctc_txnid=" + idTxnUpdate1 + " and ctc_table='tab1' and ctc_partition='p=one'"));
-    Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString("select * from COMPLETED_TXN_COMPONENTS"),
-      1, TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_TXN_COMPONENTS where ctc_txnid=" + idTxnDelete1 + " and ctc_table='tab1' and ctc_partition='p=two'"));
-    Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString("select * from WRITE_SET"),
-      1, TxnDbUtil.countQueryAgent("select count(*) from WRITE_SET where ws_partition='p=one' and ws_operation_type='u' and ws_table='tab1'"));
-    Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString("select * from WRITE_SET"),
-      1, TxnDbUtil.countQueryAgent("select count(*) from WRITE_SET where ws_partition='p=two' and ws_operation_type='d' and ws_table='tab1'"));
-    Assert.assertEquals("COMPLETED_TXN_COMPONENTS mismatch: " + TxnDbUtil.queryToString("select * from COMPLETED_TXN_COMPONENTS"),
-      4, TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_TXN_COMPONENTS where ctc_table='tab1' and ctc_partition is not null"));
+    Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString(conf, "select * from COMPLETED_TXN_COMPONENTS"),
+      2, TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where ctc_txnid=" + (idTxnUpdate1 - 1) + " and ctc_table='tab1'"));
+    Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString(conf, "select * from COMPLETED_TXN_COMPONENTS"),
+      1, TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where ctc_txnid=" + idTxnUpdate1 + " and ctc_table='tab1' and ctc_partition='p=one'"));
+    Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString(conf, "select * from COMPLETED_TXN_COMPONENTS"),
+      1, TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where ctc_txnid=" + idTxnDelete1 + " and ctc_table='tab1' and ctc_partition='p=two'"));
+    Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString(conf, "select * from WRITE_SET"),
+      1, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_partition='p=one' and ws_operation_type='u' and ws_table='tab1'"));
+    Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString(conf, "select * from WRITE_SET"),
+      1, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_partition='p=two' and ws_operation_type='d' and ws_table='tab1'"));
+    Assert.assertEquals("COMPLETED_TXN_COMPONENTS mismatch: " + TxnDbUtil.queryToString(conf, "select * from COMPLETED_TXN_COMPONENTS"),
+      4, TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where ctc_table='tab1' and ctc_partition is not null"));
   }
   /**
    * Concurrent update/delete of same partition - should fail to commit
   */
@@ -1387,10 +1387,10 @@ public void testWriteSetTracking10() throws Exception {
       "Aborting [txnid:5,5] due to a write conflict on default/tab1/p=two committed by [txnid:4,5] d/u",
       exception.getCause().getMessage());
-    Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString("select * from WRITE_SET"),
-      1, TxnDbUtil.countQueryAgent("select count(*) from WRITE_SET where ws_partition='p=two' and ws_operation_type='u' and ws_table='tab1'"));
-    Assert.assertEquals("COMPLETED_TXN_COMPONENTS mismatch: " + TxnDbUtil.queryToString("select * from COMPLETED_TXN_COMPONENTS"),
-      3, TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_TXN_COMPONENTS where ctc_table='tab1' and ctc_partition is not null"));
+    Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString(conf, "select * from WRITE_SET"),
+      1, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_partition='p=two' and ws_operation_type='u' and ws_table='tab1'"));
+    Assert.assertEquals("COMPLETED_TXN_COMPONENTS mismatch: " + TxnDbUtil.queryToString(conf, "select * from COMPLETED_TXN_COMPONENTS"),
+      3, TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where ctc_table='tab1' and ctc_partition is not null"));
   }
   /**
    * Concurrent delte/detele of same partition - should pass
   */
@@ -1448,16 +1448,16 @@ public void testWriteSetTracking11() throws Exception {
     txnHandler.addDynamicPartitions(adp);
     txnMgr.commitTxn();//"select * from tab1" txn
-    Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString("select * from WRITE_SET"),
-      1, TxnDbUtil.countQueryAgent("select count(*) from WRITE_SET where ws_partition='p=two' and ws_operation_type='d' and ws_table='tab1' and ws_txnid=" + txnIdDelete));
-    Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString("select * from WRITE_SET"),
-      1, TxnDbUtil.countQueryAgent("select count(*) from WRITE_SET where ws_partition='p=two' and ws_operation_type='d' and ws_table='tab1' and ws_txnid=" + txnIdSelect));
-    Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString("select * from WRITE_SET"),
-      1, TxnDbUtil.countQueryAgent("select count(*) from WRITE_SET where ws_partition='p=two' and ws_operation_type='d' and ws_table='tab1' and ws_txnid=" + txnIdDelete));
-    Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString("select * from WRITE_SET"),
-      1, TxnDbUtil.countQueryAgent("select count(*) from WRITE_SET where ws_partition='p=two' and ws_operation_type='d' and ws_table='tab1' and ws_txnid=" + txnIdSelect));
-    Assert.assertEquals("COMPLETED_TXN_COMPONENTS mismatch: " + TxnDbUtil.queryToString("select * from COMPLETED_TXN_COMPONENTS"),
-      4, TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_TXN_COMPONENTS where ctc_table='tab1' and ctc_partition is not null"));
+    Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString(conf, "select * from WRITE_SET"),
+      1, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_partition='p=two' and ws_operation_type='d' and ws_table='tab1' and ws_txnid=" + txnIdDelete));
+    Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString(conf, "select * from WRITE_SET"),
+      1, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_partition='p=two' and ws_operation_type='d' and ws_table='tab1' and ws_txnid=" + txnIdSelect));
+    Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString(conf, "select * from WRITE_SET"),
+      1, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_partition='p=two' and ws_operation_type='d' and ws_table='tab1' and ws_txnid=" + txnIdDelete));
+    Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString(conf, "select * from WRITE_SET"),
+      1, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_partition='p=two' and ws_operation_type='d' and ws_table='tab1' and ws_txnid=" + txnIdSelect));
+    Assert.assertEquals("COMPLETED_TXN_COMPONENTS mismatch: " + TxnDbUtil.queryToString(conf, "select * from COMPLETED_TXN_COMPONENTS"),
+      4, TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where ctc_table='tab1' and ctc_partition is not null"));
   }
   @Test
   public void testCompletedTxnComponents() throws Exception {
@@ -1470,11 +1470,11 @@ public void testCompletedTxnComponents() throws Exception {
     checkCmdOnDriver(driver.run("insert into tab_not_acid2 values(1,1),(2,2)"));
     //writing both acid and non-acid resources in the same txn
     checkCmdOnDriver(driver.run("from tab_not_acid2 insert into tab1 partition(p='two')(a,b) select a,b insert into tab_not_acid2(a,b) select a,b "));//txnid:1
-    Assert.assertEquals(TxnDbUtil.queryToString("select * from COMPLETED_TXN_COMPONENTS"),
-      1, TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_TXN_COMPONENTS"));
+    Assert.assertEquals(TxnDbUtil.queryToString(conf, "select * from COMPLETED_TXN_COMPONENTS"),
+      1, TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS"));
     //only expect transactional components to be in COMPLETED_TXN_COMPONENTS
-    Assert.assertEquals(TxnDbUtil.queryToString("select * from COMPLETED_TXN_COMPONENTS"),
-      1, TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_TXN_COMPONENTS where ctc_txnid=6 and ctc_table='tab1'"));
+    Assert.assertEquals(TxnDbUtil.queryToString(conf, "select * from COMPLETED_TXN_COMPONENTS"),
+      1, TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where ctc_txnid=6 and ctc_table='tab1'"));
   }
 
  /**
@@ -1495,13 +1495,13 @@ public void testMultiInsert() throws Exception {
     //writing both acid and non-acid resources in the same txn
     //tab1 write is a dynamic partition insert
     checkCmdOnDriver(driver.run("from tab_not_acid insert into tab1 partition(p)(a,b,p) select a,b,p insert into tab_not_acid(a,b) select a,b where p='two'"));//txnid:9
-    Assert.assertEquals(TxnDbUtil.queryToString("select * from COMPLETED_TXN_COMPONENTS"),
-      4, TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_TXN_COMPONENTS"));
+    Assert.assertEquals(TxnDbUtil.queryToString(conf, "select * from COMPLETED_TXN_COMPONENTS"),
+      4, TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS"));
     //only expect transactional components to be in COMPLETED_TXN_COMPONENTS
-    Assert.assertEquals(TxnDbUtil.queryToString("select * from COMPLETED_TXN_COMPONENTS"),
-      2, TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_TXN_COMPONENTS where ctc_txnid=9"));
-    Assert.assertEquals(TxnDbUtil.queryToString("select * from COMPLETED_TXN_COMPONENTS"),
-      2, TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_TXN_COMPONENTS where ctc_txnid=9 and ctc_table='tab1'"));
+    Assert.assertEquals(TxnDbUtil.queryToString(conf, "select * from COMPLETED_TXN_COMPONENTS"),
+      2, TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where ctc_txnid=9"));
+    Assert.assertEquals(TxnDbUtil.queryToString(conf, "select * from COMPLETED_TXN_COMPONENTS"),
+      2, TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where ctc_txnid=9 and ctc_table='tab1'"));
   }
   //todo: Concurrent insert/update of same partition - should pass
@@ -1611,9 +1611,9 @@ private void testMerge3Way(boolean cc) throws Exception {
     Assert.assertEquals(
       "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnId1) + "): " +
-        TxnDbUtil.queryToString("select * from TXN_COMPONENTS"),
+        TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"),
       0,
-      TxnDbUtil.countQueryAgent("select count(*) from TXN_COMPONENTS where tc_txnid=" + txnId1));
+      TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnId1));
     //complete 1st txn
     AddDynamicPartitions adp = new AddDynamicPartitions(txnId1, "default", "target",
       Collections.singletonList("p=1/q=3"));//update clause
@@ -1629,39 +1629,39 @@ private void testMerge3Way(boolean cc) throws Exception {
     txnHandler.addDynamicPartitions(adp);
     Assert.assertEquals(
       "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnId1) + "): " +
-        TxnDbUtil.queryToString("select * from TXN_COMPONENTS"),
+        TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"),
       1,
-      TxnDbUtil.countQueryAgent("select count(*) from TXN_COMPONENTS where tc_txnid=" + txnId1 +
+      TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnId1 +
        " and tc_operation_type='u'"));
     Assert.assertEquals(
       "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnId1) + "): " +
-        TxnDbUtil.queryToString("select * from TXN_COMPONENTS"),
+        TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"),
       2,
-      TxnDbUtil.countQueryAgent("select count(*) from TXN_COMPONENTS where tc_txnid=" + txnId1 +
+      TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnId1 +
        " and tc_operation_type='d'"));
     Assert.assertEquals(
       "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnId1) + "): " +
-        TxnDbUtil.queryToString("select * from TXN_COMPONENTS"),
+        TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"),
       3,
-      TxnDbUtil.countQueryAgent("select count(*) from TXN_COMPONENTS where tc_txnid=" +
+      TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" +
partition insert checkCmdOnDriver(driver.run("from tab_not_acid insert into tab1 partition(p)(a,b,p) select a,b,p insert into tab_not_acid(a,b) select a,b where p='two'"));//txnid:9 - Assert.assertEquals(TxnDbUtil.queryToString("select * from COMPLETED_TXN_COMPONENTS"), - 4, TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_TXN_COMPONENTS")); + Assert.assertEquals(TxnDbUtil.queryToString(conf, "select * from COMPLETED_TXN_COMPONENTS"), + 4, TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS")); //only expect transactional components to be in COMPLETED_TXN_COMPONENTS - Assert.assertEquals(TxnDbUtil.queryToString("select * from COMPLETED_TXN_COMPONENTS"), - 2, TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_TXN_COMPONENTS where ctc_txnid=9")); - Assert.assertEquals(TxnDbUtil.queryToString("select * from COMPLETED_TXN_COMPONENTS"), - 2, TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_TXN_COMPONENTS where ctc_txnid=9 and ctc_table='tab1'")); + Assert.assertEquals(TxnDbUtil.queryToString(conf, "select * from COMPLETED_TXN_COMPONENTS"), + 2, TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where ctc_txnid=9")); + Assert.assertEquals(TxnDbUtil.queryToString(conf, "select * from COMPLETED_TXN_COMPONENTS"), + 2, TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where ctc_txnid=9 and ctc_table='tab1'")); } //todo: Concurrent insert/update of same partition - should pass @@ -1611,9 +1611,9 @@ private void testMerge3Way(boolean cc) throws Exception { Assert.assertEquals( "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnId1) + "): " + - TxnDbUtil.queryToString("select * from TXN_COMPONENTS"), + TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), 0, - TxnDbUtil.countQueryAgent("select count(*) from TXN_COMPONENTS where tc_txnid=" + txnId1)); + TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnId1)); //complete 1st txn AddDynamicPartitions adp = new AddDynamicPartitions(txnId1, "default", "target", Collections.singletonList("p=1/q=3"));//update clause @@ -1629,39 +1629,39 @@ private void testMerge3Way(boolean cc) throws Exception { txnHandler.addDynamicPartitions(adp); Assert.assertEquals( "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnId1) + "): " + - TxnDbUtil.queryToString("select * from TXN_COMPONENTS"), + TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), 1, - TxnDbUtil.countQueryAgent("select count(*) from TXN_COMPONENTS where tc_txnid=" + txnId1 + + TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnId1 + " and tc_operation_type='u'")); Assert.assertEquals( "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnId1) + "): " + - TxnDbUtil.queryToString("select * from TXN_COMPONENTS"), + TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), 2, - TxnDbUtil.countQueryAgent("select count(*) from TXN_COMPONENTS where tc_txnid=" + txnId1 + + TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnId1 + " and tc_operation_type='d'")); Assert.assertEquals( "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnId1) + "): " + - TxnDbUtil.queryToString("select * from TXN_COMPONENTS"), + TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), 3, - TxnDbUtil.countQueryAgent("select count(*) from TXN_COMPONENTS where tc_txnid=" + txnId1 + + TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + 
txnId1 + " and tc_operation_type='i'")); txnMgr.commitTxn();//commit T1 Assert.assertEquals( "COMPLETED_TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnId1) + "): " + - TxnDbUtil.queryToString("select * from COMPLETED_TXN_COMPONENTS"), + TxnDbUtil.queryToString(conf, "select * from COMPLETED_TXN_COMPONENTS"), 6, - TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_TXN_COMPONENTS where ctc_txnid=" + txnId1)); + TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where ctc_txnid=" + txnId1)); Assert.assertEquals( "WRITE_SET mismatch(" + JavaUtils.txnIdToString(txnId1) + "): " + - TxnDbUtil.queryToString("select * from WRITE_SET"), + TxnDbUtil.queryToString(conf, "select * from WRITE_SET"), 1, - TxnDbUtil.countQueryAgent("select count(*) from WRITE_SET where ws_txnid=" + txnId1 + + TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_txnid=" + txnId1 + " and ws_operation_type='u'")); Assert.assertEquals( "WRITE_SET mismatch(" + JavaUtils.txnIdToString(txnId1) + "): " + - TxnDbUtil.queryToString("select * from WRITE_SET"), + TxnDbUtil.queryToString(conf, "select * from WRITE_SET"), 2, - TxnDbUtil.countQueryAgent("select count(*) from WRITE_SET where ws_txnid=" + txnId1 + + TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_txnid=" + txnId1 + " and ws_operation_type='d'")); //re-check locks which were in Waiting state - should now be Acquired @@ -1676,9 +1676,9 @@ private void testMerge3Way(boolean cc) throws Exception { Assert.assertEquals( "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnId2) + "): " + - TxnDbUtil.queryToString("select * from TXN_COMPONENTS"), + TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), 0, - TxnDbUtil.countQueryAgent("select count(*) from TXN_COMPONENTS where tc_txnid=" + txnId2)); + TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnId2)); //complete 2nd txn adp = new AddDynamicPartitions(txnId2, "default", "target", Collections.singletonList(cc ? 
"p=1/q=3" : "p=1/p=2"));//update clause @@ -1694,21 +1694,21 @@ private void testMerge3Way(boolean cc) throws Exception { txnHandler.addDynamicPartitions(adp); Assert.assertEquals( "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnId2) + "): " + - TxnDbUtil.queryToString("select * from TXN_COMPONENTS"), + TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), 1, - TxnDbUtil.countQueryAgent("select count(*) from TXN_COMPONENTS where tc_txnid=" + txnId2 + + TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnId2 + " and tc_operation_type='u'")); Assert.assertEquals( "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnId2) + "): " + - TxnDbUtil.queryToString("select * from TXN_COMPONENTS"), + TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), 2, - TxnDbUtil.countQueryAgent("select count(*) from TXN_COMPONENTS where tc_txnid=" + txnId2 + + TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnId2 + " and tc_operation_type='d'")); Assert.assertEquals( "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnId2) + "): " + - TxnDbUtil.queryToString("select * from TXN_COMPONENTS"), + TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), 3, - TxnDbUtil.countQueryAgent("select count(*) from TXN_COMPONENTS where tc_txnid=" + txnId2 + + TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnId2 + " and tc_operation_type='i'")); LockException expectedException = null; @@ -1725,33 +1725,33 @@ private void testMerge3Way(boolean cc) throws Exception { "committed by [txnid:10,11] u/u", expectedException.getMessage()); Assert.assertEquals( "COMPLETED_TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnId2) + "): " + - TxnDbUtil.queryToString("select * from COMPLETED_TXN_COMPONENTS"), + TxnDbUtil.queryToString(conf, "select * from COMPLETED_TXN_COMPONENTS"), 0, - TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_TXN_COMPONENTS where ctc_txnid=" + txnId2)); + TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where ctc_txnid=" + txnId2)); Assert.assertEquals( "WRITE_SET mismatch(" + JavaUtils.txnIdToString(txnId2) + "): " + - TxnDbUtil.queryToString("select * from WRITE_SET"), + TxnDbUtil.queryToString(conf, "select * from WRITE_SET"), 0, - TxnDbUtil.countQueryAgent("select count(*) from WRITE_SET where ws_txnid=" + txnId2)); + TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_txnid=" + txnId2)); } else { Assert.assertNull("Unexpected exception " + expectedException, expectedException); Assert.assertEquals( "COMPLETED_TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnId2) + "): " + - TxnDbUtil.queryToString("select * from COMPLETED_TXN_COMPONENTS"), + TxnDbUtil.queryToString(conf, "select * from COMPLETED_TXN_COMPONENTS"), 6, - TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_TXN_COMPONENTS where ctc_txnid=" + txnId2)); + TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where ctc_txnid=" + txnId2)); Assert.assertEquals( "WRITE_SET mismatch(" + JavaUtils.txnIdToString(txnId2) + "): " + - TxnDbUtil.queryToString("select * from WRITE_SET"), + TxnDbUtil.queryToString(conf, "select * from WRITE_SET"), 1, - TxnDbUtil.countQueryAgent("select count(*) from WRITE_SET where ws_txnid=" + txnId2 + + TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_txnid=" + txnId2 + " and ws_operation_type='u'")); Assert.assertEquals( "WRITE_SET mismatch(" + 
JavaUtils.txnIdToString(txnId2) + "): " + - TxnDbUtil.queryToString("select * from WRITE_SET"), + TxnDbUtil.queryToString(conf, "select * from WRITE_SET"), 2, - TxnDbUtil.countQueryAgent("select count(*) from WRITE_SET where ws_txnid=" + txnId2 + + TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_txnid=" + txnId2 + " and ws_operation_type='d'")); } @@ -1790,9 +1790,9 @@ private void testMergeUnpartitioned(boolean causeConflict) throws Exception { txnMgr.acquireLocks(driver.getPlan(), ctx, "T1"); Assert.assertEquals( "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnid1) + "): " + - TxnDbUtil.queryToString("select * from TXN_COMPONENTS"), + TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), 1,//no DP, so it's populated from lock info - TxnDbUtil.countQueryAgent("select count(*) from TXN_COMPONENTS where tc_txnid=" + txnid1)); + TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnid1)); List locks = getLocks(txnMgr); Assert.assertEquals("Unexpected lock count", 1, locks.size()); @@ -1820,9 +1820,9 @@ private void testMergeUnpartitioned(boolean causeConflict) throws Exception { txnMgr.commitTxn();//commit T1 Assert.assertEquals("WRITE_SET mismatch(" + JavaUtils.txnIdToString(txnid1) + "): " + - TxnDbUtil.queryToString("select * from WRITE_SET"), + TxnDbUtil.queryToString(conf, "select * from WRITE_SET"), causeConflict ? 1 : 0,//Inserts are not tracked by WRITE_SET - TxnDbUtil.countQueryAgent("select count(*) from WRITE_SET where ws_txnid=" + txnid1 + + TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_txnid=" + txnid1 + " and ws_operation_type=" + (causeConflict ? "'u'" : "'i'"))); @@ -1835,14 +1835,14 @@ private void testMergeUnpartitioned(boolean causeConflict) throws Exception { Assert.assertEquals( "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnid2) + "): " + - TxnDbUtil.queryToString("select * from TXN_COMPONENTS"), + TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), 1,// - TxnDbUtil.countQueryAgent("select count(*) from TXN_COMPONENTS where tc_txnid=" + txnid2)); + TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnid2)); Assert.assertEquals( "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnid2) + "): " + - TxnDbUtil.queryToString("select * from TXN_COMPONENTS"), + TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), 1,// - TxnDbUtil.countQueryAgent("select count(*) from TXN_COMPONENTS where tc_txnid=" + txnid2 + + TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnid2 + "and tc_operation_type='d'")); //complete T2 txn @@ -1862,9 +1862,9 @@ private void testMergeUnpartitioned(boolean causeConflict) throws Exception { } else { Assert.assertEquals("WRITE_SET mismatch(" + JavaUtils.txnIdToString(txnid1) + "): " + - TxnDbUtil.queryToString("select * from WRITE_SET"), + TxnDbUtil.queryToString(conf, "select * from WRITE_SET"), 1,//Unpartitioned table: 1 row for Delete; Inserts are not tracked in WRITE_SET - TxnDbUtil.countQueryAgent("select count(*) from WRITE_SET where ws_txnid=" + txnid2 + + TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_txnid=" + txnid2 + " and ws_operation_type='d'")); } } @@ -1887,15 +1887,15 @@ public void testDynamicPartitionInsert() throws Exception { checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "target", null, locks); Assert.assertEquals( "HIVE_LOCKS mismatch(" + JavaUtils.txnIdToString(txnid1) + "): " + 
- TxnDbUtil.queryToString("select * from HIVE_LOCKS"), + TxnDbUtil.queryToString(conf, "select * from HIVE_LOCKS"), 1, - TxnDbUtil.countQueryAgent("select count(*) from HIVE_LOCKS where hl_txnid=" + txnid1)); + TxnDbUtil.countQueryAgent(conf, "select count(*) from HIVE_LOCKS where hl_txnid=" + txnid1)); txnMgr.rollbackTxn(); Assert.assertEquals( "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnid1) + "): " + - TxnDbUtil.queryToString("select * from TXN_COMPONENTS"), + TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), 0, - TxnDbUtil.countQueryAgent("select count(*) from TXN_COMPONENTS where tc_txnid=" + txnid1)); + TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnid1)); //now actually write to table to generate some partitions checkCmdOnDriver(driver.run("insert into target partition(p=1,q) values (1,2,2), (3,4,2), (5,6,3), (7,8,2)")); driver.run("select count(*) from target"); @@ -1904,10 +1904,10 @@ public void testDynamicPartitionInsert() throws Exception { Assert.assertEquals("", "4", r.get(0)); Assert.assertEquals(//look in COMPLETED_TXN_COMPONENTS because driver.run() committed!!!! "COMPLETED_TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnid1 + 1) + "): " + - TxnDbUtil.queryToString("select * from COMPLETED_TXN_COMPONENTS"), + TxnDbUtil.queryToString(conf, "select * from COMPLETED_TXN_COMPONENTS"), 2,//2 distinct partitions created //txnid+1 because we want txn used by previous driver.run("insert....) - TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_TXN_COMPONENTS where ctc_txnid=" + (txnid1 + 1))); + TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where ctc_txnid=" + (txnid1 + 1))); long txnid2 = txnMgr.openTxn(ctx, "T1"); @@ -1922,9 +1922,9 @@ public void testDynamicPartitionInsert() throws Exception { txnHandler.addDynamicPartitions(adp); Assert.assertEquals( "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnid2) + "): " + - TxnDbUtil.queryToString("select * from TXN_COMPONENTS"), + TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), 2,//2 distinct partitions modified - TxnDbUtil.countQueryAgent("select count(*) from TXN_COMPONENTS where tc_txnid=" + txnid2)); + TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnid2)); txnMgr.commitTxn(); } @Test @@ -1986,9 +1986,9 @@ private void testMergePartitioned(boolean causeConflict) throws Exception { Assert.assertEquals( "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnId1) + "): " + - TxnDbUtil.queryToString("select * from TXN_COMPONENTS"), + TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), 0,//because it's using a DP write - TxnDbUtil.countQueryAgent("select count(*) from TXN_COMPONENTS where tc_txnid=" + txnId1)); + TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnId1)); //complete T1 transaction (simulate writing to 2 partitions) AddDynamicPartitions adp = new AddDynamicPartitions(txnId1, "default", "target", Arrays.asList("p=1/q=2","p=1/q=3")); @@ -1996,15 +1996,15 @@ private void testMergePartitioned(boolean causeConflict) throws Exception { txnHandler.addDynamicPartitions(adp); Assert.assertEquals( "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnId1) + "): " + - TxnDbUtil.queryToString("select * from TXN_COMPONENTS"), + TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), 2, - TxnDbUtil.countQueryAgent("select count(*) from TXN_COMPONENTS where tc_txnid=" + txnId1 + + 
TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnId1 + " and tc_operation_type='u'")); txnMgr.commitTxn();//commit T1 Assert.assertEquals("WRITE_SET mismatch(" + JavaUtils.txnIdToString(txnId1) + "): " + - TxnDbUtil.queryToString("select * from WRITE_SET"), + TxnDbUtil.queryToString(conf, "select * from WRITE_SET"), 2,//2 partitions updated - TxnDbUtil.countQueryAgent("select count(*) from WRITE_SET where ws_txnid=" + txnId1 + + TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_txnid=" + txnId1 + " and ws_operation_type='u'")); @@ -2021,9 +2021,9 @@ private void testMergePartitioned(boolean causeConflict) throws Exception { Assert.assertEquals( "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnid2) + "): " + - TxnDbUtil.queryToString("select * from TXN_COMPONENTS"), + TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), 0,//because it's using a DP write - TxnDbUtil.countQueryAgent("select count(*) from TXN_COMPONENTS where tc_txnid=" + txnid2)); + TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnid2)); //complete T2 txn //simulate Insert into 2 partitions adp = new AddDynamicPartitions(txnid2, "default", "target", @@ -2032,9 +2032,9 @@ private void testMergePartitioned(boolean causeConflict) throws Exception { txnHandler.addDynamicPartitions(adp); Assert.assertEquals( "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnid2) + "): " + - TxnDbUtil.queryToString("select * from TXN_COMPONENTS"), + TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), 2, - TxnDbUtil.countQueryAgent("select count(*) from TXN_COMPONENTS where tc_txnid=" + txnid2 + " and tc_operation_type='i'")); + TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnid2 + " and tc_operation_type='i'")); //simulate Update of 1 partitions; depending on causeConflict, choose one of the partitions //which was modified by the T1 update stmt or choose a non-conflicting one adp = new AddDynamicPartitions(txnid2, "default", "target", @@ -2043,9 +2043,9 @@ private void testMergePartitioned(boolean causeConflict) throws Exception { txnHandler.addDynamicPartitions(adp); Assert.assertEquals( "TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnid2) + "): " + - TxnDbUtil.queryToString("select * from TXN_COMPONENTS"), + TxnDbUtil.queryToString(conf, "select * from TXN_COMPONENTS"), 1, - TxnDbUtil.countQueryAgent("select count(*) from TXN_COMPONENTS where tc_txnid=" + txnid2 + " and tc_operation_type='u'")); + TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnid2 + " and tc_operation_type='u'")); LockException expectedException = null; @@ -2064,14 +2064,14 @@ private void testMergePartitioned(boolean causeConflict) throws Exception { } else { Assert.assertEquals("WRITE_SET mismatch(" + JavaUtils.txnIdToString(txnid2) + "): " + - TxnDbUtil.queryToString("select * from WRITE_SET"), + TxnDbUtil.queryToString(conf, "select * from WRITE_SET"), 1,//1 partitions updated - TxnDbUtil.countQueryAgent("select count(*) from WRITE_SET where ws_txnid=" + txnid2 + + TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_txnid=" + txnid2 + " and ws_operation_type='u'")); Assert.assertEquals("WRITE_SET mismatch(" + JavaUtils.txnIdToString(txnid2) + "): " + - TxnDbUtil.queryToString("select * from WRITE_SET"), + TxnDbUtil.queryToString(conf, "select * from WRITE_SET"), 1,//1 partitions updated (and no other entries) - 
TxnDbUtil.countQueryAgent("select count(*) from WRITE_SET where ws_txnid=" + txnid2)); + TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_txnid=" + txnid2)); } } diff --git ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java index af8a63d74e..27840098c1 100644 --- ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java +++ ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java @@ -95,7 +95,7 @@ protected CompactorTest() throws Exception { conf = new HiveConf(); TxnDbUtil.setConfValues(conf); - TxnDbUtil.cleanDb(); + TxnDbUtil.cleanDb(conf); ms = new HiveMetaStoreClient(conf); txnHandler = TxnUtils.getTxnStore(conf); tmpdir = new File (Files.createTempDirectory("compactor_test_table_").toString()); diff --git standalone-metastore/pom.xml standalone-metastore/pom.xml index d91b22de50..acc50ca430 100644 --- standalone-metastore/pom.xml +++ standalone-metastore/pom.xml @@ -33,6 +33,11 @@ + com.jolbox + bonecp + ${bonecp.version} + + com.github.joshelser dropwizard-metrics-hadoop-metrics2-reporter ${dropwizard-metrics-hadoop-metrics2-reporter.version} @@ -48,6 +53,16 @@ ${protobuf.version} + com.zaxxer + HikariCP + ${hikaricp.version} + + + commons-dbcp + commons-dbcp + ${commons-dbcp.version} + + io.dropwizard.metrics metrics-core ${dropwizard.version} @@ -68,6 +83,11 @@ ${commons-lang3.version} + org.apache.derby + derby + ${derby.version} + + org.apache.hadoop hadoop-common ${hadoop.version} diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RunnableConfigurable.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RunnableConfigurable.java new file mode 100644 index 0000000000..9fa5cabb0b --- /dev/null +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RunnableConfigurable.java @@ -0,0 +1,26 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore; + +import org.apache.hadoop.conf.Configurable; + +/** + * Combination of Runnable and Configurable + */ +public interface RunnableConfigurable extends Configurable, Runnable { +} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/datasource/BoneCPDataSourceProvider.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/datasource/BoneCPDataSourceProvider.java similarity index 93% rename from metastore/src/java/org/apache/hadoop/hive/metastore/datasource/BoneCPDataSourceProvider.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/datasource/BoneCPDataSourceProvider.java index 34765b0b2f..6a2f7704d3 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/datasource/BoneCPDataSourceProvider.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/datasource/BoneCPDataSourceProvider.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -47,9 +47,8 @@ public DataSource create(Configuration hdpConfig) throws SQLException { String driverUrl = DataSourceProvider.getMetastoreJdbcDriverUrl(hdpConfig); String user = DataSourceProvider.getMetastoreJdbcUser(hdpConfig); String passwd = DataSourceProvider.getMetastoreJdbcPasswd(hdpConfig); - int maxPoolSize = hdpConfig.getInt( - MetastoreConf.ConfVars.CONNECTION_POOLING_MAX_CONNECTIONS.varname, - ((Long)MetastoreConf.ConfVars.CONNECTION_POOLING_MAX_CONNECTIONS.defaultVal).intValue()); + int maxPoolSize = MetastoreConf.getIntVar(hdpConfig, + MetastoreConf.ConfVars.CONNECTION_POOLING_MAX_CONNECTIONS); Properties properties = DataSourceProvider.getPrefixedProperties(hdpConfig, BONECP); long connectionTimeout = hdpConfig.getLong(CONNECTION_TIMEOUT_PROPERTY, 30000L); @@ -82,8 +81,8 @@ public boolean mayReturnClosedConnection() { @Override public boolean supports(Configuration configuration) { String poolingType = - configuration.get( - MetastoreConf.ConfVars.CONNECTION_POOLING_TYPE.varname).toLowerCase(); + MetastoreConf.getVar(configuration, + MetastoreConf.ConfVars.CONNECTION_POOLING_TYPE).toLowerCase(); if (BONECP.equals(poolingType)) { int boneCpPropsNr = DataSourceProvider.getPrefixedProperties(configuration, BONECP).size(); LOG.debug("Found " + boneCpPropsNr + " nr. 
of bonecp specific configurations"); diff --git metastore/src/java/org/apache/hadoop/hive/metastore/datasource/DataSourceProvider.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/datasource/DataSourceProvider.java similarity index 94% rename from metastore/src/java/org/apache/hadoop/hive/metastore/datasource/DataSourceProvider.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/datasource/DataSourceProvider.java index ad1763e121..17ff8d1bd3 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/datasource/DataSourceProvider.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/datasource/DataSourceProvider.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -61,7 +61,7 @@ static Properties getPrefixedProperties(Configuration hdpConfig, String factoryP } static String getMetastoreJdbcUser(Configuration conf) { - return conf.get(MetastoreConf.ConfVars.CONNECTION_USER_NAME.varname); + return MetastoreConf.getVar(conf, MetastoreConf.ConfVars.CONNECTION_USER_NAME); } static String getMetastoreJdbcPasswd(Configuration conf) throws SQLException { @@ -73,7 +73,7 @@ static String getMetastoreJdbcPasswd(Configuration conf) throws SQLException { } static String getMetastoreJdbcDriverUrl(Configuration conf) throws SQLException { - return conf.get(MetastoreConf.ConfVars.CONNECTURLKEY.varname); + return MetastoreConf.getVar(conf, MetastoreConf.ConfVars.CONNECTURLKEY); } } diff --git metastore/src/java/org/apache/hadoop/hive/metastore/datasource/DataSourceProviderFactory.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/datasource/DataSourceProviderFactory.java similarity index 99% rename from metastore/src/java/org/apache/hadoop/hive/metastore/datasource/DataSourceProviderFactory.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/datasource/DataSourceProviderFactory.java index 1eb792ce45..e3c18e3358 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/datasource/DataSourceProviderFactory.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/datasource/DataSourceProviderFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git metastore/src/java/org/apache/hadoop/hive/metastore/datasource/HikariCPDataSourceProvider.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/datasource/HikariCPDataSourceProvider.java similarity index 93% rename from metastore/src/java/org/apache/hadoop/hive/metastore/datasource/HikariCPDataSourceProvider.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/datasource/HikariCPDataSourceProvider.java index 9b3d6d5d70..16baeb274c 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/datasource/HikariCPDataSourceProvider.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/datasource/HikariCPDataSourceProvider.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -44,11 +44,10 @@ public DataSource create(Configuration hdpConfig) throws SQLException { LOG.debug("Creating Hikari connection pool for the MetaStore"); String driverUrl = DataSourceProvider.getMetastoreJdbcDriverUrl(hdpConfig); String user = DataSourceProvider.getMetastoreJdbcUser(hdpConfig); String passwd = DataSourceProvider.getMetastoreJdbcPasswd(hdpConfig); - int maxPoolSize = hdpConfig.getInt( - MetastoreConf.ConfVars.CONNECTION_POOLING_MAX_CONNECTIONS.varname, - ((Long)MetastoreConf.ConfVars.CONNECTION_POOLING_MAX_CONNECTIONS.defaultVal).intValue()); + int maxPoolSize = MetastoreConf.getIntVar(hdpConfig, + MetastoreConf.ConfVars.CONNECTION_POOLING_MAX_CONNECTIONS); Properties properties = replacePrefix( DataSourceProvider.getPrefixedProperties(hdpConfig, HIKARI)); @@ -77,8 +77,8 @@ public boolean mayReturnClosedConnection() { @Override public boolean supports(Configuration configuration) { String poolingType = - configuration.get( - MetastoreConf.ConfVars.CONNECTION_POOLING_TYPE.varname).toLowerCase(); + MetastoreConf.getVar(configuration, + MetastoreConf.ConfVars.CONNECTION_POOLING_TYPE).toLowerCase(); if (HIKARI.equals(poolingType)) { int hikariPropsNr = DataSourceProvider.getPrefixedProperties(configuration, HIKARI).size(); LOG.debug("Found " + hikariPropsNr + " nr. of hikari specific configurations"); diff --git metastore/src/java/org/apache/hadoop/hive/metastore/datasource/package-info.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/datasource/package-info.java similarity index 99% rename from metastore/src/java/org/apache/hadoop/hive/metastore/datasource/package-info.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/datasource/package-info.java index 86d6a26e06..9a4f22a67a 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/datasource/package-info.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/datasource/package-info.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements.
See the NOTICE file * distributed with this work for additional information diff --git metastore/src/java/org/apache/hadoop/hive/metastore/tools/SQLGenerator.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/tools/SQLGenerator.java similarity index 92% rename from metastore/src/java/org/apache/hadoop/hive/metastore/tools/SQLGenerator.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/tools/SQLGenerator.java index 0c0bfefc3e..8268af9559 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/tools/SQLGenerator.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/tools/SQLGenerator.java @@ -18,9 +18,11 @@ Licensed to the Apache Software Foundation (ASF) under one package org.apache.hadoop.hive.metastore.tools; import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.metastore.DatabaseProduct; import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -36,9 +38,9 @@ Licensed to the Apache Software Foundation (ASF) under one public final class SQLGenerator { static final private Logger LOG = LoggerFactory.getLogger(SQLGenerator.class.getName()); private final DatabaseProduct dbProduct; - private final HiveConf conf; + private final Configuration conf; - public SQLGenerator(DatabaseProduct dbProduct, HiveConf conf) { + public SQLGenerator(DatabaseProduct dbProduct, Configuration conf) { this.dbProduct = dbProduct; this.conf = conf; } @@ -62,8 +64,7 @@ public SQLGenerator(DatabaseProduct dbProduct, HiveConf conf) { //http://www.oratable.com/oracle-insert-all/ //https://livesql.oracle.com/apex/livesql/file/content_BM1LJQ87M5CNIOKPOWPV6ZGR3.html for (int numRows = 0; numRows < rows.size(); numRows++) { - if (numRows % conf - .getIntVar(HiveConf.ConfVars.METASTORE_DIRECT_SQL_MAX_ELEMENTS_VALUES_CLAUSE) == 0) { + if (numRows % MetastoreConf.getIntVar(conf, ConfVars.DIRECT_SQL_MAX_ELEMENTS_VALUES_CLAUSE) == 0) { if (numRows > 0) { sb.append(" select * from dual"); insertStmts.add(sb.toString()); @@ -84,8 +85,7 @@ public SQLGenerator(DatabaseProduct dbProduct, HiveConf conf) { case POSTGRES: case SQLSERVER: for (int numRows = 0; numRows < rows.size(); numRows++) { - if (numRows % conf - .getIntVar(HiveConf.ConfVars.METASTORE_DIRECT_SQL_MAX_ELEMENTS_VALUES_CLAUSE) == 0) { + if (numRows % MetastoreConf.getIntVar(conf, ConfVars.DIRECT_SQL_MAX_ELEMENTS_VALUES_CLAUSE) == 0) { if (numRows > 0) { insertStmts.add(sb.substring(0, sb.length() - 1));//exclude trailing comma } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/AcidOpenTxnsCounterService.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/AcidOpenTxnsCounterService.java new file mode 100644 index 0000000000..864b01ac55 --- /dev/null +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/AcidOpenTxnsCounterService.java @@ -0,0 +1,56 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.txn; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.RunnableConfigurable; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Background running thread, periodically updating number of open transactions. + * Runs inside Hive Metastore Service. + */ +public class AcidOpenTxnsCounterService implements RunnableConfigurable { + private static final Logger LOG = LoggerFactory.getLogger(AcidOpenTxnsCounterService.class); + + private Configuration conf; + + @Override + public void run() { + try { + TxnStore txnHandler = TxnUtils.getTxnStore(conf); + if (LOG.isDebugEnabled()) { + LOG.debug("Running open txn counter"); + } + txnHandler.countOpenTxns(); + } + catch(Throwable t) { + LOG.error("Serious error in {}: {}", Thread.currentThread().getName(), t.getMessage(), t); + } + } + + @Override + public void setConf(Configuration configuration) { + conf = configuration; + } + + @Override + public Configuration getConf() { + return conf; + } +} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java similarity index 99% rename from metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java index 413ce3b74d..41e428be53 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java similarity index 96% rename from metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java index 60839faa35..e676b91a70 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements.
See the NOTICE file * distributed with this work for additional information @@ -18,9 +18,10 @@ package org.apache.hadoop.hive.metastore.txn; import org.apache.hadoop.hive.common.classification.RetrySemantics; -import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.CompactionType; import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.apache.hadoop.util.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -60,7 +61,7 @@ public CompactionTxnHandler() { @RetrySemantics.ReadOnly public Set findPotentialCompactions(int maxAborted) throws MetaException { Connection dbConn = null; - Set response = new HashSet(); + Set response = new HashSet<>(); Statement stmt = null; ResultSet rs = null; try { @@ -277,7 +278,7 @@ public void markCompacted(CompactionInfo info) throws MetaException { @RetrySemantics.ReadOnly public List findReadyToClean() throws MetaException { Connection dbConn = null; - List rc = new ArrayList(); + List rc = new ArrayList<>(); Statement stmt = null; ResultSet rs = null; @@ -387,7 +388,7 @@ public void markCleaned(CompactionInfo info) throws MetaException { while (rs.next()) txnids.add(rs.getLong(1)); // Remove entries from txn_components, as there may be aborted txn components if (txnids.size() > 0) { - List queries = new ArrayList(); + List queries = new ArrayList<>(); // Prepare prefix and suffix StringBuilder prefix = new StringBuilder(); @@ -466,7 +467,7 @@ public void cleanEmptyAbortedTxns() throws MetaException { return; } Collections.sort(txnids);//easier to read logs - List queries = new ArrayList(); + List queries = new ArrayList<>(); StringBuilder prefix = new StringBuilder(); StringBuilder suffix = new StringBuilder(); @@ -626,7 +627,7 @@ public void revokeTimedoutWorkers(long timeout) throws MetaException { + (ci.partName == null ? "" : " AND PARTITION_NAME='" + ci.partName + "'");*/ LOG.debug("Going to execute <" + s + ">"); rs = stmt.executeQuery(s); - List columns = new ArrayList(); + List columns = new ArrayList<>(); while (rs.next()) { columns.add(rs.getString(1)); } @@ -743,9 +744,9 @@ public void purgeCompactionHistory() throws MetaException { CompactionInfo ci = new CompactionInfo(rs.getLong(1), rs.getString(2), rs.getString(3), rs.getString(4), rs.getString(5).charAt(0)); if(!ci.getFullPartitionName().equals(lastCompactedEntity)) { lastCompactedEntity = ci.getFullPartitionName(); - rc = new RetentionCounters(conf.getIntVar(HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_ATTEMPTED), + rc = new RetentionCounters(MetastoreConf.getIntVar(conf, ConfVars.COMPACTOR_HISTORY_RETENTION_ATTEMPTED), getFailedCompactionRetention(), - conf.getIntVar(HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_SUCCEEDED)); + MetastoreConf.getIntVar(conf, ConfVars.COMPACTOR_HISTORY_RETENTION_SUCCEEDED)); } checkForDeletion(deleteSet, ci, rc); } @@ -755,7 +756,7 @@ public void purgeCompactionHistory() throws MetaException { return; } - List queries = new ArrayList(); + List queries = new ArrayList<>(); StringBuilder prefix = new StringBuilder(); StringBuilder suffix = new StringBuilder(); @@ -788,12 +789,12 @@ public void purgeCompactionHistory() throws MetaException { * compaction threshold which prevents new compactions from being scheduled. 
*/ private int getFailedCompactionRetention() { - int failedThreshold = conf.getIntVar(HiveConf.ConfVars.COMPACTOR_INITIATOR_FAILED_THRESHOLD); - int failedRetention = conf.getIntVar(HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED); + int failedThreshold = MetastoreConf.getIntVar(conf, ConfVars.COMPACTOR_INITIATOR_FAILED_THRESHOLD); + int failedRetention = MetastoreConf.getIntVar(conf, ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED); if(failedRetention < failedThreshold) { - LOG.warn("Invalid configuration " + HiveConf.ConfVars.COMPACTOR_INITIATOR_FAILED_THRESHOLD.varname + - "=" + failedRetention + " < " + HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED + "=" + - failedRetention + ". Will use " + HiveConf.ConfVars.COMPACTOR_INITIATOR_FAILED_THRESHOLD.varname + + LOG.warn("Invalid configuration " + ConfVars.COMPACTOR_INITIATOR_FAILED_THRESHOLD.varname + + "=" + failedRetention + " < " + ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED + "=" + + failedRetention + ". Will use " + ConfVars.COMPACTOR_INITIATOR_FAILED_THRESHOLD.varname + "=" + failedRetention); failedRetention = failedThreshold; } @@ -825,7 +826,7 @@ public boolean checkFailedCompactions(CompactionInfo ci) throws MetaException { " and CC_STATE != " + quoteChar(ATTEMPTED_STATE) + " order by CC_ID desc"); int numFailed = 0; int numTotal = 0; - int failedThreshold = conf.getIntVar(HiveConf.ConfVars.COMPACTOR_INITIATOR_FAILED_THRESHOLD); + int failedThreshold = MetastoreConf.getIntVar(conf, ConfVars.COMPACTOR_INITIATOR_FAILED_THRESHOLD); while(rs.next() && ++numTotal <= failedThreshold) { if(rs.getString(1).charAt(0) == FAILED_STATE) { numFailed++; diff --git metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java similarity index 83% rename from metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java index 0161894740..d09c9580ff 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -28,10 +28,11 @@ import java.util.Properties; import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hive.conf.HiveConf; /** * Utility methods for creating and destroying txn database/schema, plus methods for @@ -56,12 +57,12 @@ private TxnDbUtil() { * * @param conf HiveConf to add these values to */ - public static void setConfValues(HiveConf conf) { - conf.setVar(HiveConf.ConfVars.HIVE_TXN_MANAGER, TXN_MANAGER); - conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, true); + public static void setConfValues(Configuration conf) { + MetastoreConf.setVar(conf, ConfVars.HIVE_TXN_MANAGER, TXN_MANAGER); + MetastoreConf.setBoolVar(conf, ConfVars.HIVE_SUPPORT_CONCURRENCY, true); } - public static void prepDb() throws Exception { + public static void prepDb(Configuration conf) throws Exception { // This is a bogus hack because it copies the contents of the SQL file // intended for creating derby databases, and thus will inexorably get // out of date with it. I'm open to any suggestions on how to make this @@ -70,7 +71,7 @@ public static void prepDb() throws Exception { Connection conn = null; Statement stmt = null; try { - conn = getConnection(); + conn = getConnection(conf); stmt = conn.createStatement(); stmt.execute("CREATE TABLE TXNS (" + " TXN_ID bigint PRIMARY KEY," + @@ -174,7 +175,7 @@ public static void prepDb() throws Exception { // This might be a deadlock, if so, let's retry if (e instanceof SQLTransactionRollbackException && deadlockCnt++ < 5) { LOG.warn("Caught deadlock, retrying db creation"); - prepDb(); + prepDb(conf); } else { throw e; } @@ -184,14 +185,14 @@ public static void prepDb() throws Exception { } } - public static void cleanDb() throws Exception { + public static void cleanDb(Configuration conf) throws Exception { int retryCount = 0; while(++retryCount <= 3) { boolean success = true; Connection conn = null; Statement stmt = null; try { - conn = getConnection(); + conn = getConnection(conf); stmt = conn.createStatement(); // We want to try these, whether they succeed or fail. 
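Editor's sketch: the hunks above change every TxnDbUtil entry point to take the caller's Configuration instead of constructing a HiveConf internally. A minimal illustration of how a test might drive the new signatures; the harness class is invented for this note, and it assumes the Configuration already carries the metastore JDBC settings (connection driver, URL, user, password) that getConnection(conf) reads:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.txn.TxnDbUtil;

public class TxnDbUtilSketch {
  public static void main(String[] args) throws Exception {
    // Assumption: JDBC settings for the txn database are already present,
    // e.g. loaded from a test metastore-site.xml on the classpath.
    Configuration conf = new Configuration();
    TxnDbUtil.setConfValues(conf);   // turn on the DbTxnManager + concurrency settings
    TxnDbUtil.cleanDb(conf);         // drop any leftover txn tables (retries, then throws)
    TxnDbUtil.prepDb(conf);          // recreate the Derby txn schema
    // The query helpers take the same conf, so assertions no longer depend
    // on HiveConf being constructible on the classpath.
    int rows = TxnDbUtil.countQueryAgent(conf, "select count(*) from TXNS");
    System.out.println(rows + "\n" + TxnDbUtil.queryToString(conf, "select * from TXNS"));
  }
}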
@@ -224,20 +225,31 @@ public static void cleanDb() throws Exception { return; } } + throw new RuntimeException("Failed to clean up txn tables"); } private static boolean dropTable(Statement stmt, String name, int retryCount) throws SQLException { - try { - stmt.execute("DROP TABLE " + name); - return true; - } catch (SQLException e) { - if("42Y55".equals(e.getSQLState()) && 30000 == e.getErrorCode()) { - //failed because object doesn't exist + for (int i = 0; i < 3; i++) { + try { + stmt.execute("DROP TABLE " + name); + LOG.debug("Successfully dropped table " + name); return true; + } catch (SQLException e) { + if ("42Y55".equals(e.getSQLState()) && 30000 == e.getErrorCode()) { + LOG.debug("Not dropping " + name + " because it doesn't exist"); + //failed because object doesn't exist + return true; + } + if ("X0Y25".equals(e.getSQLState()) && 30000 == e.getErrorCode()) { + // Intermittent failure + LOG.warn("Intermittent drop failure, retrying, try number " + i); + continue; + } + LOG.error("Unable to drop table " + name + ": " + e.getMessage() + + " State=" + e.getSQLState() + " code=" + e.getErrorCode() + " retryCount=" + retryCount); } - LOG.error("Unable to drop table " + name + ": " + e.getMessage() + - " State=" + e.getSQLState() + " code=" + e.getErrorCode() + " retryCount=" + retryCount); } + LOG.error("Failed to drop table, don't know why"); return false; } @@ -249,12 +261,12 @@ private static boolean dropTable(Statement stmt, String name, int retryCount) th * * @return number of components, or 0 if there is no lock */ - public static int countLockComponents(long lockId) throws Exception { + public static int countLockComponents(Configuration conf, long lockId) throws Exception { Connection conn = null; PreparedStatement stmt = null; ResultSet rs = null; try { - conn = getConnection(); + conn = getConnection(conf); stmt = conn.prepareStatement("SELECT count(*) FROM hive_locks WHERE hl_lock_ext_id = ?"); stmt.setLong(1, lockId); rs = stmt.executeQuery(); @@ -273,12 +285,12 @@ public static int countLockComponents(long lockId) throws Exception { * @return count countQuery result * @throws Exception */ - public static int countQueryAgent(String countQuery) throws Exception { + public static int countQueryAgent(Configuration conf, String countQuery) throws Exception { Connection conn = null; Statement stmt = null; ResultSet rs = null; try { - conn = getConnection(); + conn = getConnection(conf); stmt = conn.createStatement(); rs = stmt.executeQuery(countQuery); if (!rs.next()) { @@ -290,16 +302,17 @@ public static int countQueryAgent(String countQuery) throws Exception { } } @VisibleForTesting - public static String queryToString(String query) throws Exception { - return queryToString(query, true); + public static String queryToString(Configuration conf, String query) throws Exception { + return queryToString(conf, query, true); } - public static String queryToString(String query, boolean includeHeader) throws Exception { + public static String queryToString(Configuration conf, String query, boolean includeHeader) + throws Exception { Connection conn = null; Statement stmt = null; ResultSet rs = null; StringBuilder sb = new StringBuilder(); try { - conn = getConnection(); + conn = getConnection(conf); stmt = conn.createStatement(); rs = stmt.executeQuery(query); ResultSetMetaData rsmd = rs.getMetaData(); @@ -321,13 +334,12 @@ public static String queryToString(String query, boolean includeHeader) throws E return sb.toString(); } - static Connection getConnection() throws Exception { - 
HiveConf conf = new HiveConf(); - String jdbcDriver = HiveConf.getVar(conf, HiveConf.ConfVars.METASTORE_CONNECTION_DRIVER); + static Connection getConnection(Configuration conf) throws Exception { + String jdbcDriver = MetastoreConf.getVar(conf, ConfVars.CONNECTION_DRIVER); Driver driver = (Driver) Class.forName(jdbcDriver).newInstance(); Properties prop = new Properties(); - String driverUrl = HiveConf.getVar(conf, HiveConf.ConfVars.METASTORECONNECTURLKEY); - String user = HiveConf.getVar(conf, HiveConf.ConfVars.METASTORE_CONNECTION_USER_NAME); + String driverUrl = MetastoreConf.getVar(conf, ConfVars.CONNECTURLKEY); + String user = MetastoreConf.getVar(conf, ConfVars.CONNECTION_USER_NAME); String passwd = MetastoreConf.getPassword(conf, MetastoreConf.ConfVars.PWD); prop.setProperty("user", user); prop.setProperty("password", passwd); diff --git metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java similarity index 96% rename from metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java index f77900d5d7..bb4d6710a0 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,28 +23,29 @@ import org.apache.commons.dbcp.DriverManagerConnectionFactory; import org.apache.commons.dbcp.PoolableConnectionFactory; import org.apache.commons.lang.NotImplementedException; -import org.apache.hadoop.hive.common.ServerUtils; -import org.apache.hadoop.hive.common.classification.InterfaceAudience; -import org.apache.hadoop.hive.common.classification.InterfaceStability; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.common.classification.RetrySemantics; import org.apache.hadoop.hive.metastore.DatabaseProduct; -import org.apache.hadoop.hive.metastore.HouseKeeperService; +import org.apache.hadoop.hive.metastore.RunnableConfigurable; +import org.apache.hadoop.hive.metastore.ThreadPool; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.datasource.BoneCPDataSourceProvider; import org.apache.hadoop.hive.metastore.datasource.DataSourceProvider; import org.apache.hadoop.hive.metastore.datasource.HikariCPDataSourceProvider; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.apache.hadoop.hive.metastore.metrics.Metrics; import org.apache.hadoop.hive.metastore.metrics.MetricsConstants; import org.apache.hadoop.hive.metastore.tools.SQLGenerator; +import org.apache.hadoop.hive.metastore.utils.JavaUtils; +import org.apache.hadoop.hive.metastore.utils.StringableMap; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.commons.dbcp.PoolingDataSource; import org.apache.commons.pool.impl.GenericObjectPool; -import org.apache.hadoop.hive.common.JavaUtils; -import org.apache.hadoop.hive.common.StringableMap; -import org.apache.hadoop.hive.conf.HiveConf; -import 
org.apache.hadoop.hive.conf.HiveConfUtil; import org.apache.hadoop.hive.metastore.api.*; import org.apache.hadoop.util.StringUtils; @@ -58,6 +59,7 @@ import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.locks.ReentrantLock; import java.util.regex.Pattern; @@ -66,8 +68,8 @@ * server. * * Note on log messages: Please include txnid:X and lockid info using - * {@link org.apache.hadoop.hive.common.JavaUtils#txnIdToString(long)} - * and {@link org.apache.hadoop.hive.common.JavaUtils#lockIdToString(long)} in all messages. + * {@link JavaUtils#txnIdToString(long)} + * and {@link JavaUtils#lockIdToString(long)} in all messages. * The txnid:X and lockid:Y matches how Thrift object toString() methods are generated, * so keeping the format consistent makes grep'ing the logs much easier. * @@ -194,15 +196,13 @@ public static OpertaionType fromDataOperationType(DataOperationType dop) { private static volatile int maxOpenTxns = 0; // Whether number of open transactions reaches the threshold private static volatile boolean tooManyOpenTxns = false; - // The AcidHouseKeeperService for counting open transactions - private static volatile HouseKeeperService openTxnsCounter = null; /** * Number of consecutive deadlocks we have seen */ private int deadlockCnt; private long deadlockRetryInterval; - protected HiveConf conf; + protected Configuration conf; private static DatabaseProduct dbProduct; private static SQLGenerator sqlGenerator; @@ -225,7 +225,9 @@ public static OpertaionType fromDataOperationType(DataOperationType dop) { * (e.g. via Compactor services) */ private final static ConcurrentHashMap derbyKey2Lock = new ConcurrentHashMap<>(); - private static final String hostname = ServerUtils.hostname(); + private static final String hostname = JavaUtils.hostname(); + + private static final AtomicBoolean startedOpenTxnCounter = new AtomicBoolean(); // Private methods should never catch SQLException and then throw MetaException. The public // methods depend on SQLException coming back so they can detect and handle deadlocks. Private @@ -242,22 +244,19 @@ public TxnHandler() { /** * This is logically part of c'tor and must be called prior to any other method. 
- * Not physically part of c'tor due to use of relfection + * Not physically part of c'tor due to use of reflection */ - public void setConf(HiveConf conf) { + public void setConf(Configuration conf) { this.conf = conf; checkQFileTestHack(); synchronized (TxnHandler.class) { if (connPool == null) { - //only do this once per JVM; useful for support - LOG.info(HiveConfUtil.dumpConfig(conf).toString()); - Connection dbConn = null; // Set up the JDBC connection pool try { - int maxPoolSize = conf.getIntVar(HiveConf.ConfVars.METASTORE_CONNECTION_POOLING_MAX_CONNECTIONS); + int maxPoolSize = MetastoreConf.getIntVar(conf, ConfVars.CONNECTION_POOLING_MAX_CONNECTIONS); long getConnectionTimeoutMs = 30000; connPool = setupJdbcConnectionPool(conf, maxPoolSize, getConnectionTimeoutMs); /*the mutex pools should ideally be somewhat larger since some operations require 1 @@ -283,14 +282,20 @@ public void setConf(HiveConf conf) { numOpenTxns = Metrics.getOrCreateGauge(MetricsConstants.NUM_OPEN_TXNS); - timeout = HiveConf.getTimeVar(conf, HiveConf.ConfVars.HIVE_TXN_TIMEOUT, TimeUnit.MILLISECONDS); + timeout = MetastoreConf.getTimeVar(conf, ConfVars.TXN_TIMEOUT, TimeUnit.MILLISECONDS); buildJumpTable(); - retryInterval = HiveConf.getTimeVar(conf, HiveConf.ConfVars.HMSHANDLERINTERVAL, + retryInterval = MetastoreConf.getTimeVar(conf, ConfVars.HMSHANDLERINTERVAL, TimeUnit.MILLISECONDS); - retryLimit = HiveConf.getIntVar(conf, HiveConf.ConfVars.HMSHANDLERATTEMPTS); + retryLimit = MetastoreConf.getIntVar(conf, ConfVars.HMSHANDLERATTEMPTS); deadlockRetryInterval = retryInterval / 10; - maxOpenTxns = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVE_MAX_OPEN_TXNS); + maxOpenTxns = MetastoreConf.getIntVar(conf, ConfVars.MAX_OPEN_TXNS); + } + + @Override + public Configuration getConf() { + return conf; } + @Override @RetrySemantics.ReadOnly public GetOpenTxnsInfoResponse getOpenTxnsInfo() throws MetaException { @@ -325,7 +330,7 @@ public GetOpenTxnsInfoResponse getOpenTxnsInfo() throws MetaException { "initialized, null record found in next_txn_id"); } close(rs); - List txnInfos = new ArrayList(); + List txnInfos = new ArrayList<>(); //need the WHERE clause below to ensure consistent results with READ_COMMITTED s = "select txn_id, txn_state, txn_user, txn_host, txn_started, txn_last_heartbeat from " + "TXNS where txn_id <= " + hwm; @@ -398,7 +403,7 @@ public GetOpenTxnsResponse getOpenTxns() throws MetaException { "initialized, null record found in next_txn_id"); } close(rs); - List openList = new ArrayList(); + List openList = new ArrayList<>(); //need the WHERE clause below to ensure consistent results with READ_COMMITTED s = "select txn_id, txn_state from TXNS where txn_id <= " + hwm + " order by txn_id"; LOG.debug("Going to execute query<" + s + ">"); @@ -437,17 +442,6 @@ public GetOpenTxnsResponse getOpenTxns() throws MetaException { } } - private static void startHouseKeeperService(HiveConf conf, Class c){ - try { - openTxnsCounter = (HouseKeeperService)c.newInstance(); - openTxnsCounter.start(conf); - } catch (Exception ex) { - LOG.error("Failed to start {}" , openTxnsCounter.getClass() + - ". The system will not handle {} " , openTxnsCounter.getServiceDescription(), - ". Root Cause: ", ex); - } - } - /** * Retry-by-caller note: * Worst case, it will leave an open txn which will timeout. 
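A hedged aside on the configuration pattern the setConf() hunk above adopts: typed MetastoreConf accessors over a plain Hadoop Configuration, replacing the HiveConf getters. The ConfVars entries below are the ones the hunk reads; the class name and the printout are illustrative only.

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;

public class TxnConfReadSketch {
  public static void main(String[] args) {
    // newMetastoreConf() seeds the Configuration with metastore defaults.
    Configuration conf = MetastoreConf.newMetastoreConf();
    // Typed reads replace HiveConf.getTimeVar/getIntVar; each ConfVars entry
    // carries its own default, and time vars convert to the requested unit.
    long timeoutMs = MetastoreConf.getTimeVar(conf, ConfVars.TXN_TIMEOUT, TimeUnit.MILLISECONDS);
    int retryLimit = MetastoreConf.getIntVar(conf, ConfVars.HMSHANDLERATTEMPTS);
    int maxOpenTxns = MetastoreConf.getIntVar(conf, ConfVars.MAX_OPEN_TXNS);
    System.out.printf("txn timeout=%d ms, retry limit=%d, max open txns=%d%n",
        timeoutMs, retryLimit, maxOpenTxns);
  }
}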
@@ -455,18 +449,6 @@ private static void startHouseKeeperService(HiveConf conf, Class c){ @Override @RetrySemantics.Idempotent public OpenTxnsResponse openTxns(OpenTxnRequest rqst) throws MetaException { - if (openTxnsCounter == null) { - synchronized (TxnHandler.class) { - try { - if (openTxnsCounter == null) { - startHouseKeeperService(conf, Class.forName("org.apache.hadoop.hive.ql.txn.AcidOpenTxnsCounterService")); - } - } catch (ClassNotFoundException e) { - throw new MetaException(e.getMessage()); - } - } - } - if (!tooManyOpenTxns && numOpenTxns.get() >= maxOpenTxns) { tooManyOpenTxns = true; } @@ -507,8 +489,7 @@ public OpenTxnsResponse openTxns(OpenTxnRequest rqst) throws MetaException { */ dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); // Make sure the user has not requested an insane amount of txns. - int maxTxns = HiveConf.getIntVar(conf, - HiveConf.ConfVars.HIVE_TXN_MAX_OPEN_BATCH); + int maxTxns = MetastoreConf.getIntVar(conf, ConfVars.TXN_MAX_OPEN_BATCH); if (numTxns > maxTxns) numTxns = maxTxns; stmt = dbConn.createStatement(); @@ -525,7 +506,7 @@ public OpenTxnsResponse openTxns(OpenTxnRequest rqst) throws MetaException { stmt.executeUpdate(s); long now = getDbTime(dbConn); - List txnIds = new ArrayList(numTxns); + List txnIds = new ArrayList<>(numTxns); List rows = new ArrayList<>(); for (long i = first; i < first + numTxns; i++) { @@ -639,7 +620,7 @@ public void abortTxns(AbortTxnsRequest rqst) throws NoSuchTxnException, MetaExce * that they read appropriately. In particular, if txns do not overlap, then one follows the other * (assuming they write the same entity), and thus the 2nd must see changes of the 1st. We ensure * this by locking in snapshot after * {@link #openTxns(OpenTxnRequest)} call is made (see org.apache.hadoop.hive.ql.Driver.acquireLocksAndOpenTxn) * and mutexing openTxn() with commit(). In other words, once a S.commit() starts we must ensure * that txn T which will be considered a later txn, locks in a snapshot that includes the result * of S's commit (assuming no other txns). @@ -1038,7 +1019,7 @@ private ConnectionLockIdPair enqueueLockWithRetry(LockRequest rqst) throws NoSuc long intLockId = 0; for (LockComponent lc : rqst.getComponent()) { if(lc.isSetOperationType() && lc.getOperationType() == DataOperationType.UNSET && - (conf.getBoolVar(HiveConf.ConfVars.HIVE_IN_TEST) || conf.getBoolVar(HiveConf.ConfVars.HIVE_IN_TEZ_TEST))) { + (MetastoreConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST) || MetastoreConf.getBoolVar(conf, ConfVars.HIVE_IN_TEZ_TEST))) { //old version of thrift client should have (lc.isSetOperationType() == false) but they do not //If you add a default value to a variable, isSet() for that variable is true regardless of where the //message was created (for object variables. It works correctly for boolean vars, e.g. LockComponent.isAcid).
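With the lazy house-keeper bootstrap deleted above, openTxns() is reduced to the capped batch-open path. A hedged caller-side sketch of that cap follows; the TxnStore lookup, the user/host strings, and the batch size of 5000 are illustrative, while OpenTxnRequest, OpenTxnsResponse and the ConfVars.TXN_MAX_OPEN_BATCH clamp are as in this patch.

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.OpenTxnRequest;
import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.metastore.txn.TxnStore;
import org.apache.hadoop.hive.metastore.txn.TxnUtils;

public class OpenTxnsSketch {
  public static void main(String[] args) throws MetaException {
    Configuration conf = MetastoreConf.newMetastoreConf();
    TxnStore txnStore = TxnUtils.getTxnStore(conf);  // assumes a prepared txn DB
    // Ask for 5000 txns; the server silently clamps numTxns to TXN_MAX_OPEN_BATCH,
    // so the response may carry fewer ids than requested.
    OpenTxnRequest rqst = new OpenTxnRequest(5000, "etl_user", "worker-host-01");
    OpenTxnsResponse resp = txnStore.openTxns(rqst);
    List<Long> txnIds = resp.getTxn_ids();
    System.out.println("opened " + txnIds.size() + " transactions");
  }
}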
@@ -1288,8 +1269,8 @@ public ShowLocksResponse showLocks(ShowLocksRequest rqst) throws MetaException { try { Connection dbConn = null; ShowLocksResponse rsp = new ShowLocksResponse(); - List elems = new ArrayList(); - List sortedList = new ArrayList(); + List elems = new ArrayList<>(); + List sortedList = new ArrayList<>(); Statement stmt = null; try { dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); @@ -1423,8 +1404,8 @@ public HeartbeatTxnRangeResponse heartbeatTxnRange(HeartbeatTxnRangeRequest rqst Connection dbConn = null; Statement stmt = null; HeartbeatTxnRangeResponse rsp = new HeartbeatTxnRangeResponse(); - Set nosuch = new HashSet(); - Set aborted = new HashSet(); + Set nosuch = new HashSet<>(); + Set aborted = new HashSet<>(); rsp.setNosuch(nosuch); rsp.setAborted(aborted); try { @@ -1627,7 +1608,7 @@ private static String compactorStateToResponse(char s) { } @RetrySemantics.ReadOnly public ShowCompactResponse showCompact(ShowCompactRequest rqst) throws MetaException { - ShowCompactResponse response = new ShowCompactResponse(new ArrayList()); + ShowCompactResponse response = new ShowCompactResponse(new ArrayList<>()); Connection dbConn = null; Statement stmt = null; try { @@ -1775,7 +1756,7 @@ public void cleanupRecords(HiveObjectType type, Database db, Table table, String tblName; dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); stmt = dbConn.createStatement(); - List queries = new ArrayList(); + List queries = new ArrayList<>(); StringBuilder buff = new StringBuilder(); switch (type) { @@ -2340,14 +2321,14 @@ public int compare(LockType t1, LockType t2) { private static Map>> jumpTable; private void checkQFileTestHack() { - boolean hackOn = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_IN_TEST) || - HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_IN_TEZ_TEST); + boolean hackOn = MetastoreConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST) || + MetastoreConf.getBoolVar(conf, ConfVars.HIVE_IN_TEZ_TEST); if (hackOn) { LOG.info("Hacking in canned values for transaction manager"); // Set up the transaction/locking db in the derby metastore TxnDbUtil.setConfValues(conf); try { - TxnDbUtil.prepDb(); + TxnDbUtil.prepDb(conf); } catch (Exception e) { // We may have already created the tables and thus don't need to redo it. if (e.getMessage() != null && !e.getMessage().contains("already exists")) { @@ -2389,7 +2370,7 @@ private int abortTxns(Connection dbConn, List txnids, long max_heartbeat, stmt = dbConn.createStatement(); //This is an update statement, thus at any Isolation level will take Write locks so will block //all other ops using S4U on TXNS row. 
- List queries = new ArrayList(); + List queries = new ArrayList<>(); StringBuilder prefix = new StringBuilder(); StringBuilder suffix = new StringBuilder(); @@ -2488,7 +2469,7 @@ private LockResponse checkLock(Connection dbConn, long extLockId) "hl_lock_int_id, hl_db, hl_table, hl_partition, hl_lock_state, " + "hl_lock_type, hl_txnid from HIVE_LOCKS where hl_db in ("); - Set strings = new HashSet(locksBeingChecked.size()); + Set strings = new HashSet<>(locksBeingChecked.size()); //This is the set of entities that the statement represented by extLockId wants to update List writeSet = new ArrayList<>(); @@ -2992,7 +2973,7 @@ private LockInfo getTxnIdFromLockId(Connection dbConn, long extLockId) LOG.debug("Going to execute query <" + s + ">"); ResultSet rs = stmt.executeQuery(s); boolean sawAtLeastOne = false; - List ourLockInfo = new ArrayList(); + List ourLockInfo = new ArrayList<>(); while (rs.next()) { ourLockInfo.add(new LockInfo(rs)); sawAtLeastOne = true; @@ -3033,7 +3014,7 @@ private void timeOutLocks(Connection dbConn, long now) { return; } - List queries = new ArrayList(); + List queries = new ArrayList<>(); StringBuilder prefix = new StringBuilder(); StringBuilder suffix = new StringBuilder(); @@ -3187,12 +3168,11 @@ public void countOpenTxns() throws MetaException { } } - private static synchronized DataSource setupJdbcConnectionPool(HiveConf conf, int maxPoolSize, long getConnectionTimeoutMs) throws SQLException { + private static synchronized DataSource setupJdbcConnectionPool(Configuration conf, int maxPoolSize, long getConnectionTimeoutMs) throws SQLException { String driverUrl = DataSourceProvider.getMetastoreJdbcDriverUrl(conf); String user = DataSourceProvider.getMetastoreJdbcUser(conf); String passwd = DataSourceProvider.getMetastoreJdbcPasswd(conf); - String connectionPooler = conf.getVar( - HiveConf.ConfVars.METASTORE_CONNECTION_POOLING_TYPE).toLowerCase(); + String connectionPooler = MetastoreConf.getVar(conf, ConfVars.CONNECTION_POOLING_TYPE).toLowerCase(); if ("bonecp".equals(connectionPooler)) { doRetryOnConnPool = true; // Enable retries to work around BONECP bug. @@ -3221,16 +3201,14 @@ private static synchronized DataSource setupJdbcConnectionPool(HiveConf conf, in private static synchronized void buildJumpTable() { if (jumpTable != null) return; - jumpTable = - new HashMap>>(3); + jumpTable = new HashMap<>(3); // SR: Lock we are trying to acquire is shared read - Map> m = - new HashMap>(3); + Map> m = new HashMap<>(3); jumpTable.put(LockType.SHARED_READ, m); // SR.SR: Lock we are examining is shared read - Map m2 = new HashMap(2); + Map m2 = new HashMap<>(2); m.put(LockType.SHARED_READ, m2); // SR.SR.acquired Lock we are examining is acquired; We can acquire @@ -3244,7 +3222,7 @@ private static synchronized void buildJumpTable() { m2.put(LockState.WAITING, LockAction.KEEP_LOOKING); // SR.SW: Lock we are examining is shared write - m2 = new HashMap(2); + m2 = new HashMap<>(2); m.put(LockType.SHARED_WRITE, m2); // SR.SW.acquired Lock we are examining is acquired; We can acquire @@ -3259,7 +3237,7 @@ private static synchronized void buildJumpTable() { m2.put(LockState.WAITING, LockAction.KEEP_LOOKING); // SR.E: Lock we are examining is exclusive - m2 = new HashMap(2); + m2 = new HashMap<>(2); m.put(LockType.EXCLUSIVE, m2); // No matter whether it has acquired or not, we cannot pass an exclusive.
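The buildJumpTable() hunks here and below only trade spelled-out generics for the diamond operator; the lock-compatibility semantics are untouched. For orientation, a sketch of how such a table answers a compatibility question once built. LockType and LockState are the real thrift enums; the miniature map and the local LockAction enum (a stand-in for the private one in TxnHandler) are illustrative.

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.hive.metastore.api.LockState;
import org.apache.hadoop.hive.metastore.api.LockType;

public class JumpTableSketch {
  // Stand-in for TxnHandler's private LockAction enum.
  enum LockAction { ACQUIRE, WAIT, KEEP_LOOKING }

  public static void main(String[] args) {
    // requested lock type -> existing lock type -> existing lock state -> action
    Map<LockType, Map<LockType, Map<LockState, LockAction>>> jumpTable = new HashMap<>();
    Map<LockType, Map<LockState, LockAction>> m = new HashMap<>();
    Map<LockState, LockAction> m2 = new HashMap<>();
    // The "SR.E" entries above: a shared read never passes an exclusive lock.
    m2.put(LockState.ACQUIRED, LockAction.WAIT);
    m2.put(LockState.WAITING, LockAction.WAIT);
    m.put(LockType.EXCLUSIVE, m2);
    jumpTable.put(LockType.SHARED_READ, m);

    LockAction action = jumpTable.get(LockType.SHARED_READ)
        .get(LockType.EXCLUSIVE).get(LockState.ACQUIRED);
    System.out.println(action);  // WAIT
  }
}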
@@ -3267,11 +3245,11 @@ private static synchronized void buildJumpTable() { m2.put(LockState.WAITING, LockAction.WAIT); // SW: Lock we are trying to acquire is shared write - m = new HashMap>(3); + m = new HashMap<>(3); jumpTable.put(LockType.SHARED_WRITE, m); // SW.SR: Lock we are examining is shared read - m2 = new HashMap(2); + m2 = new HashMap<>(2); m.put(LockType.SHARED_READ, m2); // SW.SR.acquired Lock we are examining is acquired; We need to keep @@ -3285,7 +3263,7 @@ private static synchronized void buildJumpTable() { m2.put(LockState.WAITING, LockAction.KEEP_LOOKING); // SW.SW: Lock we are examining is shared write - m2 = new HashMap(2); + m2 = new HashMap<>(2); m.put(LockType.SHARED_WRITE, m2); // Regardless of acquired or waiting, one shared write cannot pass another. @@ -3293,7 +3271,7 @@ private static synchronized void buildJumpTable() { m2.put(LockState.WAITING, LockAction.WAIT); // SW.E: Lock we are examining is exclusive - m2 = new HashMap(2); + m2 = new HashMap<>(2); m.put(LockType.EXCLUSIVE, m2); // No matter whether it has acquired or not, we cannot pass an exclusive. @@ -3301,11 +3279,11 @@ private static synchronized void buildJumpTable() { m2.put(LockState.WAITING, LockAction.WAIT); // E: Lock we are trying to acquire is exclusive - m = new HashMap>(3); + m = new HashMap<>(3); jumpTable.put(LockType.EXCLUSIVE, m); // E.SR: Lock we are examining is shared read - m2 = new HashMap(2); + m2 = new HashMap<>(2); m.put(LockType.SHARED_READ, m2); // Exclusives can never pass @@ -3313,7 +3291,7 @@ private static synchronized void buildJumpTable() { m2.put(LockState.WAITING, LockAction.WAIT); // E.SW: Lock we are examining is shared write - m2 = new HashMap(2); + m2 = new HashMap<>(2); m.put(LockType.SHARED_WRITE, m2); // Exclusives can never pass @@ -3321,7 +3299,7 @@ private static synchronized void buildJumpTable() { m2.put(LockState.WAITING, LockAction.WAIT); // E.E: Lock we are examining is exclusive - m2 = new HashMap(2); + m2 = new HashMap<>(2); m.put(LockType.EXCLUSIVE, m2); // No matter whether it has acquired or not, we cannot pass an exclusive. @@ -3331,7 +3309,7 @@ private static synchronized void buildJumpTable() { /** * Returns true if {@code ex} should be retried */ - static boolean isRetryable(HiveConf conf, Exception ex) { + static boolean isRetryable(Configuration conf, Exception ex) { if(ex instanceof SQLException) { SQLException sqlException = (SQLException)ex; if("08S01".equalsIgnoreCase(sqlException.getSQLState())) { @@ -3343,7 +3321,7 @@ static boolean isRetryable(HiveConf conf, Exception ex) { return true; } - String regex = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_TXN_RETRYABLE_SQLEX_REGEX); + String regex = MetastoreConf.getVar(conf, ConfVars.TXN_RETRYABLE_SQLEX_REGEX); if (regex != null && !regex.isEmpty()) { String[] patterns = regex.split(",(?=\\S)"); String message = getMessage((SQLException)ex); @@ -3568,13 +3546,13 @@ public void releaseLocks() { // Note that this depends on the fact that no-one in this class calls anything but // getConnection. If you want to use any of the Logger or wrap calls you'll have to // implement them. 
- private final HiveConf conf; + private final Configuration conf; private Driver driver; private String connString; private String user; private String passwd; - public NoPoolConnectionPool(HiveConf conf) { + public NoPoolConnectionPool(Configuration conf) { this.conf = conf; } @@ -3591,10 +3569,10 @@ public Connection getConnection() throws SQLException { public Connection getConnection(String username, String password) throws SQLException { // Find the JDBC driver if (driver == null) { - String driverName = conf.getVar(HiveConf.ConfVars.METASTORE_CONNECTION_DRIVER); + String driverName = MetastoreConf.getVar(conf, ConfVars.CONNECTION_DRIVER); if (driverName == null || driverName.equals("")) { String msg = "JDBC driver for transaction db not set in configuration " + - "file, need to set " + HiveConf.ConfVars.METASTORE_CONNECTION_DRIVER.varname; + "file, need to set " + ConfVars.CONNECTION_DRIVER.varname; LOG.error(msg); throw new RuntimeException(msg); } @@ -3612,7 +3590,7 @@ public Connection getConnection(String username, String password) throws SQLExce throw new RuntimeException("Unable to find driver " + driverName + ", " + e.getMessage(), e); } - connString = conf.getVar(HiveConf.ConfVars.METASTORECONNECTURLKEY); + connString = MetastoreConf.getVar(conf, ConfVars.CONNECTURLKEY); } try { diff --git metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java similarity index 78% rename from metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java index 3eb3827d06..96a7f56715 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,10 +18,10 @@ package org.apache.hadoop.hive.metastore.txn; import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.hive.common.classification.InterfaceAudience; -import org.apache.hadoop.hive.common.classification.InterfaceStability; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.hive.common.classification.RetrySemantics; -import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.*; import java.sql.SQLException; @@ -32,21 +32,10 @@ /** * A handler to answer transaction related calls that come into the metastore * server. - * - * Note on log messages: Please include txnid:X and lockid info using - * {@link org.apache.hadoop.hive.common.JavaUtils#txnIdToString(long)} - * and {@link org.apache.hadoop.hive.common.JavaUtils#lockIdToString(long)} in all messages. - * The txnid:X and lockid:Y matches how Thrift object toString() methods are generated, - * so keeping the format consistent makes grep'ing the logs much easier. - * - * Note on HIVE_LOCKS.hl_last_heartbeat. - * For locks that are part of transaction, we set this 0 (would rather set it to NULL but - * Currently the DB schema has this NOT NULL) and only update/read heartbeat from corresponding - * transaction in TXNS. 
*/ @InterfaceAudience.Private @InterfaceStability.Evolving -public interface TxnStore { +public interface TxnStore extends Configurable { enum MUTEX_KEY {Initiator, Cleaner, HouseKeeper, CompactionHistory, CheckLock, WriteSetCleaner, CompactionScheduler} @@ -58,9 +47,7 @@ String SUCCEEDED_RESPONSE = "succeeded"; String ATTEMPTED_RESPONSE = "attempted"; - public static final int TIMED_OUT_TXN_ABORT_BATCH_SIZE = 50000; - - public void setConf(HiveConf conf); + int TIMED_OUT_TXN_ABORT_BATCH_SIZE = 50000; /** * Get information about open transactions. This gives extensive information about the @@ -70,7 +57,7 @@ * @throws MetaException */ @RetrySemantics.ReadOnly - public GetOpenTxnsInfoResponse getOpenTxnsInfo() throws MetaException; + GetOpenTxnsInfoResponse getOpenTxnsInfo() throws MetaException; /** * Get list of valid transactions. This gives just the list of transactions that are open. @@ -78,14 +65,14 @@ * @throws MetaException */ @RetrySemantics.ReadOnly - public GetOpenTxnsResponse getOpenTxns() throws MetaException; + GetOpenTxnsResponse getOpenTxns() throws MetaException; /** * Get the count for open transactions. * @throws MetaException */ @RetrySemantics.ReadOnly - public void countOpenTxns() throws MetaException; + void countOpenTxns() throws MetaException; /** * Open a set of transactions @@ -94,7 +81,7 @@ * @throws MetaException */ @RetrySemantics.Idempotent - public OpenTxnsResponse openTxns(OpenTxnRequest rqst) throws MetaException; + OpenTxnsResponse openTxns(OpenTxnRequest rqst) throws MetaException; /** * Abort (rollback) a transaction. @@ -103,7 +90,7 @@ * @throws MetaException */ @RetrySemantics.Idempotent - public void abortTxn(AbortTxnRequest rqst) throws NoSuchTxnException, MetaException, TxnAbortedException; + void abortTxn(AbortTxnRequest rqst) throws NoSuchTxnException, MetaException, TxnAbortedException; /** * Abort (rollback) a list of transactions in one request. 
@@ -112,7 +99,7 @@ * @throws MetaException */ @RetrySemantics.Idempotent - public void abortTxns(AbortTxnsRequest rqst) throws NoSuchTxnException, MetaException; + void abortTxns(AbortTxnsRequest rqst) throws NoSuchTxnException, MetaException; /** * Commit a transaction @@ -122,7 +109,7 @@ * @throws MetaException */ @RetrySemantics.Idempotent - public void commitTxn(CommitTxnRequest rqst) + void commitTxn(CommitTxnRequest rqst) throws NoSuchTxnException, TxnAbortedException, MetaException; /** @@ -135,7 +122,7 @@ public void commitTxn(CommitTxnRequest rqst) * @throws MetaException */ @RetrySemantics.CannotRetry - public LockResponse lock(LockRequest rqst) + LockResponse lock(LockRequest rqst) throws NoSuchTxnException, TxnAbortedException, MetaException; /** @@ -149,7 +136,7 @@ public LockResponse lock(LockRequest rqst) * @throws MetaException */ @RetrySemantics.SafeToRetry - public LockResponse checkLock(CheckLockRequest rqst) + LockResponse checkLock(CheckLockRequest rqst) throws NoSuchTxnException, NoSuchLockException, TxnAbortedException, MetaException; /** @@ -162,7 +149,7 @@ public LockResponse checkLock(CheckLockRequest rqst) * @throws MetaException */ @RetrySemantics.Idempotent - public void unlock(UnlockRequest rqst) + void unlock(UnlockRequest rqst) throws NoSuchLockException, TxnOpenException, MetaException; /** @@ -172,7 +159,7 @@ public void unlock(UnlockRequest rqst) * @throws MetaException */ @RetrySemantics.ReadOnly - public ShowLocksResponse showLocks(ShowLocksRequest rqst) throws MetaException; + ShowLocksResponse showLocks(ShowLocksRequest rqst) throws MetaException; /** * Send a heartbeat for a lock or a transaction @@ -183,7 +170,7 @@ public void unlock(UnlockRequest rqst) * @throws MetaException */ @RetrySemantics.SafeToRetry - public void heartbeat(HeartbeatRequest ids) + void heartbeat(HeartbeatRequest ids) throws NoSuchTxnException, NoSuchLockException, TxnAbortedException, MetaException; /** @@ -193,7 +180,7 @@ public void heartbeat(HeartbeatRequest ids) * @throws MetaException */ @RetrySemantics.SafeToRetry - public HeartbeatTxnRangeResponse heartbeatTxnRange(HeartbeatTxnRangeRequest rqst) + HeartbeatTxnRangeResponse heartbeatTxnRange(HeartbeatTxnRangeRequest rqst) throws MetaException; /** @@ -204,7 +191,7 @@ public HeartbeatTxnRangeResponse heartbeatTxnRange(HeartbeatTxnRangeRequest rqst * @throws MetaException */ @RetrySemantics.Idempotent - public CompactionResponse compact(CompactionRequest rqst) throws MetaException; + CompactionResponse compact(CompactionRequest rqst) throws MetaException; /** * Show list of current compactions @@ -213,7 +200,7 @@ public HeartbeatTxnRangeResponse heartbeatTxnRange(HeartbeatTxnRangeRequest rqst * @throws MetaException */ @RetrySemantics.ReadOnly - public ShowCompactResponse showCompact(ShowCompactRequest rqst) throws MetaException; + ShowCompactResponse showCompact(ShowCompactRequest rqst) throws MetaException; /** * Add information on a set of dynamic partitions that participated in a transaction. 
@@ -223,7 +210,7 @@ public HeartbeatTxnRangeResponse heartbeatTxnRange(HeartbeatTxnRangeRequest rqst * @throws MetaException */ @RetrySemantics.SafeToRetry - public void addDynamicPartitions(AddDynamicPartitions rqst) + void addDynamicPartitions(AddDynamicPartitions rqst) throws NoSuchTxnException, TxnAbortedException, MetaException; /** @@ -235,14 +222,14 @@ public void addDynamicPartitions(AddDynamicPartitions rqst) * @throws MetaException */ @RetrySemantics.Idempotent - public void cleanupRecords(HiveObjectType type, Database db, Table table, + void cleanupRecords(HiveObjectType type, Database db, Table table, Iterator partitionIterator) throws MetaException; /** * Timeout transactions and/or locks. This should only be called by the compactor. */ @RetrySemantics.Idempotent - public void performTimeOuts(); + void performTimeOuts(); /** * This will look through the completed_txn_components table and look for partitions or tables @@ -254,7 +241,7 @@ public void cleanupRecords(HiveObjectType type, Database db, Table table, * or runAs set since these are only potential compactions not actual ones. */ @RetrySemantics.ReadOnly - public Set findPotentialCompactions(int maxAborted) throws MetaException; + Set findPotentialCompactions(int maxAborted) throws MetaException; /** * Sets the user to run as. This is for the case @@ -263,7 +250,7 @@ public void cleanupRecords(HiveObjectType type, Database db, Table table, * @param user user to run the jobs as */ @RetrySemantics.Idempotent - public void setRunAs(long cq_id, String user) throws MetaException; + void setRunAs(long cq_id, String user) throws MetaException; /** * This will grab the next compaction request off of @@ -272,7 +259,7 @@ public void cleanupRecords(HiveObjectType type, Database db, Table table, * @return an info element for this compaction request, or null if there is no work to do now. */ @RetrySemantics.ReadOnly - public CompactionInfo findNextToCompact(String workerId) throws MetaException; + CompactionInfo findNextToCompact(String workerId) throws MetaException; /** * This will mark an entry in the queue as compacted @@ -280,7 +267,7 @@ public void cleanupRecords(HiveObjectType type, Database db, Table table, * @param info info on the compaction entry to mark as compacted. */ @RetrySemantics.SafeToRetry - public void markCompacted(CompactionInfo info) throws MetaException; + void markCompacted(CompactionInfo info) throws MetaException; /** * Find entries in the queue that are ready to @@ -288,7 +275,7 @@ public void cleanupRecords(HiveObjectType type, Database db, Table table, * @return information on the entry in the queue. */ @RetrySemantics.ReadOnly - public List findReadyToClean() throws MetaException; + List findReadyToClean() throws MetaException; /** * This will remove an entry from the queue after @@ -297,7 +284,7 @@ public void cleanupRecords(HiveObjectType type, Database db, Table table, * @param info info on the compaction entry to remove */ @RetrySemantics.CannotRetry - public void markCleaned(CompactionInfo info) throws MetaException; + void markCleaned(CompactionInfo info) throws MetaException; /** * Mark a compaction entry as failed. 
This will move it to the compaction history queue with a @@ -307,7 +294,7 @@ public void cleanupRecords(HiveObjectType type, Database db, Table table, * @throws MetaException */ @RetrySemantics.CannotRetry - public void markFailed(CompactionInfo info) throws MetaException; + void markFailed(CompactionInfo info) throws MetaException; /** * Clean up aborted transactions from txns that have no components in txn_components. The reason such * @@ -315,7 +302,7 @@ public void cleanupRecords(HiveObjectType type, Database db, Table table, * abandoned it w/o doing any work) or due to {@link #markCleaned(CompactionInfo)} being called. */ @RetrySemantics.SafeToRetry - public void cleanEmptyAbortedTxns() throws MetaException; + void cleanEmptyAbortedTxns() throws MetaException; /** * This will take all entries assigned to workers @@ -327,7 +314,7 @@ public void cleanupRecords(HiveObjectType type, Database db, Table table, * so that like hostname% will match the worker id. */ @RetrySemantics.Idempotent - public void revokeFromLocalWorkers(String hostname) throws MetaException; + void revokeFromLocalWorkers(String hostname) throws MetaException; /** * This call will return all compaction queue @@ -339,7 +326,7 @@ public void cleanupRecords(HiveObjectType type, Database db, Table table, * declared dead. */ @RetrySemantics.Idempotent - public void revokeTimedoutWorkers(long timeout) throws MetaException; + void revokeTimedoutWorkers(long timeout) throws MetaException; /** * Queries metastore DB directly to find columns in the table which have statistics information. @@ -348,13 +335,13 @@ public void cleanupRecords(HiveObjectType type, Database db, Table table, * @throws MetaException */ @RetrySemantics.ReadOnly - public List findColumnsWithStats(CompactionInfo ci) throws MetaException; + List findColumnsWithStats(CompactionInfo ci) throws MetaException; /** * Record the highest txn id that the {@code ci} compaction job will pay attention to. */ @RetrySemantics.Idempotent - public void setCompactionHighestTxnId(CompactionInfo ci, long highestTxnId) throws MetaException; + void setCompactionHighestTxnId(CompactionInfo ci, long highestTxnId) throws MetaException; /** * For any given compactable entity (partition, table if not partitioned) the history of compactions @@ -365,14 +352,14 @@ public void cleanupRecords(HiveObjectType type, Database db, Table table, * @throws MetaException */ @RetrySemantics.SafeToRetry - public void purgeCompactionHistory() throws MetaException; + void purgeCompactionHistory() throws MetaException; /** * WriteSet tracking is used to ensure proper transaction isolation. This method deletes the * transaction metadata once it becomes unnecessary.
*/ @RetrySemantics.SafeToRetry - public void performWriteSetGC(); + void performWriteSetGC(); /** * Determine if there are enough consecutive failures compacting a table or partition that no * @@ -383,16 +370,16 @@ public void cleanupRecords(HiveObjectType type, Database db, Table table, * @throws MetaException */ @RetrySemantics.ReadOnly - public boolean checkFailedCompactions(CompactionInfo ci) throws MetaException; + boolean checkFailedCompactions(CompactionInfo ci) throws MetaException; @VisibleForTesting - public int numLocksInLockTable() throws SQLException, MetaException; + int numLocksInLockTable() throws SQLException, MetaException; @VisibleForTesting long setTimeout(long milliseconds); @RetrySemantics.Idempotent - public MutexAPI getMutexAPI(); + MutexAPI getMutexAPI(); /** * This is primarily designed to provide coarse grained mutex support to operations running * @@ -401,12 +388,12 @@ public void cleanupRecords(HiveObjectType type, Database db, Table table, * * In the RDBMS world each {@code LockHandle} uses a java.sql.Connection so use it sparingly. */ - public static interface MutexAPI { + interface MutexAPI { /** * The {@code key} is the name of the lock. Will acquire an exclusive lock or block. It returns * a handle which must be used to release the lock. Each invocation returns a new handle. */ - public LockHandle acquireLock(String key) throws MetaException; + LockHandle acquireLock(String key) throws MetaException; /** * Same as {@link #acquireLock(String)} but takes an already existing handle as input. This * @@ -414,12 +401,12 @@ public void cleanupRecords(HiveObjectType type, Database db, Table table, * the same handle will be released together. * @param handle not NULL */ - public void acquireLock(String key, LockHandle handle) throws MetaException; - public static interface LockHandle { + void acquireLock(String key, LockHandle handle) throws MetaException; + interface LockHandle { /** * Releases all locks associated with this handle. */ - public void releaseLocks(); + void releaseLocks(); } } @@ -429,5 +416,5 @@ public void cleanupRecords(HiveObjectType type, Database db, Table table, * @param id {@link CompactionInfo#id} */ @RetrySemantics.Idempotent - public void setHadoopJobId(String hadoopJobId, long id); + void setHadoopJobId(String hadoopJobId, long id); } diff --git metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java similarity index 91% rename from metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java index 30b155f3b3..7579ae8af2 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements.
See the NOTICE file * distributed with this work for additional information @@ -17,17 +17,19 @@ */ package org.apache.hadoop.hive.metastore.txn; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.common.ValidCompactorTxnList; import org.apache.hadoop.hive.common.ValidReadTxnList; import org.apache.hadoop.hive.common.ValidTxnList; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse; import org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.api.TxnInfo; import org.apache.hadoop.hive.metastore.api.TxnState; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; +import org.apache.hadoop.hive.metastore.utils.JavaUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -114,11 +116,10 @@ public static ValidTxnList createValidCompactTxnList(GetOpenTxnsInfoResponse txn * @param conf configuration * @return txn store */ - public static TxnStore getTxnStore(HiveConf conf) { - String className = conf.getVar(HiveConf.ConfVars.METASTORE_TXN_STORE_IMPL); + public static TxnStore getTxnStore(Configuration conf) { + String className = MetastoreConf.getVar(conf, ConfVars.TXN_STORE_IMPL); try { - TxnStore handler = ((Class) MetaStoreUtils.getClass( - className)).newInstance(); + TxnStore handler = JavaUtils.getClass(className, TxnStore.class).newInstance(); handler.setConf(conf); return handler; } catch (Exception e) { @@ -155,13 +156,13 @@ public static boolean isAcidTable(Table table) { * e.g. 
( id in (1,2,3) OR id in (4,5,6) ) * @param notIn clause to be broken up is NOT IN */ - public static void buildQueryWithINClause(HiveConf conf, List queries, StringBuilder prefix, + public static void buildQueryWithINClause(Configuration conf, List queries, StringBuilder prefix, StringBuilder suffix, List inList, String inColumn, boolean addParens, boolean notIn) { if (inList == null || inList.size() == 0) { throw new IllegalArgumentException("The IN list is empty!"); } - int batchSize = conf.getIntVar(HiveConf.ConfVars.METASTORE_DIRECT_SQL_MAX_ELEMENTS_IN_CLAUSE); + int batchSize = MetastoreConf.getIntVar(conf, ConfVars.DIRECT_SQL_MAX_ELEMENTS_IN_CLAUSE); int numWholeBatches = inList.size() / batchSize; StringBuilder buf = new StringBuilder(); buf.append(prefix); @@ -233,8 +234,8 @@ public static void buildQueryWithINClause(HiveConf conf, List queries, S } /** Estimate if the size of a string will exceed certain limit */ - private static boolean needNewQuery(HiveConf conf, StringBuilder sb) { - int queryMemoryLimit = conf.getIntVar(HiveConf.ConfVars.METASTORE_DIRECT_SQL_MAX_QUERY_LENGTH); + private static boolean needNewQuery(Configuration conf, StringBuilder sb) { + int queryMemoryLimit = MetastoreConf.getIntVar(conf, ConfVars.DIRECT_SQL_MAX_QUERY_LENGTH); // http://www.javamex.com/tutorials/memory/string_memory_usage.shtml long sizeInBytes = 8 * (((sb.length() * 2) + 45) / 8); return sizeInBytes / 1024 > queryMemoryLimit; diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/JavaUtils.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/JavaUtils.java index 81f8a8518d..40f739301e 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/JavaUtils.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/JavaUtils.java @@ -17,7 +17,16 @@ */ package org.apache.hadoop.hive.metastore.utils; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.net.InetAddress; +import java.net.UnknownHostException; + public class JavaUtils { + public static final Logger LOG = LoggerFactory.getLogger(JavaUtils.class); + /** * Standard way of getting classloader in Hive code (outside of Hadoop). * @@ -34,4 +43,41 @@ public static ClassLoader getClassLoader() { } return classLoader; } + + @SuppressWarnings(value = "unchecked") + public static Class getClass(String className, Class clazz) + throws MetaException { + try { + return (Class) Class.forName(className, true, getClassLoader()); + } catch (ClassNotFoundException e) { + throw new MetaException(className + " class not found"); + } + } + + /** + * @return name of current host + */ + public static String hostname() { + try { + return InetAddress.getLocalHost().getHostName(); + } catch (UnknownHostException e) { + LOG.error("Unable to resolve my host name " + e.getMessage()); + throw new RuntimeException(e); + } + } + + /** + * Utility method for ACID to normalize logging info. Matches + * org.apache.hadoop.hive.metastore.api.LockRequest#toString + */ + public static String lockIdToString(long extLockId) { + return "lockid:" + extLockId; + } + /** + * Utility method for ACID to normalize logging info. 
Matches + * org.apache.hadoop.hive.metastore.api.LockResponse#toString + */ + public static String txnIdToString(long txnId) { + return "txnid:" + txnId; + } } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/StringableMap.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/StringableMap.java new file mode 100644 index 0000000000..b3f1749763 --- /dev/null +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/StringableMap.java @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore.utils; + + +import java.util.HashMap; +import java.util.Map; +import java.util.Properties; + +/** + * A utility class that can convert a HashMap of Properties into a colon separated string, + * and can take the same format of string and convert it to a HashMap of Properties. + */ +public class StringableMap extends HashMap { + + public StringableMap(String s) { + String[] parts = s.split(":", 2); + // read that many chars + int numElements = Integer.parseInt(parts[0]); + s = parts[1]; + for (int i = 0; i < numElements; i++) { + parts = s.split(":", 2); + int len = Integer.parseInt(parts[0]); + String key = null; + if (len > 0) key = parts[1].substring(0, len); + parts = parts[1].substring(len).split(":", 2); + len = Integer.parseInt(parts[0]); + String value = null; + if (len > 0) value = parts[1].substring(0, len); + s = parts[1].substring(len); + put(key, value); + } + } + + public StringableMap(Map m) { + super(m); + } + + @Override + public String toString() { + StringBuilder buf = new StringBuilder(); + buf.append(size()); + buf.append(':'); + if (size() > 0) { + for (Map.Entry entry : entrySet()) { + int length = (entry.getKey() == null) ? 0 : entry.getKey().length(); + buf.append(entry.getKey() == null ? 0 : length); + buf.append(':'); + if (length > 0) buf.append(entry.getKey()); + length = (entry.getValue() == null) ? 
0 : entry.getValue().length(); + buf.append(length); + buf.append(':'); + if (length > 0) buf.append(entry.getValue()); + } + } + return buf.toString(); + } + + public Properties toProperties() { + Properties props = new Properties(); + props.putAll(this); + return props; + } +} diff --git metastore/src/test/org/apache/hadoop/hive/metastore/datasource/TestDataSourceProviderFactory.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/datasource/TestDataSourceProviderFactory.java similarity index 79% rename from metastore/src/test/org/apache/hadoop/hive/metastore/datasource/TestDataSourceProviderFactory.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/datasource/TestDataSourceProviderFactory.java index daea544c71..3f21ed15d4 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/datasource/TestDataSourceProviderFactory.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/datasource/TestDataSourceProviderFactory.java @@ -19,7 +19,9 @@ import com.jolbox.bonecp.BoneCPDataSource; import com.zaxxer.hikari.HikariDataSource; -import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -29,13 +31,13 @@ public class TestDataSourceProviderFactory { - private HiveConf conf; + private Configuration conf; @Before public void init() { - conf = new HiveConf(); - conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_USER_NAME, "dummyUser"); - conf.setVar(HiveConf.ConfVars.METASTOREPWD, "dummyPass"); + conf = MetastoreConf.newMetastoreConf(); + MetastoreConf.setVar(conf, ConfVars.CONNECTION_USER_NAME, "dummyUser"); + MetastoreConf.setVar(conf, ConfVars.PWD, "dummyPass"); } @Test @@ -44,7 +46,7 @@ public void testNoDataSourceCreatedWithoutProps() throws SQLException { DataSourceProvider dsp = DataSourceProviderFactory.getDataSourceProvider(conf); Assert.assertNull(dsp); - conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_POOLING_TYPE, BoneCPDataSourceProvider.BONECP); + MetastoreConf.setVar(conf, ConfVars.CONNECTION_POOLING_TYPE, BoneCPDataSourceProvider.BONECP); dsp = DataSourceProviderFactory.getDataSourceProvider(conf); Assert.assertNull(dsp); @@ -53,7 +55,7 @@ public void testNoDataSourceCreatedWithoutProps() throws SQLException { @Test public void testCreateBoneCpDataSource() throws SQLException { - conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_POOLING_TYPE, BoneCPDataSourceProvider.BONECP); + MetastoreConf.setVar(conf, ConfVars.CONNECTION_POOLING_TYPE, BoneCPDataSourceProvider.BONECP); conf.set(BoneCPDataSourceProvider.BONECP + ".firstProp", "value"); conf.set(BoneCPDataSourceProvider.BONECP + ".secondProp", "value"); @@ -67,7 +69,7 @@ public void testCreateBoneCpDataSource() throws SQLException { @Test public void testSetBoneCpStringProperty() throws SQLException { - conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_POOLING_TYPE, BoneCPDataSourceProvider.BONECP); + MetastoreConf.setVar(conf, ConfVars.CONNECTION_POOLING_TYPE, BoneCPDataSourceProvider.BONECP); conf.set(BoneCPDataSourceProvider.BONECP + ".initSQL", "select 1 from dual"); DataSourceProvider dsp = DataSourceProviderFactory.getDataSourceProvider(conf); @@ -81,7 +83,7 @@ public void testSetBoneCpStringProperty() throws SQLException { @Test public void testSetBoneCpNumberProperty() throws SQLException { - 
conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_POOLING_TYPE, BoneCPDataSourceProvider.BONECP); + MetastoreConf.setVar(conf, ConfVars.CONNECTION_POOLING_TYPE, BoneCPDataSourceProvider.BONECP); conf.set(BoneCPDataSourceProvider.BONECP + ".acquireRetryDelayInMs", "599"); DataSourceProvider dsp = DataSourceProviderFactory.getDataSourceProvider(conf); @@ -95,7 +97,7 @@ public void testSetBoneCpNumberProperty() throws SQLException { @Test public void testSetBoneCpBooleanProperty() throws SQLException { - conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_POOLING_TYPE, BoneCPDataSourceProvider.BONECP); + MetastoreConf.setVar(conf, ConfVars.CONNECTION_POOLING_TYPE, BoneCPDataSourceProvider.BONECP); conf.set(BoneCPDataSourceProvider.BONECP + ".disableJMX", "true"); DataSourceProvider dsp = DataSourceProviderFactory.getDataSourceProvider(conf); @@ -109,7 +111,7 @@ public void testSetBoneCpBooleanProperty() throws SQLException { @Test public void testCreateHikariCpDataSource() throws SQLException { - conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_POOLING_TYPE, HikariCPDataSourceProvider.HIKARI); + MetastoreConf.setVar(conf, ConfVars.CONNECTION_POOLING_TYPE, HikariCPDataSourceProvider.HIKARI); // This is needed to prevent the HikariDataSource from trying to connect to the DB conf.set(HikariCPDataSourceProvider.HIKARI + ".initializationFailTimeout", "-1"); @@ -123,7 +125,7 @@ public void testCreateHikariCpDataSource() throws SQLException { @Test public void testSetHikariCpStringProperty() throws SQLException { - conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_POOLING_TYPE, HikariCPDataSourceProvider.HIKARI); + MetastoreConf.setVar(conf, ConfVars.CONNECTION_POOLING_TYPE, HikariCPDataSourceProvider.HIKARI); conf.set(HikariCPDataSourceProvider.HIKARI + ".connectionInitSql", "select 1 from dual"); conf.set(HikariCPDataSourceProvider.HIKARI + ".initializationFailTimeout", "-1"); @@ -138,7 +140,7 @@ public void testSetHikariCpStringProperty() throws SQLException { @Test public void testSetHikariCpNumberProperty() throws SQLException { - conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_POOLING_TYPE, HikariCPDataSourceProvider.HIKARI); + MetastoreConf.setVar(conf, ConfVars.CONNECTION_POOLING_TYPE, HikariCPDataSourceProvider.HIKARI); conf.set(HikariCPDataSourceProvider.HIKARI + ".idleTimeout", "59999"); conf.set(HikariCPDataSourceProvider.HIKARI + ".initializationFailTimeout", "-1"); @@ -153,7 +155,7 @@ public void testSetHikariCpNumberProperty() throws SQLException { @Test public void testSetHikariCpBooleanProperty() throws SQLException { - conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_POOLING_TYPE, HikariCPDataSourceProvider.HIKARI); + MetastoreConf.setVar(conf, ConfVars.CONNECTION_POOLING_TYPE, HikariCPDataSourceProvider.HIKARI); conf.set(HikariCPDataSourceProvider.HIKARI + ".allowPoolSuspension", "false"); conf.set(HikariCPDataSourceProvider.HIKARI + ".initializationFailTimeout", "-1"); @@ -164,10 +166,5 @@ public void testSetHikariCpBooleanProperty() throws SQLException { Assert.assertTrue(ds instanceof HikariDataSource); Assert.assertEquals(false, ((HikariDataSource)ds).isAllowPoolSuspension()); } - @Test(expected = IllegalArgumentException.class) - public void testBoneCPConfigCannotBeSet() { - conf.addToRestrictList(BoneCPDataSourceProvider.BONECP); - conf.verifyAndSet(BoneCPDataSourceProvider.BONECP + ".disableJMX", "true"); - } } diff --git metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandlerNegative.java 
standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/txn/TestTxnHandlerNegative.java similarity index 87% rename from metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandlerNegative.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/txn/TestTxnHandlerNegative.java index 639669ebe7..d8640b584b 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandlerNegative.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/txn/TestTxnHandlerNegative.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,9 +17,10 @@ */ package org.apache.hadoop.hive.metastore.txn; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hive.conf.HiveConf; import org.junit.Test; import static junit.framework.Assert.assertNotNull; @@ -35,8 +36,8 @@ */ @Test public void testBadConnection() throws Exception { - HiveConf conf = new HiveConf(); - conf.setVar(HiveConf.ConfVars.METASTORECONNECTURLKEY, "blah"); + Configuration conf = MetastoreConf.newMetastoreConf(); + MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CONNECTURLKEY, "blah"); RuntimeException e = null; try { TxnUtils.getTxnStore(conf); diff --git metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnUtils.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/txn/TestTxnUtils.java similarity index 97% rename from metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnUtils.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/txn/TestTxnUtils.java index 1497c00e5d..674084d093 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnUtils.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/txn/TestTxnUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,9 +17,11 @@ */ package org.apache.hadoop.hive.metastore.txn; -import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.metastore.DatabaseProduct; import org.apache.hadoop.hive.metastore.tools.SQLGenerator; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -35,14 +37,14 @@ * Tests for TxnUtils */ public class TestTxnUtils { - private HiveConf conf; + private Configuration conf; public TestTxnUtils() throws Exception { } @Test public void testBuildQueryWithINClause() throws Exception { - List queries = new ArrayList(); + List queries = new ArrayList<>(); StringBuilder prefix = new StringBuilder(); StringBuilder suffix = new StringBuilder(); @@ -53,9 +55,9 @@ public void testBuildQueryWithINClause() throws Exception { // Case 1 - Max in list members: 10; Max query string length: 1KB // The first query happens to have 2 full batches. 
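The Case 1 settings just below cap IN lists at 10 elements and query strings at 1KB, then feed 200 ids through TxnUtils.buildQueryWithINClause. A hedged stand-alone sketch of that call, with an illustrative SQL prefix; the signature and ConfVars names are the ones changed in this patch.

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
import org.apache.hadoop.hive.metastore.txn.TxnUtils;

public class InClauseSketch {
  public static void main(String[] args) {
    Configuration conf = MetastoreConf.newMetastoreConf();
    MetastoreConf.setLongVar(conf, ConfVars.DIRECT_SQL_MAX_QUERY_LENGTH, 1);        // KB
    MetastoreConf.setLongVar(conf, ConfVars.DIRECT_SQL_MAX_ELEMENTS_IN_CLAUSE, 10);
    List<Long> inList = new ArrayList<>();
    for (long i = 1; i <= 200; i++) {
      inList.add(i);
    }
    List<String> queries = new ArrayList<>();
    // The helper emits OR-ed, parenthesized "txn_id in (...)" batches of at most
    // 10 ids each, starting a fresh query whenever the 1KB length cap is hit.
    TxnUtils.buildQueryWithINClause(conf, queries,
        new StringBuilder("delete from TXNS where "), new StringBuilder(),
        inList, "txn_id", true /*addParens*/, false /*notIn*/);
    System.out.println(queries.size() + " queries generated");
  }
}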
- conf.setIntVar(HiveConf.ConfVars.METASTORE_DIRECT_SQL_MAX_QUERY_LENGTH, 1); - conf.setIntVar(HiveConf.ConfVars.METASTORE_DIRECT_SQL_MAX_ELEMENTS_IN_CLAUSE, 10); - List inList = new ArrayList(); + MetastoreConf.setLongVar(conf, ConfVars.DIRECT_SQL_MAX_QUERY_LENGTH, 1); + MetastoreConf.setLongVar(conf, ConfVars.DIRECT_SQL_MAX_ELEMENTS_IN_CLAUSE, 10); + List inList = new ArrayList<>(); for (long i = 1; i <= 200; i++) { inList.add(i); } @@ -72,8 +74,8 @@ public void testBuildQueryWithINClause() throws Exception { runAgainstDerby(queries); // Case 3.1 - Max in list members: 1000, Max query string length: 1KB, and exact 1000 members in a single IN clause - conf.setIntVar(HiveConf.ConfVars.METASTORE_DIRECT_SQL_MAX_QUERY_LENGTH, 1); - conf.setIntVar(HiveConf.ConfVars.METASTORE_DIRECT_SQL_MAX_ELEMENTS_IN_CLAUSE, 1000); + MetastoreConf.setLongVar(conf, ConfVars.DIRECT_SQL_MAX_QUERY_LENGTH, 1); + MetastoreConf.setLongVar(conf, ConfVars.DIRECT_SQL_MAX_ELEMENTS_IN_CLAUSE, 1000); queries.clear(); for (long i = 202; i <= 1000; i++) { inList.add(i); @@ -83,8 +85,8 @@ public void testBuildQueryWithINClause() throws Exception { runAgainstDerby(queries); // Case 3.2 - Max in list members: 1000, Max query string length: 10KB, and exact 1000 members in a single IN clause - conf.setIntVar(HiveConf.ConfVars.METASTORE_DIRECT_SQL_MAX_QUERY_LENGTH, 10); - conf.setIntVar(HiveConf.ConfVars.METASTORE_DIRECT_SQL_MAX_ELEMENTS_IN_CLAUSE, 1000); + MetastoreConf.setLongVar(conf, ConfVars.DIRECT_SQL_MAX_QUERY_LENGTH, 10); + MetastoreConf.setLongVar(conf, ConfVars.DIRECT_SQL_MAX_ELEMENTS_IN_CLAUSE, 1000); queries.clear(); TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, inList, "TXN_ID", true, false); Assert.assertEquals(1, queries.size()); @@ -94,12 +96,12 @@ public void testBuildQueryWithINClause() throws Exception { for (long i = 1001; i <= 2000; i++) { inList.add(i); } - conf.setIntVar(HiveConf.ConfVars.METASTORE_DIRECT_SQL_MAX_QUERY_LENGTH, 1); + MetastoreConf.setLongVar(conf, ConfVars.DIRECT_SQL_MAX_QUERY_LENGTH, 1); queries.clear(); TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, inList, "TXN_ID", true, false); Assert.assertEquals(2, queries.size()); runAgainstDerby(queries); - conf.setIntVar(HiveConf.ConfVars.METASTORE_DIRECT_SQL_MAX_QUERY_LENGTH, 10); + MetastoreConf.setLongVar(conf, ConfVars.DIRECT_SQL_MAX_QUERY_LENGTH, 10); queries.clear(); TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, inList, "TXN_ID", true, false); Assert.assertEquals(1, queries.size()); @@ -138,7 +140,7 @@ private void runAgainstDerby(List queries) throws Exception { ResultSet rs = null; try { - conn = TxnDbUtil.getConnection(); + conn = TxnDbUtil.getConnection(conf); stmt = conn.createStatement(); for (String query : queries) { rs = stmt.executeQuery(query); @@ -165,7 +167,7 @@ public void testSQLGenerator() throws Exception { Assert.assertEquals("Wrong stmt", "insert all into colors(name, category) values('yellow', 1) into colors(name, category) values('red', 2) into colors(name, category) values('orange', 3) select * from dual", sql.get(0)); - for(int i = 0; i < conf.getIntVar(HiveConf.ConfVars.METASTORE_DIRECT_SQL_MAX_ELEMENTS_VALUES_CLAUSE); i++) { + for(int i = 0; i < MetastoreConf.getIntVar(conf, ConfVars.DIRECT_SQL_MAX_ELEMENTS_VALUES_CLAUSE); i++) { rows.add("\'G\'," + i); } sql = sqlGenerator.createInsertValuesStmt("colors(name, category)", rows); @@ -185,7 +187,7 @@ public void testSQLGenerator() throws Exception { sql = sqlGenerator.createInsertValuesStmt("colors(name, category)", 
rows); Assert.assertEquals("Number of stmts", 1, sql.size()); Assert.assertEquals("Wrong stmt", "insert into colors(name, category) values('yellow', 1),('red', 2),('orange', 3)", sql.get(0)); - for(int i = 0; i < conf.getIntVar(HiveConf.ConfVars.METASTORE_DIRECT_SQL_MAX_ELEMENTS_VALUES_CLAUSE); i++) { + for(int i = 0; i < MetastoreConf.getIntVar(conf, ConfVars.DIRECT_SQL_MAX_ELEMENTS_VALUES_CLAUSE); i++) { rows.add("\'G\'," + i); } sql = sqlGenerator.createInsertValuesStmt("colors(name, category)", rows); @@ -202,14 +204,13 @@ public void testSQLGenerator() throws Exception { @Before public void setUp() throws Exception { - tearDown(); - conf = new HiveConf(this.getClass()); + conf = MetastoreConf.newMetastoreConf(); TxnDbUtil.setConfValues(conf); - TxnDbUtil.prepDb(); + TxnDbUtil.prepDb(conf); } @After public void tearDown() throws Exception { - TxnDbUtil.cleanDb(); + TxnDbUtil.cleanDb(conf); } } diff --git storage-api/pom.xml storage-api/pom.xml index 199acec715..807cb2c741 100644 --- storage-api/pom.xml +++ storage-api/pom.xml @@ -31,6 +31,7 @@ 2.6 + 1.1.3 14.0.1 2.8.1 4.11 @@ -115,6 +116,12 @@ ${junit.version} test + + commons-logging + commons-logging + ${commons-logging.version} + test + diff --git common/src/java/org/apache/hadoop/hive/common/ValidCompactorTxnList.java storage-api/src/java/org/apache/hadoop/hive/common/ValidCompactorTxnList.java similarity index 99% rename from common/src/java/org/apache/hadoop/hive/common/ValidCompactorTxnList.java rename to storage-api/src/java/org/apache/hadoop/hive/common/ValidCompactorTxnList.java index eaa0b34370..94b8c58d28 100644 --- common/src/java/org/apache/hadoop/hive/common/ValidCompactorTxnList.java +++ storage-api/src/java/org/apache/hadoop/hive/common/ValidCompactorTxnList.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -64,7 +64,7 @@ public ValidCompactorTxnList(long[] abortedTxnList, BitSet abortedBits, long hig else { lastElementPos = idx; } - /** + /* * ensure that we throw out any exceptions above highWatermark to make * {@link #isTxnValid(long)} faster */ diff --git common/src/java/org/apache/hadoop/hive/common/ValidReadTxnList.java storage-api/src/java/org/apache/hadoop/hive/common/ValidReadTxnList.java similarity index 99% rename from common/src/java/org/apache/hadoop/hive/common/ValidReadTxnList.java rename to storage-api/src/java/org/apache/hadoop/hive/common/ValidReadTxnList.java index 002afd6ab5..ccdd4b727a 100644 --- common/src/java/org/apache/hadoop/hive/common/ValidReadTxnList.java +++ storage-api/src/java/org/apache/hadoop/hive/common/ValidReadTxnList.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,8 +18,6 @@ package org.apache.hadoop.hive.common; -import com.google.common.annotations.VisibleForTesting; - import java.util.Arrays; import java.util.BitSet; diff --git common/src/java/org/apache/hadoop/hive/common/ValidTxnList.java storage-api/src/java/org/apache/hadoop/hive/common/ValidTxnList.java similarity index 100% rename from common/src/java/org/apache/hadoop/hive/common/ValidTxnList.java rename to storage-api/src/java/org/apache/hadoop/hive/common/ValidTxnList.java diff --git metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestValidCompactorTxnList.java storage-api/src/test/org/apache/hadoop/hive/common/TestValidCompactorTxnList.java similarity index 99% rename from metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestValidCompactorTxnList.java rename to storage-api/src/test/org/apache/hadoop/hive/common/TestValidCompactorTxnList.java index eb88e32d66..68c00b958e 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestValidCompactorTxnList.java +++ storage-api/src/test/org/apache/hadoop/hive/common/TestValidCompactorTxnList.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hive.metastore.txn; +package org.apache.hadoop.hive.common; import org.apache.hadoop.hive.common.ValidCompactorTxnList; import org.apache.hadoop.hive.common.ValidTxnList; diff --git common/src/test/org/apache/hadoop/hive/common/TestValidReadTxnList.java storage-api/src/test/org/apache/hadoop/hive/common/TestValidReadTxnList.java similarity index 100% rename from common/src/test/org/apache/hadoop/hive/common/TestValidReadTxnList.java rename to storage-api/src/test/org/apache/hadoop/hive/common/TestValidReadTxnList.java
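Closing the section, a round-trip sketch for the StringableMap utility added earlier in this patch. The packed format follows directly from its toString() (entry count, then length-prefixed key and value); the property name used here is illustrative.

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.hive.metastore.utils.StringableMap;

public class StringableMapSketch {
  public static void main(String[] args) {
    Map<String, String> props = new HashMap<>();
    props.put("orc.compress", "ZLIB");
    String packed = new StringableMap(props).toString();
    // One entry, a 12-char key and a 4-char value:
    System.out.println(packed);                        // 1:12:orc.compress4:ZLIB
    StringableMap unpacked = new StringableMap(packed);
    System.out.println(unpacked.get("orc.compress"));  // ZLIB
  }
}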