diff --git common/src/java/org/apache/hadoop/hive/common/JavaUtils.java common/src/java/org/apache/hadoop/hive/common/JavaUtils.java index f4ebd3bd7a..8b1bbaa2d5 100644 --- common/src/java/org/apache/hadoop/hive/common/JavaUtils.java +++ common/src/java/org/apache/hadoop/hive/common/JavaUtils.java @@ -39,6 +39,7 @@ */ public final class JavaUtils { + public static final String BASE_PREFIX = "base"; public static final String DELTA_PREFIX = "delta"; public static final String DELTA_DIGITS = "%07d"; public static final int DELTA_DIGITS_LEN = 7; @@ -167,8 +168,8 @@ private JavaUtils() { public static Long extractTxnId(Path file) { String fileName = file.getName(); - String[] parts = fileName.split("_", 4); // e.g. delta_0000001_0000001_0000 - if (parts.length < 4 || !DELTA_PREFIX.equals(parts[0])) { + String[] parts = fileName.split("_", 4); // e.g. delta_0000001_0000001_0000 or base_0000022 + if (parts.length < 2 || !(DELTA_PREFIX.equals(parts[0]) || BASE_PREFIX.equals(parts[0]))) { LOG.debug("Cannot extract transaction ID for a MM table: " + file + " (" + Arrays.toString(parts) + ")"); return null; @@ -185,20 +186,31 @@ public static Long extractTxnId(Path file) { } public static class IdPathFilter implements PathFilter { - private final String mmDirName; + private String mmDirName; private final boolean isMatch, isIgnoreTemp, isPrefix; + public IdPathFilter(long writeId, int stmtId, boolean isMatch) { - this(writeId, stmtId, isMatch, false); + this(writeId, stmtId, isMatch, false, false); } public IdPathFilter(long writeId, int stmtId, boolean isMatch, boolean isIgnoreTemp) { - String mmDirName = DELTA_PREFIX + "_" + String.format(DELTA_DIGITS, writeId) + "_" + - String.format(DELTA_DIGITS, writeId) + "_"; - if (stmtId >= 0) { - mmDirName += String.format(STATEMENT_DIGITS, stmtId); - isPrefix = false; + this(writeId, stmtId, isMatch, isIgnoreTemp, false); + } + public IdPathFilter(long writeId, int stmtId, boolean isMatch, boolean isIgnoreTemp, boolean isBaseDir) { + String mmDirName = null; + if (!isBaseDir) { + mmDirName = DELTA_PREFIX + "_" + String.format(DELTA_DIGITS, writeId) + "_" + + String.format(DELTA_DIGITS, writeId) + "_"; + if (stmtId >= 0) { + mmDirName += String.format(STATEMENT_DIGITS, stmtId); + isPrefix = false; + } else { + isPrefix = true; + } } else { - isPrefix = true; + mmDirName = BASE_PREFIX + "_" + String.format(DELTA_DIGITS, writeId); + isPrefix = false; } + this.mmDirName = mmDirName; this.isMatch = isMatch; this.isIgnoreTemp = isIgnoreTemp; diff --git common/src/java/org/apache/hadoop/hive/conf/HiveConf.java common/src/java/org/apache/hadoop/hive/conf/HiveConf.java index 0cc8de0e66..ab5657f42d 100644 --- common/src/java/org/apache/hadoop/hive/conf/HiveConf.java +++ common/src/java/org/apache/hadoop/hive/conf/HiveConf.java @@ -1828,7 +1828,7 @@ private static void populateLlapDaemonVarsSet(Set llapDaemonVarsSetLocal "filter operators."), // Concurrency - HIVE_SUPPORT_CONCURRENCY("hive.support.concurrency", false, + HIVE_SUPPORT_CONCURRENCY("hive.support.concurrency", true, "Whether Hive supports concurrency control or not. 
\n" + "A ZooKeeper instance must be up and running when using zookeeper Hive lock manager "), HIVE_LOCK_MANAGER("hive.lock.manager", "org.apache.hadoop.hive.ql.lockmgr.zookeeper.ZooKeeperHiveLockManager", ""), @@ -1877,7 +1877,7 @@ private static void populateLlapDaemonVarsSet(Set llapDaemonVarsSetLocal // Transactions HIVE_TXN_MANAGER("hive.txn.manager", - "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager", + "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager", "Set to org.apache.hadoop.hive.ql.lockmgr.DbTxnManager as part of turning on Hive\n" + "transactions, which also requires appropriate settings for hive.compactor.initiator.on,\n" + "hive.compactor.worker.threads, hive.support.concurrency (true),\n" + @@ -2961,7 +2961,7 @@ private static void populateLlapDaemonVarsSet(Set llapDaemonVarsSetLocal + "When it is set to false, only [a-zA-Z_0-9]+ are supported.\n" + "The only supported special character right now is '/'. This flag applies only to quoted table names.\n" + "The default value is true."), - HIVE_CREATE_TABLES_AS_INSERT_ONLY("hive.create.as.insert.only", false, + HIVE_CREATE_TABLES_AS_INSERT_ONLY("hive.create.as.insert.only", true, "Whether the eligible tables should be created as ACID insert-only by default. Does \n" + "not apply to external tables, the ones using storage handlers, etc."), // role names are case-insensitive diff --git hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java index 80c5d63e64..762594dbfd 100644 --- hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java +++ hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java @@ -98,7 +98,6 @@ protected void setUp() throws Exception { hcatConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); hcatConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); hcatConf.setTimeVar(HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT, 60, TimeUnit.SECONDS); - hcatConf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false); clientWH = new Warehouse(hcatConf); msc = new HiveMetaStoreClient(hcatConf); System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " "); diff --git hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestUseDatabase.java hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestUseDatabase.java index 1606982574..0f5b45dcfd 100644 --- hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestUseDatabase.java +++ hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestUseDatabase.java @@ -43,7 +43,6 @@ protected void setUp() throws Exception { HiveConf hcatConf = new HiveConf(this.getClass()); hcatConf.set(ConfVars.PREEXECHOOKS.varname, ""); hcatConf.set(ConfVars.POSTEXECHOOKS.varname, ""); - hcatConf.set(ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); hcatConf.set(ConfVars.SEMANTIC_ANALYZER_HOOK.varname, HCatSemanticAnalyzer.class.getName()); hcatDriver = new Driver(hcatConf); diff --git hcatalog/core/src/test/java/org/apache/hive/hcatalog/common/TestHiveClientCache.java hcatalog/core/src/test/java/org/apache/hive/hcatalog/common/TestHiveClientCache.java index c77bc48e6c..1e9e6f0d60 100644 --- hcatalog/core/src/test/java/org/apache/hive/hcatalog/common/TestHiveClientCache.java +++ hcatalog/core/src/test/java/org/apache/hive/hcatalog/common/TestHiveClientCache.java @@ -235,8 +235,6 @@ public LocalMetaServer() { HCatSemanticAnalyzer.class.getName()); hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); 
hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, - "false"); System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " "); System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " "); } diff --git hcatalog/core/src/test/java/org/apache/hive/hcatalog/data/HCatDataCheckUtil.java hcatalog/core/src/test/java/org/apache/hive/hcatalog/data/HCatDataCheckUtil.java index ff56234cc1..9ffa88bc8b 100644 --- hcatalog/core/src/test/java/org/apache/hive/hcatalog/data/HCatDataCheckUtil.java +++ hcatalog/core/src/test/java/org/apache/hive/hcatalog/data/HCatDataCheckUtil.java @@ -47,7 +47,7 @@ public static Driver instantiateDriver(MiniCluster cluster) { } hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); + // hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); LOG.debug("Hive conf : {}", hiveConf.getAllProperties()); Driver driver = new Driver(hiveConf); diff --git hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/HCatBaseTest.java hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/HCatBaseTest.java index f78eb15622..f64dffaa0f 100644 --- hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/HCatBaseTest.java +++ hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/HCatBaseTest.java @@ -79,7 +79,7 @@ protected void setUpHiveConf() { hiveConf = new HiveConf(this.getClass()); hiveConf.setVar(HiveConf.ConfVars.PREEXECHOOKS, ""); hiveConf.setVar(HiveConf.ConfVars.POSTEXECHOOKS, ""); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false); + // hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false); hiveConf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE, TEST_WAREHOUSE_DIR); hiveConf.setVar(HiveConf.ConfVars.HIVEMAPREDMODE, "nonstrict"); hiveConf.setBoolVar(HiveConf.ConfVars.HIVEOPTIMIZEMETADATAQUERIES, true); diff --git hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMultiOutputFormat.java hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMultiOutputFormat.java index ba42ffd627..2b54342162 100644 --- hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMultiOutputFormat.java +++ hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMultiOutputFormat.java @@ -189,7 +189,6 @@ private static void initializeSetup() throws Exception { HCatSemanticAnalyzer.class.getName()); hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " "); System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " "); diff --git hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java index a000642bf3..ea74289beb 100644 --- hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java +++ hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java @@ -123,8 +123,6 @@ public static void setup() throws Exception { HCatSemanticAnalyzer.class.getName()); hcatConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); 
hcatConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); - hcatConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, - "false"); msc = new HiveMetaStoreClient(hcatConf); System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " "); System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " "); diff --git hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestPassProperties.java hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestPassProperties.java index 31857bf643..b2a3448b89 100644 --- hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestPassProperties.java +++ hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestPassProperties.java @@ -62,7 +62,6 @@ public void Initialize() throws Exception { hiveConf = new HiveConf(this.getClass()); hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, TEST_WAREHOUSE_DIR); hiveConf .setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, diff --git hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestE2EScenarios.java hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestE2EScenarios.java index 4a6c6a3a6f..daa09e66f0 100644 --- hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestE2EScenarios.java +++ hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestE2EScenarios.java @@ -86,7 +86,6 @@ public void setUp() throws Exception { HiveConf hiveConf = new HiveConf(this.getClass()); hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, TEST_WAREHOUSE_DIR); hiveConf .setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, diff --git hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderComplexSchema.java hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderComplexSchema.java index ea9cdda31c..5652929b85 100644 --- hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderComplexSchema.java +++ hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderComplexSchema.java @@ -120,7 +120,6 @@ public static void setUpBeforeClass() throws Exception { HiveConf hiveConf = new HiveConf(TestHCatLoaderComplexSchema.class); hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); hiveConf .setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); diff --git hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderEncryption.java hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderEncryption.java index 496f3c85da..ca2ec60759 100644 --- hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderEncryption.java +++ hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderEncryption.java @@ -163,7 +163,6 @@ public void setup() throws Exception { HiveConf hiveConf 
= new HiveConf(this.getClass()); hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, TEST_WAREHOUSE_DIR); hiveConf .setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, diff --git hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatStorerMulti.java hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatStorerMulti.java index 40ea923858..734547337e 100644 --- hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatStorerMulti.java +++ hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatStorerMulti.java @@ -107,7 +107,6 @@ public void setUp() throws Exception { HiveConf hiveConf = new HiveConf(this.getClass()); hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, TEST_WAREHOUSE_DIR); hiveConf.setVar(HiveConf.ConfVars.HIVEMAPREDMODE, "nonstrict"); hiveConf diff --git hcatalog/server-extensions/src/test/java/org/apache/hive/hcatalog/listener/TestMsgBusConnection.java hcatalog/server-extensions/src/test/java/org/apache/hive/hcatalog/listener/TestMsgBusConnection.java index 9f7c207214..a8b70ecbcc 100644 --- hcatalog/server-extensions/src/test/java/org/apache/hive/hcatalog/listener/TestMsgBusConnection.java +++ hcatalog/server-extensions/src/test/java/org/apache/hive/hcatalog/listener/TestMsgBusConnection.java @@ -69,7 +69,6 @@ protected void setUp() throws Exception { hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); hiveConf.set(HiveConf.ConfVars.METASTOREURIS.varname, ""); - hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); hiveConf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); hiveConf.set(HCatConstants.HCAT_MSGBUS_TOPIC_PREFIX, "planetlab.hcat"); diff --git hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java index 49aad392d8..4dc9f1cfbc 100644 --- hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java +++ hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java @@ -206,7 +206,6 @@ public TestStreaming() throws Exception { conf.setVar(HiveConf.ConfVars.METASTOREURIS, metaStoreURI); } conf.setBoolVar(HiveConf.ConfVars.METASTORE_EXECUTE_SET_UGI, true); - conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, true); dbFolder.create(); diff --git hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java index 78e767e7fc..19f64c8529 100644 --- hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java +++ hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java @@ -128,8 +128,6 @@ public static void startMetaStoreServer() throws Exception { HCatSemanticAnalyzer.class.getName()); hcatConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); 
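The five-argument IdPathFilter constructor added earlier in this patch lets a caller select a compacted base directory (isBaseDir=true) instead of deltas. A hedged usage sketch follows; the table path and Configuration are hypothetical, and the accept() semantics for isMatch are assumed to follow the existing filter behavior.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.common.JavaUtils;

    public class BaseDirListingSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        Path table = new Path("/warehouse/t1"); // hypothetical table location
        FileSystem fs = table.getFileSystem(conf);
        // isBaseDir=true matches base_0000022 exactly; stmtId is irrelevant
        // for base directories, so -1 is passed.
        FileStatus[] base = fs.listStatus(table,
            new JavaUtils.IdPathFilter(22L, -1, true /*isMatch*/,
                false /*isIgnoreTemp*/, true /*isBaseDir*/));
        for (FileStatus s : base) {
          System.out.println(s.getPath());
        }
      }
    }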
hcatConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); - hcatConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, - "false"); System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " "); System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " "); } diff --git itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/hbase/ManyMiniCluster.java itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/hbase/ManyMiniCluster.java index ad44bc2d62..90607d332f 100644 --- itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/hbase/ManyMiniCluster.java +++ itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/hbase/ManyMiniCluster.java @@ -299,7 +299,7 @@ private void setUpMetastore() throws Exception { //is present only in the ql/test directory hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); + // hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); hiveConf.set(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname, "jdbc:derby:" + new File(workDir + "/metastore_db") + ";create=true"); hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.toString(), diff --git itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/hbase/TestPigHBaseStorageHandler.java itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/hbase/TestPigHBaseStorageHandler.java index 120b4af826..d1da0f3f75 100644 --- itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/hbase/TestPigHBaseStorageHandler.java +++ itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/hbase/TestPigHBaseStorageHandler.java @@ -78,7 +78,6 @@ public void Initialize() throws Exception { URI fsuri = getFileSystem().getUri(); Path whPath = new Path(fsuri.getScheme(), fsuri.getAuthority(), getTestDir()); - hcatConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); hcatConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); hcatConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); hcatConf.set(ConfVars.METASTOREWAREHOUSE.varname, whPath.toString()); diff --git itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/TestDbNotificationListener.java itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/TestDbNotificationListener.java index 308ab0d36a..8f9896c3f9 100644 --- itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/TestDbNotificationListener.java +++ itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/TestDbNotificationListener.java @@ -237,7 +237,6 @@ public static void connectToMetastore() throws Exception { DbNotificationListener.class.getName()); conf.setVar(HiveConf.ConfVars.METASTORE_EVENT_LISTENERS, MockMetaStoreEventListener.class.getName()); conf.setVar(HiveConf.ConfVars.METASTORE_EVENT_DB_LISTENER_TTL, String.valueOf(EVENTS_TTL) + "s"); - conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false); conf.setBoolVar(HiveConf.ConfVars.FIRE_EVENTS_FOR_DML, true); conf.setVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict"); conf.setVar(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL, DummyRawStoreFailEvent.class.getName()); diff --git itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/mapreduce/TestSequenceFileReadWrite.java itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/mapreduce/TestSequenceFileReadWrite.java index d2dbe8f287..9db93944d5 100644 --- 
itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/mapreduce/TestSequenceFileReadWrite.java +++ itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/mapreduce/TestSequenceFileReadWrite.java @@ -74,7 +74,6 @@ public void setup() throws Exception { hiveConf = new HiveConf(this.getClass()); hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, warehouseDir); hiveConf .setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, diff --git itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/JdbcWithMiniKdcSQLAuthTest.java itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/JdbcWithMiniKdcSQLAuthTest.java index 0f0ae547f3..eef43744a2 100644 --- itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/JdbcWithMiniKdcSQLAuthTest.java +++ itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/JdbcWithMiniKdcSQLAuthTest.java @@ -55,7 +55,7 @@ public static void beforeTestBase() throws Exception { hiveConf.setVar(ConfVars.HIVE_AUTHENTICATOR_MANAGER, SessionStateUserAuthenticator.class.getName()); hiveConf.setBoolVar(ConfVars.HIVE_AUTHORIZATION_ENABLED, true); - hiveConf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false); + // hiveConf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false); hiveConf.setBoolVar(ConfVars.HIVE_SERVER2_ENABLE_DOAS, false); miniHiveKdc = MiniHiveKdc.getMiniHiveKdc(hiveConf); diff --git itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestHs2HooksWithMiniKdc.java itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestHs2HooksWithMiniKdc.java index 1cd0ee8842..c968d0c825 100644 --- itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestHs2HooksWithMiniKdc.java +++ itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestHs2HooksWithMiniKdc.java @@ -54,7 +54,6 @@ public static void setUpBeforeClass() throws Exception { confOverlay.put(ConfVars.PREEXECHOOKS.varname, PreExecHook.class.getName()); confOverlay.put(ConfVars.SEMANTIC_ANALYZER_HOOK.varname, SemanticAnalysisHook.class.getName()); - confOverlay.put(ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "" + Boolean.FALSE); HiveConf hiveConf = new HiveConf(); miniHiveKdc = MiniHiveKdc.getMiniHiveKdc(hiveConf); miniHS2 = MiniHiveKdc.getMiniHS2WithKerb(miniHiveKdc, hiveConf); diff --git itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestJdbcWithMiniKdcCookie.java itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestJdbcWithMiniKdcCookie.java index 5e70d68803..ca104d8af9 100644 --- itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestJdbcWithMiniKdcCookie.java +++ itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestJdbcWithMiniKdcCookie.java @@ -55,7 +55,6 @@ public static void beforeTest() throws Exception { // set a small time unit as cookie max age so that the server sends a 401 hiveConf.setTimeVar(ConfVars.HIVE_SERVER2_THRIFT_HTTP_COOKIE_MAX_AGE, 1, TimeUnit.SECONDS); - hiveConf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false); miniHiveKdc = MiniHiveKdc.getMiniHiveKdc(hiveConf); miniHS2 = MiniHiveKdc.getMiniHS2WithKerb(miniHiveKdc, hiveConf); miniHS2.start(new HashMap()); diff --git itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestSSLWithMiniKdc.java itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestSSLWithMiniKdc.java index 3153b9f9c0..cdbf6bd075 100644 --- 
itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestSSLWithMiniKdc.java +++ itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestSSLWithMiniKdc.java @@ -47,7 +47,6 @@ public static void beforeTest() throws Exception { HiveConf hiveConf = new HiveConf(); SSLTestUtils.setMetastoreSslConf(hiveConf); - hiveConf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false); miniHiveKdc = MiniHiveKdc.getMiniHiveKdc(hiveConf); diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/hooks/TestHs2Hooks.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/hooks/TestHs2Hooks.java index 7f2517b1b8..3fe14c206b 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/hooks/TestHs2Hooks.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/hooks/TestHs2Hooks.java @@ -146,7 +146,6 @@ public static void setUpBeforeClass() throws Exception { PostExecHook.class.getName()); hiveConf.setVar(ConfVars.SEMANTIC_ANALYZER_HOOK, SemanticAnalysisHook.class.getName()); - hiveConf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false); hiveServer2 = new HiveServer2(); hiveServer2.init(hiveConf); diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/AbstractTestAuthorizationApiAuthorizer.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/AbstractTestAuthorizationApiAuthorizer.java index ebdfab2b5f..41675b7977 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/AbstractTestAuthorizationApiAuthorizer.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/AbstractTestAuthorizationApiAuthorizer.java @@ -64,7 +64,7 @@ protected static void setup() throws Exception { hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); + // hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); msc = new HiveMetaStoreClient(hiveConf); diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java index 6a2bbd2841..8bb994cfb4 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java @@ -171,7 +171,6 @@ public static void setUp() throws Exception { hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); hiveConf.setVar(ConfVars.METASTORE_FILTER_HOOK, DummyMetaStoreFilterHookImpl.class.getName()); UtilsForTest.setNewDerbyDbLocation(hiveConf, TestFilterHooks.class.getSimpleName()); int port = MetaStoreTestUtils.findFreePort(); diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreAlterColumnPar.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreAlterColumnPar.java index 569e932209..44e07eca14 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreAlterColumnPar.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreAlterColumnPar.java @@ -53,7 +53,6 @@ public static void startServices() throws Exception { 
HiveConf hiveConf = new HiveConf(); hiveConf.setIntVar(ConfVars.HIVE_SERVER2_THRIFT_MIN_WORKER_THREADS, 2); hiveConf.setIntVar(ConfVars.HIVE_SERVER2_THRIFT_MAX_WORKER_THREADS, 2); - hiveConf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false); miniHS2 = new MiniHS2.Builder().withMiniMR().withRemoteMetastore().withConf(hiveConf).build(); diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java index 3fa4b1eb28..06d19152c2 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java @@ -81,7 +81,6 @@ protected void setUp() throws Exception { hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); SessionState.start(new CliSessionState(hiveConf)); msc = new HiveMetaStoreClient(hiveConf); diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartition.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartition.java index 7cf351fb35..464a0eff93 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartition.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartition.java @@ -52,7 +52,6 @@ protected void setUp() throws Exception { hiveConf = new HiveConf(this.getClass()); hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); SessionState.start(new CliSessionState(hiveConf)); } diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java index 27bc791c1f..0df3215acf 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java @@ -55,7 +55,6 @@ protected void setUp() throws Exception { hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); SessionState.start(new CliSessionState(hiveConf)); msc = new HiveMetaStoreClient(hiveConf); driver = new Driver(hiveConf); diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java index 4df2f8661c..d45c51000f 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java @@ -104,7 +104,6 @@ protected void setUp() throws Exception { hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); 
hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); SessionState.start(new CliSessionState(hiveConf)); msc = new HiveMetaStoreClient(hiveConf); driver = new Driver(hiveConf); diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java index d9da42229d..30f8100449 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java @@ -58,7 +58,6 @@ protected void setUp() throws Exception { hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); SessionState.start(new CliSessionState(hiveConf)); msc = new HiveMetaStoreClient(hiveConf); driver = new Driver(hiveConf); diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreInitListener.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreInitListener.java index 9578ef9219..915ed4d124 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreInitListener.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreInitListener.java @@ -48,7 +48,6 @@ protected void setUp() throws Exception { hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); SessionState.start(new CliSessionState(hiveConf)); msc = new HiveMetaStoreClient(hiveConf); driver = new Driver(hiveConf); diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMetrics.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMetrics.java index a899739f47..4b7b94be2f 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMetrics.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMetrics.java @@ -46,7 +46,6 @@ public static void before() throws Exception { hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port); hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); hiveConf.setBoolVar(HiveConf.ConfVars.METASTORE_METRICS, true); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false); hiveConf .setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java index 4ce27311d1..26653dd354 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java @@ -84,7 +84,6 @@ protected void setUp() throws Exception { 
MetaStoreTestUtils.startMetaStore(port, HadoopThriftAuthBridge.getBridge()); conf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port); conf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); - conf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); conf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE, new URI(tmppath + "/warehouse").getPath()); // Initialize second mocked filesystem (implement only necessary stuff) diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestMetaStoreLimitPartitionRequest.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestMetaStoreLimitPartitionRequest.java index e78318035a..276f5e36b5 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestMetaStoreLimitPartitionRequest.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestMetaStoreLimitPartitionRequest.java @@ -68,7 +68,6 @@ public static void beforeTest() throws Exception { conf = new HiveConf(); DriverManager.setLoginTimeout(0); - conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false); conf.setIntVar(HiveConf.ConfVars.METASTORE_LIMIT_PARTITION_REQUEST, PARTITION_REQUEST_LIMIT); conf.setBoolVar(HiveConf.ConfVars.METASTORE_INTEGER_JDO_PUSHDOWN, true); conf.setBoolVar(HiveConf.ConfVars.METASTORE_TRY_DIRECT_SQL, true); diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java index 2f0efceaa9..b007c33205 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java @@ -80,7 +80,6 @@ protected void setUp() { } } - conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false); // copy the test files into hadoop if required. 
int i = 0; diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/metadata/TestSemanticAnalyzerHookLoading.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/metadata/TestSemanticAnalyzerHookLoading.java index 2170ca3706..a22beb5342 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/metadata/TestSemanticAnalyzerHookLoading.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/metadata/TestSemanticAnalyzerHookLoading.java @@ -35,7 +35,6 @@ public void testHookLoading() throws Exception{ HiveConf conf = new HiveConf(this.getClass()); conf.set(ConfVars.SEMANTIC_ANALYZER_HOOK.varname, DummySemanticAnalyzerHook.class.getName()); - conf.set(ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); SessionState.start(conf); Driver driver = new Driver(conf); diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java index 585411664c..2728eb9a59 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java @@ -146,7 +146,6 @@ public static void setUpBeforeClass() throws Exception { hconf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); hconf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); hconf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); - hconf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); hconf.set(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL.varname, "org.apache.hadoop.hive.metastore.InjectableBehaviourObjectStore"); hconf.setBoolVar(HiveConf.ConfVars.HIVEOPTIMIZEMETADATAQUERIES, true); diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java index cde7a3e33c..a4be42b88f 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java @@ -121,7 +121,7 @@ private void initialize(String cmRoot, String warehouseRoot, hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); + // hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " "); System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " "); diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/StorageBasedMetastoreTestBase.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/StorageBasedMetastoreTestBase.java index fa5ae809a8..2fd9f1ed96 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/StorageBasedMetastoreTestBase.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/StorageBasedMetastoreTestBase.java @@ -81,7 +81,7 @@ public void setUp() throws Exception { clientHiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port); clientHiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); - clientHiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); + // 
clientHiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); clientHiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); clientHiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestAuthorizationPreEventListener.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestAuthorizationPreEventListener.java index 102af6a08a..63934fbe45 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestAuthorizationPreEventListener.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestAuthorizationPreEventListener.java @@ -68,7 +68,6 @@ protected void setUp() throws Exception { clientHiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port); clientHiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); - clientHiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); clientHiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); clientHiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestClientSideAuthorizationProvider.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestClientSideAuthorizationProvider.java index 60273e7aad..dd2ad9d601 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestClientSideAuthorizationProvider.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestClientSideAuthorizationProvider.java @@ -78,7 +78,6 @@ protected void setUp() throws Exception { clientHiveConf.setVar(HiveConf.ConfVars.HIVEMAPREDMODE, "nonstrict"); clientHiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port); clientHiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); - clientHiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); clientHiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); clientHiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreAuthorizationProvider.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreAuthorizationProvider.java index f0196c605c..24a3525ba6 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreAuthorizationProvider.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreAuthorizationProvider.java @@ -108,7 +108,6 @@ protected void setUp() throws Exception { clientHiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port); clientHiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); - clientHiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); clientHiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); clientHiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMultiAuthorizationPreEventListener.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMultiAuthorizationPreEventListener.java index 62835b090c..90896e0fc5 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMultiAuthorizationPreEventListener.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMultiAuthorizationPreEventListener.java @@ -67,7 +67,6 @@ 
public static void setUp() throws Exception { clientHiveConf = new HiveConf(); clientHiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port); - clientHiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); SessionState.start(new CliSessionState(clientHiveConf)); msc = new HiveMetaStoreClient(clientHiveConf); diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerCheckInvocation.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerCheckInvocation.java index 19694b093e..30337510e6 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerCheckInvocation.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerCheckInvocation.java @@ -97,7 +97,6 @@ public static void beforeTest() throws Exception { conf.setVar(ConfVars.HIVE_AUTHENTICATOR_MANAGER, SessionStateUserAuthenticator.class.getName()); conf.setBoolVar(ConfVars.HIVE_AUTHORIZATION_ENABLED, true); conf.setBoolVar(ConfVars.HIVE_SERVER2_ENABLE_DOAS, false); - conf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, true); conf.setVar(ConfVars.HIVE_TXN_MANAGER, DbTxnManager.class.getName()); conf.setVar(HiveConf.ConfVars.HIVEMAPREDMODE, "nonstrict"); diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerShowFilters.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerShowFilters.java index 5922a8c603..758df3ab92 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerShowFilters.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerShowFilters.java @@ -119,7 +119,6 @@ public static void beforeTest() throws Exception { conf.setVar(ConfVars.HIVE_AUTHENTICATOR_MANAGER, SessionStateUserAuthenticator.class.getName()); conf.setBoolVar(ConfVars.HIVE_AUTHORIZATION_ENABLED, true); conf.setBoolVar(ConfVars.HIVE_SERVER2_ENABLE_DOAS, false); - conf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false); UtilsForTest.setNewDerbyDbLocation(conf, TestHiveAuthorizerShowFilters.class.getSimpleName()); SessionState.start(conf); diff --git itests/hive-unit/src/test/java/org/apache/hive/beeline/hs2connection/BeelineWithHS2ConnectionFileTestBase.java itests/hive-unit/src/test/java/org/apache/hive/beeline/hs2connection/BeelineWithHS2ConnectionFileTestBase.java index 2fb64536a6..3738aae3a7 100644 --- itests/hive-unit/src/test/java/org/apache/hive/beeline/hs2connection/BeelineWithHS2ConnectionFileTestBase.java +++ itests/hive-unit/src/test/java/org/apache/hive/beeline/hs2connection/BeelineWithHS2ConnectionFileTestBase.java @@ -161,7 +161,7 @@ public void before() throws Exception { hiveConf = new HiveConf(); miniHS2 = getNewMiniHS2(); confOverlay = new HashMap(); - confOverlay.put(ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); + // confOverlay.put(ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); confOverlay.put(ConfVars.HIVE_SERVER2_TRANSPORT_MODE.varname, "binary"); } diff --git itests/hive-unit/src/test/java/org/apache/hive/jdbc/AbstractJdbcTriggersTest.java itests/hive-unit/src/test/java/org/apache/hive/jdbc/AbstractJdbcTriggersTest.java index 235e6c36ed..a94f883c84 100644 --- 
itests/hive-unit/src/test/java/org/apache/hive/jdbc/AbstractJdbcTriggersTest.java +++ itests/hive-unit/src/test/java/org/apache/hive/jdbc/AbstractJdbcTriggersTest.java @@ -64,7 +64,7 @@ public static void beforeTest() throws Exception { System.out.println("Setting hive-site: " + HiveConf.getHiveSiteLocation()); conf = new HiveConf(); - conf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false); + // conf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false); conf.setBoolVar(ConfVars.HIVE_SERVER2_ENABLE_DOAS, false); conf.setVar(ConfVars.HIVE_SERVER2_TEZ_DEFAULT_QUEUES, "default"); conf.setTimeVar(ConfVars.HIVE_TRIGGER_VALIDATION_INTERVAL_MS, 100, TimeUnit.MILLISECONDS); @@ -169,4 +169,4 @@ WMTrigger wmTriggerFromTrigger(Trigger trigger) { result.setActionExpression(trigger.getAction().toString()); return result; } -} \ No newline at end of file +} diff --git itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithLocalClusterSpark.java itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithLocalClusterSpark.java index dd24f0261f..c97d0df636 100644 --- itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithLocalClusterSpark.java +++ itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithLocalClusterSpark.java @@ -82,12 +82,10 @@ private static HiveConf createHiveConf() { public static void beforeTest() throws Exception { Class.forName(MiniHS2.getJdbcDriverName()); conf = createHiveConf(); - conf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false); String dataFileDir = conf.get("test.data.files").replace('\\', '/') .replace("c:", ""); dataFilePath = new Path(dataFileDir, "kv1.txt"); DriverManager.setLoginTimeout(0); - conf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false); miniHS2 = new MiniHS2(conf, MiniClusterType.MR); Map overlayProps = new HashMap(); overlayProps.put(ConfVars.HIVE_SERVER2_SESSION_HOOK.varname, diff --git itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHA.java itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHA.java index 84644d1d89..5518c32ba7 100644 --- itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHA.java +++ itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHA.java @@ -68,12 +68,10 @@ public void run(HiveSessionHookContext sessionHookContext) throws HiveSQLExcepti public static void beforeTest() throws Exception { Class.forName(MiniHS2.getJdbcDriverName()); conf = new HiveConf(); - conf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false); String dataFileDir = conf.get("test.data.files").replace('\\', '/') .replace("c:", ""); dataFilePath = new Path(dataFileDir, "kv1.txt"); DriverManager.setLoginTimeout(0); - conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false); miniHS2 = new MiniHS2.Builder().withConf(conf).withMiniMR().withHA().build(); Map overlayProps = new HashMap(); overlayProps.put(ConfVars.HIVE_SERVER2_SESSION_HOOK.varname, diff --git itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java index f5ed735c1e..448f71278f 100644 --- itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java +++ itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java @@ -201,7 +201,6 @@ private static void startMiniHS2(HiveConf conf) throws Exception { } private static void startMiniHS2(HiveConf conf, boolean httpMode) throws Exception { - conf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false); 
conf.setBoolVar(ConfVars.HIVE_SERVER2_LOGGING_OPERATION_ENABLED, false); conf.setBoolVar(ConfVars.HIVESTATSCOLAUTOGATHER, false); MiniHS2.Builder builder = new MiniHS2.Builder().withConf(conf).cleanupLocalDirOnStartup(false); diff --git itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniLlap.java itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniLlap.java index 71aee8f959..0e7ba4b7bb 100644 --- itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniLlap.java +++ itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniLlap.java @@ -116,7 +116,6 @@ public static void beforeTest() throws Exception { } conf = new HiveConf(); - conf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false); conf.setBoolVar(ConfVars.HIVE_SERVER2_ENABLE_DOAS, false); conf.addResource(new URL("file://" + new File(confDir).toURI().getPath() diff --git itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestMultiSessionsHS2WithLocalClusterSpark.java itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestMultiSessionsHS2WithLocalClusterSpark.java index 2156f4b4de..bbb82c29ed 100644 --- itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestMultiSessionsHS2WithLocalClusterSpark.java +++ itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestMultiSessionsHS2WithLocalClusterSpark.java @@ -91,12 +91,10 @@ private static HiveConf createHiveConf() { public static void beforeTest() throws Exception { Class.forName(MiniHS2.getJdbcDriverName()); conf = createHiveConf(); - conf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false); String dataFileDir = conf.get("test.data.files").replace('\\', '/') .replace("c:", ""); dataFilePath = new Path(dataFileDir, "kv1.txt"); DriverManager.setLoginTimeout(0); - conf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false); miniHS2 = new MiniHS2(conf, MiniClusterType.MR); Map overlayProps = new HashMap(); overlayProps.put(ConfVars.HIVE_SERVER2_SESSION_HOOK.varname, diff --git itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestNoSaslAuth.java itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestNoSaslAuth.java index 84f1168b28..ef578d8844 100644 --- itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestNoSaslAuth.java +++ itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestNoSaslAuth.java @@ -59,7 +59,6 @@ public void run(HiveSessionHookContext sessionHookContext) public static void beforeTest() throws Exception { Class.forName(MiniHS2.getJdbcDriverName()); HiveConf conf = new HiveConf(); - conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false); conf.setBoolVar(ConfVars.HIVE_SERVER2_ENABLE_DOAS, false); conf.setVar(ConfVars.HIVE_SERVER2_SESSION_HOOK, NoSaslSessionHook.class.getName()); diff --git itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestServiceDiscoveryWithMiniHS2.java itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestServiceDiscoveryWithMiniHS2.java index dc59f4b7fb..842ab8abc3 100644 --- itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestServiceDiscoveryWithMiniHS2.java +++ itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestServiceDiscoveryWithMiniHS2.java @@ -56,7 +56,6 @@ public static void beforeTest() throws Exception { zkServer = new TestingServer(); Class.forName(MiniHS2.getJdbcDriverName()); hiveConf = new HiveConf(); - hiveConf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false); // Set up zookeeper dynamic service discovery configs enableZKServiceDiscoveryConfigs(hiveConf); dataFileDir = hiveConf.get("test.data.files").replace('\\', '/').replace("c:", ""); diff --git 
itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestTriggersMoveWorkloadManager.java itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestTriggersMoveWorkloadManager.java index a983855667..2c58ac51fc 100644 --- itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestTriggersMoveWorkloadManager.java +++ itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestTriggersMoveWorkloadManager.java @@ -64,7 +64,6 @@ public static void beforeTest() throws Exception { System.out.println("Setting hive-site: " + HiveConf.getHiveSiteLocation()); conf = new HiveConf(); - conf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false); conf.setBoolVar(ConfVars.HIVE_SERVER2_ENABLE_DOAS, false); conf.setTimeVar(ConfVars.HIVE_TRIGGER_VALIDATION_INTERVAL_MS, 100, TimeUnit.MILLISECONDS); conf.setVar(ConfVars.HIVE_SERVER2_TEZ_INTERACTIVE_QUEUE, "default"); diff --git itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestTriggersWorkloadManager.java itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestTriggersWorkloadManager.java index 0506f67621..aa1b8d2a27 100644 --- itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestTriggersWorkloadManager.java +++ itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestTriggersWorkloadManager.java @@ -49,7 +49,6 @@ public static void beforeTest() throws Exception { System.out.println("Setting hive-site: " + HiveConf.getHiveSiteLocation()); conf = new HiveConf(); - conf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false); conf.setBoolVar(ConfVars.HIVE_SERVER2_ENABLE_DOAS, false); conf.setTimeVar(ConfVars.HIVE_TRIGGER_VALIDATION_INTERVAL_MS, 100, TimeUnit.MILLISECONDS); conf.setVar(ConfVars.HIVE_SERVER2_TEZ_INTERACTIVE_QUEUE, "default"); diff --git itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestXSRFFilter.java itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestXSRFFilter.java index 88a403a0d5..ce6ea2c18c 100644 --- itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestXSRFFilter.java +++ itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestXSRFFilter.java @@ -66,7 +66,6 @@ public static void afterClass() throws IOException { private void initHS2(boolean enableXSRFFilter) throws Exception { Class.forName(MiniHS2.getJdbcDriverName()); HiveConf conf = new HiveConf(); - conf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false); miniHS2 = new MiniHS2.Builder().withConf(conf).cleanupLocalDirOnStartup(false).build(); dataFileDir = conf.get("test.data.files").replace('\\', '/').replace("c:", ""); kvDataFilePath = new Path(dataFileDir, "kv1.txt"); diff --git itests/hive-unit/src/test/java/org/apache/hive/jdbc/authorization/TestCLIAuthzSessionContext.java itests/hive-unit/src/test/java/org/apache/hive/jdbc/authorization/TestCLIAuthzSessionContext.java index 9a44dfab1d..ab4dc70242 100644 --- itests/hive-unit/src/test/java/org/apache/hive/jdbc/authorization/TestCLIAuthzSessionContext.java +++ itests/hive-unit/src/test/java/org/apache/hive/jdbc/authorization/TestCLIAuthzSessionContext.java @@ -63,7 +63,6 @@ public static void beforeTest() throws Exception { conf.setVar(ConfVars.HIVE_AUTHORIZATION_MANAGER, MockedHiveAuthorizerFactory.class.getName()); conf.setVar(ConfVars.HIVE_AUTHENTICATOR_MANAGER, SessionStateUserAuthenticator.class.getName()); conf.setBoolVar(ConfVars.HIVE_AUTHORIZATION_ENABLED, true); - conf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false); // once SessionState for thread is set, CliDriver picks conf from it CliSessionState ss = new CliSessionState(conf); diff --git 
itests/hive-unit/src/test/java/org/apache/hive/jdbc/authorization/TestHS2AuthzContext.java itests/hive-unit/src/test/java/org/apache/hive/jdbc/authorization/TestHS2AuthzContext.java index 273ec36d81..fc3e63a40b 100644 --- itests/hive-unit/src/test/java/org/apache/hive/jdbc/authorization/TestHS2AuthzContext.java +++ itests/hive-unit/src/test/java/org/apache/hive/jdbc/authorization/TestHS2AuthzContext.java @@ -78,7 +78,6 @@ public static void beforeTest() throws Exception { conf.setVar(ConfVars.HIVE_AUTHORIZATION_MANAGER, MockedHiveAuthorizerFactory.class.getName()); conf.setVar(ConfVars.HIVE_AUTHENTICATOR_MANAGER, SessionStateUserAuthenticator.class.getName()); conf.setBoolVar(ConfVars.HIVE_AUTHORIZATION_ENABLED, true); - conf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false); conf.setBoolVar(ConfVars.HIVE_SERVER2_ENABLE_DOAS, false); miniHS2 = new MiniHS2(conf); diff --git itests/hive-unit/src/test/java/org/apache/hive/jdbc/authorization/TestHS2AuthzSessionContext.java itests/hive-unit/src/test/java/org/apache/hive/jdbc/authorization/TestHS2AuthzSessionContext.java index eb29e228e1..622fb23c1a 100644 --- itests/hive-unit/src/test/java/org/apache/hive/jdbc/authorization/TestHS2AuthzSessionContext.java +++ itests/hive-unit/src/test/java/org/apache/hive/jdbc/authorization/TestHS2AuthzSessionContext.java @@ -64,7 +64,6 @@ public static void beforeTest() throws Exception { conf.setVar(ConfVars.HIVE_AUTHORIZATION_MANAGER, MockedHiveAuthorizerFactory.class.getName()); conf.setVar(ConfVars.HIVE_AUTHENTICATOR_MANAGER, SessionStateUserAuthenticator.class.getName()); conf.setBoolVar(ConfVars.HIVE_AUTHORIZATION_ENABLED, true); - conf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false); conf.setBoolVar(ConfVars.HIVE_SERVER2_ENABLE_DOAS, false); miniHS2 = new MiniHS2(conf); diff --git itests/hive-unit/src/test/java/org/apache/hive/jdbc/authorization/TestJdbcMetadataApiAuth.java itests/hive-unit/src/test/java/org/apache/hive/jdbc/authorization/TestJdbcMetadataApiAuth.java index 692bfa0d89..64ecbdc567 100644 --- itests/hive-unit/src/test/java/org/apache/hive/jdbc/authorization/TestJdbcMetadataApiAuth.java +++ itests/hive-unit/src/test/java/org/apache/hive/jdbc/authorization/TestJdbcMetadataApiAuth.java @@ -106,7 +106,6 @@ public static void beforeTest() throws Exception { conf.setVar(ConfVars.HIVE_AUTHORIZATION_MANAGER, TestAuthorizerFactory.class.getName()); conf.setVar(ConfVars.HIVE_AUTHENTICATOR_MANAGER, SessionStateUserAuthenticator.class.getName()); conf.setBoolVar(ConfVars.HIVE_AUTHORIZATION_ENABLED, true); - conf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false); conf.setBoolVar(ConfVars.HIVE_SERVER2_ENABLE_DOAS, false); miniHS2 = new MiniHS2(conf); diff --git itests/hive-unit/src/test/java/org/apache/hive/jdbc/authorization/TestJdbcWithSQLAuthUDFBlacklist.java itests/hive-unit/src/test/java/org/apache/hive/jdbc/authorization/TestJdbcWithSQLAuthUDFBlacklist.java index 5070c765c9..a2e6c4a8d0 100644 --- itests/hive-unit/src/test/java/org/apache/hive/jdbc/authorization/TestJdbcWithSQLAuthUDFBlacklist.java +++ itests/hive-unit/src/test/java/org/apache/hive/jdbc/authorization/TestJdbcWithSQLAuthUDFBlacklist.java @@ -48,7 +48,6 @@ public void startHS2(HiveConf conf) throws Exception { conf.setVar(ConfVars.HIVE_AUTHORIZATION_MANAGER, SQLStdHiveAuthorizerFactory.class.getName()); conf.setVar(ConfVars.HIVE_AUTHENTICATOR_MANAGER, SessionStateUserAuthenticator.class.getName()); conf.setBoolVar(ConfVars.HIVE_AUTHORIZATION_ENABLED, true); - conf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false); 
conf.setBoolVar(ConfVars.HIVE_SERVER2_ENABLE_DOAS, false); miniHS2 = new MiniHS2(conf); diff --git itests/hive-unit/src/test/java/org/apache/hive/jdbc/authorization/TestJdbcWithSQLAuthorization.java itests/hive-unit/src/test/java/org/apache/hive/jdbc/authorization/TestJdbcWithSQLAuthorization.java index 5e653ec75c..b5656a009f 100644 --- itests/hive-unit/src/test/java/org/apache/hive/jdbc/authorization/TestJdbcWithSQLAuthorization.java +++ itests/hive-unit/src/test/java/org/apache/hive/jdbc/authorization/TestJdbcWithSQLAuthorization.java @@ -50,7 +50,6 @@ public static void beforeTest() throws Exception { conf.setVar(ConfVars.HIVE_AUTHORIZATION_MANAGER, SQLStdHiveAuthorizerFactory.class.getName()); conf.setVar(ConfVars.HIVE_AUTHENTICATOR_MANAGER, SessionStateUserAuthenticator.class.getName()); conf.setBoolVar(ConfVars.HIVE_AUTHORIZATION_ENABLED, true); - conf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false); conf.setBoolVar(ConfVars.HIVE_SERVER2_ENABLE_DOAS, false); miniHS2 = new MiniHS2(conf); diff --git itests/hive-unit/src/test/java/org/apache/hive/jdbc/miniHS2/StartMiniHS2Cluster.java itests/hive-unit/src/test/java/org/apache/hive/jdbc/miniHS2/StartMiniHS2Cluster.java index d176db4279..d3894411bf 100644 --- itests/hive-unit/src/test/java/org/apache/hive/jdbc/miniHS2/StartMiniHS2Cluster.java +++ itests/hive-unit/src/test/java/org/apache/hive/jdbc/miniHS2/StartMiniHS2Cluster.java @@ -61,7 +61,6 @@ public void testRunCluster() throws Exception { break; } HiveConf conf = new HiveConf(); - conf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false); conf.setBoolVar(HiveConf.ConfVars.HIVE_RPC_QUERY_PLAN, true); for (; idx < confFiles.length; ++idx) { diff --git itests/hive-unit/src/test/java/org/apache/hive/jdbc/miniHS2/TestHiveServer2.java itests/hive-unit/src/test/java/org/apache/hive/jdbc/miniHS2/TestHiveServer2.java index d8c6beaee4..4218f99fc8 100644 --- itests/hive-unit/src/test/java/org/apache/hive/jdbc/miniHS2/TestHiveServer2.java +++ itests/hive-unit/src/test/java/org/apache/hive/jdbc/miniHS2/TestHiveServer2.java @@ -45,7 +45,6 @@ public static void beforeTest() throws Exception { miniHS2 = new MiniHS2(new HiveConf()); confOverlay = new HashMap<String, String>(); - confOverlay.put(ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); miniHS2.start(confOverlay); } diff --git itests/hive-unit/src/test/java/org/apache/hive/jdbc/miniHS2/TestHiveServer2SessionTimeout.java itests/hive-unit/src/test/java/org/apache/hive/jdbc/miniHS2/TestHiveServer2SessionTimeout.java index c4da73e208..d44e3c68ba 100644 --- itests/hive-unit/src/test/java/org/apache/hive/jdbc/miniHS2/TestHiveServer2SessionTimeout.java +++ itests/hive-unit/src/test/java/org/apache/hive/jdbc/miniHS2/TestHiveServer2SessionTimeout.java @@ -49,7 +49,6 @@ public static void beforeTest() throws Exception { @Before public void setUp() throws Exception { confOverlay = new HashMap<String, String>(); - confOverlay.put(ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); confOverlay.put(ConfVars.HIVE_SERVER2_SESSION_CHECK_INTERVAL.varname, "3s"); confOverlay.put(ConfVars.HIVE_SERVER2_IDLE_OPERATION_TIMEOUT.varname, "3s"); miniHS2.start(confOverlay); diff --git itests/hive-unit/src/test/java/org/apache/hive/jdbc/miniHS2/TestHs2Metrics.java itests/hive-unit/src/test/java/org/apache/hive/jdbc/miniHS2/TestHs2Metrics.java index 7337e9cbee..8157abb70b 100644 --- itests/hive-unit/src/test/java/org/apache/hive/jdbc/miniHS2/TestHs2Metrics.java +++
itests/hive-unit/src/test/java/org/apache/hive/jdbc/miniHS2/TestHs2Metrics.java @@ -75,10 +75,8 @@ public void postAnalyze(HiveSemanticAnalyzerHookContext context, public static void setup() throws Exception { miniHS2 = new MiniHS2(new HiveConf()); confOverlay = new HashMap(); - confOverlay.put(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); confOverlay.put(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname, MetricCheckingHook.class.getName()); confOverlay.put(HiveConf.ConfVars.HIVE_SERVER2_METRICS_ENABLED.varname, "true"); - confOverlay.put(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); miniHS2.start(confOverlay); } diff --git itests/hive-unit/src/test/java/org/apache/hive/jdbc/miniHS2/TestMiniHS2.java itests/hive-unit/src/test/java/org/apache/hive/jdbc/miniHS2/TestMiniHS2.java index 18892e77d7..8bc72769f8 100644 --- itests/hive-unit/src/test/java/org/apache/hive/jdbc/miniHS2/TestMiniHS2.java +++ itests/hive-unit/src/test/java/org/apache/hive/jdbc/miniHS2/TestMiniHS2.java @@ -67,7 +67,6 @@ public void testConfInSession() throws Exception { hiveConf.set(ZK_TIMEOUT_KEY, ZK_TIMEOUT); // check the config used very often! - hiveConf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false); miniHS2 = new MiniHS2(hiveConf); miniHS2.start(new HashMap()); @@ -78,8 +77,6 @@ public void testConfInSession() throws Exception { checkConfVal(DUMMY_CONF_KEY, DUMMY_CONF_KEY + "=" + DUMMY_CONF_VAL, stmt); checkConfVal(ZK_TIMEOUT_KEY, ZK_TIMEOUT_KEY + "=" + ZK_TIMEOUT, stmt); - checkConfVal(ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, - ConfVars.HIVE_SUPPORT_CONCURRENCY.varname + "=" + "false", stmt); stmt.close(); hs2Conn.close(); diff --git itests/hive-unit/src/test/java/org/apache/hive/service/TestDFSErrorHandling.java itests/hive-unit/src/test/java/org/apache/hive/service/TestDFSErrorHandling.java index c58767fc75..a3bcde11ce 100644 --- itests/hive-unit/src/test/java/org/apache/hive/service/TestDFSErrorHandling.java +++ itests/hive-unit/src/test/java/org/apache/hive/service/TestDFSErrorHandling.java @@ -71,7 +71,6 @@ public static void startServices() throws Exception { hiveConf.setIntVar(ConfVars.HIVE_SERVER2_THRIFT_MIN_WORKER_THREADS, 1); hiveConf.setIntVar(ConfVars.HIVE_SERVER2_THRIFT_MAX_WORKER_THREADS, 1); hiveConf.setBoolVar(ConfVars.METASTORE_EXECUTE_SET_UGI, true); - hiveConf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false); // Setting hive.server2.enable.doAs to True ensures that HS2 performs the query operation as // the connected user instead of the user running HS2. 
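Note: the removals in these test files all follow from the same change: lock management is now on by default, so the per-test HIVE_SUPPORT_CONCURRENCY=false overrides become redundant. A minimal sketch, assuming the stock HiveConf API, of how a test that still depends on the legacy non-transactional behavior would now have to opt out explicitly; the class and method names here are hypothetical, not part of this patch:

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;

public final class LegacyLockingConf {
  private LegacyLockingConf() {
  }

  // Hypothetical helper: recreate the pre-patch behavior for a test that
  // must still run without transactions or lock management.
  public static HiveConf nonConcurrentConf() {
    HiveConf conf = new HiveConf();
    conf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
    conf.setVar(ConfVars.HIVE_TXN_MANAGER,
        "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager");
    return conf;
  }
}

Such a test would then pass nonConcurrentConf() to new MiniHS2(...) or new Driver(...) exactly where the removed lines used to set the flag.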
diff --git itests/hive-unit/src/test/java/org/apache/hive/service/TestHS2ImpersonationWithRemoteMS.java itests/hive-unit/src/test/java/org/apache/hive/service/TestHS2ImpersonationWithRemoteMS.java index fd89921d34..5660b9cf11 100644 --- itests/hive-unit/src/test/java/org/apache/hive/service/TestHS2ImpersonationWithRemoteMS.java +++ itests/hive-unit/src/test/java/org/apache/hive/service/TestHS2ImpersonationWithRemoteMS.java @@ -49,7 +49,6 @@ public static void startServices() throws Exception { hiveConf.setIntVar(ConfVars.HIVE_SERVER2_THRIFT_MIN_WORKER_THREADS, 1); hiveConf.setIntVar(ConfVars.HIVE_SERVER2_THRIFT_MAX_WORKER_THREADS, 1); hiveConf.setBoolVar(ConfVars.METASTORE_EXECUTE_SET_UGI, true); - hiveConf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false); miniHS2 = new MiniHS2.Builder() .withMiniMR() diff --git itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingAPIWithMr.java itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingAPIWithMr.java index 830ffc2bbd..b51bca8a84 100644 --- itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingAPIWithMr.java +++ itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingAPIWithMr.java @@ -66,7 +66,6 @@ public static void setUpBeforeClass() throws Exception { hiveConf.set(ConfVars.HIVE_SERVER2_LOGGING_OPERATION_LEVEL.varname, "verbose"); miniHS2 = new MiniHS2(hiveConf); confOverlay = new HashMap(); - confOverlay.put(ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); miniHS2.start(confOverlay); } diff --git itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingAPIWithTez.java itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingAPIWithTez.java index 388486d970..f4963452d5 100644 --- itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingAPIWithTez.java +++ itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingAPIWithTez.java @@ -62,7 +62,6 @@ public static void setUpBeforeClass() throws Exception { hiveConf.setBoolVar(ConfVars.TEZ_EXEC_SUMMARY, false); miniHS2 = new MiniHS2(hiveConf, MiniClusterType.TEZ); confOverlay = new HashMap(); - confOverlay.put(ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); miniHS2.start(confOverlay); } } diff --git itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingLayout.java itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingLayout.java index 8febe3e79f..4f685174af 100644 --- itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingLayout.java +++ itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingLayout.java @@ -72,7 +72,6 @@ public static void setUpBeforeClass() throws Exception { hiveConf.set(HiveConf.ConfVars.HIVE_SERVER2_LOGGING_OPERATION_LEVEL.varname, "execution"); miniHS2 = new MiniHS2(hiveConf); confOverlay = new HashMap(); - confOverlay.put(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); miniHS2.start(confOverlay); } diff --git itests/hive-unit/src/test/java/org/apache/hive/service/cli/thrift/TestThriftHttpCLIServiceFeatures.java itests/hive-unit/src/test/java/org/apache/hive/service/cli/thrift/TestThriftHttpCLIServiceFeatures.java index 1911d2ce17..6b5250a819 100644 --- 
itests/hive-unit/src/test/java/org/apache/hive/service/cli/thrift/TestThriftHttpCLIServiceFeatures.java +++ itests/hive-unit/src/test/java/org/apache/hive/service/cli/thrift/TestThriftHttpCLIServiceFeatures.java @@ -137,7 +137,6 @@ public static void setUpBeforeClass() throws Exception { hiveConf.setVar(ConfVars.HIVE_SERVER2_AUTHENTICATION, HiveAuthConstants.AuthTypes.NOSASL.toString()); hiveConf.setVar(ConfVars.HIVE_SERVER2_TRANSPORT_MODE, transportMode); hiveConf.setVar(ConfVars.HIVE_SERVER2_THRIFT_HTTP_PATH, thriftHttpPath); - hiveConf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false); hiveConf.setVar(ConfVars.HIVE_AUTHORIZATION_MANAGER, MockedHiveAuthorizerFactory.class.getName()); hiveConf.setVar(ConfVars.HIVE_AUTHENTICATOR_MANAGER, SessionStateUserAuthenticator.class.getName()); diff --git metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStoreGetMetaConf.java metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStoreGetMetaConf.java index f71911eb46..00eb683638 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStoreGetMetaConf.java +++ metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStoreGetMetaConf.java @@ -85,7 +85,6 @@ public static void startMetaStoreServer() throws Exception { + msPort); hiveConf.setVar(HiveConf.ConfVars.PREEXECHOOKS, ""); hiveConf.setVar(HiveConf.ConfVars.POSTEXECHOOKS, ""); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false); hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 10); System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " "); diff --git metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java index 86462ff7b1..3f5a27c863 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java +++ metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java @@ -98,8 +98,6 @@ public static void startMetaStoreServer() throws Exception { hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, - "false"); hiveConf.set(HiveConf.ConfVars.METASTORE_EXPRESSION_PROXY_CLASS.name(), MockPartitionExpressionForMetastore.class.getCanonicalName()); System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " "); System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " "); diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java index b163a1e265..4f6429b72b 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java @@ -260,7 +260,7 @@ public void closeOp(boolean abort) throws HiveException { // There's always just one file that we have merged. // The union/DP/etc. should already be accounted for in the path.
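// (Context for the new trailing argument below, per the Utilities and AcidUtils
// changes later in this patch: the added boolean is isInsertOverwrite, which
// makes AcidUtils.baseOrDeltaSubdir choose a base_NNNNNNN name instead of a
// delta_NNNNNNN_NNNNNNN_NNNN name. A merge task never runs as INSERT OVERWRITE,
// so it hard-codes false and keeps committing its manifest under a delta dir.)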
Utilities.writeMmCommitManifest(Lists.newArrayList(outPath), - tmpPath.getParent(), fs, taskId, conf.getTxnId(), conf.getStmtId(), null); + tmpPath.getParent(), fs, taskId, conf.getTxnId(), conf.getStmtId(), null, false); LOG.info("Merged into " + finalPath + "(" + fss.getLen() + " bytes)."); } } @@ -340,7 +340,7 @@ public void jobCloseOp(Configuration hconf, boolean success) // We don't expect missing buckets from merge (actually there should be no buckets), // so just pass null as bucketing context. Union suffix should also be accounted for. Utilities.handleMmTableFinalPath(outputDir.getParent(), null, hconf, success, - dpLevels, lbLevels, null, mmWriteId, stmtId, reporter, isMmTable, false); + dpLevels, lbLevels, null, mmWriteId, stmtId, reporter, isMmTable, false, false); // TODO: MMIOW check ppath } } catch (IOException e) { diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java index b4989f1509..219d1adf25 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java @@ -337,7 +337,7 @@ public void initializeBucketPaths(int filesIdx, String taskId, boolean isNativeT } outPaths[filesIdx] = getTaskOutPath(taskId); } else { - String subdirPath = AcidUtils.deltaSubdir(txnId, txnId, stmtId); + String subdirPath = AcidUtils.baseOrDeltaSubdir(conf.getInsertOverwrite(), txnId, txnId, stmtId); if (unionPath != null) { // Create the union directory inside the MM directory. subdirPath += Path.SEPARATOR + unionPath; @@ -1324,7 +1324,7 @@ public void closeOp(boolean abort) throws HiveException { } if (conf.isMmTable()) { Utilities.writeMmCommitManifest( - commitPaths, specPath, fs, taskId, conf.getTransactionId(), conf.getStatementId(), unionPath); + commitPaths, specPath, fs, taskId, conf.getTransactionId(), conf.getStatementId(), unionPath, conf.getInsertOverwrite()); } // Only publish stats if this operator's flag was set to gather stats if (conf.isGatherStats()) { @@ -1383,7 +1383,7 @@ public void jobCloseOp(Configuration hconf, boolean success) conf.getTableInfo(), numBuckets, conf.getCompressed()); Utilities.handleMmTableFinalPath(specPath, unionSuffix, hconf, success, dpLevels, lbLevels, mbc, conf.getTransactionId(), conf.getStatementId(), reporter, - conf.isMmTable(), conf.isMmCtas()); + conf.isMmTable(), conf.isMmCtas(), conf.getInsertOverwrite()); } } } catch (IOException e) { diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java index 65b2f87357..8732348a1b 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java @@ -234,7 +234,7 @@ private void mvFileToFinalPath(Path specPath, Configuration hconf, Utilities.FILE_OP_LOGGER.info("Moving tmp dir: " + tmpPath + " to: " + intermediatePath + "(spec " + specPath + ")"); Utilities.rename(fs, tmpPath, intermediatePath); // Step2: remove any tmp file or double-committed output files - Utilities.removeTempOrDuplicateFiles(fs, intermediatePath); + Utilities.removeTempOrDuplicateFiles(fs, intermediatePath, false); // Step3: move to the file destination Utilities.FILE_OP_LOGGER.info("Moving tmp dir: " + intermediatePath + " to: " + specPath); Utilities.renameOrMoveFiles(fs, intermediatePath, specPath); diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
index d7397e0921..b4cc70384a 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java @@ -1501,7 +1501,7 @@ public static void mvFileToFinalPath(Path specPath, Configuration hconf, perfLogger.PerfLogBegin("FileSinkOperator", "RemoveTempOrDuplicateFiles"); // remove any tmp file or double-committed output files List emptyBuckets = Utilities.removeTempOrDuplicateFiles( - fs, statuses, dpCtx, conf, hconf, filesKept); + fs, statuses, dpCtx, conf, hconf, filesKept, false); perfLogger.PerfLogEnd("FileSinkOperator", "RemoveTempOrDuplicateFiles"); // create empty buckets if necessary if (!emptyBuckets.isEmpty()) { @@ -1592,23 +1592,23 @@ private static void addFilesToPathSet(Collection files, Set fi /** * Remove all temporary files and duplicate (double-committed) files from a given directory. */ - public static void removeTempOrDuplicateFiles(FileSystem fs, Path path) throws IOException { - removeTempOrDuplicateFiles(fs, path, null,null,null); + public static void removeTempOrDuplicateFiles(FileSystem fs, Path path, boolean isBaseDir) throws IOException { + removeTempOrDuplicateFiles(fs, path, null,null,null, isBaseDir); } public static List removeTempOrDuplicateFiles(FileSystem fs, Path path, - DynamicPartitionCtx dpCtx, FileSinkDesc conf, Configuration hconf) throws IOException { + DynamicPartitionCtx dpCtx, FileSinkDesc conf, Configuration hconf, boolean isBaseDir) throws IOException { if (path == null) { return null; } FileStatus[] stats = HiveStatsUtils.getFileStatusRecurse(path, ((dpCtx == null) ? 1 : dpCtx.getNumDPCols()), fs); - return removeTempOrDuplicateFiles(fs, stats, dpCtx, conf, hconf); + return removeTempOrDuplicateFiles(fs, stats, dpCtx, conf, hconf, isBaseDir); } public static List removeTempOrDuplicateFiles(FileSystem fs, FileStatus[] fileStats, - DynamicPartitionCtx dpCtx, FileSinkDesc conf, Configuration hconf) throws IOException { - return removeTempOrDuplicateFiles(fs, fileStats, dpCtx, conf, hconf, null); + DynamicPartitionCtx dpCtx, FileSinkDesc conf, Configuration hconf, boolean isBaseDir) throws IOException { + return removeTempOrDuplicateFiles(fs, fileStats, dpCtx, conf, hconf, null, isBaseDir); } /** @@ -1617,12 +1617,12 @@ public static void removeTempOrDuplicateFiles(FileSystem fs, Path path) throws I * @return a list of path names corresponding to should-be-created empty buckets. */ public static List removeTempOrDuplicateFiles(FileSystem fs, FileStatus[] fileStats, - DynamicPartitionCtx dpCtx, FileSinkDesc conf, Configuration hconf, Set filesKept) + DynamicPartitionCtx dpCtx, FileSinkDesc conf, Configuration hconf, Set filesKept, boolean isBaseDir) throws IOException { int dpLevels = dpCtx == null ? 0 : dpCtx.getNumDPCols(), numBuckets = (conf != null && conf.getTable() != null) ? 
conf.getTable().getNumBuckets() : 0; return removeTempOrDuplicateFiles( - fs, fileStats, null, dpLevels, numBuckets, hconf, null, 0, false, filesKept); + fs, fileStats, null, dpLevels, numBuckets, hconf, null, 0, false, filesKept, isBaseDir); } private static boolean removeEmptyDpDirectory(FileSystem fs, Path path) throws IOException { @@ -1641,7 +1641,7 @@ private static boolean removeEmptyDpDirectory(FileSystem fs, Path path) throws I public static List removeTempOrDuplicateFiles(FileSystem fs, FileStatus[] fileStats, String unionSuffix, int dpLevels, int numBuckets, Configuration hconf, Long txnId, - int stmtId, boolean isMmTable, Set filesKept) throws IOException { + int stmtId, boolean isMmTable, Set filesKept, boolean isBaseDir) throws IOException { if (fileStats == null) { return null; } @@ -1660,7 +1660,7 @@ private static boolean removeEmptyDpDirectory(FileSystem fs, Path path) throws I if (isMmTable) { Path mmDir = parts[i].getPath(); - if (!mmDir.getName().equals(AcidUtils.deltaSubdir(txnId, txnId, stmtId))) { + if (!mmDir.getName().equals(AcidUtils.baseOrDeltaSubdir(isBaseDir, txnId, txnId, stmtId))) { throw new IOException("Unexpected non-MM directory name " + mmDir); } @@ -1684,7 +1684,7 @@ private static boolean removeEmptyDpDirectory(FileSystem fs, Path path) throws I if (fileStats.length == 0) { return result; } - Path mmDir = extractNonDpMmDir(txnId, stmtId, items); + Path mmDir = extractNonDpMmDir(txnId, stmtId, items, isBaseDir); taskIDToFile = removeTempOrDuplicateFilesNonMm( fs.listStatus(new Path(mmDir, unionSuffix)), fs); if (filesKept != null && taskIDToFile != null) { @@ -1702,7 +1702,7 @@ private static boolean removeEmptyDpDirectory(FileSystem fs, Path path) throws I addFilesToPathSet(taskIDToFile.values(), filesKept); } } else { - Path mmDir = extractNonDpMmDir(txnId, stmtId, items); + Path mmDir = extractNonDpMmDir(txnId, stmtId, items, isBaseDir); taskIDToFile = removeTempOrDuplicateFilesNonMm(fs.listStatus(mmDir), fs); if (filesKept != null && taskIDToFile != null) { addFilesToPathSet(taskIDToFile.values(), filesKept); @@ -1714,12 +1714,12 @@ private static boolean removeEmptyDpDirectory(FileSystem fs, Path path) throws I return result; } - private static Path extractNonDpMmDir(Long txnId, int stmtId, FileStatus[] items) throws IOException { + private static Path extractNonDpMmDir(Long txnId, int stmtId, FileStatus[] items, boolean isBaseDir) throws IOException { if (items.length > 1) { throw new IOException("Unexpected directories for non-DP MM: " + Arrays.toString(items)); } Path mmDir = items[0].getPath(); - if (!mmDir.getName().equals(AcidUtils.deltaSubdir(txnId, txnId, stmtId))) { + if (!mmDir.getName().equals(AcidUtils.baseOrDeltaSubdir(isBaseDir, txnId, txnId, stmtId))) { throw new IOException("Unexpected non-MM directory " + mmDir); } Utilities.FILE_OP_LOGGER.trace("removeTempOrDuplicateFiles processing files in MM directory {}", mmDir); @@ -4056,7 +4056,7 @@ private static void tryDelete(FileSystem fs, Path path) { } public static Path[] getMmDirectoryCandidates(FileSystem fs, Path path, int dpLevels, - int lbLevels, PathFilter filter, long txnId, int stmtId, Configuration conf) + int lbLevels, PathFilter filter, long txnId, int stmtId, Configuration conf, boolean isBaseDir) throws IOException { int skipLevels = dpLevels + lbLevels; if (filter == null) { @@ -4071,7 +4071,7 @@ private static void tryDelete(FileSystem fs, Path path) { || (HiveConf.getBoolVar(conf, ConfVars.HIVE_MM_AVOID_GLOBSTATUS_ON_S3) && isS3(fs))) { return 
getMmDirectoryCandidatesRecursive(fs, path, skipLevels, filter); } - return getMmDirectoryCandidatesGlobStatus(fs, path, skipLevels, filter, txnId, stmtId); + return getMmDirectoryCandidatesGlobStatus(fs, path, skipLevels, filter, txnId, stmtId, isBaseDir); } private static boolean isS3(FileSystem fs) { @@ -4148,7 +4148,7 @@ private static boolean isS3(FileSystem fs) { } private static Path[] getMmDirectoryCandidatesGlobStatus(FileSystem fs, - Path path, int skipLevels, PathFilter filter, long txnId, int stmtId) throws IOException { + Path path, int skipLevels, PathFilter filter, long txnId, int stmtId, boolean isBaseDir) throws IOException { StringBuilder sb = new StringBuilder(path.toUri().getPath()); for (int i = 0; i < skipLevels; i++) { sb.append(Path.SEPARATOR).append('*'); @@ -4158,7 +4158,7 @@ private static boolean isS3(FileSystem fs) { // sb.append(Path.SEPARATOR).append(AcidUtils.deltaSubdir(txnId, txnId)).append("_*"); throw new AssertionError("GlobStatus should not be called without a statement ID"); } else { - sb.append(Path.SEPARATOR).append(AcidUtils.deltaSubdir(txnId, txnId, stmtId)); + sb.append(Path.SEPARATOR).append(AcidUtils.baseOrDeltaSubdir(isBaseDir, txnId, txnId, stmtId)); } Path pathPattern = new Path(path, sb.toString()); return statusToPath(fs.globStatus(pathPattern, filter)); @@ -4166,9 +4166,9 @@ private static boolean isS3(FileSystem fs) { private static void tryDeleteAllMmFiles(FileSystem fs, Path specPath, Path manifestDir, int dpLevels, int lbLevels, JavaUtils.IdPathFilter filter, - long txnId, int stmtId, Configuration conf) throws IOException { + long txnId, int stmtId, Configuration conf, boolean isBaseDir) throws IOException { Path[] files = getMmDirectoryCandidates( - fs, specPath, dpLevels, lbLevels, filter, txnId, stmtId, conf); + fs, specPath, dpLevels, lbLevels, filter, txnId, stmtId, conf, isBaseDir); if (files != null) { for (Path path : files) { Utilities.FILE_OP_LOGGER.info("Deleting {} on failure", path); @@ -4181,12 +4181,12 @@ private static void tryDeleteAllMmFiles(FileSystem fs, Path specPath, Path manif public static void writeMmCommitManifest(List commitPaths, Path specPath, FileSystem fs, - String taskId, Long txnId, int stmtId, String unionSuffix) throws HiveException { + String taskId, Long txnId, int stmtId, String unionSuffix, boolean isInsertOverwrite) throws HiveException { if (commitPaths.isEmpty()) { return; } // We assume one FSOP per task (per specPath), so we create it in specPath. - Path manifestPath = getManifestDir(specPath, txnId, stmtId, unionSuffix); + Path manifestPath = getManifestDir(specPath, txnId, stmtId, unionSuffix, isInsertOverwrite); manifestPath = new Path(manifestPath, taskId + MANIFEST_EXTENSION); Utilities.FILE_OP_LOGGER.info("Writing manifest to {} with {}", manifestPath, commitPaths); try { @@ -4205,8 +4205,10 @@ public static void writeMmCommitManifest(List commitPaths, Path specPath, } } - private static Path getManifestDir(Path specPath, long txnId, int stmtId, String unionSuffix) { - Path manifestPath = new Path(specPath, "_tmp." + AcidUtils.deltaSubdir(txnId, txnId, stmtId)); + private static Path getManifestDir(Path specPath, long txnId, int stmtId, String unionSuffix, boolean isInsertOverwrite) { + Path manifestPath = new Path(specPath, "_tmp." + + AcidUtils.baseOrDeltaSubdir(isInsertOverwrite, txnId, txnId, stmtId)); + return (unionSuffix == null) ? 
manifestPath : new Path(manifestPath, unionSuffix); } @@ -4223,13 +4225,13 @@ public MissingBucketsContext(TableDesc tableInfo, int numBuckets, boolean isComp public static void handleMmTableFinalPath(Path specPath, String unionSuffix, Configuration hconf, boolean success, int dpLevels, int lbLevels, MissingBucketsContext mbc, long txnId, int stmtId, - Reporter reporter, boolean isMmTable, boolean isMmCtas) throws IOException, HiveException { + Reporter reporter, boolean isMmTable, boolean isMmCtas, boolean isInsertOverwrite) throws IOException, HiveException { FileSystem fs = specPath.getFileSystem(hconf); - Path manifestDir = getManifestDir(specPath, txnId, stmtId, unionSuffix); + Path manifestDir = getManifestDir(specPath, txnId, stmtId, unionSuffix, isInsertOverwrite); if (!success) { JavaUtils.IdPathFilter filter = new JavaUtils.IdPathFilter(txnId, stmtId, true); tryDeleteAllMmFiles(fs, specPath, manifestDir, dpLevels, lbLevels, - filter, txnId, stmtId, hconf); + filter, txnId, stmtId, hconf, isInsertOverwrite); return; } @@ -4252,13 +4254,13 @@ public static void handleMmTableFinalPath(Path specPath, String unionSuffix, Con } Utilities.FILE_OP_LOGGER.debug("Looking for files in: {}", specPath); - JavaUtils.IdPathFilter filter = new JavaUtils.IdPathFilter(txnId, stmtId, true); + JavaUtils.IdPathFilter filter = new JavaUtils.IdPathFilter(txnId, stmtId, true, false, isInsertOverwrite); if (isMmCtas && !fs.exists(specPath)) { Utilities.FILE_OP_LOGGER.info("Creating table directory for CTAS with no output at {}", specPath); FileUtils.mkdir(fs, specPath, hconf); } Path[] files = getMmDirectoryCandidates( - fs, specPath, dpLevels, lbLevels, filter, txnId, stmtId, hconf); + fs, specPath, dpLevels, lbLevels, filter, txnId, stmtId, hconf, isInsertOverwrite); ArrayList mmDirectories = new ArrayList<>(); if (files != null) { for (Path path : files) { @@ -4319,7 +4321,7 @@ public static void handleMmTableFinalPath(Path specPath, String unionSuffix, Con } List emptyBuckets = Utilities.removeTempOrDuplicateFiles(fs, finalResults, unionSuffix, dpLevels, mbc == null ? 0 : mbc.numBuckets, hconf, txnId, stmtId, - isMmTable, null); + isMmTable, null, isInsertOverwrite); // create empty buckets if necessary if (!emptyBuckets.isEmpty()) { assert mbc != null; diff --git ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java index 4c0b71f65f..6d0008200d 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java @@ -204,6 +204,19 @@ public static String deleteDeltaSubdir(long min, long max, int statementId) { public static String baseDir(long txnId) { return BASE_PREFIX + String.format(DELTA_DIGITS, txnId); } + + /** + * Return a base or delta directory string + * according to the given "baseDirRequired". + */ + public static String baseOrDeltaSubdir(boolean baseDirRequired, long min, long max, int statementId) { + if (!baseDirRequired) { + return deltaSubdir(min, max, statementId); + } else { + return baseDir(min); + } + } + /** * Create a filename for a bucket file. 
* @param directory the partition directory @@ -1211,7 +1224,19 @@ public static boolean isAcidTable(CreateTableDesc table) { public static boolean isFullAcidTable(Table table) { return isAcidTable(table) && !AcidUtils.isInsertOnlyTable(table.getParameters()); } - + + public static boolean isFullAcidTable(CreateTableDesc td) { + if (td == null || td.getTblProps() == null) { + return false; + } + String tableIsTransactional = td.getTblProps().get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL); + if (tableIsTransactional == null) { + tableIsTransactional = td.getTblProps().get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL.toUpperCase()); + } + return tableIsTransactional != null && tableIsTransactional.equalsIgnoreCase("true") && + !AcidUtils.isInsertOnlyTable(td.getTblProps()); + } + /** * Sets the acidOperationalProperties in the configuration object argument. * @param conf Mutable configuration object diff --git ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java index 6a1dc729f3..31e605808f 100755 --- ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java @@ -549,7 +549,14 @@ private static void processForWriteIds(Path dir, JobConf conf, } else if (!hadAcidState) { AcidUtils.Directory dirInfo = AcidUtils.getAcidState(currDir, conf, validTxnList, Ref.from(false), true, null); hadAcidState = true; - // TODO [MM gap]: for IOW, we also need to count in base dir, if any + + // Find the base, created for IOW. + Path base = dirInfo.getBaseDirectory(); + if (base != null) { + finalPaths.add(base); + } + + // Find the parsed delta files. for (AcidUtils.ParsedDelta delta : dirInfo.getCurrentDirectories()) { Utilities.FILE_OP_LOGGER.debug("Adding input " + delta.getPath()); finalPaths.add(delta.getPath()); diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index 3e9fff195f..fdd4555123 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -1774,12 +1774,6 @@ public Partition loadPartition(Path loadPath, Table tbl, Map par Utilities.FILE_OP_LOGGER.trace("maybe deleting stuff from " + oldPartPath + " (new " + newPartPath + ") for replace"); } - if ((loadFileType == LoadFileType.REPLACE_ALL) && oldPartPath != null) { - boolean isAutoPurge = "true".equalsIgnoreCase(tbl.getProperty("auto.purge")); - deleteOldPathForReplace(newPartPath, oldPartPath, getConf(), isAutoPurge, - new JavaUtils.IdPathFilter(txnId, stmtId, false, true), true, - tbl.isStoredAsSubDirectories() ? tbl.getSkewedColNames().size() : 0); - } } else { // Either a non-MM query, or a load into MM table from an external source. 
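// (Note on the removal above: with this patch an MM INSERT OVERWRITE writes its
// output into a base_N directory (AcidUtils.baseOrDeltaSubdir with
// isInsertOverwrite=true), so the old deltas no longer need to be deleted
// eagerly here; the base masks them, and the compactor's Cleaner drops them
// later, as the new TestTxnCommandsForMmTable tests verify.)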
PathFilter filter = FileUtils.HIDDEN_FILES_PATH_FILTER; @@ -2083,7 +2077,7 @@ private void constructOneLBLocationMap(FileStatus fSta, // where this is used; we always want to load everything; also the only case where // we have multiple statements anyway is union. Path[] leafStatus = Utilities.getMmDirectoryCandidates( - fs, loadPath, numDP, numLB, null, txnId, -1, conf); + fs, loadPath, numDP, numLB, null, txnId, -1, conf, false); for (Path p : leafStatus) { Path dpPath = p.getParent(); // Skip the MM directory that we have found. for (int i = 0; i < numLB; ++i) { @@ -2286,13 +2280,6 @@ public void loadTable(Path loadPath, String tableName, LoadFileType loadFileType // Note: this assumes both paths are qualified; which they are, currently. if (isMmTable && loadPath.equals(tbl.getPath())) { Utilities.FILE_OP_LOGGER.debug("not moving " + loadPath + " to " + tbl.getPath()); - if (loadFileType == LoadFileType.REPLACE_ALL) { - Path tableDest = tbl.getPath(); - boolean isAutopurge = "true".equalsIgnoreCase(tbl.getProperty("auto.purge")); - deleteOldPathForReplace(tableDest, tableDest, sessionConf, isAutopurge, - new JavaUtils.IdPathFilter(txnId, stmtId, false, true), true, - tbl.isStoredAsSubDirectories() ? tbl.getSkewedColNames().size() : 0); - } newFiles = listFilesCreatedByQuery(loadPath, txnId, stmtId); } else { // Either a non-MM query, or a load into MM table from an external source. @@ -3835,7 +3822,7 @@ private static void moveAcidFiles(String deltaFileType, PathFilter pathFilter, F */ protected void replaceFiles(Path tablePath, Path srcf, Path destf, Path oldPath, HiveConf conf, boolean isSrcLocal, boolean purge, List<Path> newFiles, PathFilter deletePathFilter, - boolean isMmTable) throws HiveException { + boolean isMmTableOverwrite) throws HiveException { try { FileSystem destFs = destf.getFileSystem(conf); @@ -3856,7 +3843,7 @@ protected void replaceFiles(Path tablePath, Path srcf, Path destf, Path oldPath, if (oldPath != null) { // Note: we assume lbLevels is 0 here. Same as old code for non-MM. // For MM tables, this can only be a LOAD command. Does LOAD even support LB?
- deleteOldPathForReplace(destf, oldPath, conf, purge, deletePathFilter, isMmTable, 0); + deleteOldPathForReplace(destf, oldPath, conf, purge, deletePathFilter, isMmTableOverwrite, 0); } // first call FileUtils.mkdir to make sure that destf directory exists, if not, it creates @@ -3902,7 +3889,7 @@ protected void replaceFiles(Path tablePath, Path srcf, Path destf, Path oldPath, } private void deleteOldPathForReplace(Path destPath, Path oldPath, HiveConf conf, boolean purge, - PathFilter pathFilter, boolean isMmTable, int lbLevels) throws HiveException { + PathFilter pathFilter, boolean isMmTableOverwrite, int lbLevels) throws HiveException { Utilities.FILE_OP_LOGGER.debug("Deleting old paths for replace in " + destPath + " and old path " + oldPath); boolean isOldPathUnderDestf = false; @@ -3914,32 +3901,13 @@ private void deleteOldPathForReplace(Path destPath, Path oldPath, HiveConf conf, // But not sure why we changed not to delete the oldPath in HIVE-8750 if it is // not the destf or its subdir? isOldPathUnderDestf = isSubDir(oldPath, destPath, oldFs, destFs, false); - if (isOldPathUnderDestf || isMmTable) { - if (lbLevels == 0 || !isMmTable) { + if (isOldPathUnderDestf || isMmTableOverwrite) { + if (lbLevels == 0 || !isMmTableOverwrite) { cleanUpOneDirectoryForReplace(oldPath, oldFs, pathFilter, conf, purge); - } else { - // We need to clean up different MM IDs from each LB directory separately. - // Avoid temporary directories in the immediate table/part dir. - // Note: we could just find directories with any MM directories inside? - // the rest doesn't have to be cleaned up. Play it safe. - String mask = "[^._]*"; - for (int i = 0; i < lbLevels - 1; ++i) { - mask += Path.SEPARATOR + "*"; - } - Path glob = new Path(oldPath, mask); - FileStatus[] lbDirs = oldFs.globStatus(glob); - for (FileStatus lbDir : lbDirs) { - Path lbPath = lbDir.getPath(); - if (!lbDir.isDirectory()) { - throw new HiveException("Unexpected path during overwrite: " + lbPath); - } - Utilities.FILE_OP_LOGGER.info("Cleaning up LB directory " + lbPath); - cleanUpOneDirectoryForReplace(lbPath, oldFs, pathFilter, conf, purge); - } } } } catch (IOException e) { - if (isOldPathUnderDestf || isMmTable) { + if (isOldPathUnderDestf || isMmTableOverwrite) { // if oldPath is a subdir of destf but it could not be cleaned throw new HiveException("Directory " + oldPath.toString() + " could not be cleaned up.", e); diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/QBParseInfo.java ql/src/java/org/apache/hadoop/hive/ql/parse/QBParseInfo.java index ab71073560..f0083f2066 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/QBParseInfo.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/QBParseInfo.java @@ -64,6 +64,7 @@ private final Set<String> destCubes; private final Set<String> destGroupingSets; private final Map<String, ASTNode> destToHaving; + private final Map<String, Boolean> destToOpType; // insertIntoTables/insertOverwriteTables map a table's fullName to its ast; private final Map<String, ASTNode> insertIntoTables; private final Map<String, ASTNode> insertOverwriteTables; @@ -135,6 +136,7 @@ public QBParseInfo(String alias, boolean isSubQ) { destToSortby = new HashMap<String, ASTNode>(); destToOrderby = new HashMap<String, ASTNode>(); destToLimit = new HashMap<String, SimpleEntry<Integer, Integer>>(); + destToOpType = new HashMap<>(); insertIntoTables = new HashMap<String, ASTNode>(); insertOverwriteTables = new HashMap<String, ASTNode>(); destRollups = new HashSet<String>(); @@ -155,7 +157,7 @@ public QBParseInfo(String alias, boolean isSubQ) { } /* * If a QB is such that * the aggregation expressions need to be handled by * the Windowing PTF; we invoke this function to clear the AggExprs on the dest.
*/ @@ -180,6 +182,18 @@ public void addAggregationExprsForClause(String clause, public void addInsertIntoTable(String fullName, ASTNode ast) { insertIntoTables.put(fullName.toLowerCase(), ast); } + + public void setDestToOpType(String clause, boolean value) { + destToOpType.put(clause, value); + } + + public boolean isDestToOpTypeInsertOverwrite(String clause) { + if (destToOpType.containsKey(clause)) { + return destToOpType.get(clause); + } else { + return false; + } + } /** * See also {@link #getInsertOverwriteTables()} diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index b323edeb74..5dd3583215 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -1506,6 +1506,7 @@ public boolean doPhase1(ASTNode ast, QB qb, Phase1Ctx ctx_1, PlannerContext plan String fullTableName = getUnescapedName((ASTNode) ast.getChild(0).getChild(0), SessionState.get().getCurrentDatabase()); qbp.getInsertOverwriteTables().put(fullTableName.toLowerCase(), ast); + qbp.setDestToOpType(ctx_1.dest, true); } } } @@ -6781,7 +6782,8 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) Integer dest_type = qbm.getDestTypeForAlias(dest); Table dest_tab = null; // destination table if any - boolean destTableIsAcid = false; // should the destination table be written to using ACID + boolean destTableIsAcid = false; // true for full ACID table and MM table + boolean destTableIsFullAcid = false; // should the destination table be written to using ACID boolean destTableIsTemporary = false; boolean destTableIsMaterialization = false; Partition dest_part = null;// destination partition if any @@ -6802,7 +6804,8 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) case QBMetaData.DEST_TABLE: { dest_tab = qbm.getDestTableForAlias(dest); - destTableIsAcid = AcidUtils.isFullAcidTable(dest_tab); + destTableIsAcid = AcidUtils.isAcidTable(dest_tab); + destTableIsFullAcid = AcidUtils.isFullAcidTable(dest_tab); destTableIsTemporary = dest_tab.isTemporary(); // Is the user trying to insert into a external tables @@ -6852,7 +6855,7 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) // NOTE: specify Dynamic partitions in dest_tab for WriteEntity if (!isNonNativeTable) { AcidUtils.Operation acidOp = AcidUtils.Operation.NOT_ACID; - if (destTableIsAcid) { + if (destTableIsFullAcid) { acidOp = getAcidType(table_desc.getOutputFileFormatClass(), dest); checkAcidConstraints(qb, table_desc, dest_tab); } @@ -6895,7 +6898,8 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) dest_part = qbm.getDestPartitionForAlias(dest); dest_tab = dest_part.getTable(); - destTableIsAcid = AcidUtils.isFullAcidTable(dest_tab); + destTableIsAcid = AcidUtils.isAcidTable(dest_tab); + destTableIsFullAcid = AcidUtils.isFullAcidTable(dest_tab); checkExternalTable(dest_tab); @@ -6928,7 +6932,7 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) dest_part.getSkewedColValues(), dest_part.getSkewedColValueLocationMaps(), dest_part.isStoredAsSubDirectories(), conf); AcidUtils.Operation acidOp = AcidUtils.Operation.NOT_ACID; - if (destTableIsAcid) { + if (destTableIsFullAcid) { acidOp = getAcidType(table_desc.getOutputFileFormatClass(), dest); checkAcidConstraints(qb, table_desc, dest_tab); } @@ -6945,7 +6949,7 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) // 
For Acid table, Insert Overwrite shouldn't replace the table content. We keep the old // deltas and base and leave them up to the cleaner to clean up LoadFileType loadType = (!qb.getParseInfo().isInsertIntoTable(dest_tab.getDbName(), - dest_tab.getTableName()) && !destTableIsAcid) + dest_tab.getTableName()) && !destTableIsAcid) // Both Full-acid and MM tables are excluded. ? LoadFileType.REPLACE_ALL : LoadFileType.KEEP_EXISTING; ltd.setLoadFileType(loadType); ltd.setLbCtx(lbCtx); @@ -7019,6 +7023,7 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) } destTableIsAcid = tblDesc != null && AcidUtils.isAcidTable(tblDesc); + destTableIsFullAcid = tblDesc != null && AcidUtils.isFullAcidTable(tblDesc); boolean isDestTempFile = true; if (!ctx.isMRTmpFileURI(dest_path.toUri().toString())) { @@ -7109,7 +7114,7 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) (dest_tab.getSortCols() != null && dest_tab.getSortCols().size() > 0))); // If this table is working with ACID semantics, turn off merging - canBeMerged &= !destTableIsAcid; + canBeMerged &= !destTableIsFullAcid; // Generate the partition columns from the parent input if (dest_type.intValue() == QBMetaData.DEST_TABLE @@ -7120,7 +7125,7 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) FileSinkDesc fileSinkDesc = createFileSinkDesc(dest, table_desc, dest_part, dest_path, currentTableId, destTableIsAcid, destTableIsTemporary, destTableIsMaterialization, queryTmpdir, rsCtx, dpCtx, lbCtx, fsRS, - canBeMerged, dest_tab, txnId, isMmCtas, dest_type); + canBeMerged, dest_tab, txnId, isMmCtas, dest_type, qb); if (isMmCtas) { // Add FSD so that the LoadTask compilation could fix up its path to avoid the move. tableDesc.setWriter(fileSinkDesc); @@ -7231,7 +7236,7 @@ private FileSinkDesc createFileSinkDesc(String dest, TableDesc table_desc, boolean destTableIsMaterialization, Path queryTmpdir, SortBucketRSCtx rsCtx, DynamicPartitionCtx dpCtx, ListBucketingCtx lbCtx, RowSchema fsRS, boolean canBeMerged, Table dest_tab, Long mmWriteId, boolean isMmCtas, - Integer dest_type) throws SemanticException { + Integer dest_type, QB qb) throws SemanticException { boolean isInsertOverwrite = false; switch (dest_type) { case QBMetaData.DEST_PARTITION: @@ -7240,7 +7245,8 @@ private FileSinkDesc createFileSinkDesc(String dest, TableDesc table_desc, //INSERT [OVERWRITE] path String destTableFullName = dest_tab.getCompleteName().replace('@', '.'); Map<String, ASTNode> iowMap = qb.getParseInfo().getInsertOverwriteTables(); - if (iowMap.containsKey(destTableFullName)) { + if (iowMap.containsKey(destTableFullName) && + qb.getParseInfo().isDestToOpTypeInsertOverwrite(dest)) { isInsertOverwrite = true; } break; diff --git ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommandsForMmTable.java ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommandsForMmTable.java new file mode 100644 index 0000000000..15a6a8a50c --- /dev/null +++ ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommandsForMmTable.java @@ -0,0 +1,613 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql; + +import java.io.File; +import java.lang.reflect.Field; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Comparator; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; + +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.common.FileUtils; +import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.api.CommitTxnRequest; +import org.apache.hadoop.hive.metastore.api.CompactionRequest; +import org.apache.hadoop.hive.metastore.api.CompactionType; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.OpenTxnRequest; +import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse; +import org.apache.hadoop.hive.metastore.api.ShowCompactRequest; +import org.apache.hadoop.hive.metastore.api.ShowCompactResponse; +import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement; +import org.apache.hadoop.hive.metastore.txn.AcidCompactionHistoryService; +import org.apache.hadoop.hive.metastore.txn.TxnDbUtil; +import org.apache.hadoop.hive.metastore.txn.TxnStore; +import org.apache.hadoop.hive.metastore.txn.TxnUtils; +import org.apache.hadoop.hive.ql.io.AcidOutputFormat; +import org.apache.hadoop.hive.ql.io.BucketCodec; +import org.apache.hadoop.hive.ql.io.HiveInputFormat; +import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; +import org.apache.hadoop.hive.ql.session.SessionState; +import org.apache.hadoop.hive.metastore.txn.AcidOpenTxnsCounterService; +import org.apache.hadoop.hive.ql.txn.compactor.Cleaner; +import org.apache.hadoop.hive.ql.txn.compactor.Initiator; +import org.apache.hadoop.hive.ql.txn.compactor.Worker; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Ignore; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import org.junit.rules.TestName; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Tests here are for micro-managed tables: + * specifically INSERT OVERWRITE statements and Major/Minor Compactions. 
+ */ +public class TestTxnCommandsForMmTable extends TxnCommandsBaseForTests { + private static final Logger LOG = LoggerFactory.getLogger(TestTxnCommandsForMmTable.class); + protected static final String TEST_DATA_DIR = new File(System.getProperty("java.io.tmpdir") + + File.separator + TestTxnCommandsForMmTable.class.getCanonicalName() + + "-" + System.currentTimeMillis() + ).getPath().replaceAll("\\\\", "/"); + protected static final String TEST_WAREHOUSE_DIR = TEST_DATA_DIR + "/warehouse"; + @Override + String getTestDataDir() { + return TEST_DATA_DIR; + } + + enum TableExtended { + NONACIDPART("nonAcidPart", "p"), + MMTBL("mmTbl"), + MMTBL2("mmTbl2"), + MMTBLPART("mmTblPart","p"); + + final String name; + final String partitionColumns; + @Override + public String toString() { + return name; + } + String getPartitionColumns() { + return partitionColumns; + } + TableExtended(String name) { + this(name, null); + } + TableExtended(String name, String partitionColumns) { + this.name = name; + this.partitionColumns = partitionColumns; + } + } + + @Override + @Before + public void setUp() throws Exception { + super.setUpInternal(); + setUpInternalExtended(false); + } + + void setUpInternalExtended(boolean isOrcFormat) throws Exception { + hiveConf.setBoolVar(HiveConf.ConfVars.DYNAMICPARTITIONING, true); + hiveConf.setVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict"); + hiveConf.setVar(HiveConf.ConfVars.HIVEMAPREDMODE, "nonstrict"); + hiveConf.setVar(HiveConf.ConfVars.HIVEFETCHTASKCONVERSION, "none"); + hiveConf.setVar(HiveConf.ConfVars.HIVE_TXN_MANAGER, "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager"); + + runStatementOnDriver("create table " + TableExtended.NONACIDPART + "(a int, b int) partitioned by (p string) stored as orc TBLPROPERTIES ('transactional'='false')"); + if (!isOrcFormat) { + runStatementOnDriver("create table " + TableExtended.MMTBL + "(a int, b int) TBLPROPERTIES ('transactional'='true', 'transactional_properties'='insert_only')"); + runStatementOnDriver("create table " + TableExtended.MMTBL2 + "(a int, b int) TBLPROPERTIES ('transactional'='true', 'transactional_properties'='insert_only')"); + runStatementOnDriver("create table " + TableExtended.MMTBLPART + "(a int, b int) partitioned by (p string) TBLPROPERTIES ('transactional'='true', 'transactional_properties'='insert_only')"); + } else { + runStatementOnDriver("create table " + TableExtended.MMTBL + "(a int, b int) stored as orc TBLPROPERTIES ('transactional'='true', 'transactional_properties'='insert_only')"); + runStatementOnDriver("create table " + TableExtended.MMTBL2 + "(a int, b int) stored as orc TBLPROPERTIES ('transactional'='true', 'transactional_properties'='insert_only')"); + runStatementOnDriver("create table " + TableExtended.MMTBLPART + "(a int, b int) partitioned by (p string) stored as orc TBLPROPERTIES ('transactional'='true', 'transactional_properties'='insert_only')"); + } + } + protected void dropTables() throws Exception { + super.dropTables(); + for(TestTxnCommandsForMmTable.TableExtended t : TestTxnCommandsForMmTable.TableExtended.values()) { + runStatementOnDriver("drop table if exists " + t); + } + } + /** + * Test compaction for Micro-managed table + * 1. Regular compaction shouldn't impact any valid subdirectories of MM tables + * 2. Compactions will only remove subdirectories for aborted transactions of MM tables, if any + * @throws Exception + */ + @Test + public void testMmTableCompaction() throws Exception { + // 1.
Insert some rows into MM table + runStatementOnDriver("insert into " + TableExtended.MMTBL + "(a,b) values(1,2)"); + runStatementOnDriver("insert into " + TableExtended.MMTBL + "(a,b) values(3,4)"); + // There should be 2 delta directories + verifyDirAndResult(2); + + // 2. Perform a MINOR compaction. Since nothing was aborted, subdirs should stay. + runStatementOnDriver("alter table "+ TableExtended.MMTBL + " compact 'MINOR'"); + runWorker(hiveConf); + verifyDirAndResult(2); + + // 3. Let a transaction be aborted + hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, true); + runStatementOnDriver("insert into " + TableExtended.MMTBL + "(a,b) values(5,6)"); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, false); + // There should be 3 delta directories. The new one is the aborted one. + verifyDirAndResult(3); + + // 4. Perform a MINOR compaction again. This time it will remove the subdir for aborted transaction. + runStatementOnDriver("alter table "+ TableExtended.MMTBL + " compact 'MINOR'"); + runWorker(hiveConf); + // The worker should remove the subdir for aborted transaction + verifyDirAndResult(2); + + // 5. Run Cleaner. Shouldn't impact anything. + runCleaner(hiveConf); + verifyDirAndResult(2); + } + + /** + * Test a scenario, on a micro-managed table, where an IOW comes in + * after a MAJOR compaction, and then a MINOR compaction is initiated. + * + * @throws Exception + */ + @Test + public void testInsertOverwriteForMmTable() throws Exception { + FileSystem fs = FileSystem.get(hiveConf); + FileStatus[] status; + + // 1. Insert two rows to an MM table + runStatementOnDriver("insert into " + TableExtended.MMTBL + "(a,b) values(1,2)"); + runStatementOnDriver("insert into " + TableExtended.MMTBL + "(a,b) values(3,4)"); + status = fs.listStatus(new Path(TEST_WAREHOUSE_DIR + "/" + + (TableExtended.MMTBL).toString().toLowerCase()), FileUtils.STAGING_DIR_PATH_FILTER); + // There should be 2 delta dirs in the location + Assert.assertEquals(2, status.length); + for (int i = 0; i < status.length; i++) { + Assert.assertTrue(status[i].getPath().getName().matches("delta_.*")); + } + + // 2. Perform a major compaction. + runStatementOnDriver("alter table "+ TableExtended.MMTBL + " compact 'MAJOR'"); + runWorker(hiveConf); + status = fs.listStatus(new Path(TEST_WAREHOUSE_DIR + "/" + + (TableExtended.MMTBL).toString().toLowerCase()), FileUtils.STAGING_DIR_PATH_FILTER); + // There should be 2 delta dirs. + Assert.assertEquals(2, status.length); + boolean sawBase = false; + int deltaCount = 0; + for (int i = 0; i < status.length; i++) { + String dirName = status[i].getPath().getName(); + if (dirName.matches("delta_.*")) { + deltaCount++; + } else { + sawBase = true; + Assert.assertTrue(dirName.matches("base_.*")); + } + } + Assert.assertEquals(2, deltaCount); + Assert.assertFalse(sawBase); + // Verify query result + int [][] resultData = new int[][] {{1,2},{3,4}}; + List rs = runStatementOnDriver("select a,b from " + TableExtended.MMTBL + " order by a,b"); + Assert.assertEquals(stringifyValues(resultData), rs); + + // 3. 
+    // 3. INSERT OVERWRITE
+    // Prepare data in the source table
+    runStatementOnDriver("insert into " + Table.NONACIDORCTBL + "(a,b) values(5,6),(7,8)");
+    // Insert overwrite the MM table from the source table
+    runStatementOnDriver("insert overwrite table " + TableExtended.MMTBL + " select a,b from " + Table.NONACIDORCTBL);
+    status = fs.listStatus(new Path(TEST_WAREHOUSE_DIR + "/" +
+        (TableExtended.MMTBL).toString().toLowerCase()), FileUtils.STAGING_DIR_PATH_FILTER);
+    // There should be 2 delta dirs, plus 1 base dir in the location
+    Assert.assertEquals(3, status.length);
+    int baseCount = 0;
+    deltaCount = 0;
+    for (int i = 0; i < status.length; i++) {
+      String dirName = status[i].getPath().getName();
+      if (dirName.matches("delta_.*")) {
+        deltaCount++;
+      } else {
+        baseCount++;
+      }
+    }
+    Assert.assertEquals(2, deltaCount);
+    Assert.assertEquals(1, baseCount);
+
+    // Verify query result
+    resultData = new int[][] {{5,6},{7,8}};
+    rs = runStatementOnDriver("select a,b from " + TableExtended.MMTBL + " order by a,b");
+    Assert.assertEquals(stringifyValues(resultData), rs);
+
+    // 4. Perform a minor compaction. Nothing should change:
+    // both deltas and the base dir should keep the same names.
+    // Re-verify the directory layout and query result using the same logic as above.
+    runStatementOnDriver("alter table "+ TableExtended.MMTBL + " compact 'MINOR'");
+    runWorker(hiveConf);
+    status = fs.listStatus(new Path(TEST_WAREHOUSE_DIR + "/" +
+        (TableExtended.MMTBL).toString().toLowerCase()), FileUtils.STAGING_DIR_PATH_FILTER);
+    // There should still be 2 delta dirs, plus 1 base dir in the location
+    Assert.assertEquals(3, status.length);
+    baseCount = 0;
+    deltaCount = 0;
+    for (int i = 0; i < status.length; i++) {
+      String dirName = status[i].getPath().getName();
+      if (dirName.matches("delta_.*")) {
+        deltaCount++;
+      } else {
+        Assert.assertTrue(dirName.matches("base_.*"));
+        baseCount++;
+      }
+    }
+    Assert.assertEquals(2, deltaCount);
+    Assert.assertEquals(1, baseCount);
+
+    // Verify query result
+    rs = runStatementOnDriver("select a,b from " + TableExtended.MMTBL + " order by a,b");
+    Assert.assertEquals(stringifyValues(resultData), rs);
+
+    // 5. Run Cleaner. It should remove the 2 delta dirs, which are now obsolete
+    // because the base dir covers all committed data.
+    runCleaner(hiveConf);
+    // There should be only 1 directory left: base_xxxxxxx.
+    status = fs.listStatus(new Path(TEST_WAREHOUSE_DIR + "/" +
+        (TableExtended.MMTBL).toString().toLowerCase()), FileUtils.STAGING_DIR_PATH_FILTER);
+    Assert.assertEquals(1, status.length);
+    Assert.assertTrue(status[0].getPath().getName().matches("base_.*"));
+    // Verify query result
+    rs = runStatementOnDriver("select a,b from " + TableExtended.MMTBL + " order by a,b");
+    Assert.assertEquals(stringifyValues(resultData), rs);
+  }
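The verification loops above, and in the tests that follow, repeatedly bucket directory names into deltas and bases by hand. A hedged sketch of a helper that could fold that pattern into one place; `classifyDirs` is a hypothetical name, not something this patch adds:

```java
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.fs.FileStatus;

public final class DirClassifier {
  // Buckets warehouse subdirectory names into "base" and "delta" lists,
  // mirroring the name checks the assertions perform inline.
  static Map<String, List<String>> classifyDirs(FileStatus[] status) {
    Map<String, List<String>> buckets = new HashMap<>();
    buckets.put("base", new ArrayList<>());
    buckets.put("delta", new ArrayList<>());
    for (FileStatus s : status) {
      String name = s.getPath().getName();
      buckets.get(name.matches("base_.*") ? "base" : "delta").add(name);
    }
    return buckets;
  }
}
```

With such a helper, an assertion like `Assert.assertEquals(2, classifyDirs(status).get("delta").size())` would replace each counting loop.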
+  /**
+   * Test a scenario on a partitioned micro-managed table where an IOW comes in
+   * before a MAJOR compaction happens.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testInsertOverwriteForPartitionedMmTable() throws Exception {
+    FileSystem fs = FileSystem.get(hiveConf);
+    FileStatus[] status;
+
+    // 1. Insert two rows into each partition of the MM table.
+    int[][] valuesOdd = {{5,6},{7,8}};
+    int[][] valuesEven = {{2,1},{4,3}};
+    runStatementOnDriver("insert into " + TableExtended.MMTBLPART + " PARTITION(p='odd') " + makeValuesClause(valuesOdd));
+    runStatementOnDriver("insert into " + TableExtended.MMTBLPART + " PARTITION(p='even') " + makeValuesClause(valuesEven));
+
+    // Verify dirs
+    String[] pStrings = {"/p=odd", "/p=even"};
+
+    for(int i=0; i < pStrings.length; i++) {
+      status = fs.listStatus(new Path(TEST_WAREHOUSE_DIR + "/" +
+          (TableExtended.MMTBLPART).toString().toLowerCase() + pStrings[i]), FileUtils.STAGING_DIR_PATH_FILTER);
+      // There should be 1 delta dir per partition location
+      Assert.assertEquals(1, status.length);
+      Assert.assertTrue(status[0].getPath().getName().matches("delta_.*"));
+    }
+
+    // 2. INSERT OVERWRITE
+    // Prepare data in the source table
+    int[][] newValsOdd = {{5,5},{11,11}};
+    int[][] newValsEven = {{2,2}};
+
+    runStatementOnDriver("insert into " + TableExtended.NONACIDPART + " PARTITION(p='odd') " + makeValuesClause(newValsOdd));
+    runStatementOnDriver("insert into " + TableExtended.NONACIDPART + " PARTITION(p='even') " + makeValuesClause(newValsEven));
+
+    // Insert overwrite the MM table from the source table, one partition at a time
+    List rs = null;
+    String s = "insert overwrite table " + TableExtended.MMTBLPART + " PARTITION(p='odd') " +
+        " select a,b from " + TableExtended.NONACIDPART + " where " + TableExtended.NONACIDPART + ".p='odd'";
+    rs = runStatementOnDriver("explain formatted " + s);
+    LOG.info("Explain formatted: " + rs.toString());
+    runStatementOnDriver(s);
+
+    s = "insert overwrite table " + TableExtended.MMTBLPART + " PARTITION(p='even') " +
+        " select a,b from " + TableExtended.NONACIDPART + " where " + TableExtended.NONACIDPART + ".p='even'";
+    runStatementOnDriver(s);
+
+    // Verify the resulting dirs. Remember each partition's base dir name for later.
+    boolean sawBase = false;
+    String[] baseDirs = {"", ""};
+    int deltaCount = 0;
+    for(int h=0; h < pStrings.length; h++) {
+      status = fs.listStatus(new Path(TEST_WAREHOUSE_DIR + "/" +
+          (TableExtended.MMTBLPART).toString().toLowerCase() + pStrings[h]), FileUtils.STAGING_DIR_PATH_FILTER);
+      // There should be 1 delta dir, plus 1 base dir in each partition location
+      Assert.assertEquals(2, status.length);
+      for (int i = 0; i < status.length; i++) {
+        String dirName = status[i].getPath().getName();
+        if (dirName.matches("delta_.*")) {
+          deltaCount++;
+        } else {
+          sawBase = true;
+          baseDirs[h] = dirName;
+          Assert.assertTrue(baseDirs[h].matches("base_.*"));
+        }
+      }
+      Assert.assertEquals(1, deltaCount);
+      Assert.assertTrue(sawBase);
+      deltaCount = 0;
+      sawBase = false;
+    }
+
+    // Verify query result
+    rs = runStatementOnDriver("select a,b from " + TableExtended.MMTBLPART + " where p='even' order by a,b");
+    int[][] rExpectedEven = new int[][] {{2,2}};
+    Assert.assertEquals(stringifyValues(rExpectedEven), rs);
+
+    rs = runStatementOnDriver("select a,b from " + TableExtended.MMTBLPART + " where p='odd' order by a,b");
+    int[][] rExpectedOdd = new int[][] {{5,5},{11,11}};
+    Assert.assertEquals(stringifyValues(rExpectedOdd), rs);
+
+    // 3. Perform a major compaction on each partition. Nothing should change:
+    // both the deltas and the base dirs should keep the same names.
+    // Re-verify the directory layout and query result using the same logic as above.
+    runStatementOnDriver("alter table "+ TableExtended.MMTBLPART + " PARTITION(p='odd') " + " compact 'MAJOR'" );
+    runWorker(hiveConf);
+    runStatementOnDriver("alter table "+ TableExtended.MMTBLPART + " PARTITION(p='even') " + " compact 'MAJOR'" );
+    runWorker(hiveConf);
+
+    for(int h=0; h < pStrings.length; h++) {
+      status = fs.listStatus(new Path(TEST_WAREHOUSE_DIR + "/" +
+          (TableExtended.MMTBLPART).toString().toLowerCase() + pStrings[h]), FileUtils.STAGING_DIR_PATH_FILTER);
+      // There should still be 1 delta dir, plus 1 base dir in each partition location
+      Assert.assertEquals(2, status.length);
+      sawBase = false;
+      deltaCount = 0;
+      for (int i = 0; i < status.length; i++) {
+        String dirName = status[i].getPath().getName();
+        if (dirName.matches("delta_.*")) {
+          deltaCount++;
+        } else {
+          sawBase = true;
+          Assert.assertTrue("BASE ERROR: " + dirName, dirName.matches("base_.*"));
+          Assert.assertEquals(baseDirs[h], dirName);
+        }
+      }
+      Assert.assertEquals(1, deltaCount);
+      Assert.assertTrue(sawBase);
+      deltaCount = 0;
+      sawBase = false;
+    }
+
+    // Verify query result
+    rs = runStatementOnDriver("select a,b from " + TableExtended.MMTBLPART + " order by a,b");
+    int[][] rExpected = new int[][] {{2,2},{5,5},{11,11}};
+    Assert.assertEquals(stringifyValues(rExpected), rs);
+
+    // 4. Run Cleaner. It should remove the now-obsolete delta dir from each partition.
+    runCleaner(hiveConf);
+
+    // There should be only 1 directory left per partition: base_xxxxxxx.
+    for(int h=0; h < pStrings.length; h++) {
+      status = fs.listStatus(new Path(TEST_WAREHOUSE_DIR + "/" +
+          (TableExtended.MMTBLPART).toString().toLowerCase() + pStrings[h]), FileUtils.STAGING_DIR_PATH_FILTER);
+      Assert.assertEquals(1, status.length);
+      Assert.assertTrue(status[0].getPath().getName().matches("base_.*"));
+      Assert.assertEquals(baseDirs[h], status[0].getPath().getName());
+    }
+    // Verify query result
+    rs = runStatementOnDriver("select a,b from " + TableExtended.MMTBLPART + " order by a,b");
+    Assert.assertEquals(stringifyValues(rExpected), rs);
+  }
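The `baseDirs[]` bookkeeping above is really a before/after snapshot comparison: compaction and cleaning must not rename surviving directories. A hedged sketch of that idea as a reusable check; the helper name and its use here are illustrative, not part of the patch:

```java
import java.util.Arrays;
import java.util.Set;
import java.util.stream.Collectors;

import org.apache.hadoop.fs.FileStatus;

public final class DirSnapshot {
  // Returns the set of directory names under a table/partition location, so
  // callers can assert the set is unchanged across compaction and cleaning.
  static Set<String> names(FileStatus[] status) {
    return Arrays.stream(status)
        .map(s -> s.getPath().getName())
        .collect(Collectors.toSet());
  }
}
```

Usage would look like `Set<String> before = DirSnapshot.names(fs.listStatus(partPath, filter));` followed by the compaction, then `Assert.assertEquals(before, DirSnapshot.names(fs.listStatus(partPath, filter)));`.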
+  /**
+   * Test a scenario on a dynamically partitioned micro-managed table where an IOW comes in
+   * before a MAJOR compaction happens.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testInsertOverwriteWithDynamicPartition() throws Exception {
+    FileSystem fs = FileSystem.get(hiveConf);
+    FileStatus[] status;
+
+    // 1. Insert two rows into each partition of the MM table.
+    int[][] valuesOdd = {{5,6},{7,8}};
+    int[][] valuesEven = {{2,1},{4,3}};
+    runStatementOnDriver("insert into " + TableExtended.MMTBLPART + " PARTITION(p='odd') " + makeValuesClause(valuesOdd));
+    runStatementOnDriver("insert into " + TableExtended.MMTBLPART + " PARTITION(p='even') " + makeValuesClause(valuesEven));
+
+    // Verify dirs
+    String[] pStrings = {"/p=odd", "/p=even"};
+
+    for(int i=0; i < pStrings.length; i++) {
+      status = fs.listStatus(new Path(TEST_WAREHOUSE_DIR + "/" +
+          (TableExtended.MMTBLPART).toString().toLowerCase() + pStrings[i]), FileUtils.STAGING_DIR_PATH_FILTER);
+      // There should be 1 delta dir per partition location
+      Assert.assertEquals(1, status.length);
+      Assert.assertTrue(status[0].getPath().getName().matches("delta_.*"));
+    }
+
+    // 2. INSERT OVERWRITE with a dynamic partition spec
+    // Prepare data in the source table
+    int[][] newValsOdd = {{5,5},{11,11}};
+    int[][] newValsEven = {{2,2}};
+
+    runStatementOnDriver("insert into " + TableExtended.NONACIDPART + " PARTITION(p='odd') " + makeValuesClause(newValsOdd));
+    runStatementOnDriver("insert into " + TableExtended.NONACIDPART + " PARTITION(p='even') " + makeValuesClause(newValsEven));
+
+    runStatementOnDriver("insert overwrite table " + TableExtended.MMTBLPART + " partition(p) select a,b,p from " + TableExtended.NONACIDPART);
+
+    // Verify the resulting dirs.
+    boolean sawBase = false;
+    String[] baseDirs = {"", ""};
+    int deltaCount = 0;
+    for(int h=0; h < pStrings.length; h++) {
+      status = fs.listStatus(new Path(TEST_WAREHOUSE_DIR + "/" +
+          (TableExtended.MMTBLPART).toString().toLowerCase() + pStrings[h]), FileUtils.STAGING_DIR_PATH_FILTER);
+      // There should be 1 delta dir, plus 1 base dir in each partition location
+      Assert.assertEquals(2, status.length);
+
+      for (int i = 0; i < status.length; i++) {
+        String dirName = status[i].getPath().getName();
+        if (dirName.matches("delta_.*")) {
+          deltaCount++;
+        } else {
+          sawBase = true;
+          baseDirs[h] = dirName;
+          Assert.assertTrue(baseDirs[h].matches("base_.*"));
+        }
+      }
+      Assert.assertEquals(1, deltaCount);
+      Assert.assertTrue(sawBase);
+      deltaCount = 0;
+      sawBase = false;
+    }
+
+    // Verify per-partition query results
+    List rs = runStatementOnDriver("select a,b from " + TableExtended.MMTBLPART + " where p='even' order by a,b");
+    int[][] rExpectedEven = new int[][] {{2,2}};
+    Assert.assertEquals(stringifyValues(rExpectedEven), rs);
+
+    rs = runStatementOnDriver("select a,b from " + TableExtended.MMTBLPART + " where p='odd' order by a,b");
+    int[][] rExpectedOdd = new int[][] {{5,5},{11,11}};
+    Assert.assertEquals(stringifyValues(rExpectedOdd), rs);
+
+    // Verify the whole-table query result
+    rs = runStatementOnDriver("select a,b from " + TableExtended.MMTBLPART + " order by a,b");
+    int[][] rExpected = new int[][] {{2,2},{5,5},{11,11}};
+    Assert.assertEquals(stringifyValues(rExpected), rs);
+  }
+
+  @Test
+  public void testInsertOverwriteWithUnionAll() throws Exception {
+    FileSystem fs = FileSystem.get(hiveConf);
+    FileStatus[] status;
+
+    // 1. Insert two rows into the MM table
+    runStatementOnDriver("insert into " + TableExtended.MMTBL + "(a,b) values(1,2)");
+    runStatementOnDriver("insert into " + TableExtended.MMTBL + "(a,b) values(3,4)");
+    status = fs.listStatus(new Path(TEST_WAREHOUSE_DIR + "/" +
+        (TableExtended.MMTBL).toString().toLowerCase()), FileUtils.STAGING_DIR_PATH_FILTER);
+    // There should be 2 delta dirs in the location
+    Assert.assertEquals(2, status.length);
+    for (int i = 0; i < status.length; i++) {
+      Assert.assertTrue(status[i].getPath().getName().matches("delta_.*"));
+    }
+
+    // 2. Insert overwrite the MM table from a UNION ALL of two selects; the {9,10} row
+    // falls outside both ranges and must not survive (see the sketch after this class).
+    int[][] values = {{1,2},{2,4},{5,6},{6,8},{9,10}};
+    runStatementOnDriver("insert into " + Table.NONACIDORCTBL + TestTxnCommands2.makeValuesClause(values));
+
+    runStatementOnDriver("insert overwrite table " + TableExtended.MMTBL + " select a,b from " + Table.NONACIDORCTBL + " where a between 1 and 3 union all select a,b from " + Table.NONACIDORCTBL + " where a between 5 and 7");
+
+    // Verify the resulting dirs.
+    boolean sawBase = false;
+    String baseDir = "";
+    int deltaCount = 0;
+
+    status = fs.listStatus(new Path(TEST_WAREHOUSE_DIR + "/" +
+        (TableExtended.MMTBL).toString().toLowerCase()), FileUtils.STAGING_DIR_PATH_FILTER);
+    // There should be 2 delta dirs, plus 1 base dir in the location
+    Assert.assertEquals(3, status.length);
+
+    for (int i = 0; i < status.length; i++) {
+      String dirName = status[i].getPath().getName();
+      if (dirName.matches("delta_.*")) {
+        deltaCount++;
+      } else {
+        sawBase = true;
+        baseDir = dirName;
+        Assert.assertTrue(baseDir.matches("base_.*"));
+      }
+    }
+    Assert.assertEquals(2, deltaCount);
+    Assert.assertTrue(sawBase);
+
+    List rs = runStatementOnDriver("select a,b from " + TableExtended.MMTBL + " order by a,b");
+    int[][] rExpected = new int[][] {{1,2},{2,4},{5,6},{6,8}};
+    Assert.assertEquals(stringifyValues(rExpected), rs);
+
+    // 3. Perform a major compaction.
+    runStatementOnDriver("alter table "+ TableExtended.MMTBL + " compact 'MAJOR'");
+    runWorker(hiveConf);
+
+    // 4. Run Cleaner. It should remove the 2 obsolete delta dirs.
+    runCleaner(hiveConf);
+
+    rs = runStatementOnDriver("select a,b from " + TableExtended.MMTBL + " order by a,b");
+    Assert.assertEquals(stringifyValues(rExpected), rs);
+
+    // Verify the resulting dirs: only the base dir should be left.
+    status = fs.listStatus(new Path(TEST_WAREHOUSE_DIR + "/" +
+        (TableExtended.MMTBL).toString().toLowerCase()), FileUtils.STAGING_DIR_PATH_FILTER);
+    Assert.assertEquals(1, status.length);
+
+    sawBase = false;
+    deltaCount = 0;
+    for (int i = 0; i < status.length; i++) {
+      String dirName = status[i].getPath().getName();
+      if (dirName.matches("delta_.*")) {
+        deltaCount++;
+      } else {
+        sawBase = true;
+        baseDir = dirName;
+        Assert.assertTrue(baseDir.matches("base_.*"));
+      }
+    }
+    Assert.assertEquals(0, deltaCount);
+    Assert.assertTrue(sawBase);
+
+    rs = runStatementOnDriver("select a,b from " + TableExtended.MMTBL + " order by a,b");
+    Assert.assertEquals(stringifyValues(rExpected), rs);
+  }
+
+  private void verifyDirAndResult(int expectedDeltas) throws Exception {
+    FileSystem fs = FileSystem.get(hiveConf);
+    // Verify the contents of the table location
+    FileStatus[] status = fs.listStatus(new Path(TEST_WAREHOUSE_DIR + "/" +
+        (TableExtended.MMTBL).toString().toLowerCase()), FileUtils.STAGING_DIR_PATH_FILTER);
+    int sawDeltaTimes = 0;
+    for (int i = 0; i < status.length; i++) {
+      Assert.assertTrue(status[i].getPath().getName().matches("delta_.*"));
+      sawDeltaTimes++;
+      FileStatus[] files = fs.listStatus(status[i].getPath(), FileUtils.STAGING_DIR_PATH_FILTER);
+      Assert.assertEquals(1, files.length);
+      Assert.assertEquals("000000_0", files[0].getPath().getName());
+    }
+    Assert.assertEquals(expectedDeltas, sawDeltaTimes);
+
+    // Verify query result
+    int[][] resultData = new int[][] {{1,2}, {3,4}};
+    List rs = runStatementOnDriver("select a,b from " + TableExtended.MMTBL + " order by a,b");
+    Assert.assertEquals(stringifyValues(resultData), rs);
+  }
+}
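As referenced in the union-all test above, here is a quick, purely illustrative sanity check of why `rExpected` excludes the {9,10} row: the insert-overwrite keeps exactly the source rows matching either branch of the UNION ALL.

```java
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

public class UnionAllExpectation {
  public static void main(String[] args) {
    int[][] values = {{1,2},{2,4},{5,6},{6,8},{9,10}};
    // Same predicates as the two UNION ALL branches:
    // a between 1 and 3, or a between 5 and 7.
    List<String> expected = Arrays.stream(values)
        .filter(r -> (r[0] >= 1 && r[0] <= 3) || (r[0] >= 5 && r[0] <= 7))
        .map(Arrays::toString)
        .collect(Collectors.toList());
    System.out.println(expected); // [[1, 2], [2, 4], [5, 6], [6, 8]] -- {9,10} is filtered out
  }
}
```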
diff --git ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommandsForOrcMmTable.java ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommandsForOrcMmTable.java
new file mode 100644
index 0000000000..1a701752f5
--- /dev/null
+++ ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommandsForOrcMmTable.java
@@ -0,0 +1,42 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql;
+
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Same as TestTxnCommandsForMmTable, but runs the tests against micro-managed
+ * (insert-only) tables stored as ORC, by calling setUpInternalExtended(true)
+ * so the tables are created with "stored as orc".
+ */
+public class TestTxnCommandsForOrcMmTable extends TestTxnCommandsForMmTable {
+
+  public TestTxnCommandsForOrcMmTable() {
+    super();
+  }
+
+  @Override
+  @Before
+  public void setUp() throws Exception {
+    setUpInternal();
+    setUpInternalExtended(true);
+  }
+}
diff --git ql/src/test/org/apache/hadoop/hive/ql/TxnCommandsBaseForTests.java ql/src/test/org/apache/hadoop/hive/ql/TxnCommandsBaseForTests.java
index 9f31eb1054..1c2b73fe29 100644
--- ql/src/test/org/apache/hadoop/hive/ql/TxnCommandsBaseForTests.java
+++ ql/src/test/org/apache/hadoop/hive/ql/TxnCommandsBaseForTests.java
@@ -24,6 +24,7 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.txn.TxnDbUtil;
 import org.apache.hadoop.hive.ql.io.HiveInputFormat;
 import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
@@ -69,7 +70,7 @@ public String toString() {
   public void setUp() throws Exception {
     setUpInternal();
   }
-  void setUpInternal() throws Exception {
+  protected void setUpInternal() throws Exception {
     hiveConf = new HiveConf(this.getClass());
     hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
     hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
@@ -101,7 +102,7 @@ void setUpInternal() throws Exception {
     runStatementOnDriver("create temporary table " + Table.ACIDTBL2 + "(a int, b int, c int) clustered by (c) into " + BUCKET_COUNT + " buckets stored as orc TBLPROPERTIES ('transactional'='true')");
     runStatementOnDriver("create table " + Table.NONACIDNONBUCKET + "(a int, b int) stored as orc");
   }
-  private void dropTables() throws Exception {
+  protected void dropTables() throws Exception {
     for(TxnCommandsBaseForTests.Table t : TxnCommandsBaseForTests.Table.values()) {
       runStatementOnDriver("drop table if exists " + t);
     }
@@ -134,6 +135,14 @@ String getWarehouseDir() {
   String makeValuesClause(int[][] rows) {
     return TestTxnCommands2.makeValuesClause(rows);
   }
+
+  void runWorker(HiveConf hiveConf) throws MetaException {
+    TestTxnCommands2.runWorker(hiveConf);
+  }
+
+  void runCleaner(HiveConf hiveConf) throws MetaException {
+    TestTxnCommands2.runCleaner(hiveConf);
+  }
 
   List runStatementOnDriver(String stmt) throws Exception {
     CommandProcessorResponse cpr = d.run(stmt);
diff --git ql/src/test/org/apache/hadoop/hive/ql/exec/TestUtilities.java ql/src/test/org/apache/hadoop/hive/ql/exec/TestUtilities.java
index 9a22c54b12..61f5d1aa02 100644
---
ql/src/test/org/apache/hadoop/hive/ql/exec/TestUtilities.java +++ ql/src/test/org/apache/hadoop/hive/ql/exec/TestUtilities.java @@ -218,7 +218,7 @@ public void testRemoveTempOrDuplicateFilesOnMrWithDp() throws Exception { Path tempDirPath = setupTempDirWithSingleOutputFile(hconf); FileSinkDesc conf = getFileSinkDesc(tempDirPath); - List paths = Utilities.removeTempOrDuplicateFiles(localFs, tempDirPath, dpCtx, conf, hconf); + List paths = Utilities.removeTempOrDuplicateFiles(localFs, tempDirPath, dpCtx, conf, hconf, false); String expectedScheme = tempDirPath.toUri().getScheme(); String expectedAuthority = tempDirPath.toUri().getAuthority(); diff --git ql/src/test/org/apache/hadoop/hive/ql/hooks/TestHooks.java ql/src/test/org/apache/hadoop/hive/ql/hooks/TestHooks.java index a89e5e0dc0..5ebc480e49 100644 --- ql/src/test/org/apache/hadoop/hive/ql/hooks/TestHooks.java +++ ql/src/test/org/apache/hadoop/hive/ql/hooks/TestHooks.java @@ -90,7 +90,6 @@ public String redactQuery(String query) { } private static Driver createDriver(HiveConf conf) { - HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false); SessionState.start(conf); Driver driver = new Driver(conf); return driver; diff --git ql/src/test/org/apache/hadoop/hive/ql/hooks/TestQueryHooks.java ql/src/test/org/apache/hadoop/hive/ql/hooks/TestQueryHooks.java index 4d19a715c6..71c2c0d91b 100644 --- ql/src/test/org/apache/hadoop/hive/ql/hooks/TestQueryHooks.java +++ ql/src/test/org/apache/hadoop/hive/ql/hooks/TestQueryHooks.java @@ -49,7 +49,6 @@ @BeforeClass public static void setUpBeforeClass() { conf = new HiveConf(TestQueryHooks.class); - conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false); conf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); } diff --git ql/src/test/org/apache/hadoop/hive/ql/io/TestSymlinkTextInputFormat.java ql/src/test/org/apache/hadoop/hive/ql/io/TestSymlinkTextInputFormat.java index 976d83d55f..c614a3951e 100644 --- ql/src/test/org/apache/hadoop/hive/ql/io/TestSymlinkTextInputFormat.java +++ ql/src/test/org/apache/hadoop/hive/ql/io/TestSymlinkTextInputFormat.java @@ -136,7 +136,6 @@ public void testCombine() throws Exception { .setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVE_REWORK_MAPREDWORK, true); - HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false); Driver drv = new Driver(hiveConf); String tblName = "text_symlink_text"; diff --git ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager.java ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager.java index 406bdea96a..ff3f7fd8b5 100644 --- ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager.java +++ ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager.java @@ -390,7 +390,6 @@ public void concurrencyFalse() throws Exception { HiveConf badConf = new HiveConf(); badConf.setVar(HiveConf.ConfVars.HIVE_TXN_MANAGER, "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager"); - badConf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false); boolean sawException = false; try { TxnManagerFactory.getTxnManagerFactory().getTxnManager(badConf); diff --git ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDummyTxnManager.java ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDummyTxnManager.java index 913b60c353..57c17addc3 100644 
--- ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDummyTxnManager.java +++ ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDummyTxnManager.java @@ -63,7 +63,6 @@ @Before public void setUp() throws Exception { - conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, true); conf.setVar(HiveConf.ConfVars.HIVE_TXN_MANAGER, DummyTxnManager.class.getName()); conf .setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, diff --git ql/src/test/org/apache/hadoop/hive/ql/lockmgr/zookeeper/TestZookeeperLockManager.java ql/src/test/org/apache/hadoop/hive/ql/lockmgr/zookeeper/TestZookeeperLockManager.java index a7a76a42cb..822ff85ce3 100644 --- ql/src/test/org/apache/hadoop/hive/ql/lockmgr/zookeeper/TestZookeeperLockManager.java +++ ql/src/test/org/apache/hadoop/hive/ql/lockmgr/zookeeper/TestZookeeperLockManager.java @@ -129,7 +129,6 @@ public void testMetrics() throws Exception{ conf.setVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_QUORUM, "localhost"); conf.setVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_CLIENT_PORT, String.valueOf(server.getPort())); conf.setBoolVar(HiveConf.ConfVars.HIVE_SERVER2_METRICS_ENABLED, true); - conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false); conf.setVar(HiveConf.ConfVars.HIVE_METRICS_REPORTER, MetricsReporting.JSON_FILE.name() + "," + MetricsReporting.JMX.name()); MetricsFactory.init(conf); CodahaleMetrics metrics = (CodahaleMetrics) MetricsFactory.getInstance(); diff --git ql/src/test/org/apache/hadoop/hive/ql/parse/TestColumnAccess.java ql/src/test/org/apache/hadoop/hive/ql/parse/TestColumnAccess.java index 7bda832a3b..078a421979 100644 --- ql/src/test/org/apache/hadoop/hive/ql/parse/TestColumnAccess.java +++ ql/src/test/org/apache/hadoop/hive/ql/parse/TestColumnAccess.java @@ -187,7 +187,6 @@ private static Driver createDriver() { conf .setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); - HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false); conf.setBoolVar(HiveConf.ConfVars.HIVE_STATS_COLLECT_SCANCOLS, true); SessionState.start(conf); Driver driver = new Driver(conf); diff --git ql/src/test/org/apache/hadoop/hive/ql/plan/TestReadEntityDirect.java ql/src/test/org/apache/hadoop/hive/ql/plan/TestReadEntityDirect.java index 79ce2f1769..4aa0dfb944 100644 --- ql/src/test/org/apache/hadoop/hive/ql/plan/TestReadEntityDirect.java +++ ql/src/test/org/apache/hadoop/hive/ql/plan/TestReadEntityDirect.java @@ -183,7 +183,6 @@ private static Driver createDriver() { "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); conf.setVar(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK, CheckInputReadEntityDirect.class.getName()); - HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false); SessionState.start(conf); Driver driver = new Driver(conf); return driver; diff --git ql/src/test/org/apache/hadoop/hive/ql/plan/TestViewEntity.java ql/src/test/org/apache/hadoop/hive/ql/plan/TestViewEntity.java index 234f9796c9..d9bb6f2ffa 100644 --- ql/src/test/org/apache/hadoop/hive/ql/plan/TestViewEntity.java +++ ql/src/test/org/apache/hadoop/hive/ql/plan/TestViewEntity.java @@ -62,8 +62,6 @@ public static void onetimeSetup() throws Exception { "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); conf.setVar(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK, CheckInputReadEntity.class.getName()); - HiveConf - .setBoolVar(conf, HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false); 
SessionState.start(conf); driver = new Driver(conf); } diff --git ql/src/test/results/clientpositive/llap/mm_all.q.out ql/src/test/results/clientpositive/llap/mm_all.q.out index cfbe659634..03c129347f 100644 --- ql/src/test/results/clientpositive/llap/mm_all.q.out +++ ql/src/test/results/clientpositive/llap/mm_all.q.out @@ -1365,6 +1365,12 @@ POSTHOOK: query: select * from multi0_2_mm order by key, key2 POSTHOOK: type: QUERY POSTHOOK: Input: default@multi0_2_mm #### A masked pattern was here #### +455 97 +455 98 +456 0 +456 10 +457 100 +457 103 PREHOOK: query: from intermediate insert into table multi0_1_mm select p, key insert overwrite table multi0_2_mm select key, p @@ -1417,6 +1423,12 @@ POSTHOOK: query: select * from multi0_2_mm order by key, key2 POSTHOOK: type: QUERY POSTHOOK: Input: default@multi0_2_mm #### A masked pattern was here #### +0 456 +10 456 +97 455 +98 455 +100 457 +103 457 PREHOOK: query: drop table multi0_1_mm PREHOOK: type: DROPTABLE PREHOOK: Input: default@multi0_1_mm @@ -1541,17 +1553,11 @@ POSTHOOK: Input: default@multi1_mm@p=2 100 457 2 103 457 1 103 457 2 -455 97 1 455 97 2 -455 98 1 455 98 2 -456 0 1 456 0 2 -456 10 1 456 10 2 -457 100 1 457 100 2 -457 103 1 457 103 2 PREHOOK: query: from intermediate insert into table multi1_mm partition(p) select p, key, p @@ -1621,22 +1627,16 @@ POSTHOOK: Input: default@multi1_mm@p=457 103 457 1 103 457 1 103 457 2 -455 97 1 455 97 2 455 97 455 -455 98 1 455 98 2 455 98 455 -456 0 1 456 0 2 456 0 456 -456 10 1 456 10 2 456 10 456 -457 100 1 457 100 2 457 100 457 -457 103 1 457 103 2 457 103 457 PREHOOK: query: from intermediate @@ -1705,27 +1705,21 @@ POSTHOOK: Input: default@multi1_mm@p=457 103 457 1 103 457 2 455 97 1 -455 97 1 455 97 2 455 97 455 455 98 1 -455 98 1 455 98 2 455 98 455 456 0 1 -456 0 1 456 0 2 456 0 456 456 10 1 -456 10 1 456 10 2 456 10 456 457 100 1 -457 100 1 457 100 2 457 100 457 457 103 1 -457 103 1 457 103 2 457 103 457 PREHOOK: query: drop table multi1_mm diff --git ql/src/test/results/clientpositive/mm_all.q.out ql/src/test/results/clientpositive/mm_all.q.out index 5ad5957c5d..490c67f2ed 100644 --- ql/src/test/results/clientpositive/mm_all.q.out +++ ql/src/test/results/clientpositive/mm_all.q.out @@ -1384,6 +1384,12 @@ POSTHOOK: query: select * from multi0_2_mm order by key, key2 POSTHOOK: type: QUERY POSTHOOK: Input: default@multi0_2_mm #### A masked pattern was here #### +455 97 +455 98 +456 0 +456 10 +457 100 +457 103 PREHOOK: query: from intermediate insert into table multi0_1_mm select p, key insert overwrite table multi0_2_mm select key, p @@ -1436,6 +1442,12 @@ POSTHOOK: query: select * from multi0_2_mm order by key, key2 POSTHOOK: type: QUERY POSTHOOK: Input: default@multi0_2_mm #### A masked pattern was here #### +0 456 +10 456 +97 455 +98 455 +100 457 +103 457 PREHOOK: query: drop table multi0_1_mm PREHOOK: type: DROPTABLE PREHOOK: Input: default@multi0_1_mm @@ -1560,17 +1572,11 @@ POSTHOOK: Input: default@multi1_mm@p=2 100 457 2 103 457 1 103 457 2 -455 97 1 455 97 2 -455 98 1 455 98 2 -456 0 1 456 0 2 -456 10 1 456 10 2 -457 100 1 457 100 2 -457 103 1 457 103 2 PREHOOK: query: from intermediate insert into table multi1_mm partition(p) select p, key, p @@ -1640,22 +1646,16 @@ POSTHOOK: Input: default@multi1_mm@p=457 103 457 1 103 457 1 103 457 2 -455 97 1 455 97 2 455 97 455 -455 98 1 455 98 2 455 98 455 -456 0 1 456 0 2 456 0 456 -456 10 1 456 10 2 456 10 456 -457 100 1 457 100 2 457 100 457 -457 103 1 457 103 2 457 103 457 PREHOOK: query: from intermediate @@ -1724,27 +1724,21 @@ 
POSTHOOK: Input: default@multi1_mm@p=457 103 457 1 103 457 2 455 97 1 -455 97 1 455 97 2 455 97 455 455 98 1 -455 98 1 455 98 2 455 98 455 456 0 1 -456 0 1 456 0 2 456 0 456 456 10 1 -456 10 1 456 10 2 456 10 456 457 100 1 -457 100 1 457 100 2 457 100 457 457 103 1 -457 103 1 457 103 2 457 103 457 PREHOOK: query: drop table multi1_mm diff --git service/src/test/org/apache/hive/service/cli/CLIServiceTest.java service/src/test/org/apache/hive/service/cli/CLIServiceTest.java index bc6648e408..410cf3e7dc 100644 --- service/src/test/org/apache/hive/service/cli/CLIServiceTest.java +++ service/src/test/org/apache/hive/service/cli/CLIServiceTest.java @@ -156,7 +156,7 @@ public void testExecuteStatement() throws Exception { OperationHandle opHandle; String queryString = "SET " + HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname - + " = false"; + + " = true"; opHandle = client.executeStatement(sessionHandle, queryString, confOverlay); client.closeOperation(opHandle); @@ -213,7 +213,7 @@ public void testExecuteStatementAsync() throws Exception { // Change lock manager, otherwise unit-test doesn't go through queryString = "SET " + HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname - + " = false"; + + " = true"; opHandle = client.executeStatement(sessionHandle, queryString, confOverlay); client.closeOperation(opHandle); @@ -499,7 +499,7 @@ private SessionHandle openSession(Map confOverlay) SessionState.get().setIsHiveServerQuery(true); // Pretend we are in HS2. String queryString = "SET " + HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname - + " = false"; + + " = true"; client.executeStatement(sessionHandle, queryString, confOverlay); return sessionHandle; } @@ -587,8 +587,8 @@ public void testConfOverlay() throws Exception { String tabName = "TEST_CONF_EXEC"; String tabNameVar = "tabNameVar"; - String setLockMgr = "SET " + HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname - + " = false"; + String setLockMgr = "SET " + HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname + + " = true"; OperationHandle opHandle = client.executeStatement(sessionHandle, setLockMgr, null); client.closeOperation(opHandle); diff --git service/src/test/org/apache/hive/service/cli/operation/TestQueryLifeTimeHooksWithSQLOperation.java service/src/test/org/apache/hive/service/cli/operation/TestQueryLifeTimeHooksWithSQLOperation.java index c4f5451d9d..dd2f491cbf 100644 --- service/src/test/org/apache/hive/service/cli/operation/TestQueryLifeTimeHooksWithSQLOperation.java +++ service/src/test/org/apache/hive/service/cli/operation/TestQueryLifeTimeHooksWithSQLOperation.java @@ -47,7 +47,6 @@ @Test public void testQueryInfoInHookContext() throws IllegalAccessException, ClassNotFoundException, InstantiationException, HiveSQLException { HiveConf conf = new HiveConf(TestQueryHooks.class); - conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false); conf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); conf.set(HiveConf.ConfVars.HIVE_QUERY_LIFETIME_HOOKS.varname, QueryInfoVerificationHook.class.getName()); diff --git service/src/test/org/apache/hive/service/cli/session/TestSessionManagerMetrics.java service/src/test/org/apache/hive/service/cli/session/TestSessionManagerMetrics.java index 646159f1e4..c60ede7c02 100644 --- service/src/test/org/apache/hive/service/cli/session/TestSessionManagerMetrics.java +++ service/src/test/org/apache/hive/service/cli/session/TestSessionManagerMetrics.java @@ -74,7 +74,6 @@ public void setup() 
throws Exception { conf.setVar(HiveConf.ConfVars.HIVE_SERVER2_SESSION_CHECK_INTERVAL, "3s"); conf.setBoolVar(HiveConf.ConfVars.HIVE_SERVER2_METRICS_ENABLED, true); - conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false); conf.setVar(HiveConf.ConfVars.HIVE_METRICS_REPORTER, MetricsReporting.JSON_FILE.name() + "," + MetricsReporting.JMX.name()); conf.setBoolVar(HiveConf.ConfVars.HIVEOPTIMIZEMETADATAQUERIES, false); MetricsFactory.init(conf); diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java index d18ddc89cb..c6e6deaa2b 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java @@ -819,7 +819,7 @@ public static ConfVars getMetaConf(String name) { "no transactions."), // Metastore always support concurrency, but certain ACID tests depend on this being set. We // need to do the work to detangle this - HIVE_SUPPORT_CONCURRENCY("hive.support.concurrency", "hive.support.concurrency", false, + HIVE_SUPPORT_CONCURRENCY("hive.support.concurrency", "hive.support.concurrency", true, "Whether Hive supports concurrency control or not. \n" + "A ZooKeeper instance must be up and running when using zookeeper Hive lock manager "),
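With hive.support.concurrency now defaulting to true and DbTxnManager becoming the default transaction manager, a test that still needs the legacy behavior has to opt out explicitly instead of relying on defaults. A minimal sketch under those assumptions; the ConfVars names are the ones used throughout this patch, while the surrounding class is hypothetical:

```java
import org.apache.hadoop.hive.conf.HiveConf;

public class LegacyLockingExample {
  // Builds a HiveConf that opts out of the new defaults introduced by this
  // patch, pairing concurrency=false with the legacy DummyTxnManager
  // (DbTxnManager with concurrency disabled is rejected, as the
  // concurrencyFalse test above demonstrates).
  public static HiveConf legacyConf() {
    HiveConf conf = new HiveConf();
    conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
    conf.setVar(HiveConf.ConfVars.HIVE_TXN_MANAGER,
        "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager");
    return conf;
  }
}
```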