diff --git contrib/src/test/queries/clientpositive/url_hook.q contrib/src/test/queries/clientpositive/url_hook.q
index 512e579fb8..b8f4c9f4c0 100644
--- contrib/src/test/queries/clientpositive/url_hook.q
+++ contrib/src/test/queries/clientpositive/url_hook.q
@@ -2,6 +2,8 @@
add jar ${system:maven.local.repository}/org/apache/hive/hive-contrib/${system:hive.version}/hive-contrib-${system:hive.version}.jar;
SHOW TABLES 'src';
+set datanucleus.schema.autoCreateAll=true;
set hive.metastore.ds.connection.url.hook=org.apache.hadoop.hive.contrib.metastore.hooks.SampleURLHook;
-- changes to dummy derby store.. should return empty result
SHOW TABLES 'src';
+set datanucleus.schema.autoCreateAll=false;
diff --git itests/hive-blobstore/pom.xml itests/hive-blobstore/pom.xml
index 09955c55f3..45ec3dfb8e 100644
--- itests/hive-blobstore/pom.xml
+++ itests/hive-blobstore/pom.xml
@@ -299,6 +299,24 @@
        <groupId>org.apache.maven.plugins</groupId>
        <artifactId>maven-antrun-plugin</artifactId>
        <executions>
+          <execution>
+            <id>setup-metastore-scripts</id>
+            <phase>process-test-resources</phase>
+            <goals>
+              <goal>run</goal>
+            </goals>
+            <configuration>
+              <target>
+                <!-- stage the metastore SQL scripts where TxnDbUtil.prepDb looks for them -->
+                <mkdir dir="${basedir}/${hive.path.to.root}/standalone-metastore/metastore-server/target/tmp/scripts/metastore/upgrade"/>
+                <copy todir="${basedir}/${hive.path.to.root}/standalone-metastore/metastore-server/target/tmp/scripts/metastore/upgrade">
+                  <fileset dir="${basedir}/${hive.path.to.root}/standalone-metastore/metastore-server/src/main/sql/"/>
+                </copy>
+              </target>
+            </configuration>
+          </execution>
          <execution>
            <id>generate-tests-sources</id>
            <phase>generate-test-sources</phase>
diff --git itests/qtest-accumulo/pom.xml itests/qtest-accumulo/pom.xml
index a35d2a8a10..bf143e0828 100644
--- itests/qtest-accumulo/pom.xml
+++ itests/qtest-accumulo/pom.xml
@@ -444,6 +444,24 @@
        <groupId>org.apache.maven.plugins</groupId>
        <artifactId>maven-antrun-plugin</artifactId>
        <executions>
+          <execution>
+            <id>setup-metastore-scripts</id>
+            <phase>process-test-resources</phase>
+            <goals>
+              <goal>run</goal>
+            </goals>
+            <configuration>
+              <target>
+                <!-- stage the metastore SQL scripts where TxnDbUtil.prepDb looks for them -->
+                <mkdir dir="${basedir}/${hive.path.to.root}/standalone-metastore/metastore-server/target/tmp/scripts/metastore/upgrade"/>
+                <copy todir="${basedir}/${hive.path.to.root}/standalone-metastore/metastore-server/target/tmp/scripts/metastore/upgrade">
+                  <fileset dir="${basedir}/${hive.path.to.root}/standalone-metastore/metastore-server/src/main/sql/"/>
+                </copy>
+              </target>
+            </configuration>
+          </execution>
          <execution>
            <id>generate-tests-sources</id>
            <phase>generate-test-sources</phase>
diff --git itests/qtest-druid/pom.xml itests/qtest-druid/pom.xml
index cc0cceff68..9695486633 100644
--- itests/qtest-druid/pom.xml
+++ itests/qtest-druid/pom.xml
@@ -293,6 +293,30 @@
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-antrun-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>setup-metastore-scripts</id>
+            <phase>process-test-resources</phase>
+            <goals>
+              <goal>run</goal>
+            </goals>
+            <configuration>
+              <target>
+                <!-- stage the metastore SQL scripts where TxnDbUtil.prepDb looks for them -->
+                <mkdir dir="${basedir}/${hive.path.to.root}/standalone-metastore/metastore-server/target/tmp/scripts/metastore/upgrade"/>
+                <copy todir="${basedir}/${hive.path.to.root}/standalone-metastore/metastore-server/target/tmp/scripts/metastore/upgrade">
+                  <fileset dir="${basedir}/${hive.path.to.root}/standalone-metastore/metastore-server/src/main/sql/"/>
+                </copy>
+              </target>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
diff --git itests/qtest-kudu/pom.xml itests/qtest-kudu/pom.xml
index f23399fa37..f3086265a9 100644
--- itests/qtest-kudu/pom.xml
+++ itests/qtest-kudu/pom.xml
@@ -359,6 +359,30 @@
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-antrun-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>setup-metastore-scripts</id>
+            <phase>process-test-resources</phase>
+            <goals>
+              <goal>run</goal>
+            </goals>
+            <configuration>
+              <target>
+                <!-- stage the metastore SQL scripts where TxnDbUtil.prepDb looks for them -->
+                <mkdir dir="${basedir}/${hive.path.to.root}/standalone-metastore/metastore-server/target/tmp/scripts/metastore/upgrade"/>
+                <copy todir="${basedir}/${hive.path.to.root}/standalone-metastore/metastore-server/target/tmp/scripts/metastore/upgrade">
+                  <fileset dir="${basedir}/${hive.path.to.root}/standalone-metastore/metastore-server/src/main/sql/"/>
+                </copy>
+              </target>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
diff --git itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliAdapter.java itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliAdapter.java
index fcfc79059a..42c1174dc7 100644
--- itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliAdapter.java
+++ itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliAdapter.java
@@ -107,7 +107,7 @@ public void evaluate() throws Throwable {
}
// override this if e.g. a metastore dependent init logic is needed
- protected void beforeClassSpec() {
+  protected void beforeClassSpec() throws Exception {
}
public final TestRule buildTestRule() {
diff --git itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreBeeLineDriver.java itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreBeeLineDriver.java
index 5b08f8b894..292e7abf16 100644
--- itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreBeeLineDriver.java
+++ itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreBeeLineDriver.java
@@ -124,10 +124,15 @@ boolean getBooleanPropertyValue(String name, boolean defaultValue) {
}
return Boolean.parseBoolean(value);
}
+ @Override
+ public void beforeClass() throws Exception {
+  // We use beforeClassSpec because we want the HMS to be ready before the miniHS2 starts
+ // See CliAdapter.buildClassRule
+ }
@Override
@BeforeClass
- public void beforeClass() throws Exception {
+ public void beforeClassSpec() throws Exception {
overwrite = getBooleanPropertyValue("test.output.overwrite", Boolean.FALSE);
useSharedDatabase = getBooleanPropertyValue("test.beeline.shared.database", Boolean.FALSE);
diff --git itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestMetaStoreHandler.java itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestMetaStoreHandler.java
index b86d736a89..0efdf1a55b 100644
--- itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestMetaStoreHandler.java
+++ itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestMetaStoreHandler.java
@@ -64,6 +64,8 @@ public QTestMetaStoreHandler setMetaStoreConfiguration(HiveConf conf) {
MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CONNECTION_DRIVER, rule.getJdbcDriver());
MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CONNECTION_USER_NAME, rule.getHiveUser());
MetastoreConf.setVar(conf, MetastoreConf.ConfVars.PWD, rule.getHivePassword());
+    // Here we can disable schema auto-create, which is otherwise enabled by default for every test
+ MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.AUTO_CREATE_ALL, false);
LOG.info(String.format("set metastore connection to url: %s",
MetastoreConf.getVar(conf, MetastoreConf.ConfVars.CONNECT_URL_KEY)));
@@ -104,7 +106,6 @@ public void afterTest(QTestUtil qt) throws Exception {
// special qtest logic, which doesn't fit quite well into Derby.after()
if (isDerby()) {
TxnDbUtil.cleanDb(qt.getConf());
- TxnDbUtil.prepDb(qt.getConf());
}
}
@@ -113,5 +114,6 @@ public void setSystemProperties() {
System.setProperty(MetastoreConf.ConfVars.CONNECTION_DRIVER.getVarname(), rule.getJdbcDriver());
System.setProperty(MetastoreConf.ConfVars.CONNECTION_USER_NAME.getVarname(), rule.getHiveUser());
System.setProperty(MetastoreConf.ConfVars.PWD.getVarname(), rule.getHivePassword());
+ System.setProperty(MetastoreConf.ConfVars.AUTO_CREATE_ALL.getVarname(), "false");
}
}
diff --git itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
index 38e4bac2cc..034d7f89ee 100644
--- itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
+++ itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
@@ -243,11 +243,16 @@ private void setMetaStoreProperties() {
setMetastoreConfPropertyFromSystemProperty(MetastoreConf.ConfVars.CONNECTION_DRIVER);
setMetastoreConfPropertyFromSystemProperty(MetastoreConf.ConfVars.CONNECTION_USER_NAME);
setMetastoreConfPropertyFromSystemProperty(MetastoreConf.ConfVars.PWD);
+ setMetastoreConfPropertyFromSystemProperty(MetastoreConf.ConfVars.AUTO_CREATE_ALL);
}
private void setMetastoreConfPropertyFromSystemProperty(MetastoreConf.ConfVars var) {
if (System.getProperty(var.getVarname()) != null) {
- MetastoreConf.setVar(conf, var, System.getProperty(var.getVarname()));
+ if (var.getDefaultVal().getClass() == Boolean.class) {
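+      // MetastoreConf.setVar asserts that the var is String-typed, so boolean values must go through setBoolVar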
+      MetastoreConf.setBoolVar(conf, var, Boolean.parseBoolean(System.getProperty(var.getVarname())));
+ } else {
+ MetastoreConf.setVar(conf, var, System.getProperty(var.getVarname()));
+ }
}
}
diff --git pom.xml pom.xml
index b29c06c69e..5a26671446 100644
--- pom.xml
+++ pom.xml
@@ -84,6 +84,8 @@
+    <itest.jdbc.jars>set-this-to-colon-separated-full-path-list-of-jars-to-run-integration-tests</itest.jdbc.jars>
+
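+    <!-- ${itest.jdbc.jars} is appended to the surefire test classpath below, so dbinstall-based tests can load the JDBC driver -->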
${maven.test.classpath}
file://
@@ -1371,6 +1373,7 @@
          <additionalClasspathElement>${test.conf.dir}</additionalClasspathElement>
          <additionalClasspathElement>${basedir}/${hive.path.to.root}/conf</additionalClasspathElement>
+         <additionalClasspathElement>${itest.jdbc.jars}</additionalClasspathElement>
US/Pacific
diff --git ql/pom.xml ql/pom.xml
index a0e77a1d32..588545c023 100644
--- ql/pom.xml
+++ ql/pom.xml
@@ -912,6 +912,24 @@
              <goal>run</goal>
            </goals>
          </execution>
+          <execution>
+            <id>setup-metastore-scripts</id>
+            <phase>process-test-resources</phase>
+            <goals>
+              <goal>run</goal>
+            </goals>
+            <configuration>
+              <target>
+                <!-- stage the metastore SQL scripts where TxnDbUtil.prepDb looks for them -->
+                <mkdir dir="${basedir}/${hive.path.to.root}/standalone-metastore/metastore-server/target/tmp/scripts/metastore/upgrade"/>
+                <copy todir="${basedir}/${hive.path.to.root}/standalone-metastore/metastore-server/target/tmp/scripts/metastore/upgrade">
+                  <fileset dir="${basedir}/${hive.path.to.root}/standalone-metastore/metastore-server/src/main/sql/"/>
+                </copy>
+              </target>
+            </configuration>
+          </execution>
diff --git ql/src/test/org/apache/hadoop/hive/metastore/txn/TestCompactionTxnHandler.java ql/src/test/org/apache/hadoop/hive/metastore/txn/TestCompactionTxnHandler.java
index 15fcfc0e35..7c8903fbae 100644
--- ql/src/test/org/apache/hadoop/hive/metastore/txn/TestCompactionTxnHandler.java
+++ ql/src/test/org/apache/hadoop/hive/metastore/txn/TestCompactionTxnHandler.java
@@ -583,7 +583,6 @@ public void addDynamicPartitions() throws Exception {
@Before
public void setUp() throws Exception {
- TxnDbUtil.prepDb(conf);
txnHandler = TxnUtils.getTxnStore(conf);
}
diff --git ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java
index f512c1df19..3916e88a9d 100644
--- ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java
+++ ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java
@@ -1754,7 +1754,6 @@ private void updateLocks(Connection conn) throws SQLException {
@Before
public void setUp() throws Exception {
- TxnDbUtil.prepDb(conf);
txnHandler = TxnUtils.getTxnStore(conf);
}
diff --git ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandlerNoConnectionPool.java ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandlerNoConnectionPool.java
index ebe4880e3a..ed2d485b42 100644
--- ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandlerNoConnectionPool.java
+++ ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandlerNoConnectionPool.java
@@ -29,11 +29,10 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import java.sql.SQLException;
import java.util.List;
-import static junit.framework.Assert.assertEquals;
-import static junit.framework.Assert.assertTrue;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
/**
* This test checks that the transaction handler works when the connection pool is set to none.
@@ -48,14 +47,6 @@
@Before
public void setUp() throws Exception {
conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_POOLING_TYPE, "None");
- TxnDbUtil.setConfValues(conf);
- try {
- TxnDbUtil.prepDb(conf);
- } catch (SQLException e) {
- // Usually this means we've already created the tables, so clean them and then try again
- tearDown();
- TxnDbUtil.prepDb(conf);
- }
txnHandler = TxnUtils.getTxnStore(conf);
}
diff --git ql/src/test/org/apache/hadoop/hive/ql/lockmgr/ITestDbTxnManager.java ql/src/test/org/apache/hadoop/hive/ql/lockmgr/ITestDbTxnManager.java
new file mode 100644
index 0000000000..a085e9ff6f
--- /dev/null
+++ ql/src/test/org/apache/hadoop/hive/ql/lockmgr/ITestDbTxnManager.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.lockmgr;
+
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.dbinstall.rules.DatabaseRule;
+import org.apache.hadoop.hive.metastore.dbinstall.rules.Derby;
+import org.apache.hadoop.hive.metastore.dbinstall.rules.Mssql;
+import org.apache.hadoop.hive.metastore.dbinstall.rules.Mysql;
+import org.apache.hadoop.hive.metastore.dbinstall.rules.Oracle;
+import org.apache.hadoop.hive.metastore.dbinstall.rules.Postgres;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Test class to run DbTxnManager tests against different dbms types.
+ * Example: mvn test -Dtest=ITestDbTxnManager -Dtest.metastore.db=postgres -Ditest.jdbc.jars=yourPathToJdbcDriver
+ */
+public class ITestDbTxnManager extends TestDbTxnManager2 {
+
+ private static final String SYS_PROP_METASTORE_DB = "test.metastore.db";
+  private static final Logger LOG = LoggerFactory.getLogger(ITestDbTxnManager.class);
+  private static DatabaseRule rule;
+
+ @BeforeClass
+ public static void setupDb() throws Exception {
+    String metastoreType = System.getProperty(SYS_PROP_METASTORE_DB, "derby").toLowerCase();
+ rule = getDatabaseRule(metastoreType).setVerbose(false);
+
+ conf.setVar(HiveConf.ConfVars.METASTOREDBTYPE, metastoreType.toUpperCase());
+
+ MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CONNECT_URL_KEY, rule.getJdbcUrl());
+ MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CONNECTION_DRIVER, rule.getJdbcDriver());
+ MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CONNECTION_USER_NAME, rule.getHiveUser());
+ MetastoreConf.setVar(conf, MetastoreConf.ConfVars.PWD, rule.getHivePassword());
+    // Here we disable schema auto-create, which is otherwise enabled by default for every test
+ MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.AUTO_CREATE_ALL, false);
+
+ LOG.info("Set metastore connection to url: {}",
+ MetastoreConf.getVar(conf, MetastoreConf.ConfVars.CONNECT_URL_KEY));
+ // Start the docker container and create the hive user
+ rule.before();
+ rule.createUser();
+    // We do not run the install script here; prepDb will run it anyway before the tests
+ }
+
+ @AfterClass
+ public static void tearDownDb() {
+ rule.after();
+ }
+
+ private static DatabaseRule getDatabaseRule(String metastoreType) {
+ switch (metastoreType) {
+ case "postgres":
+ return new Postgres();
+ case "oracle":
+ return new Oracle();
+ case "mysql":
+ return new Mysql();
+ case "mssql":
+ return new Mssql();
+ default:
+ return new Derby();
+ }
+ }
+}
diff --git ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
index 497cedd61f..f90396b2a3 100644
--- ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
+++ ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
@@ -47,6 +47,7 @@
import org.apache.hadoop.hive.ql.processors.CommandProcessorException;
import org.apache.hadoop.hive.ql.session.SessionState;
import org.junit.Before;
+import org.junit.BeforeClass;
import org.junit.ComparisonFailure;
import org.junit.Rule;
import org.junit.Test;
@@ -82,7 +83,8 @@
* each thread.
*/
public class TestDbTxnManager2 {
- private static HiveConf conf = new HiveConf(Driver.class);
+ protected static HiveConf conf = new HiveConf(Driver.class);
+
private HiveTxnManager txnMgr;
private Context ctx;
private Driver driver;
@@ -95,6 +97,11 @@ public TestDbTxnManager2() {
TxnDbUtil.setConfValues(conf);
}
+ @BeforeClass
+  public static void setUpDB() throws Exception {
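+    // Create the txn schema once for the whole class; setUp() only cleans the data between tests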
+ TxnDbUtil.prepDb(conf);
+ }
+
@Before
public void setUp() throws Exception {
conf.setBoolVar(HiveConf.ConfVars.TXN_WRITE_X_LOCK, false);
@@ -102,7 +109,6 @@ public void setUp() throws Exception {
ctx = new Context(conf);
driver = new Driver(new QueryState.Builder().withHiveConf(conf).nonIsolated().build());
TxnDbUtil.cleanDb(conf);
- TxnDbUtil.prepDb(conf);
SessionState ss = SessionState.get();
ss.initTxnMgr(conf);
txnMgr = ss.getTxnMgr();
@@ -2046,15 +2052,21 @@ private void testMerge3Way(boolean causeConflict, boolean sharedWrite) throws Ex
if (causeConflict) {
Assert.assertNotNull("didn't get exception", expectedException);
try {
- Assert.assertEquals("Transaction manager has aborted the transaction txnid:11. Reason: " +
- "Aborting [txnid:11,11] due to a write conflict on default/target/p=1/q=3 " +
- "committed by [txnid:10,11] u/u", expectedException.getMessage());
- } catch (ComparisonFailure ex) {
- //the 2 txns have 2 conflicts between them so check for either failure since which one is
- //reported (among the 2) is not deterministic
Assert.assertEquals("Transaction manager has aborted the transaction txnid:11. Reason: " +
"Aborting [txnid:11,11] due to a write conflict on default/target/p=1/q=2 " +
"committed by [txnid:10,11] d/d", expectedException.getMessage());
+ } catch (ComparisonFailure ex) {
+        //the 2 txns have 3 conflicts between them, so check for any of the 3 failures since which
+        //one is reported is not deterministic
+ try {
+ Assert.assertEquals("Transaction manager has aborted the transaction txnid:11. Reason: "
+ + "Aborting [txnid:11,11] due to a write conflict on default/target/p=2/q=2 "
+ + "committed by [txnid:10,11] d/d", expectedException.getMessage());
+ } catch (ComparisonFailure ex2) {
+ Assert.assertEquals("Transaction manager has aborted the transaction txnid:11. Reason: " +
+ "Aborting [txnid:11,11] due to a write conflict on default/target/p=1/q=3 " +
+ "committed by [txnid:10,11] u/u", expectedException.getMessage());
+ }
}
Assert.assertEquals(
"COMPLETED_TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnId2) + "): " +
diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/DatabaseProduct.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/DatabaseProduct.java
index 3e56ad513c..b798cdd041 100644
--- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/DatabaseProduct.java
+++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/DatabaseProduct.java
@@ -72,4 +72,19 @@ public static boolean needsInBatching(DatabaseProduct dbType) {
public static boolean hasJoinOperationOrderBug(DatabaseProduct dbType) {
return dbType == DERBY || dbType == ORACLE || dbType == POSTGRES;
}
+
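+  /**
+   * Maps the db type to the postfix of the matching schema scripts
+   * (e.g. hive-schema-4.0.0.mssql.sql), or null if there are no scripts for the type.
+   */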
+ public static String getHiveSchemaPostfix(DatabaseProduct dbType) {
+ switch (dbType) {
+ case SQLSERVER:
+ return "mssql";
+ case DERBY:
+ case MYSQL:
+ case POSTGRES:
+ case ORACLE:
+ return dbType.name().toLowerCase();
+ case OTHER:
+ default:
+ return null;
+ }
+ }
}
diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
index 385f9d72cd..97a083399a 100644
--- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
+++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
@@ -17,6 +17,9 @@
*/
package org.apache.hadoop.hive.metastore.txn;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.InputStream;
import java.sql.Connection;
import java.sql.Driver;
import java.sql.PreparedStatement;
@@ -29,12 +32,18 @@
import java.util.ArrayList;
import java.util.Arrays;
import java.util.EnumMap;
+import java.util.HashSet;
import java.util.List;
import java.util.Properties;
+import java.util.Scanner;
+import java.util.Set;
import com.google.common.annotations.VisibleForTesting;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.DatabaseProduct;
+import org.apache.hadoop.hive.metastore.IMetaStoreSchemaInfo;
+import org.apache.hadoop.hive.metastore.MetaStoreSchemaInfoFactory;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
@@ -83,319 +92,43 @@ public static void setConfValues(Configuration conf) {
MetastoreConf.setBoolVar(conf, ConfVars.HIVE_SUPPORT_CONCURRENCY, true);
}
+ /**
+ * Prepares the metastore database for unit tests.
+   * Runs the latest schema init script against the database configured in the CONNECT_URL_KEY param.
+   * Duplication errors (table, index, etc.) are ignored, so it can be called multiple times for the same database.
+ * @param conf Metastore configuration
+ * @throws Exception Initialization failure
+ */
public static synchronized void prepDb(Configuration conf) throws Exception {
- // This is a bogus hack because it copies the contents of the SQL file
- // intended for creating derby databases, and thus will inexorably get
- // out of date with it. I'm open to any suggestions on how to make this
- // read the file in a build friendly way.
-
+ LOG.info("Creating transactional tables");
Connection conn = null;
Statement stmt = null;
try {
conn = getConnection(conf);
+      String dbProductName = conn.getMetaData().getDatabaseProductName();
+      DatabaseProduct dbProduct = DatabaseProduct.determineDatabaseProduct(dbProductName);
stmt = conn.createStatement();
- stmt.execute("CREATE TABLE TXNS (" +
- " TXN_ID bigint PRIMARY KEY," +
- " TXN_STATE char(1) NOT NULL," +
- " TXN_STARTED bigint NOT NULL," +
- " TXN_LAST_HEARTBEAT bigint NOT NULL," +
- " TXN_USER varchar(128) NOT NULL," +
- " TXN_HOST varchar(128) NOT NULL," +
- " TXN_TYPE integer)");
-
- stmt.execute("CREATE TABLE TXN_COMPONENTS (" +
- " TC_TXNID bigint NOT NULL REFERENCES TXNS (TXN_ID)," +
- " TC_DATABASE varchar(128) NOT NULL," +
- " TC_TABLE varchar(128)," +
- " TC_PARTITION varchar(767)," +
- " TC_OPERATION_TYPE char(1) NOT NULL," +
- " TC_WRITEID bigint)");
- stmt.execute("CREATE TABLE COMPLETED_TXN_COMPONENTS (" +
- " CTC_TXNID bigint NOT NULL," +
- " CTC_DATABASE varchar(128) NOT NULL," +
- " CTC_TABLE varchar(128)," +
- " CTC_PARTITION varchar(767)," +
- " CTC_TIMESTAMP timestamp DEFAULT CURRENT_TIMESTAMP NOT NULL," +
- " CTC_WRITEID bigint," +
- " CTC_UPDATE_DELETE char(1) NOT NULL)");
- stmt.execute("CREATE TABLE NEXT_TXN_ID (" + " NTXN_NEXT bigint NOT NULL)");
- stmt.execute("INSERT INTO NEXT_TXN_ID VALUES(1)");
-
- stmt.execute("CREATE TABLE TXN_TO_WRITE_ID (" +
- " T2W_TXNID bigint NOT NULL," +
- " T2W_DATABASE varchar(128) NOT NULL," +
- " T2W_TABLE varchar(256) NOT NULL," +
- " T2W_WRITEID bigint NOT NULL)");
- stmt.execute("CREATE TABLE NEXT_WRITE_ID (" +
- " NWI_DATABASE varchar(128) NOT NULL," +
- " NWI_TABLE varchar(256) NOT NULL," +
- " NWI_NEXT bigint NOT NULL)");
-
- stmt.execute("CREATE TABLE MIN_HISTORY_LEVEL (" +
- " MHL_TXNID bigint NOT NULL," +
- " MHL_MIN_OPEN_TXNID bigint NOT NULL," +
- " PRIMARY KEY(MHL_TXNID))");
-
- stmt.execute("CREATE TABLE HIVE_LOCKS (" +
- " HL_LOCK_EXT_ID bigint NOT NULL," +
- " HL_LOCK_INT_ID bigint NOT NULL," +
- " HL_TXNID bigint NOT NULL," +
- " HL_DB varchar(128) NOT NULL," +
- " HL_TABLE varchar(128)," +
- " HL_PARTITION varchar(767)," +
- " HL_LOCK_STATE char(1) NOT NULL," +
- " HL_LOCK_TYPE char(1) NOT NULL," +
- " HL_LAST_HEARTBEAT bigint NOT NULL," +
- " HL_ACQUIRED_AT bigint," +
- " HL_USER varchar(128) NOT NULL," +
- " HL_HOST varchar(128) NOT NULL," +
- " HL_HEARTBEAT_COUNT integer," +
- " HL_AGENT_INFO varchar(128)," +
- " HL_BLOCKEDBY_EXT_ID bigint," +
- " HL_BLOCKEDBY_INT_ID bigint," +
- " PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID))");
- stmt.execute("CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID)");
-
- stmt.execute("CREATE TABLE NEXT_LOCK_ID (" + " NL_NEXT bigint NOT NULL)");
- stmt.execute("INSERT INTO NEXT_LOCK_ID VALUES(1)");
-
- stmt.execute("CREATE TABLE COMPACTION_QUEUE (" +
- " CQ_ID bigint PRIMARY KEY," +
- " CQ_DATABASE varchar(128) NOT NULL," +
- " CQ_TABLE varchar(128) NOT NULL," +
- " CQ_PARTITION varchar(767)," +
- " CQ_STATE char(1) NOT NULL," +
- " CQ_TYPE char(1) NOT NULL," +
- " CQ_TBLPROPERTIES varchar(2048)," +
- " CQ_WORKER_ID varchar(128)," +
- " CQ_START bigint," +
- " CQ_RUN_AS varchar(128)," +
- " CQ_HIGHEST_WRITE_ID bigint," +
- " CQ_META_INFO varchar(2048) for bit data," +
- " CQ_HADOOP_JOB_ID varchar(32)," +
- " CQ_ERROR_MESSAGE clob," +
- " CQ_NEXT_TXN_ID bigint)");
-
- stmt.execute("CREATE TABLE NEXT_COMPACTION_QUEUE_ID (NCQ_NEXT bigint NOT NULL)");
- stmt.execute("INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1)");
-
- stmt.execute("CREATE TABLE COMPLETED_COMPACTIONS (" +
- " CC_ID bigint PRIMARY KEY," +
- " CC_DATABASE varchar(128) NOT NULL," +
- " CC_TABLE varchar(128) NOT NULL," +
- " CC_PARTITION varchar(767)," +
- " CC_STATE char(1) NOT NULL," +
- " CC_TYPE char(1) NOT NULL," +
- " CC_TBLPROPERTIES varchar(2048)," +
- " CC_WORKER_ID varchar(128)," +
- " CC_START bigint," +
- " CC_END bigint," +
- " CC_RUN_AS varchar(128)," +
- " CC_HIGHEST_WRITE_ID bigint," +
- " CC_META_INFO varchar(2048) for bit data," +
- " CC_HADOOP_JOB_ID varchar(32)," +
- " CC_ERROR_MESSAGE clob)");
-
- stmt.execute("CREATE INDEX COMPLETED_COMPACTIONS_RES ON COMPLETED_COMPACTIONS ("
- + "CC_DATABASE,CC_TABLE,CC_PARTITION)");
-
- stmt.execute("CREATE TABLE AUX_TABLE (" +
- " MT_KEY1 varchar(128) NOT NULL," +
- " MT_KEY2 bigint NOT NULL," +
- " MT_COMMENT varchar(255)," +
- " PRIMARY KEY(MT_KEY1, MT_KEY2))");
-
- stmt.execute("CREATE TABLE WRITE_SET (" +
- " WS_DATABASE varchar(128) NOT NULL," +
- " WS_TABLE varchar(128) NOT NULL," +
- " WS_PARTITION varchar(767)," +
- " WS_TXNID bigint NOT NULL," +
- " WS_COMMIT_ID bigint NOT NULL," +
- " WS_OPERATION_TYPE char(1) NOT NULL)"
- );
-
- stmt.execute("CREATE TABLE REPL_TXN_MAP (" +
- " RTM_REPL_POLICY varchar(256) NOT NULL, " +
- " RTM_SRC_TXN_ID bigint NOT NULL, " +
- " RTM_TARGET_TXN_ID bigint NOT NULL, " +
- " PRIMARY KEY (RTM_REPL_POLICY, RTM_SRC_TXN_ID))"
- );
-
- stmt.execute("CREATE TABLE MATERIALIZATION_REBUILD_LOCKS (" +
- " MRL_TXN_ID BIGINT NOT NULL, " +
- " MRL_DB_NAME VARCHAR(128) NOT NULL, " +
- " MRL_TBL_NAME VARCHAR(256) NOT NULL, " +
- " MRL_LAST_HEARTBEAT BIGINT NOT NULL, " +
- " PRIMARY KEY(MRL_TXN_ID))"
- );
-
- try {
- stmt.execute("CREATE TABLE \"APP\".\"TBLS\" (\"TBL_ID\" BIGINT NOT NULL, " +
- " \"CREATE_TIME\" INTEGER NOT NULL, \"DB_ID\" BIGINT, \"LAST_ACCESS_TIME\" INTEGER NOT NULL, " +
- " \"OWNER\" VARCHAR(767), \"OWNER_TYPE\" VARCHAR(10), \"RETENTION\" INTEGER NOT NULL, " +
- " \"SD_ID\" BIGINT, \"TBL_NAME\" VARCHAR(256), \"TBL_TYPE\" VARCHAR(128), " +
- " \"VIEW_EXPANDED_TEXT\" LONG VARCHAR, \"VIEW_ORIGINAL_TEXT\" LONG VARCHAR, " +
- " \"IS_REWRITE_ENABLED\" CHAR(1) NOT NULL DEFAULT \'N\', " +
- " \"WRITE_ID\" BIGINT DEFAULT 0, " +
- " PRIMARY KEY (TBL_ID))"
- );
- } catch (SQLException e) {
- if (e.getMessage() != null && e.getMessage().contains("already exists")) {
- LOG.info("TBLS table already exist, ignoring");
- } else {
- throw e;
- }
- }
-
- try {
- stmt.execute("CREATE TABLE \"APP\".\"DBS\" (\"DB_ID\" BIGINT NOT NULL, \"DESC\" " +
- "VARCHAR(4000), \"DB_LOCATION_URI\" VARCHAR(4000) NOT NULL, \"NAME\" VARCHAR(128), " +
- "\"OWNER_NAME\" VARCHAR(128), \"OWNER_TYPE\" VARCHAR(10), " +
- "\"CTLG_NAME\" VARCHAR(256) NOT NULL, PRIMARY KEY (DB_ID))");
- } catch (SQLException e) {
- if (e.getMessage() != null && e.getMessage().contains("already exists")) {
- LOG.info("TBLS table already exist, ignoring");
- } else {
- throw e;
- }
- }
-
- try {
- stmt.execute("CREATE TABLE \"APP\".\"PARTITIONS\" (" +
- " \"PART_ID\" BIGINT NOT NULL, \"CREATE_TIME\" INTEGER NOT NULL, " +
- " \"LAST_ACCESS_TIME\" INTEGER NOT NULL, \"PART_NAME\" VARCHAR(767), " +
- " \"SD_ID\" BIGINT, \"TBL_ID\" BIGINT, " +
- " \"WRITE_ID\" BIGINT DEFAULT 0, " +
- " PRIMARY KEY (PART_ID))"
- );
- } catch (SQLException e) {
- if (e.getMessage() != null && e.getMessage().contains("already exists")) {
- LOG.info("PARTITIONS table already exist, ignoring");
- } else {
- throw e;
- }
- }
-
- try {
- stmt.execute("CREATE TABLE \"APP\".\"TABLE_PARAMS\" (" +
- " \"TBL_ID\" BIGINT NOT NULL, \"PARAM_KEY\" VARCHAR(256) NOT NULL, " +
- " \"PARAM_VALUE\" CLOB, " +
- " PRIMARY KEY (TBL_ID, PARAM_KEY))"
- );
- } catch (SQLException e) {
- if (e.getMessage() != null && e.getMessage().contains("already exists")) {
- LOG.info("TABLE_PARAMS table already exist, ignoring");
- } else {
- throw e;
- }
- }
-
- try {
- stmt.execute("CREATE TABLE \"APP\".\"PARTITION_PARAMS\" (" +
- " \"PART_ID\" BIGINT NOT NULL, \"PARAM_KEY\" VARCHAR(256) NOT NULL, " +
- " \"PARAM_VALUE\" CLOB, " +
- " PRIMARY KEY (PART_ID, PARAM_KEY))"
- );
- } catch (SQLException e) {
- if (e.getMessage() != null && e.getMessage().contains("already exists")) {
- LOG.info("PARTITION_PARAMS table already exist, ignoring");
- } else {
- throw e;
- }
- }
-
- try {
- stmt.execute("CREATE TABLE \"APP\".\"SEQUENCE_TABLE\" (\"SEQUENCE_NAME\" VARCHAR(256) NOT " +
-
- "NULL, \"NEXT_VAL\" BIGINT NOT NULL)"
- );
- } catch (SQLException e) {
- if (e.getMessage() != null && e.getMessage().contains("already exists")) {
- LOG.info("SEQUENCE_TABLE table already exist, ignoring");
- } else {
- throw e;
- }
- }
-
- try {
- stmt.execute("CREATE TABLE \"APP\".\"NOTIFICATION_SEQUENCE\" (\"NNI_ID\" BIGINT NOT NULL, " +
-
- "\"NEXT_EVENT_ID\" BIGINT NOT NULL)"
- );
- } catch (SQLException e) {
- if (e.getMessage() != null && e.getMessage().contains("already exists")) {
- LOG.info("NOTIFICATION_SEQUENCE table already exist, ignoring");
- } else {
- throw e;
- }
- }
-
- try {
- stmt.execute("CREATE TABLE \"APP\".\"NOTIFICATION_LOG\" (\"NL_ID\" BIGINT NOT NULL, " +
- "\"DB_NAME\" VARCHAR(128), \"EVENT_ID\" BIGINT NOT NULL, \"EVENT_TIME\" INTEGER NOT" +
-
- " NULL, \"EVENT_TYPE\" VARCHAR(32) NOT NULL, \"MESSAGE\" CLOB, \"TBL_NAME\" " +
- "VARCHAR" +
- "(256), \"MESSAGE_FORMAT\" VARCHAR(16))"
- );
- } catch (SQLException e) {
- if (e.getMessage() != null && e.getMessage().contains("already exists")) {
- LOG.info("NOTIFICATION_LOG table already exist, ignoring");
- } else {
- throw e;
- }
+ if (checkDbPrepared(stmt)) {
+ return;
}
-
- stmt.execute("INSERT INTO \"APP\".\"SEQUENCE_TABLE\" (\"SEQUENCE_NAME\", \"NEXT_VAL\") " +
- "SELECT * FROM (VALUES ('org.apache.hadoop.hive.metastore.model.MNotificationLog', " +
- "1)) tmp_table WHERE NOT EXISTS ( SELECT \"NEXT_VAL\" FROM \"APP\"" +
- ".\"SEQUENCE_TABLE\" WHERE \"SEQUENCE_NAME\" = 'org.apache.hadoop.hive.metastore" +
- ".model.MNotificationLog')");
-
- stmt.execute("INSERT INTO \"APP\".\"NOTIFICATION_SEQUENCE\" (\"NNI_ID\", \"NEXT_EVENT_ID\")" +
- " SELECT * FROM (VALUES (1,1)) tmp_table WHERE NOT EXISTS ( SELECT " +
- "\"NEXT_EVENT_ID\" FROM \"APP\".\"NOTIFICATION_SEQUENCE\")");
-
- try {
- stmt.execute("CREATE TABLE TXN_WRITE_NOTIFICATION_LOG (" +
- "WNL_ID bigint NOT NULL," +
- "WNL_TXNID bigint NOT NULL," +
- "WNL_WRITEID bigint NOT NULL," +
- "WNL_DATABASE varchar(128) NOT NULL," +
- "WNL_TABLE varchar(128) NOT NULL," +
- "WNL_PARTITION varchar(1024) NOT NULL," +
- "WNL_TABLE_OBJ clob NOT NULL," +
- "WNL_PARTITION_OBJ clob," +
- "WNL_FILES clob," +
- "WNL_EVENT_TIME integer NOT NULL," +
- "PRIMARY KEY (WNL_TXNID, WNL_DATABASE, WNL_TABLE, WNL_PARTITION))"
- );
- } catch (SQLException e) {
- if (e.getMessage() != null && e.getMessage().contains("already exists")) {
- LOG.info("TXN_WRITE_NOTIFICATION_LOG table already exist, ignoring");
- } else {
- throw e;
- }
+ String schemaRootPath = getSchemaRootPath();
+ IMetaStoreSchemaInfo metaStoreSchemaInfo =
+ MetaStoreSchemaInfoFactory.get(conf, schemaRootPath, DatabaseProduct.getHiveSchemaPostfix(dbProduct));
+ String initFile = metaStoreSchemaInfo.generateInitFileName(null);
+ try (InputStream is = new FileInputStream(
+ metaStoreSchemaInfo.getMetaStoreScriptDir() + File.separator + initFile)) {
+ LOG.info("Reinitializing the metastore db with {} on the database {}", initFile,
+ MetastoreConf.getVar(conf, ConfVars.CONNECT_URL_KEY));
+ importSQL(stmt, is);
}
-
- stmt.execute("INSERT INTO \"APP\".\"SEQUENCE_TABLE\" (\"SEQUENCE_NAME\", \"NEXT_VAL\") " +
- "SELECT * FROM (VALUES ('org.apache.hadoop.hive.metastore.model.MTxnWriteNotificationLog', " +
- "1)) tmp_table WHERE NOT EXISTS ( SELECT \"NEXT_VAL\" FROM \"APP\"" +
- ".\"SEQUENCE_TABLE\" WHERE \"SEQUENCE_NAME\" = 'org.apache.hadoop.hive.metastore" +
- ".model.MTxnWriteNotificationLog')");
} catch (SQLException e) {
try {
- conn.rollback();
+ if (conn != null) {
+ conn.rollback();
+ }
} catch (SQLException re) {
LOG.error("Error rolling back: " + re.getMessage());
}
-
- // Another thread might have already created these tables.
- if (e.getMessage() != null && e.getMessage().contains("already exists")) {
- LOG.info("Txn tables already exist, returning");
- return;
- }
-
// This might be a deadlock, if so, let's retry
if (e instanceof SQLTransactionRollbackException && deadlockCnt++ < 5) {
LOG.warn("Caught deadlock, retrying db creation");
@@ -409,7 +142,82 @@ public static synchronized void prepDb(Configuration conf) throws Exception {
}
}
+ private static boolean checkDbPrepared(Statement stmt) {
+ /*
+ * If the transactional tables are already there we don't want to run everything again
+ */
+ try {
+ stmt.execute("SELECT * FROM \"TXNS\"");
+ } catch (SQLException e) {
+ return false;
+ }
+ return true;
+ }
+
+ private static void importSQL(Statement stmt, InputStream in) throws SQLException {
+    Set<String> knownErrors = getAlreadyExistsErrorCodes();
+ Scanner s = new Scanner(in, "UTF-8");
+ s.useDelimiter("(;(\r)?\n)|(--.*\n)");
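+    // statements are delimited by ";\n" (or ";\r\n"); "--" comments up to end of line act as delimiters too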
+ while (s.hasNext()) {
+ String line = s.next();
+
+ if (line.trim().length() > 0) {
+ try {
+ stmt.execute(line);
+ } catch (SQLException e) {
+ if (knownErrors.contains(e.getSQLState())) {
+ LOG.debug("Ignoring sql error {}", e.getMessage());
+ } else {
+ throw e;
+ }
+ }
+ }
+ }
+ }
+
+  private static Set<String> getAlreadyExistsErrorCodes() {
+    // function already exists, table already exists, index already exists, duplicate key
+    Set<String> knownErrors = new HashSet<>();
+ // derby
+ knownErrors.addAll(Arrays.asList("X0Y68", "X0Y32", "X0Y44", "42Z93", "23505"));
+ // postgres
+ knownErrors.addAll(Arrays.asList("42P07", "42P16", "42710"));
+ // mssql
+ knownErrors.addAll(Arrays.asList("S0000", "S0001", "23000"));
+ // mysql
+ knownErrors.addAll(Arrays.asList("42S01", "HY000"));
+ // oracle
+ knownErrors.addAll(Arrays.asList("42000"));
+ return knownErrors;
+ }
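+
+  // table-does-not-exist SQLSTATEs: Derby 42X05, Postgres 42P01, MySQL 42S02, MSSQL S0002, Oracle 42000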
+  private static Set<String> getTableNotExistsErrorCodes() {
+    Set<String> knownErrors = new HashSet<>();
+    knownErrors.addAll(Arrays.asList("42X05", "42P01", "42S02", "S0002", "42000"));
+    return knownErrors;
+  }
+
+ private static String getSchemaRootPath() {
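+    // the setup-metastore-scripts antrun executions stage the SQL scripts under these tmp directories at build time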
+ String hiveRoot = System.getProperty("hive.root");
+ if (StringUtils.isNotEmpty(hiveRoot)) {
+ return ensurePathEndsInSlash(hiveRoot) + "standalone-metastore/metastore-server/target/tmp/";
+ } else {
+ return ensurePathEndsInSlash(System.getProperty("test.tmp.dir", "target/tmp"));
+ }
+ }
+
+ private static String ensurePathEndsInSlash(String path) {
+ if (path == null) {
+ throw new NullPointerException("Path cannot be null");
+ }
+ if (path.endsWith(File.separator)) {
+ return path;
+ } else {
+ return path + File.separator;
+ }
+ }
+
public static void cleanDb(Configuration conf) throws Exception {
+ LOG.info("Cleaning transactional tables");
int retryCount = 0;
while(++retryCount <= 3) {
boolean success = true;
@@ -420,24 +228,31 @@ public static void cleanDb(Configuration conf) throws Exception {
stmt = conn.createStatement();
// We want to try these, whether they succeed or fail.
- success &= dropIndex(stmt, "HL_TXNID_INDEX", retryCount);
-
- success &= dropTable(stmt, "TXN_COMPONENTS", retryCount);
- success &= dropTable(stmt, "COMPLETED_TXN_COMPONENTS", retryCount);
- success &= dropTable(stmt, "TXNS", retryCount);
- success &= dropTable(stmt, "NEXT_TXN_ID", retryCount);
- success &= dropTable(stmt, "TXN_TO_WRITE_ID", retryCount);
- success &= dropTable(stmt, "NEXT_WRITE_ID", retryCount);
- success &= dropTable(stmt, "MIN_HISTORY_LEVEL", retryCount);
- success &= dropTable(stmt, "HIVE_LOCKS", retryCount);
- success &= dropTable(stmt, "NEXT_LOCK_ID", retryCount);
- success &= dropTable(stmt, "COMPACTION_QUEUE", retryCount);
- success &= dropTable(stmt, "NEXT_COMPACTION_QUEUE_ID", retryCount);
- success &= dropTable(stmt, "COMPLETED_COMPACTIONS", retryCount);
- success &= dropTable(stmt, "AUX_TABLE", retryCount);
- success &= dropTable(stmt, "WRITE_SET", retryCount);
- success &= dropTable(stmt, "REPL_TXN_MAP", retryCount);
- success &= dropTable(stmt, "MATERIALIZATION_REBUILD_LOCKS", retryCount);
+ success &= truncateTable(conn, stmt, "TXN_COMPONENTS");
+ success &= truncateTable(conn, stmt, "COMPLETED_TXN_COMPONENTS");
+ success &= truncateTable(conn, stmt, "TXNS");
+ success &= truncateTable(conn, stmt, "NEXT_TXN_ID");
+ success &= truncateTable(conn, stmt, "TXN_TO_WRITE_ID");
+ success &= truncateTable(conn, stmt, "NEXT_WRITE_ID");
+ success &= truncateTable(conn, stmt, "HIVE_LOCKS");
+ success &= truncateTable(conn, stmt, "NEXT_LOCK_ID");
+ success &= truncateTable(conn, stmt, "COMPACTION_QUEUE");
+ success &= truncateTable(conn, stmt, "NEXT_COMPACTION_QUEUE_ID");
+ success &= truncateTable(conn, stmt, "COMPLETED_COMPACTIONS");
+ success &= truncateTable(conn, stmt, "AUX_TABLE");
+ success &= truncateTable(conn, stmt, "WRITE_SET");
+ success &= truncateTable(conn, stmt, "REPL_TXN_MAP");
+ success &= truncateTable(conn, stmt, "MATERIALIZATION_REBUILD_LOCKS");
+ try {
+ stmt.executeUpdate("INSERT INTO \"NEXT_TXN_ID\" VALUES(1)");
+ stmt.executeUpdate("INSERT INTO \"NEXT_LOCK_ID\" VALUES(1)");
+ stmt.executeUpdate("INSERT INTO \"NEXT_COMPACTION_QUEUE_ID\" VALUES(1)");
+ } catch (SQLException e) {
+ if (!getTableNotExistsErrorCodes().contains(e.getSQLState())) {
+ LOG.error("Error initializing NEXT_TXN_ID");
+ success = false;
+ }
+ }
/*
      * Don't drop NOTIFICATION_LOG, SEQUENCE_TABLE and NOTIFICATION_SEQUENCE as they are used by
      * other tables which are not txn related, to generate primary keys. So if these tables are dropped
@@ -454,42 +269,28 @@ public static void cleanDb(Configuration conf) throws Exception {
throw new RuntimeException("Failed to clean up txn tables");
}
- private static boolean dropIndex(Statement stmt, String index, int retryCount) {
+ private static boolean truncateTable(Connection conn, Statement stmt, String name) {
try {
- stmt.execute("DROP INDEX " + index);
- } catch (SQLException e) {
- if (!("42X65".equals(e.getSQLState()) && 30000 == e.getErrorCode())) {
- //42X65/3000 means index doesn't exist
- LOG.error("Unable to drop index {} {} State={} code={} retryCount={}",
- index, e.getMessage(), e.getSQLState(), e.getErrorCode(), retryCount);
- return false;
+      // We cannot use actual TRUNCATE due to some foreign keys, but we don't expect much data during tests
+ String dbProduct = conn.getMetaData().getDatabaseProductName();
+ DatabaseProduct databaseProduct = determineDatabaseProduct(dbProduct);
+ if (databaseProduct == POSTGRES) {
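+        // Postgres folds unquoted identifiers to lower case, so the upper-case table names must be quoted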
+ stmt.execute("DELETE FROM \"" + name + "\"");
+ } else {
+ stmt.execute("DELETE FROM " + name);
}
- }
- return true;
- }
- private static boolean dropTable(Statement stmt, String name, int retryCount) throws SQLException {
- for (int i = 0; i < 3; i++) {
- try {
- stmt.execute("DROP TABLE " + name);
- LOG.debug("Successfully dropped table " + name);
+ LOG.debug("Successfully truncated table " + name);
+ return true;
+ } catch (SQLException e) {
+ if (getTableNotExistsErrorCodes().contains(e.getSQLState())) {
+ LOG.debug("Not truncating " + name + " because it doesn't exist");
+ //failed because object doesn't exist
return true;
- } catch (SQLException e) {
- if ("42Y55".equals(e.getSQLState()) && 30000 == e.getErrorCode()) {
- LOG.debug("Not dropping " + name + " because it doesn't exist");
- //failed because object doesn't exist
- return true;
- }
- if ("X0Y25".equals(e.getSQLState()) && 30000 == e.getErrorCode()) {
- // Intermittent failure
- LOG.warn("Intermittent drop failure, retrying, try number " + i);
- continue;
- }
- LOG.error("Unable to drop table " + name + ": " + e.getMessage() +
- " State=" + e.getSQLState() + " code=" + e.getErrorCode() + " retryCount=" + retryCount);
}
+ LOG.error("Unable to truncate table " + name + ": " + e.getMessage() + " State=" + e.getSQLState() + " code=" + e
+ .getErrorCode());
}
- LOG.error("Failed to drop table, don't know why");
return false;
}
diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
index 4a6fa6f620..903429bd1f 100644
--- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
+++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
@@ -306,7 +306,7 @@ public TxnHandler() {
* This is logically part of c'tor and must be called prior to any other method.
* Not physically part of c'tor due to use of reflection
*/
- public void setConf(Configuration conf) {
+ public void setConf(Configuration conf){
this.conf = conf;
checkQFileTestHack();
@@ -4133,7 +4133,7 @@ public int compare(LockInfo info1, LockInfo info2) {
// we are checking to the desired action.
  private static Map<LockType, Map<LockType, Map<LockState, LockAction>>> jumpTable;
- private void checkQFileTestHack() {
+ private void checkQFileTestHack(){
boolean hackOn = MetastoreConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST) ||
MetastoreConf.getBoolVar(conf, ConfVars.HIVE_IN_TEZ_TEST);
if (hackOn) {
@@ -4143,11 +4143,7 @@ private void checkQFileTestHack() {
try {
TxnDbUtil.prepDb(conf);
} catch (Exception e) {
- // We may have already created the tables and thus don't need to redo it.
- if (e.getMessage() != null && !e.getMessage().contains("already exists")) {
- throw new RuntimeException("Unable to set up transaction database for" +
- " testing: " + e.getMessage(), e);
- }
+ throw new RuntimeException("Unable to set up transaction database for" + " testing: " + e.getMessage(), e);
}
}
}
diff --git standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql
index 1ace9d3ef0..366b6f02c1 100644
--- standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql
+++ standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql
@@ -234,7 +234,9 @@ CREATE TABLE "APP"."CTLGS" (
"CREATE_TIME" INTEGER);
-- Insert a default value. The location is TBD. Hive will fix this when it starts
-INSERT INTO "APP"."CTLGS" VALUES (1, 'hive', 'Default catalog for Hive', 'TBD', NULL);
+INSERT INTO "APP"."CTLGS"
+ ("CTLG_ID", "NAME", "DESC", "LOCATION_URI", "CREATE_TIME")
+ VALUES (1, 'hive', 'Default catalog for Hive', 'TBD', NULL);
-- ----------------------------------------------
-- DML Statements
diff --git standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java
index 5f3db52c2f..a6d0ac2a1f 100644
--- standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java
+++ standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java
@@ -41,6 +41,7 @@
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
+import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.Rule;
import org.junit.experimental.categories.Category;
@@ -68,8 +69,8 @@
@Category(MetastoreUnitTest.class)
public class TestHiveMetaStoreTxns {
- private final Configuration conf = MetastoreConf.newMetastoreConf();
- private IMetaStoreClient client;
+ private static Configuration conf = MetastoreConf.newMetastoreConf();
+ private static IMetaStoreClient client;
private Connection conn;
@Rule
@@ -391,13 +392,18 @@ public void testGetValidWriteIds() throws TException {
Assert.assertEquals(writeIdList.getMinOpenWriteId().longValue(), 2);
}
- @Before
- public void setUp() throws Exception {
+ @BeforeClass
+ public static void setUpDB() throws Exception {
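+    // One-time schema setup for the whole class; prepDb is idempotent, the tests only clean the data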
conf.setBoolean(ConfVars.HIVE_IN_TEST.getVarname(), true);
MetaStoreTestUtils.setConfForStandloneMode(conf);
TxnDbUtil.setConfValues(conf);
TxnDbUtil.prepDb(conf);
client = new HiveMetaStoreClient(conf);
+ }
+
+ @Before
+ public void setUp() throws Exception {
+
String connectionStr = MetastoreConf.getVar(conf, MetastoreConf.ConfVars.CONNECT_URL_KEY);
conn = DriverManager.getConnection(connectionStr);
diff --git standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/dbinstall/rules/Mysql.java standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/dbinstall/rules/Mysql.java
index c537d95470..afa8f2a781 100644
--- standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/dbinstall/rules/Mysql.java
+++ standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/dbinstall/rules/Mysql.java
@@ -54,7 +54,7 @@ public String getJdbcDriver() {
@Override
public String getJdbcUrl() {
- return "jdbc:mysql://localhost:3306/" + HIVE_DB;
+ return "jdbc:mysql://localhost:3306/" + HIVE_DB + "?sessionVariables=sql_mode=ANSI_QUOTES";
}
@Override