<dependency>
  <groupId>org.apache.hadoop</groupId>
  <artifactId>hadoop-common</artifactId>
  <version>${hadoop.version}</version>
</dependency>
diff --git a/common/src/java/org/apache/hadoop/hive/common/ValidCompactorTxnList.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ValidCompactorTxnList.java
similarity index 100%
rename from common/src/java/org/apache/hadoop/hive/common/ValidCompactorTxnList.java
rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ValidCompactorTxnList.java
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RunnableConfigurable.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RunnableConfigurable.java
new file mode 100644
index 0000000000..9fa5cabb0b
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RunnableConfigurable.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.conf.Configurable;
+
+/**
+ * Combination of Runnable and Configurable
+ */
+public interface RunnableConfigurable extends Configurable, Runnable {
+}
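RunnableConfigurable is the hook for the new housekeeping model: a task receives its Configuration through setConf() and is then driven by a shared scheduler instead of managing its own thread. A minimal sketch of a conforming task follows; MyHousekeepingTask is hypothetical, while RunnableConfigurable, ThreadPool and the scheduleAtFixedRate wiring are taken from this patch.

package org.apache.hadoop.hive.metastore;

import org.apache.hadoop.conf.Configuration;

// Hypothetical task illustrating the RunnableConfigurable contract.
public class MyHousekeepingTask implements RunnableConfigurable {
  private Configuration conf;

  @Override
  public void run() {
    // Periodic work that reads its settings from conf goes here.
  }

  @Override
  public void setConf(Configuration configuration) {
    this.conf = configuration;
  }

  @Override
  public Configuration getConf() {
    return conf;
  }
}

Scheduling then follows the pattern the TxnHandler hunk below uses for AcidOpenTxnsCounterService: ThreadPool.initialize(conf), then ThreadPool.getPool().scheduleAtFixedRate(task, initialDelayMs, intervalMs, TimeUnit.MILLISECONDS).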
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/datasource/BoneCPDataSourceProvider.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/datasource/BoneCPDataSourceProvider.java
similarity index 93%
rename from metastore/src/java/org/apache/hadoop/hive/metastore/datasource/BoneCPDataSourceProvider.java
rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/datasource/BoneCPDataSourceProvider.java
index 34765b0b2f..6a2f7704d3 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/datasource/BoneCPDataSourceProvider.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/datasource/BoneCPDataSourceProvider.java
@@ -1,4 +1,4 @@
-/**
+/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -47,9 +47,8 @@ public DataSource create(Configuration hdpConfig) throws SQLException {
String driverUrl = DataSourceProvider.getMetastoreJdbcDriverUrl(hdpConfig);
String user = DataSourceProvider.getMetastoreJdbcUser(hdpConfig);
String passwd = DataSourceProvider.getMetastoreJdbcPasswd(hdpConfig);
- int maxPoolSize = hdpConfig.getInt(
- MetastoreConf.ConfVars.CONNECTION_POOLING_MAX_CONNECTIONS.varname,
- ((Long)MetastoreConf.ConfVars.CONNECTION_POOLING_MAX_CONNECTIONS.defaultVal).intValue());
+ int maxPoolSize = MetastoreConf.getIntVar(hdpConfig,
+ MetastoreConf.ConfVars.CONNECTION_POOLING_MAX_CONNECTIONS);
Properties properties = DataSourceProvider.getPrefixedProperties(hdpConfig, BONECP);
long connectionTimeout = hdpConfig.getLong(CONNECTION_TIMEOUT_PROPERTY, 30000L);
@@ -82,8 +81,8 @@ public boolean mayReturnClosedConnection() {
@Override
public boolean supports(Configuration configuration) {
String poolingType =
- configuration.get(
- MetastoreConf.ConfVars.CONNECTION_POOLING_TYPE.varname).toLowerCase();
+ MetastoreConf.getVar(configuration,
+ MetastoreConf.ConfVars.CONNECTION_POOLING_TYPE).toLowerCase();
if (BONECP.equals(poolingType)) {
int boneCpPropsNr = DataSourceProvider.getPrefixedProperties(configuration, BONECP).size();
LOG.debug("Found " + boneCpPropsNr + " nr. of bonecp specific configurations");
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/datasource/DataSourceProvider.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/datasource/DataSourceProvider.java
similarity index 94%
rename from metastore/src/java/org/apache/hadoop/hive/metastore/datasource/DataSourceProvider.java
rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/datasource/DataSourceProvider.java
index ad1763e121..17ff8d1bd3 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/datasource/DataSourceProvider.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/datasource/DataSourceProvider.java
@@ -1,4 +1,4 @@
-/**
+/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -61,7 +61,7 @@ static Properties getPrefixedProperties(Configuration hdpConfig, String factoryP
}
static String getMetastoreJdbcUser(Configuration conf) {
- return conf.get(MetastoreConf.ConfVars.CONNECTION_USER_NAME.varname);
+ return MetastoreConf.getVar(conf, MetastoreConf.ConfVars.CONNECTION_USER_NAME);
}
static String getMetastoreJdbcPasswd(Configuration conf) throws SQLException {
@@ -73,7 +73,7 @@ static String getMetastoreJdbcPasswd(Configuration conf) throws SQLException {
}
static String getMetastoreJdbcDriverUrl(Configuration conf) throws SQLException {
- return conf.get(MetastoreConf.ConfVars.CONNECTURLKEY.varname);
+ return MetastoreConf.getVar(conf, MetastoreConf.ConfVars.CONNECTURLKEY);
}
}
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/datasource/DataSourceProviderFactory.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/datasource/DataSourceProviderFactory.java
similarity index 99%
rename from metastore/src/java/org/apache/hadoop/hive/metastore/datasource/DataSourceProviderFactory.java
rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/datasource/DataSourceProviderFactory.java
index 1eb792ce45..e3c18e3358 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/datasource/DataSourceProviderFactory.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/datasource/DataSourceProviderFactory.java
@@ -1,4 +1,4 @@
-/**
+/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/datasource/HikariCPDataSourceProvider.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/datasource/HikariCPDataSourceProvider.java
similarity index 93%
rename from metastore/src/java/org/apache/hadoop/hive/metastore/datasource/HikariCPDataSourceProvider.java
rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/datasource/HikariCPDataSourceProvider.java
index 9b3d6d5d70..9fc369754c 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/datasource/HikariCPDataSourceProvider.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/datasource/HikariCPDataSourceProvider.java
@@ -1,4 +1,4 @@
-/**
+/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -46,9 +46,8 @@ public DataSource create(Configuration hdpConfig) throws SQLException {
String driverUrl = DataSourceProvider.getMetastoreJdbcDriverUrl(hdpConfig);
String user = DataSourceProvider.getMetastoreJdbcUser(hdpConfig);
String passwd = DataSourceProvider.getMetastoreJdbcPasswd(hdpConfig);
- int maxPoolSize = hdpConfig.getInt(
- MetastoreConf.ConfVars.CONNECTION_POOLING_MAX_CONNECTIONS.varname,
- ((Long)MetastoreConf.ConfVars.CONNECTION_POOLING_MAX_CONNECTIONS.defaultVal).intValue());
+ int maxPoolSize = MetastoreConf.getIntVar(hdpConfig,
+ MetastoreConf.ConfVars.CONNECTION_POOLING_MAX_CONNECTIONS);
Properties properties = replacePrefix(
DataSourceProvider.getPrefixedProperties(hdpConfig, HIKARI));
@@ -77,8 +76,8 @@ public boolean mayReturnClosedConnection() {
@Override
public boolean supports(Configuration configuration) {
String poolingType =
- configuration.get(
- MetastoreConf.ConfVars.CONNECTION_POOLING_TYPE.varname).toLowerCase();
+ MetastoreConf.getVar(configuration,
+ MetastoreConf.ConfVars.CONNECTION_POOLING_TYPE).toLowerCase();
if (HIKARI.equals(poolingType)) {
int hikariPropsNr = DataSourceProvider.getPrefixedProperties(configuration, HIKARI).size();
LOG.debug("Found " + hikariPropsNr + " nr. of hikari specific configurations");
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/datasource/package-info.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/datasource/package-info.java
similarity index 99%
rename from metastore/src/java/org/apache/hadoop/hive/metastore/datasource/package-info.java
rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/datasource/package-info.java
index 86d6a26e06..9a4f22a67a 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/datasource/package-info.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/datasource/package-info.java
@@ -1,4 +1,4 @@
-/**
+/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/tools/SQLGenerator.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/tools/SQLGenerator.java
similarity index 92%
rename from metastore/src/java/org/apache/hadoop/hive/metastore/tools/SQLGenerator.java
rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/tools/SQLGenerator.java
index 0c0bfefc3e..8268af9559 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/tools/SQLGenerator.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/tools/SQLGenerator.java
@@ -18,9 +18,11 @@ Licensed to the Apache Software Foundation (ASF) under one
package org.apache.hadoop.hive.metastore.tools;
import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.DatabaseProduct;
import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -36,9 +38,9 @@ Licensed to the Apache Software Foundation (ASF) under one
public final class SQLGenerator {
static final private Logger LOG = LoggerFactory.getLogger(SQLGenerator.class.getName());
private final DatabaseProduct dbProduct;
- private final HiveConf conf;
+ private final Configuration conf;
- public SQLGenerator(DatabaseProduct dbProduct, HiveConf conf) {
+ public SQLGenerator(DatabaseProduct dbProduct, Configuration conf) {
this.dbProduct = dbProduct;
this.conf = conf;
}
@@ -62,8 +64,7 @@ public SQLGenerator(DatabaseProduct dbProduct, HiveConf conf) {
//http://www.oratable.com/oracle-insert-all/
//https://livesql.oracle.com/apex/livesql/file/content_BM1LJQ87M5CNIOKPOWPV6ZGR3.html
for (int numRows = 0; numRows < rows.size(); numRows++) {
- if (numRows % conf
- .getIntVar(HiveConf.ConfVars.METASTORE_DIRECT_SQL_MAX_ELEMENTS_VALUES_CLAUSE) == 0) {
+ if (numRows % MetastoreConf.getIntVar(conf, ConfVars.DIRECT_SQL_MAX_ELEMENTS_VALUES_CLAUSE) == 0) {
if (numRows > 0) {
sb.append(" select * from dual");
insertStmts.add(sb.toString());
@@ -84,8 +85,7 @@ public SQLGenerator(DatabaseProduct dbProduct, HiveConf conf) {
case POSTGRES:
case SQLSERVER:
for (int numRows = 0; numRows < rows.size(); numRows++) {
- if (numRows % conf
- .getIntVar(HiveConf.ConfVars.METASTORE_DIRECT_SQL_MAX_ELEMENTS_VALUES_CLAUSE) == 0) {
+ if (numRows % MetastoreConf.getIntVar(conf, ConfVars.DIRECT_SQL_MAX_ELEMENTS_VALUES_CLAUSE) == 0) {
if (numRows > 0) {
insertStmts.add(sb.substring(0, sb.length() - 1));//exclude trailing comma
}
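Both hunks above apply the same rule: a generated multi-row INSERT is split whenever the row count reaches ConfVars.DIRECT_SQL_MAX_ELEMENTS_VALUES_CLAUSE. A simplified sketch of the batching loop for the comma-separated-VALUES dialects (the real method also emits Oracle's INSERT ALL ... SELECT * FROM dual form shown above; tblColumns and rows are illustrative names):

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;

class InsertBatcherSketch {
  // tblColumns is e.g. "MY_TABLE (col1, col2)"; each row is a
  // pre-rendered "v1, v2" value list.
  static List<String> buildInserts(Configuration conf, String tblColumns, List<String> rows) {
    int maxRows = MetastoreConf.getIntVar(conf, ConfVars.DIRECT_SQL_MAX_ELEMENTS_VALUES_CLAUSE);
    List<String> insertStmts = new ArrayList<>();
    StringBuilder sb = new StringBuilder();
    for (int numRows = 0; numRows < rows.size(); numRows++) {
      if (numRows % maxRows == 0) {
        if (numRows > 0) {
          insertStmts.add(sb.substring(0, sb.length() - 1)); // drop trailing comma
        }
        sb.setLength(0);
        sb.append("insert into ").append(tblColumns).append(" values");
      }
      sb.append('(').append(rows.get(numRows)).append("),");
    }
    if (sb.length() > 0) {
      insertStmts.add(sb.substring(0, sb.length() - 1));
    }
    return insertStmts;
  }
}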
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/AcidOpenTxnsCounterService.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/AcidOpenTxnsCounterService.java
new file mode 100644
index 0000000000..864b01ac55
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/AcidOpenTxnsCounterService.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.RunnableConfigurable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Background task that periodically updates the count of open transactions.
+ * Runs inside the Hive Metastore Service.
+ */
+public class AcidOpenTxnsCounterService implements RunnableConfigurable {
+ private static final Logger LOG = LoggerFactory.getLogger(AcidOpenTxnsCounterService.class);
+
+ private Configuration conf;
+
+ @Override
+ public void run() {
+ try {
+ TxnStore txnHandler = TxnUtils.getTxnStore(conf);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Running open txn counter");
+ }
+ txnHandler.countOpenTxns();
+ }
+ catch(Throwable t) {
+ LOG.error("Serious error in {}", Thread.currentThread().getName(), ": {}" + t.getMessage(), t);
+ }
+ }
+
+ @Override
+ public void setConf(Configuration configuration) {
+ conf = configuration;
+ }
+
+ @Override
+ public Configuration getConf() {
+ return conf;
+ }
+}
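The service is deliberately thin: each tick resolves the TxnStore from its Configuration and delegates to countOpenTxns(), which refreshes the open-transaction count that openTxns() consults below. A hedged sketch of driving a single tick by hand, e.g. from a test (assumes a reachable metastore backing database):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.metastore.txn.AcidOpenTxnsCounterService;

class CounterServiceSmokeTest {
  public static void main(String[] args) {
    Configuration conf = MetastoreConf.newMetastoreConf();
    AcidOpenTxnsCounterService svc = new AcidOpenTxnsCounterService();
    svc.setConf(conf);
    svc.run(); // one tick: counts open txns via TxnUtils.getTxnStore(conf)
  }
}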
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java
similarity index 99%
rename from metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java
rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java
index 413ce3b74d..41e428be53 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java
@@ -1,4 +1,4 @@
-/**
+/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
similarity index 96%
rename from metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
index 60839faa35..e676b91a70 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
@@ -1,4 +1,4 @@
-/**
+/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -18,9 +18,10 @@
package org.apache.hadoop.hive.metastore.txn;
import org.apache.hadoop.hive.common.classification.RetrySemantics;
-import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.api.CompactionType;
import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
import org.apache.hadoop.util.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -60,7 +61,7 @@ public CompactionTxnHandler() {
@RetrySemantics.ReadOnly
  public Set<CompactionInfo> findPotentialCompactions(int maxAborted) throws MetaException {
Connection dbConn = null;
-    Set<CompactionInfo> response = new HashSet<CompactionInfo>();
+    Set<CompactionInfo> response = new HashSet<>();
Statement stmt = null;
ResultSet rs = null;
try {
@@ -277,7 +278,7 @@ public void markCompacted(CompactionInfo info) throws MetaException {
@RetrySemantics.ReadOnly
  public List<CompactionInfo> findReadyToClean() throws MetaException {
Connection dbConn = null;
-    List<CompactionInfo> rc = new ArrayList<CompactionInfo>();
+    List<CompactionInfo> rc = new ArrayList<>();
Statement stmt = null;
ResultSet rs = null;
@@ -387,7 +388,7 @@ public void markCleaned(CompactionInfo info) throws MetaException {
while (rs.next()) txnids.add(rs.getLong(1));
// Remove entries from txn_components, as there may be aborted txn components
if (txnids.size() > 0) {
-      List<String> queries = new ArrayList<String>();
+      List<String> queries = new ArrayList<>();
// Prepare prefix and suffix
StringBuilder prefix = new StringBuilder();
@@ -466,7 +467,7 @@ public void cleanEmptyAbortedTxns() throws MetaException {
return;
}
Collections.sort(txnids);//easier to read logs
-    List<String> queries = new ArrayList<String>();
+    List<String> queries = new ArrayList<>();
StringBuilder prefix = new StringBuilder();
StringBuilder suffix = new StringBuilder();
@@ -626,7 +627,7 @@ public void revokeTimedoutWorkers(long timeout) throws MetaException {
+ (ci.partName == null ? "" : " AND PARTITION_NAME='" + ci.partName + "'");*/
LOG.debug("Going to execute <" + s + ">");
rs = stmt.executeQuery(s);
-    List<String> columns = new ArrayList<String>();
+    List<String> columns = new ArrayList<>();
while (rs.next()) {
columns.add(rs.getString(1));
}
@@ -743,9 +744,9 @@ public void purgeCompactionHistory() throws MetaException {
CompactionInfo ci = new CompactionInfo(rs.getLong(1), rs.getString(2), rs.getString(3), rs.getString(4), rs.getString(5).charAt(0));
if(!ci.getFullPartitionName().equals(lastCompactedEntity)) {
lastCompactedEntity = ci.getFullPartitionName();
- rc = new RetentionCounters(conf.getIntVar(HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_ATTEMPTED),
+ rc = new RetentionCounters(MetastoreConf.getIntVar(conf, ConfVars.COMPACTOR_HISTORY_RETENTION_ATTEMPTED),
getFailedCompactionRetention(),
- conf.getIntVar(HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_SUCCEEDED));
+ MetastoreConf.getIntVar(conf, ConfVars.COMPACTOR_HISTORY_RETENTION_SUCCEEDED));
}
checkForDeletion(deleteSet, ci, rc);
}
@@ -755,7 +756,7 @@ public void purgeCompactionHistory() throws MetaException {
return;
}
-    List<String> queries = new ArrayList<String>();
+    List<String> queries = new ArrayList<>();
StringBuilder prefix = new StringBuilder();
StringBuilder suffix = new StringBuilder();
@@ -788,12 +789,12 @@ public void purgeCompactionHistory() throws MetaException {
* compaction threshold which prevents new compactions from being scheduled.
*/
private int getFailedCompactionRetention() {
- int failedThreshold = conf.getIntVar(HiveConf.ConfVars.COMPACTOR_INITIATOR_FAILED_THRESHOLD);
- int failedRetention = conf.getIntVar(HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED);
+ int failedThreshold = MetastoreConf.getIntVar(conf, ConfVars.COMPACTOR_INITIATOR_FAILED_THRESHOLD);
+ int failedRetention = MetastoreConf.getIntVar(conf, ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED);
if(failedRetention < failedThreshold) {
- LOG.warn("Invalid configuration " + HiveConf.ConfVars.COMPACTOR_INITIATOR_FAILED_THRESHOLD.varname +
- "=" + failedRetention + " < " + HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED + "=" +
- failedRetention + ". Will use " + HiveConf.ConfVars.COMPACTOR_INITIATOR_FAILED_THRESHOLD.varname +
+ LOG.warn("Invalid configuration " + ConfVars.COMPACTOR_INITIATOR_FAILED_THRESHOLD.varname +
+ "=" + failedRetention + " < " + ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED + "=" +
+ failedRetention + ". Will use " + ConfVars.COMPACTOR_INITIATOR_FAILED_THRESHOLD.varname +
"=" + failedRetention);
failedRetention = failedThreshold;
}
@@ -825,7 +826,7 @@ public boolean checkFailedCompactions(CompactionInfo ci) throws MetaException {
" and CC_STATE != " + quoteChar(ATTEMPTED_STATE) + " order by CC_ID desc");
int numFailed = 0;
int numTotal = 0;
- int failedThreshold = conf.getIntVar(HiveConf.ConfVars.COMPACTOR_INITIATOR_FAILED_THRESHOLD);
+ int failedThreshold = MetastoreConf.getIntVar(conf, ConfVars.COMPACTOR_INITIATOR_FAILED_THRESHOLD);
while(rs.next() && ++numTotal <= failedThreshold) {
if(rs.getString(1).charAt(0) == FAILED_STATE) {
numFailed++;
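The clamping rule in getFailedCompactionRetention() above is worth spelling out: failed-compaction history must be retained at least as long as the failure threshold, or checkFailedCompactions() could never count enough failures to stop rescheduling a hopeless table or partition. A one-method sketch of the invariant:

class RetentionSketch {
  // Mirrors the hunk above: if retention is configured below the failure
  // threshold, warn and clamp it upward to the threshold.
  static int effectiveFailedRetention(int failedThreshold, int failedRetention) {
    return failedRetention < failedThreshold ? failedThreshold : failedRetention;
  }
}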
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
similarity index 90%
rename from metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
index 0161894740..74b4fe5f02 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
@@ -1,4 +1,4 @@
-/**
+/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -28,10 +28,11 @@
import java.util.Properties;
import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import org.apache.hadoop.hive.conf.HiveConf;
/**
* Utility methods for creating and destroying txn database/schema, plus methods for
@@ -56,9 +57,9 @@ private TxnDbUtil() {
*
   * @param conf Configuration to add these values to
*/
- public static void setConfValues(HiveConf conf) {
- conf.setVar(HiveConf.ConfVars.HIVE_TXN_MANAGER, TXN_MANAGER);
- conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, true);
+ public static void setConfValues(Configuration conf) {
+ MetastoreConf.setVar(conf, ConfVars.HIVE_TXN_MANAGER, TXN_MANAGER);
+ MetastoreConf.setBoolVar(conf, ConfVars.HIVE_SUPPORT_CONCURRENCY, true);
}
public static void prepDb() throws Exception {
@@ -227,16 +228,23 @@ public static void cleanDb() throws Exception {
}
private static boolean dropTable(Statement stmt, String name, int retryCount) throws SQLException {
- try {
- stmt.execute("DROP TABLE " + name);
- return true;
- } catch (SQLException e) {
- if("42Y55".equals(e.getSQLState()) && 30000 == e.getErrorCode()) {
- //failed because object doesn't exist
+ for (int i = 0; i < 3; i++) {
+ try {
+ stmt.execute("DROP TABLE " + name);
return true;
+ } catch (SQLException e) {
+ if ("42Y55".equals(e.getSQLState()) && 30000 == e.getErrorCode()) {
+ //failed because object doesn't exist
+ return true;
+ }
+ if ("X0Y25".equals(e.getSQLState()) && 30000 == e.getErrorCode()) {
+ // Intermittent failure
+ LOG.warn("Intermittent drop failure, retrying, try number " + i);
+ continue;
+ }
+ LOG.error("Unable to drop table " + name + ": " + e.getMessage() +
+ " State=" + e.getSQLState() + " code=" + e.getErrorCode() + " retryCount=" + retryCount);
}
- LOG.error("Unable to drop table " + name + ": " + e.getMessage() +
- " State=" + e.getSQLState() + " code=" + e.getErrorCode() + " retryCount=" + retryCount);
}
return false;
}
@@ -322,12 +330,12 @@ public static String queryToString(String query, boolean includeHeader) throws E
}
static Connection getConnection() throws Exception {
- HiveConf conf = new HiveConf();
- String jdbcDriver = HiveConf.getVar(conf, HiveConf.ConfVars.METASTORE_CONNECTION_DRIVER);
+ Configuration conf = MetastoreConf.newMetastoreConf();
+ String jdbcDriver = MetastoreConf.getVar(conf, ConfVars.CONNECTION_DRIVER);
Driver driver = (Driver) Class.forName(jdbcDriver).newInstance();
Properties prop = new Properties();
- String driverUrl = HiveConf.getVar(conf, HiveConf.ConfVars.METASTORECONNECTURLKEY);
- String user = HiveConf.getVar(conf, HiveConf.ConfVars.METASTORE_CONNECTION_USER_NAME);
+ String driverUrl = MetastoreConf.getVar(conf, ConfVars.CONNECTURLKEY);
+ String user = MetastoreConf.getVar(conf, ConfVars.CONNECTION_USER_NAME);
String passwd = MetastoreConf.getPassword(conf, MetastoreConf.ConfVars.PWD);
prop.setProperty("user", user);
prop.setProperty("password", passwd);
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
similarity index 96%
rename from metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
index f77900d5d7..eeb59d543f 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
@@ -1,4 +1,4 @@
-/**
+/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -23,28 +23,29 @@
import org.apache.commons.dbcp.DriverManagerConnectionFactory;
import org.apache.commons.dbcp.PoolableConnectionFactory;
import org.apache.commons.lang.NotImplementedException;
-import org.apache.hadoop.hive.common.ServerUtils;
-import org.apache.hadoop.hive.common.classification.InterfaceAudience;
-import org.apache.hadoop.hive.common.classification.InterfaceStability;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.common.classification.RetrySemantics;
import org.apache.hadoop.hive.metastore.DatabaseProduct;
-import org.apache.hadoop.hive.metastore.HouseKeeperService;
+import org.apache.hadoop.hive.metastore.RunnableConfigurable;
+import org.apache.hadoop.hive.metastore.ThreadPool;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.datasource.BoneCPDataSourceProvider;
import org.apache.hadoop.hive.metastore.datasource.DataSourceProvider;
import org.apache.hadoop.hive.metastore.datasource.HikariCPDataSourceProvider;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
import org.apache.hadoop.hive.metastore.metrics.Metrics;
import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
import org.apache.hadoop.hive.metastore.tools.SQLGenerator;
+import org.apache.hadoop.hive.metastore.utils.JavaUtils;
+import org.apache.hadoop.hive.metastore.utils.StringableMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.commons.dbcp.PoolingDataSource;
import org.apache.commons.pool.impl.GenericObjectPool;
-import org.apache.hadoop.hive.common.JavaUtils;
-import org.apache.hadoop.hive.common.StringableMap;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConfUtil;
import org.apache.hadoop.hive.metastore.api.*;
import org.apache.hadoop.util.StringUtils;
@@ -58,6 +59,7 @@
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.ReentrantLock;
import java.util.regex.Pattern;
@@ -66,8 +68,8 @@
* server.
*
* Note on log messages: Please include txnid:X and lockid info using
- * {@link org.apache.hadoop.hive.common.JavaUtils#txnIdToString(long)}
- * and {@link org.apache.hadoop.hive.common.JavaUtils#lockIdToString(long)} in all messages.
+ * {@link JavaUtils#txnIdToString(long)}
+ * and {@link JavaUtils#lockIdToString(long)} in all messages.
* The txnid:X and lockid:Y matches how Thrift object toString() methods are generated,
* so keeping the format consistent makes grep'ing the logs much easier.
*
@@ -194,15 +196,13 @@ public static OpertaionType fromDataOperationType(DataOperationType dop) {
private static volatile int maxOpenTxns = 0;
// Whether number of open transactions reaches the threshold
private static volatile boolean tooManyOpenTxns = false;
- // The AcidHouseKeeperService for counting open transactions
- private static volatile HouseKeeperService openTxnsCounter = null;
/**
* Number of consecutive deadlocks we have seen
*/
private int deadlockCnt;
private long deadlockRetryInterval;
- protected HiveConf conf;
+ protected Configuration conf;
private static DatabaseProduct dbProduct;
private static SQLGenerator sqlGenerator;
@@ -225,7 +225,9 @@ public static OpertaionType fromDataOperationType(DataOperationType dop) {
* (e.g. via Compactor services)
*/
  private final static ConcurrentHashMap<String, Semaphore> derbyKey2Lock = new ConcurrentHashMap<>();
- private static final String hostname = ServerUtils.hostname();
+ private static final String hostname = JavaUtils.hostname();
+
+ private static final AtomicBoolean startedOpenTxnCounter = new AtomicBoolean();
// Private methods should never catch SQLException and then throw MetaException. The public
// methods depend on SQLException coming back so they can detect and handle deadlocks. Private
@@ -244,20 +246,17 @@ public TxnHandler() {
* This is logically part of c'tor and must be called prior to any other method.
   * Not physically part of c'tor due to use of reflection
*/
- public void setConf(HiveConf conf) {
+ public void setConf(Configuration conf) {
this.conf = conf;
checkQFileTestHack();
synchronized (TxnHandler.class) {
if (connPool == null) {
- //only do this once per JVM; useful for support
- LOG.info(HiveConfUtil.dumpConfig(conf).toString());
-
Connection dbConn = null;
// Set up the JDBC connection pool
try {
- int maxPoolSize = conf.getIntVar(HiveConf.ConfVars.METASTORE_CONNECTION_POOLING_MAX_CONNECTIONS);
+ int maxPoolSize = MetastoreConf.getIntVar(conf, ConfVars.CONNECTION_POOLING_MAX_CONNECTIONS);
long getConnectionTimeoutMs = 30000;
connPool = setupJdbcConnectionPool(conf, maxPoolSize, getConnectionTimeoutMs);
/*the mutex pools should ideally be somewhat larger since some operations require 1
@@ -283,14 +282,20 @@ public void setConf(HiveConf conf) {
numOpenTxns = Metrics.getOrCreateGauge(MetricsConstants.NUM_OPEN_TXNS);
- timeout = HiveConf.getTimeVar(conf, HiveConf.ConfVars.HIVE_TXN_TIMEOUT, TimeUnit.MILLISECONDS);
+ timeout = MetastoreConf.getTimeVar(conf, ConfVars.TXN_TIMEOUT, TimeUnit.MILLISECONDS);
buildJumpTable();
- retryInterval = HiveConf.getTimeVar(conf, HiveConf.ConfVars.HMSHANDLERINTERVAL,
+ retryInterval = MetastoreConf.getTimeVar(conf, ConfVars.HMSHANDLERINTERVAL,
TimeUnit.MILLISECONDS);
- retryLimit = HiveConf.getIntVar(conf, HiveConf.ConfVars.HMSHANDLERATTEMPTS);
+ retryLimit = MetastoreConf.getIntVar(conf, ConfVars.HMSHANDLERATTEMPTS);
deadlockRetryInterval = retryInterval / 10;
- maxOpenTxns = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVE_MAX_OPEN_TXNS);
+ maxOpenTxns = MetastoreConf.getIntVar(conf, ConfVars.MAX_OPEN_TXNS);
}
+
+ @Override
+ public Configuration getConf() {
+ return conf;
+ }
+
@Override
@RetrySemantics.ReadOnly
public GetOpenTxnsInfoResponse getOpenTxnsInfo() throws MetaException {
@@ -325,7 +330,7 @@ public GetOpenTxnsInfoResponse getOpenTxnsInfo() throws MetaException {
"initialized, null record found in next_txn_id");
}
close(rs);
-    List<TxnInfo> txnInfos = new ArrayList<TxnInfo>();
+    List<TxnInfo> txnInfos = new ArrayList<>();
//need the WHERE clause below to ensure consistent results with READ_COMMITTED
s = "select txn_id, txn_state, txn_user, txn_host, txn_started, txn_last_heartbeat from " +
"TXNS where txn_id <= " + hwm;
@@ -398,7 +403,7 @@ public GetOpenTxnsResponse getOpenTxns() throws MetaException {
"initialized, null record found in next_txn_id");
}
close(rs);
-    List<Long> openList = new ArrayList<Long>();
+    List<Long> openList = new ArrayList<>();
//need the WHERE clause below to ensure consistent results with READ_COMMITTED
s = "select txn_id, txn_state from TXNS where txn_id <= " + hwm + " order by txn_id";
LOG.debug("Going to execute query<" + s + ">");
@@ -437,17 +442,6 @@ public GetOpenTxnsResponse getOpenTxns() throws MetaException {
}
}
- private static void startHouseKeeperService(HiveConf conf, Class c){
- try {
- openTxnsCounter = (HouseKeeperService)c.newInstance();
- openTxnsCounter.start(conf);
- } catch (Exception ex) {
- LOG.error("Failed to start {}" , openTxnsCounter.getClass() +
- ". The system will not handle {} " , openTxnsCounter.getServiceDescription(),
- ". Root Cause: ", ex);
- }
- }
-
/**
* Retry-by-caller note:
* Worst case, it will leave an open txn which will timeout.
@@ -455,16 +449,17 @@ private static void startHouseKeeperService(HiveConf conf, Class c){
@Override
@RetrySemantics.Idempotent
public OpenTxnsResponse openTxns(OpenTxnRequest rqst) throws MetaException {
- if (openTxnsCounter == null) {
- synchronized (TxnHandler.class) {
- try {
- if (openTxnsCounter == null) {
- startHouseKeeperService(conf, Class.forName("org.apache.hadoop.hive.ql.txn.AcidOpenTxnsCounterService"));
- }
- } catch (ClassNotFoundException e) {
- throw new MetaException(e.getMessage());
- }
- }
+ // We have to do this here rather than in setConf, because
+ // AcidOpenTxnsCounterService's constructor eventually ends up calling back into setConf.
+ if (!startedOpenTxnCounter.getAndSet(true)) {
+ // Initialize the thread pool, because in the non-server based case it won't have been
+ // done yet. If it has already been done, there will be no harm.
+ ThreadPool.initialize(conf);
+ RunnableConfigurable thread = new AcidOpenTxnsCounterService();
+ thread.setConf(conf);
+ ThreadPool.getPool().scheduleAtFixedRate(thread, 100, MetastoreConf.getTimeVar(conf,
+ ConfVars.COUNT_OPEN_TXNS_INTERVAL, TimeUnit.MILLISECONDS),
+ TimeUnit.MILLISECONDS);
}
if (!tooManyOpenTxns && numOpenTxns.get() >= maxOpenTxns) {
@@ -507,8 +502,7 @@ public OpenTxnsResponse openTxns(OpenTxnRequest rqst) throws MetaException {
*/
dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
// Make sure the user has not requested an insane amount of txns.
- int maxTxns = HiveConf.getIntVar(conf,
- HiveConf.ConfVars.HIVE_TXN_MAX_OPEN_BATCH);
+ int maxTxns = MetastoreConf.getIntVar(conf, ConfVars.TXN_MAX_OPEN_BATCH);
if (numTxns > maxTxns) numTxns = maxTxns;
stmt = dbConn.createStatement();
@@ -525,7 +519,7 @@ public OpenTxnsResponse openTxns(OpenTxnRequest rqst) throws MetaException {
stmt.executeUpdate(s);
long now = getDbTime(dbConn);
-      List<Long> txnIds = new ArrayList<Long>(numTxns);
+      List<Long> txnIds = new ArrayList<>(numTxns);
      List<String> rows = new ArrayList<>();
for (long i = first; i < first + numTxns; i++) {
@@ -639,7 +633,7 @@ public void abortTxns(AbortTxnsRequest rqst) throws NoSuchTxnException, MetaExce
* that they read appropriately. In particular, if txns do not overlap, then one follows the other
   * (assuming they write the same entity), and thus the 2nd must see changes of the 1st. We ensure
* this by locking in snapshot after
- * {@link #openTxns(OpenTxnRequest)} call is made (see {@link org.apache.hadoop.hive.ql.Driver#acquireLocksAndOpenTxn()})
+ * {@link #openTxns(OpenTxnRequest)} call is made (see org.apache.hadoop.hive.ql.Driver.acquireLocksAndOpenTxn)
* and mutexing openTxn() with commit(). In other words, once a S.commit() starts we must ensure
* that txn T which will be considered a later txn, locks in a snapshot that includes the result
* of S's commit (assuming no other txns).
@@ -1038,7 +1032,7 @@ private ConnectionLockIdPair enqueueLockWithRetry(LockRequest rqst) throws NoSuc
long intLockId = 0;
for (LockComponent lc : rqst.getComponent()) {
if(lc.isSetOperationType() && lc.getOperationType() == DataOperationType.UNSET &&
- (conf.getBoolVar(HiveConf.ConfVars.HIVE_IN_TEST) || conf.getBoolVar(HiveConf.ConfVars.HIVE_IN_TEZ_TEST))) {
+ (MetastoreConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST) || MetastoreConf.getBoolVar(conf, ConfVars.HIVE_IN_TEZ_TEST))) {
//old version of thrift client should have (lc.isSetOperationType() == false) but they do not
      //If you add a default value to a variable, isSet() for that variable is true regardless of where the
//message was created (for object variables. It works correctly for boolean vars, e.g. LockComponent.isAcid).
@@ -1288,8 +1282,8 @@ public ShowLocksResponse showLocks(ShowLocksRequest rqst) throws MetaException {
try {
Connection dbConn = null;
ShowLocksResponse rsp = new ShowLocksResponse();
-      List<ShowLocksResponseElement> elems = new ArrayList<ShowLocksResponseElement>();
-      List<LockInfoExt> sortedList = new ArrayList<LockInfoExt>();
+      List<ShowLocksResponseElement> elems = new ArrayList<>();
+      List<LockInfoExt> sortedList = new ArrayList<>();
Statement stmt = null;
try {
dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
@@ -1423,8 +1417,8 @@ public HeartbeatTxnRangeResponse heartbeatTxnRange(HeartbeatTxnRangeRequest rqst
Connection dbConn = null;
Statement stmt = null;
HeartbeatTxnRangeResponse rsp = new HeartbeatTxnRangeResponse();
-    Set<Long> nosuch = new HashSet<Long>();
-    Set<Long> aborted = new HashSet<Long>();
+    Set<Long> nosuch = new HashSet<>();
+    Set<Long> aborted = new HashSet<>();
rsp.setNosuch(nosuch);
rsp.setAborted(aborted);
try {
@@ -1627,7 +1621,7 @@ private static String compactorStateToResponse(char s) {
}
@RetrySemantics.ReadOnly
public ShowCompactResponse showCompact(ShowCompactRequest rqst) throws MetaException {
-    ShowCompactResponse response = new ShowCompactResponse(new ArrayList<ShowCompactResponseElement>());
+    ShowCompactResponse response = new ShowCompactResponse(new ArrayList<>());
Connection dbConn = null;
Statement stmt = null;
try {
@@ -1775,7 +1769,7 @@ public void cleanupRecords(HiveObjectType type, Database db, Table table,
String tblName;
dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
stmt = dbConn.createStatement();
-      List<String> queries = new ArrayList<String>();
+      List<String> queries = new ArrayList<>();
StringBuilder buff = new StringBuilder();
switch (type) {
@@ -2340,8 +2334,8 @@ public int compare(LockType t1, LockType t2) {
  private static Map<LockType, Map<LockType, Map<LockState, LockAction>>> jumpTable;
private void checkQFileTestHack() {
- boolean hackOn = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_IN_TEST) ||
- HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_IN_TEZ_TEST);
+ boolean hackOn = MetastoreConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST) ||
+ MetastoreConf.getBoolVar(conf, ConfVars.HIVE_IN_TEZ_TEST);
if (hackOn) {
LOG.info("Hacking in canned values for transaction manager");
// Set up the transaction/locking db in the derby metastore
@@ -2389,7 +2383,7 @@ private int abortTxns(Connection dbConn, List<Long> txnids, long max_heartbeat,
stmt = dbConn.createStatement();
//This is an update statement, thus at any Isolation level will take Write locks so will block
//all other ops using S4U on TXNS row.
-    List<String> queries = new ArrayList<String>();
+    List<String> queries = new ArrayList<>();
StringBuilder prefix = new StringBuilder();
StringBuilder suffix = new StringBuilder();
@@ -2488,7 +2482,7 @@ private LockResponse checkLock(Connection dbConn, long extLockId)
"hl_lock_int_id, hl_db, hl_table, hl_partition, hl_lock_state, " +
"hl_lock_type, hl_txnid from HIVE_LOCKS where hl_db in (");
-    Set<String> strings = new HashSet<String>(locksBeingChecked.size());
+    Set<String> strings = new HashSet<>(locksBeingChecked.size());
//This the set of entities that the statement represented by extLockId wants to update
    List<LockInfo> writeSet = new ArrayList<>();
@@ -2992,7 +2986,7 @@ private LockInfo getTxnIdFromLockId(Connection dbConn, long extLockId)
LOG.debug("Going to execute query <" + s + ">");
ResultSet rs = stmt.executeQuery(s);
boolean sawAtLeastOne = false;
-    List<LockInfo> ourLockInfo = new ArrayList<LockInfo>();
+    List<LockInfo> ourLockInfo = new ArrayList<>();
while (rs.next()) {
ourLockInfo.add(new LockInfo(rs));
sawAtLeastOne = true;
@@ -3033,7 +3027,7 @@ private void timeOutLocks(Connection dbConn, long now) {
return;
}
-    List<String> queries = new ArrayList<String>();
+    List<String> queries = new ArrayList<>();
StringBuilder prefix = new StringBuilder();
StringBuilder suffix = new StringBuilder();
@@ -3187,12 +3181,11 @@ public void countOpenTxns() throws MetaException {
}
}
- private static synchronized DataSource setupJdbcConnectionPool(HiveConf conf, int maxPoolSize, long getConnectionTimeoutMs) throws SQLException {
+ private static synchronized DataSource setupJdbcConnectionPool(Configuration conf, int maxPoolSize, long getConnectionTimeoutMs) throws SQLException {
String driverUrl = DataSourceProvider.getMetastoreJdbcDriverUrl(conf);
String user = DataSourceProvider.getMetastoreJdbcUser(conf);
String passwd = DataSourceProvider.getMetastoreJdbcPasswd(conf);
- String connectionPooler = conf.getVar(
- HiveConf.ConfVars.METASTORE_CONNECTION_POOLING_TYPE).toLowerCase();
+ String connectionPooler = MetastoreConf.getVar(conf, ConfVars.CONNECTION_POOLING_TYPE).toLowerCase();
if ("bonecp".equals(connectionPooler)) {
doRetryOnConnPool = true; // Enable retries to work around BONECP bug.
@@ -3221,16 +3214,14 @@ private static synchronized DataSource setupJdbcConnectionPool(HiveConf conf, in
private static synchronized void buildJumpTable() {
if (jumpTable != null) return;
-    jumpTable =
-      new HashMap<LockType, Map<LockType, Map<LockState, LockAction>>>(3);
+    jumpTable = new HashMap<>(3);
// SR: Lock we are trying to acquire is shared read
-    Map<LockType, Map<LockState, LockAction>> m =
-      new HashMap<LockType, Map<LockState, LockAction>>(3);
+    Map<LockType, Map<LockState, LockAction>> m = new HashMap<>(3);
jumpTable.put(LockType.SHARED_READ, m);
// SR.SR: Lock we are examining is shared read
-    Map<LockState, LockAction> m2 = new HashMap<LockState, LockAction>(2);
+    Map<LockState, LockAction> m2 = new HashMap<>(2);
m.put(LockType.SHARED_READ, m2);
// SR.SR.acquired Lock we are examining is acquired; We can acquire
@@ -3244,7 +3235,7 @@ private static synchronized void buildJumpTable() {
m2.put(LockState.WAITING, LockAction.KEEP_LOOKING);
// SR.SW: Lock we are examining is shared write
-    m2 = new HashMap<LockState, LockAction>(2);
+    m2 = new HashMap<>(2);
m.put(LockType.SHARED_WRITE, m2);
// SR.SW.acquired Lock we are examining is acquired; We can acquire
@@ -3259,7 +3250,7 @@ private static synchronized void buildJumpTable() {
m2.put(LockState.WAITING, LockAction.KEEP_LOOKING);
// SR.E: Lock we are examining is exclusive
-    m2 = new HashMap<LockState, LockAction>(2);
+    m2 = new HashMap<>(2);
m.put(LockType.EXCLUSIVE, m2);
// No matter whether it has acquired or not, we cannot pass an exclusive.
@@ -3267,11 +3258,11 @@ private static synchronized void buildJumpTable() {
m2.put(LockState.WAITING, LockAction.WAIT);
// SW: Lock we are trying to acquire is shared write
-    m = new HashMap<LockType, Map<LockState, LockAction>>(3);
+    m = new HashMap<>(3);
jumpTable.put(LockType.SHARED_WRITE, m);
// SW.SR: Lock we are examining is shared read
-    m2 = new HashMap<LockState, LockAction>(2);
+    m2 = new HashMap<>(2);
m.put(LockType.SHARED_READ, m2);
// SW.SR.acquired Lock we are examining is acquired; We need to keep
@@ -3285,7 +3276,7 @@ private static synchronized void buildJumpTable() {
m2.put(LockState.WAITING, LockAction.KEEP_LOOKING);
// SW.SW: Lock we are examining is shared write
-    m2 = new HashMap<LockState, LockAction>(2);
+    m2 = new HashMap<>(2);
m.put(LockType.SHARED_WRITE, m2);
// Regardless of acquired or waiting, one shared write cannot pass another.
@@ -3293,7 +3284,7 @@ private static synchronized void buildJumpTable() {
m2.put(LockState.WAITING, LockAction.WAIT);
// SW.E: Lock we are examining is exclusive
-    m2 = new HashMap<LockState, LockAction>(2);
+    m2 = new HashMap<>(2);
m.put(LockType.EXCLUSIVE, m2);
// No matter whether it has acquired or not, we cannot pass an exclusive.
@@ -3301,11 +3292,11 @@ private static synchronized void buildJumpTable() {
m2.put(LockState.WAITING, LockAction.WAIT);
// E: Lock we are trying to acquire is exclusive
-    m = new HashMap<LockType, Map<LockState, LockAction>>(3);
+    m = new HashMap<>(3);
jumpTable.put(LockType.EXCLUSIVE, m);
// E.SR: Lock we are examining is shared read
-    m2 = new HashMap<LockState, LockAction>(2);
+    m2 = new HashMap<>(2);
m.put(LockType.SHARED_READ, m2);
// Exclusives can never pass
@@ -3313,7 +3304,7 @@ private static synchronized void buildJumpTable() {
m2.put(LockState.WAITING, LockAction.WAIT);
// E.SW: Lock we are examining is shared write
-    m2 = new HashMap<LockState, LockAction>(2);
+    m2 = new HashMap<>(2);
m.put(LockType.SHARED_WRITE, m2);
// Exclusives can never pass
@@ -3321,7 +3312,7 @@ private static synchronized void buildJumpTable() {
m2.put(LockState.WAITING, LockAction.WAIT);
// E.E: Lock we are examining is exclusive
-    m2 = new HashMap<LockState, LockAction>(2);
+    m2 = new HashMap<>(2);
m.put(LockType.EXCLUSIVE, m2);
// No matter whether it has acquired or not, we cannot pass an exclusive.
@@ -3331,7 +3322,7 @@ private static synchronized void buildJumpTable() {
/**
* Returns true if {@code ex} should be retried
*/
- static boolean isRetryable(HiveConf conf, Exception ex) {
+ static boolean isRetryable(Configuration conf, Exception ex) {
if(ex instanceof SQLException) {
SQLException sqlException = (SQLException)ex;
if("08S01".equalsIgnoreCase(sqlException.getSQLState())) {
@@ -3343,7 +3334,7 @@ static boolean isRetryable(HiveConf conf, Exception ex) {
return true;
}
- String regex = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_TXN_RETRYABLE_SQLEX_REGEX);
+ String regex = MetastoreConf.getVar(conf, ConfVars.TXN_RETRYABLE_SQLEX_REGEX);
if (regex != null && !regex.isEmpty()) {
String[] patterns = regex.split(",(?=\\S)");
String message = getMessage((SQLException)ex);
@@ -3568,13 +3559,13 @@ public void releaseLocks() {
// Note that this depends on the fact that no-one in this class calls anything but
// getConnection. If you want to use any of the Logger or wrap calls you'll have to
// implement them.
- private final HiveConf conf;
+ private final Configuration conf;
private Driver driver;
private String connString;
private String user;
private String passwd;
- public NoPoolConnectionPool(HiveConf conf) {
+ public NoPoolConnectionPool(Configuration conf) {
this.conf = conf;
}
@@ -3591,10 +3582,10 @@ public Connection getConnection() throws SQLException {
public Connection getConnection(String username, String password) throws SQLException {
// Find the JDBC driver
if (driver == null) {
- String driverName = conf.getVar(HiveConf.ConfVars.METASTORE_CONNECTION_DRIVER);
+ String driverName = MetastoreConf.getVar(conf, ConfVars.CONNECTION_DRIVER);
if (driverName == null || driverName.equals("")) {
String msg = "JDBC driver for transaction db not set in configuration " +
- "file, need to set " + HiveConf.ConfVars.METASTORE_CONNECTION_DRIVER.varname;
+ "file, need to set " + ConfVars.CONNECTION_DRIVER.varname;
LOG.error(msg);
throw new RuntimeException(msg);
}
@@ -3612,7 +3603,7 @@ public Connection getConnection(String username, String password) throws SQLExce
throw new RuntimeException("Unable to find driver " + driverName + ", " + e.getMessage(),
e);
}
- connString = conf.getVar(HiveConf.ConfVars.METASTORECONNECTURLKEY);
+ connString = MetastoreConf.getVar(conf, ConfVars.CONNECTURLKEY);
}
try {
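The most consequential hunk in this file is the openTxns() change: the reflective HouseKeeperService bootstrap is replaced by a once-per-JVM guard, where AtomicBoolean.getAndSet(true) returns the previous value so exactly one racing caller schedules the counter. A minimal sketch of the idiom, with a plain ScheduledExecutorService standing in for the patch's ThreadPool:

import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

class OnceOnlyScheduler {
  private static final AtomicBoolean started = new AtomicBoolean();

  // getAndSet(true) is atomic: exactly one caller observes false and wins
  // the right to schedule, no matter how many threads race through here.
  static void ensureStarted(ScheduledExecutorService pool, Runnable task, long intervalMs) {
    if (!started.getAndSet(true)) {
      pool.scheduleAtFixedRate(task, 100, intervalMs, TimeUnit.MILLISECONDS);
    }
  }
}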
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java
similarity index 78%
rename from metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java
rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java
index 3eb3827d06..96a7f56715 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java
@@ -1,4 +1,4 @@
-/**
+/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -18,10 +18,10 @@
package org.apache.hadoop.hive.metastore.txn;
import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.hive.common.classification.InterfaceAudience;
-import org.apache.hadoop.hive.common.classification.InterfaceStability;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.hive.common.classification.RetrySemantics;
-import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.api.*;
import java.sql.SQLException;
@@ -32,21 +32,10 @@
/**
* A handler to answer transaction related calls that come into the metastore
* server.
- *
- * Note on log messages: Please include txnid:X and lockid info using
- * {@link org.apache.hadoop.hive.common.JavaUtils#txnIdToString(long)}
- * and {@link org.apache.hadoop.hive.common.JavaUtils#lockIdToString(long)} in all messages.
- * The txnid:X and lockid:Y matches how Thrift object toString() methods are generated,
- * so keeping the format consistent makes grep'ing the logs much easier.
- *
- * Note on HIVE_LOCKS.hl_last_heartbeat.
- * For locks that are part of transaction, we set this 0 (would rather set it to NULL but
- * Currently the DB schema has this NOT NULL) and only update/read heartbeat from corresponding
- * transaction in TXNS.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
-public interface TxnStore {
+public interface TxnStore extends Configurable {
enum MUTEX_KEY {Initiator, Cleaner, HouseKeeper, CompactionHistory, CheckLock,
WriteSetCleaner, CompactionScheduler}
@@ -58,9 +47,7 @@
String SUCCEEDED_RESPONSE = "succeeded";
String ATTEMPTED_RESPONSE = "attempted";
- public static final int TIMED_OUT_TXN_ABORT_BATCH_SIZE = 50000;
-
- public void setConf(HiveConf conf);
+ int TIMED_OUT_TXN_ABORT_BATCH_SIZE = 50000;
/**
* Get information about open transactions. This gives extensive information about the
@@ -70,7 +57,7 @@
* @throws MetaException
*/
@RetrySemantics.ReadOnly
- public GetOpenTxnsInfoResponse getOpenTxnsInfo() throws MetaException;
+ GetOpenTxnsInfoResponse getOpenTxnsInfo() throws MetaException;
/**
* Get list of valid transactions. This gives just the list of transactions that are open.
@@ -78,14 +65,14 @@
* @throws MetaException
*/
@RetrySemantics.ReadOnly
- public GetOpenTxnsResponse getOpenTxns() throws MetaException;
+ GetOpenTxnsResponse getOpenTxns() throws MetaException;
/**
* Get the count for open transactions.
* @throws MetaException
*/
@RetrySemantics.ReadOnly
- public void countOpenTxns() throws MetaException;
+ void countOpenTxns() throws MetaException;
/**
* Open a set of transactions
@@ -94,7 +81,7 @@
* @throws MetaException
*/
@RetrySemantics.Idempotent
- public OpenTxnsResponse openTxns(OpenTxnRequest rqst) throws MetaException;
+ OpenTxnsResponse openTxns(OpenTxnRequest rqst) throws MetaException;
/**
* Abort (rollback) a transaction.
@@ -103,7 +90,7 @@
* @throws MetaException
*/
@RetrySemantics.Idempotent
- public void abortTxn(AbortTxnRequest rqst) throws NoSuchTxnException, MetaException, TxnAbortedException;
+ void abortTxn(AbortTxnRequest rqst) throws NoSuchTxnException, MetaException, TxnAbortedException;
/**
* Abort (rollback) a list of transactions in one request.
@@ -112,7 +99,7 @@
* @throws MetaException
*/
@RetrySemantics.Idempotent
- public void abortTxns(AbortTxnsRequest rqst) throws NoSuchTxnException, MetaException;
+ void abortTxns(AbortTxnsRequest rqst) throws NoSuchTxnException, MetaException;
/**
* Commit a transaction
@@ -122,7 +109,7 @@
* @throws MetaException
*/
@RetrySemantics.Idempotent
- public void commitTxn(CommitTxnRequest rqst)
+ void commitTxn(CommitTxnRequest rqst)
throws NoSuchTxnException, TxnAbortedException, MetaException;
/**
@@ -135,7 +122,7 @@ public void commitTxn(CommitTxnRequest rqst)
* @throws MetaException
*/
@RetrySemantics.CannotRetry
- public LockResponse lock(LockRequest rqst)
+ LockResponse lock(LockRequest rqst)
throws NoSuchTxnException, TxnAbortedException, MetaException;
/**
@@ -149,7 +136,7 @@ public LockResponse lock(LockRequest rqst)
* @throws MetaException
*/
@RetrySemantics.SafeToRetry
- public LockResponse checkLock(CheckLockRequest rqst)
+ LockResponse checkLock(CheckLockRequest rqst)
throws NoSuchTxnException, NoSuchLockException, TxnAbortedException, MetaException;
/**
@@ -162,7 +149,7 @@ public LockResponse checkLock(CheckLockRequest rqst)
* @throws MetaException
*/
@RetrySemantics.Idempotent
- public void unlock(UnlockRequest rqst)
+ void unlock(UnlockRequest rqst)
throws NoSuchLockException, TxnOpenException, MetaException;
/**
@@ -172,7 +159,7 @@ public void unlock(UnlockRequest rqst)
* @throws MetaException
*/
@RetrySemantics.ReadOnly
- public ShowLocksResponse showLocks(ShowLocksRequest rqst) throws MetaException;
+ ShowLocksResponse showLocks(ShowLocksRequest rqst) throws MetaException;
/**
* Send a heartbeat for a lock or a transaction
@@ -183,7 +170,7 @@ public void unlock(UnlockRequest rqst)
* @throws MetaException
*/
@RetrySemantics.SafeToRetry
- public void heartbeat(HeartbeatRequest ids)
+ void heartbeat(HeartbeatRequest ids)
throws NoSuchTxnException, NoSuchLockException, TxnAbortedException, MetaException;
/**
@@ -193,7 +180,7 @@ public void heartbeat(HeartbeatRequest ids)
* @throws MetaException
*/
@RetrySemantics.SafeToRetry
- public HeartbeatTxnRangeResponse heartbeatTxnRange(HeartbeatTxnRangeRequest rqst)
+ HeartbeatTxnRangeResponse heartbeatTxnRange(HeartbeatTxnRangeRequest rqst)
throws MetaException;
/**
@@ -204,7 +191,7 @@ public HeartbeatTxnRangeResponse heartbeatTxnRange(HeartbeatTxnRangeRequest rqst
* @throws MetaException
*/
@RetrySemantics.Idempotent
- public CompactionResponse compact(CompactionRequest rqst) throws MetaException;
+ CompactionResponse compact(CompactionRequest rqst) throws MetaException;
/**
* Show list of current compactions
@@ -213,7 +200,7 @@ public HeartbeatTxnRangeResponse heartbeatTxnRange(HeartbeatTxnRangeRequest rqst
* @throws MetaException
*/
@RetrySemantics.ReadOnly
- public ShowCompactResponse showCompact(ShowCompactRequest rqst) throws MetaException;
+ ShowCompactResponse showCompact(ShowCompactRequest rqst) throws MetaException;
/**
* Add information on a set of dynamic partitions that participated in a transaction.
@@ -223,7 +210,7 @@ public HeartbeatTxnRangeResponse heartbeatTxnRange(HeartbeatTxnRangeRequest rqst
* @throws MetaException
*/
@RetrySemantics.SafeToRetry
- public void addDynamicPartitions(AddDynamicPartitions rqst)
+ void addDynamicPartitions(AddDynamicPartitions rqst)
throws NoSuchTxnException, TxnAbortedException, MetaException;
/**
@@ -235,14 +222,14 @@ public void addDynamicPartitions(AddDynamicPartitions rqst)
* @throws MetaException
*/
@RetrySemantics.Idempotent
- public void cleanupRecords(HiveObjectType type, Database db, Table table,
+ void cleanupRecords(HiveObjectType type, Database db, Table table,
                      Iterator<Partition> partitionIterator) throws MetaException;
/**
* Timeout transactions and/or locks. This should only be called by the compactor.
*/
@RetrySemantics.Idempotent
- public void performTimeOuts();
+ void performTimeOuts();
/**
* This will look through the completed_txn_components table and look for partitions or tables
@@ -254,7 +241,7 @@ public void cleanupRecords(HiveObjectType type, Database db, Table table,
* or runAs set since these are only potential compactions not actual ones.
*/
@RetrySemantics.ReadOnly
-  public Set<CompactionInfo> findPotentialCompactions(int maxAborted) throws MetaException;
+  Set<CompactionInfo> findPotentialCompactions(int maxAborted) throws MetaException;
/**
* Sets the user to run as. This is for the case
@@ -263,7 +250,7 @@ public void cleanupRecords(HiveObjectType type, Database db, Table table,
* @param user user to run the jobs as
*/
@RetrySemantics.Idempotent
- public void setRunAs(long cq_id, String user) throws MetaException;
+ void setRunAs(long cq_id, String user) throws MetaException;
/**
* This will grab the next compaction request off of
@@ -272,7 +259,7 @@ public void cleanupRecords(HiveObjectType type, Database db, Table table,
* @return an info element for this compaction request, or null if there is no work to do now.
*/
@RetrySemantics.ReadOnly
- public CompactionInfo findNextToCompact(String workerId) throws MetaException;
+ CompactionInfo findNextToCompact(String workerId) throws MetaException;
/**
* This will mark an entry in the queue as compacted
@@ -280,7 +267,7 @@ public void cleanupRecords(HiveObjectType type, Database db, Table table,
* @param info info on the compaction entry to mark as compacted.
*/
@RetrySemantics.SafeToRetry
- public void markCompacted(CompactionInfo info) throws MetaException;
+ void markCompacted(CompactionInfo info) throws MetaException;
/**
* Find entries in the queue that are ready to
@@ -288,7 +275,7 @@ public void cleanupRecords(HiveObjectType type, Database db, Table table,
* @return information on the entry in the queue.
*/
@RetrySemantics.ReadOnly
- public List<CompactionInfo> findReadyToClean() throws MetaException;
+ List<CompactionInfo> findReadyToClean() throws MetaException;
/**
* This will remove an entry from the queue after
@@ -297,7 +284,7 @@ public void cleanupRecords(HiveObjectType type, Database db, Table table,
* @param info info on the compaction entry to remove
*/
@RetrySemantics.CannotRetry
- public void markCleaned(CompactionInfo info) throws MetaException;
+ void markCleaned(CompactionInfo info) throws MetaException;
/**
* Mark a compaction entry as failed. This will move it to the compaction history queue with a
@@ -307,7 +294,7 @@ public void cleanupRecords(HiveObjectType type, Database db, Table table,
* @throws MetaException
*/
@RetrySemantics.CannotRetry
- public void markFailed(CompactionInfo info) throws MetaException;
+ void markFailed(CompactionInfo info) throws MetaException;
/**
* Clean up aborted transactions from txns that have no components in txn_components. The reason such
@@ -315,7 +302,7 @@ public void cleanupRecords(HiveObjectType type, Database db, Table table,
* abandoned it w/o doing any work) or due to {@link #markCleaned(CompactionInfo)} being called.
*/
@RetrySemantics.SafeToRetry
- public void cleanEmptyAbortedTxns() throws MetaException;
+ void cleanEmptyAbortedTxns() throws MetaException;
/**
* This will take all entries assigned to workers
@@ -327,7 +314,7 @@ public void cleanupRecords(HiveObjectType type, Database db, Table table,
* so that like hostname% will match the worker id.
*/
@RetrySemantics.Idempotent
- public void revokeFromLocalWorkers(String hostname) throws MetaException;
+ void revokeFromLocalWorkers(String hostname) throws MetaException;
/**
* This call will return all compaction queue
@@ -339,7 +326,7 @@ public void cleanupRecords(HiveObjectType type, Database db, Table table,
* declared dead.
*/
@RetrySemantics.Idempotent
- public void revokeTimedoutWorkers(long timeout) throws MetaException;
+ void revokeTimedoutWorkers(long timeout) throws MetaException;
/**
* Queries metastore DB directly to find columns in the table which have statistics information.
@@ -348,13 +335,13 @@ public void cleanupRecords(HiveObjectType type, Database db, Table table,
* @throws MetaException
*/
@RetrySemantics.ReadOnly
- public List<String> findColumnsWithStats(CompactionInfo ci) throws MetaException;
+ List<String> findColumnsWithStats(CompactionInfo ci) throws MetaException;
/**
* Record the highest txn id that the {@code ci} compaction job will pay attention to.
*/
@RetrySemantics.Idempotent
- public void setCompactionHighestTxnId(CompactionInfo ci, long highestTxnId) throws MetaException;
+ void setCompactionHighestTxnId(CompactionInfo ci, long highestTxnId) throws MetaException;
/**
* For any given compactable entity (partition, table if not partitioned) the history of compactions
@@ -365,14 +352,14 @@ public void cleanupRecords(HiveObjectType type, Database db, Table table,
* @throws MetaException
*/
@RetrySemantics.SafeToRetry
- public void purgeCompactionHistory() throws MetaException;
+ void purgeCompactionHistory() throws MetaException;
/**
* WriteSet tracking is used to ensure proper transaction isolation. This method deletes the
* transaction metadata once it becomes unnecessary.
*/
@RetrySemantics.SafeToRetry
- public void performWriteSetGC();
+ void performWriteSetGC();
/**
* Determine if there are enough consecutive failures compacting a table or partition that no
@@ -383,16 +370,16 @@ public void cleanupRecords(HiveObjectType type, Database db, Table table,
* @throws MetaException
*/
@RetrySemantics.ReadOnly
- public boolean checkFailedCompactions(CompactionInfo ci) throws MetaException;
+ boolean checkFailedCompactions(CompactionInfo ci) throws MetaException;
@VisibleForTesting
- public int numLocksInLockTable() throws SQLException, MetaException;
+ int numLocksInLockTable() throws SQLException, MetaException;
@VisibleForTesting
long setTimeout(long milliseconds);
@RetrySemantics.Idempotent
- public MutexAPI getMutexAPI();
+ MutexAPI getMutexAPI();
/**
* This is primarily designed to provide coarse grained mutex support to operations running
@@ -401,12 +388,12 @@ public void cleanupRecords(HiveObjectType type, Database db, Table table,
*
* In the RDBMS world each {@code LockHandle} uses a java.sql.Connection so use it sparingly.
*/
- public static interface MutexAPI {
+ interface MutexAPI {
/**
* The {@code key} is the name of the lock. Will acquire an exclusive lock or block. It returns
* a handle which must be used to release the lock. Each invocation returns a new handle.
*/
- public LockHandle acquireLock(String key) throws MetaException;
+ LockHandle acquireLock(String key) throws MetaException;
/**
* Same as {@link #acquireLock(String)} but takes an already existing handle as input. This
@@ -414,12 +401,12 @@ public void cleanupRecords(HiveObjectType type, Database db, Table table,
* the same handle will be released together.
* @param handle not NULL
*/
- public void acquireLock(String key, LockHandle handle) throws MetaException;
- public static interface LockHandle {
+ void acquireLock(String key, LockHandle handle) throws MetaException;
+ interface LockHandle {
/**
* Releases all locks associated with this handle.
*/
- public void releaseLocks();
+ void releaseLocks();
}
}
@@ -429,5 +416,5 @@ public void cleanupRecords(HiveObjectType type, Database db, Table table,
* @param id {@link CompactionInfo#id}
*/
@RetrySemantics.Idempotent
- public void setHadoopJobId(String hadoopJobId, long id);
+ void setHadoopJobId(String hadoopJobId, long id);
}
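
For readers following the interface change, a minimal usage sketch of the MutexAPI contract above; it assumes a TxnStore obtained via TxnUtils.getTxnStore(conf), and the lock key name is illustrative rather than one Hive necessarily uses:

import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.txn.TxnStore;

public class MutexExample {
  // Acquire the named mutex, do the critical work, and always release the handle.
  static void withMutex(TxnStore txnStore) throws MetaException {
    TxnStore.MutexAPI.LockHandle handle = null;
    try {
      handle = txnStore.getMutexAPI().acquireLock("Initiator"); // illustrative key name
      // ... mutually exclusive work; each handle holds a JDBC connection, so keep it brief
    } finally {
      if (handle != null) {
        handle.releaseLocks(); // releases every lock tied to this handle
      }
    }
  }
}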
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java
similarity index 91%
rename from metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java
rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java
index e6c62d3830..2d998082c1 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java
@@ -1,4 +1,4 @@
-/**
+/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -17,17 +17,19 @@
*/
package org.apache.hadoop.hive.metastore.txn;
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.common.ValidCompactorTxnList;
import org.apache.hadoop.hive.common.ValidReadTxnList;
import org.apache.hadoop.hive.common.ValidTxnList;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse;
import org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.api.TxnInfo;
import org.apache.hadoop.hive.metastore.api.TxnState;
import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+import org.apache.hadoop.hive.metastore.utils.JavaUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -108,11 +110,10 @@ public static ValidTxnList createValidCompactTxnList(GetOpenTxnsInfoResponse txn
* @param conf configuration
* @return txn store
*/
- public static TxnStore getTxnStore(HiveConf conf) {
- String className = conf.getVar(HiveConf.ConfVars.METASTORE_TXN_STORE_IMPL);
+ public static TxnStore getTxnStore(Configuration conf) {
+ String className = MetastoreConf.getVar(conf, ConfVars.TXN_STORE_IMPL);
try {
- TxnStore handler = ((Class<? extends TxnHandler>) MetaStoreUtils.getClass(
- className)).newInstance();
+ TxnStore handler = JavaUtils.getClass(className, TxnStore.class).newInstance();
handler.setConf(conf);
return handler;
} catch (Exception e) {
@@ -149,13 +150,13 @@ public static boolean isAcidTable(Table table) {
* e.g. ( id in (1,2,3) OR id in (4,5,6) )
* @param notIn true if the clause to be broken up is NOT IN
*/
- public static void buildQueryWithINClause(HiveConf conf, List<String> queries, StringBuilder prefix,
+ public static void buildQueryWithINClause(Configuration conf, List<String> queries, StringBuilder prefix,
StringBuilder suffix, List<Long> inList,
String inColumn, boolean addParens, boolean notIn) {
if (inList == null || inList.size() == 0) {
throw new IllegalArgumentException("The IN list is empty!");
}
- int batchSize = conf.getIntVar(HiveConf.ConfVars.METASTORE_DIRECT_SQL_MAX_ELEMENTS_IN_CLAUSE);
+ int batchSize = MetastoreConf.getIntVar(conf, ConfVars.DIRECT_SQL_MAX_ELEMENTS_IN_CLAUSE);
int numWholeBatches = inList.size() / batchSize;
StringBuilder buf = new StringBuilder();
buf.append(prefix);
@@ -227,8 +228,8 @@ public static void buildQueryWithINClause(HiveConf conf, List<String> queries, S
}
/** Estimate if the size of a string will exceed certain limit */
- private static boolean needNewQuery(HiveConf conf, StringBuilder sb) {
- int queryMemoryLimit = conf.getIntVar(HiveConf.ConfVars.METASTORE_DIRECT_SQL_MAX_QUERY_LENGTH);
+ private static boolean needNewQuery(Configuration conf, StringBuilder sb) {
+ int queryMemoryLimit = MetastoreConf.getIntVar(conf, ConfVars.DIRECT_SQL_MAX_QUERY_LENGTH);
// http://www.javamex.com/tutorials/memory/string_memory_usage.shtml
long sizeInBytes = 8 * (((sb.length() * 2) + 45) / 8);
return sizeInBytes / 1024 > queryMemoryLimit;
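
As a caller-side sketch of the reworked buildQueryWithINClause signature (the table and column names are illustrative; batching is driven by DIRECT_SQL_MAX_ELEMENTS_IN_CLAUSE and splitting by the DIRECT_SQL_MAX_QUERY_LENGTH estimate above):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.metastore.txn.TxnUtils;

public class INClauseExample {
  public static void main(String[] args) {
    Configuration conf = MetastoreConf.newMetastoreConf();
    List<String> queries = new ArrayList<>();
    StringBuilder prefix = new StringBuilder("delete from TXN_COMPONENTS where ");
    StringBuilder suffix = new StringBuilder();
    List<Long> txnIds = Arrays.asList(1L, 2L, 3L, 4L, 5L, 6L);
    // With addParens=true and notIn=false, each generated statement is shaped like
    //   delete from TXN_COMPONENTS where (TC_TXNID in (...) OR TC_TXNID in (...))
    TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, txnIds, "TC_TXNID", true, false);
    queries.forEach(System.out::println);
  }
}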
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/JavaUtils.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/JavaUtils.java
index 81f8a8518d..40f739301e 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/JavaUtils.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/JavaUtils.java
@@ -17,7 +17,16 @@
*/
package org.apache.hadoop.hive.metastore.utils;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+
public class JavaUtils {
+ public static final Logger LOG = LoggerFactory.getLogger(JavaUtils.class);
+
/**
* Standard way of getting classloader in Hive code (outside of Hadoop).
*
@@ -34,4 +43,41 @@ public static ClassLoader getClassLoader() {
}
return classLoader;
}
+
+ @SuppressWarnings(value = "unchecked")
+ public static <T> Class<? extends T> getClass(String className, Class<T> clazz)
+ throws MetaException {
+ try {
+ return (Class<? extends T>) Class.forName(className, true, getClassLoader());
+ } catch (ClassNotFoundException e) {
+ throw new MetaException(className + " class not found");
+ }
+ }
+
+ /**
+ * @return name of current host
+ */
+ public static String hostname() {
+ try {
+ return InetAddress.getLocalHost().getHostName();
+ } catch (UnknownHostException e) {
+ LOG.error("Unable to resolve my host name " + e.getMessage());
+ throw new RuntimeException(e);
+ }
+ }
+
+ /**
+ * Utility method for ACID to normalize logging info. Matches
+ * org.apache.hadoop.hive.metastore.api.LockRequest#toString
+ */
+ public static String lockIdToString(long extLockId) {
+ return "lockid:" + extLockId;
+ }
+ /**
+ * Utility method for ACID to normalize logging info. Matches
+ * org.apache.hadoop.hive.metastore.api.LockResponse#toString
+ */
+ public static String txnIdToString(long txnId) {
+ return "txnid:" + txnId;
+ }
}
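
A sketch of the new JavaUtils.getClass helper in the pattern TxnUtils.getTxnStore now follows; the class name passed in is only illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.txn.TxnStore;
import org.apache.hadoop.hive.metastore.utils.JavaUtils;

public class GetClassExample {
  static TxnStore load(Configuration conf, String className) throws Exception {
    // The clazz argument pins the expected supertype for the caller;
    // the cast inside JavaUtils.getClass is unchecked.
    Class<? extends TxnStore> impl = JavaUtils.getClass(className, TxnStore.class);
    TxnStore store = impl.newInstance();
    store.setConf(conf);
    return store;
  }
}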
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/StringableMap.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/StringableMap.java
new file mode 100644
index 0000000000..b3f1749763
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/StringableMap.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.utils;
+
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Properties;
+
+/**
+ * A utility class that can convert a map of String properties to a colon-separated string,
+ * and can parse that same string format back into a map of properties.
+ */
+public class StringableMap extends HashMap<String, String> {
+
+ public StringableMap(String s) {
+ String[] parts = s.split(":", 2);
+ // the first token is the number of key/value entries that follow
+ int numElements = Integer.parseInt(parts[0]);
+ s = parts[1];
+ for (int i = 0; i < numElements; i++) {
+ parts = s.split(":", 2);
+ int len = Integer.parseInt(parts[0]);
+ String key = null;
+ if (len > 0) key = parts[1].substring(0, len);
+ parts = parts[1].substring(len).split(":", 2);
+ len = Integer.parseInt(parts[0]);
+ String value = null;
+ if (len > 0) value = parts[1].substring(0, len);
+ s = parts[1].substring(len);
+ put(key, value);
+ }
+ }
+
+ public StringableMap(Map m) {
+ super(m);
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder buf = new StringBuilder();
+ buf.append(size());
+ buf.append(':');
+ if (size() > 0) {
+ for (Map.Entry<String, String> entry : entrySet()) {
+ int length = (entry.getKey() == null) ? 0 : entry.getKey().length();
+ buf.append(length);
+ buf.append(':');
+ if (length > 0) buf.append(entry.getKey());
+ length = (entry.getValue() == null) ? 0 : entry.getValue().length();
+ buf.append(length);
+ buf.append(':');
+ if (length > 0) buf.append(entry.getValue());
+ }
+ }
+ return buf.toString();
+ }
+
+ public Properties toProperties() {
+ Properties props = new Properties();
+ props.putAll(this);
+ return props;
+ }
+}
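
A round-trip sketch of the wire format the constructor above parses (entry count, then a length-prefixed key and value per entry, all colon-delimited):

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.hive.metastore.utils.StringableMap;

public class StringableMapExample {
  public static void main(String[] args) {
    Map<String, String> src = new HashMap<>();
    src.put("a", "b");
    StringableMap m = new StringableMap(src);
    String encoded = m.toString(); // "1:1:a1:b" -> one entry, key "a" (length 1), value "b" (length 1)
    StringableMap decoded = new StringableMap(encoded);
    System.out.println(decoded.equals(m));      // true
    System.out.println(decoded.toProperties()); // {a=b} as java.util.Properties
  }
}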
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/datasource/TestDataSourceProviderFactory.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/datasource/TestDataSourceProviderFactory.java
similarity index 79%
rename from metastore/src/test/org/apache/hadoop/hive/metastore/datasource/TestDataSourceProviderFactory.java
rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/datasource/TestDataSourceProviderFactory.java
index daea544c71..3f21ed15d4 100644
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/datasource/TestDataSourceProviderFactory.java
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/datasource/TestDataSourceProviderFactory.java
@@ -19,7 +19,9 @@
import com.jolbox.bonecp.BoneCPDataSource;
import com.zaxxer.hikari.HikariDataSource;
-import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
@@ -29,13 +31,13 @@
public class TestDataSourceProviderFactory {
- private HiveConf conf;
+ private Configuration conf;
@Before
public void init() {
- conf = new HiveConf();
- conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_USER_NAME, "dummyUser");
- conf.setVar(HiveConf.ConfVars.METASTOREPWD, "dummyPass");
+ conf = MetastoreConf.newMetastoreConf();
+ MetastoreConf.setVar(conf, ConfVars.CONNECTION_USER_NAME, "dummyUser");
+ MetastoreConf.setVar(conf, ConfVars.PWD, "dummyPass");
}
@Test
@@ -44,7 +46,7 @@ public void testNoDataSourceCreatedWithoutProps() throws SQLException {
DataSourceProvider dsp = DataSourceProviderFactory.getDataSourceProvider(conf);
Assert.assertNull(dsp);
- conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_POOLING_TYPE, BoneCPDataSourceProvider.BONECP);
+ MetastoreConf.setVar(conf, ConfVars.CONNECTION_POOLING_TYPE, BoneCPDataSourceProvider.BONECP);
dsp = DataSourceProviderFactory.getDataSourceProvider(conf);
Assert.assertNull(dsp);
@@ -53,7 +55,7 @@ public void testNoDataSourceCreatedWithoutProps() throws SQLException {
@Test
public void testCreateBoneCpDataSource() throws SQLException {
- conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_POOLING_TYPE, BoneCPDataSourceProvider.BONECP);
+ MetastoreConf.setVar(conf, ConfVars.CONNECTION_POOLING_TYPE, BoneCPDataSourceProvider.BONECP);
conf.set(BoneCPDataSourceProvider.BONECP + ".firstProp", "value");
conf.set(BoneCPDataSourceProvider.BONECP + ".secondProp", "value");
@@ -67,7 +69,7 @@ public void testCreateBoneCpDataSource() throws SQLException {
@Test
public void testSetBoneCpStringProperty() throws SQLException {
- conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_POOLING_TYPE, BoneCPDataSourceProvider.BONECP);
+ MetastoreConf.setVar(conf, ConfVars.CONNECTION_POOLING_TYPE, BoneCPDataSourceProvider.BONECP);
conf.set(BoneCPDataSourceProvider.BONECP + ".initSQL", "select 1 from dual");
DataSourceProvider dsp = DataSourceProviderFactory.getDataSourceProvider(conf);
@@ -81,7 +83,7 @@ public void testSetBoneCpStringProperty() throws SQLException {
@Test
public void testSetBoneCpNumberProperty() throws SQLException {
- conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_POOLING_TYPE, BoneCPDataSourceProvider.BONECP);
+ MetastoreConf.setVar(conf, ConfVars.CONNECTION_POOLING_TYPE, BoneCPDataSourceProvider.BONECP);
conf.set(BoneCPDataSourceProvider.BONECP + ".acquireRetryDelayInMs", "599");
DataSourceProvider dsp = DataSourceProviderFactory.getDataSourceProvider(conf);
@@ -95,7 +97,7 @@ public void testSetBoneCpNumberProperty() throws SQLException {
@Test
public void testSetBoneCpBooleanProperty() throws SQLException {
- conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_POOLING_TYPE, BoneCPDataSourceProvider.BONECP);
+ MetastoreConf.setVar(conf, ConfVars.CONNECTION_POOLING_TYPE, BoneCPDataSourceProvider.BONECP);
conf.set(BoneCPDataSourceProvider.BONECP + ".disableJMX", "true");
DataSourceProvider dsp = DataSourceProviderFactory.getDataSourceProvider(conf);
@@ -109,7 +111,7 @@ public void testSetBoneCpBooleanProperty() throws SQLException {
@Test
public void testCreateHikariCpDataSource() throws SQLException {
- conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_POOLING_TYPE, HikariCPDataSourceProvider.HIKARI);
+ MetastoreConf.setVar(conf, ConfVars.CONNECTION_POOLING_TYPE, HikariCPDataSourceProvider.HIKARI);
// This is needed to prevent the HikariDataSource from trying to connect to the DB
conf.set(HikariCPDataSourceProvider.HIKARI + ".initializationFailTimeout", "-1");
@@ -123,7 +125,7 @@ public void testCreateHikariCpDataSource() throws SQLException {
@Test
public void testSetHikariCpStringProperty() throws SQLException {
- conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_POOLING_TYPE, HikariCPDataSourceProvider.HIKARI);
+ MetastoreConf.setVar(conf, ConfVars.CONNECTION_POOLING_TYPE, HikariCPDataSourceProvider.HIKARI);
conf.set(HikariCPDataSourceProvider.HIKARI + ".connectionInitSql", "select 1 from dual");
conf.set(HikariCPDataSourceProvider.HIKARI + ".initializationFailTimeout", "-1");
@@ -138,7 +140,7 @@ public void testSetHikariCpStringProperty() throws SQLException {
@Test
public void testSetHikariCpNumberProperty() throws SQLException {
- conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_POOLING_TYPE, HikariCPDataSourceProvider.HIKARI);
+ MetastoreConf.setVar(conf, ConfVars.CONNECTION_POOLING_TYPE, HikariCPDataSourceProvider.HIKARI);
conf.set(HikariCPDataSourceProvider.HIKARI + ".idleTimeout", "59999");
conf.set(HikariCPDataSourceProvider.HIKARI + ".initializationFailTimeout", "-1");
@@ -153,7 +155,7 @@ public void testSetHikariCpNumberProperty() throws SQLException {
@Test
public void testSetHikariCpBooleanProperty() throws SQLException {
- conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_POOLING_TYPE, HikariCPDataSourceProvider.HIKARI);
+ MetastoreConf.setVar(conf, ConfVars.CONNECTION_POOLING_TYPE, HikariCPDataSourceProvider.HIKARI);
conf.set(HikariCPDataSourceProvider.HIKARI + ".allowPoolSuspension", "false");
conf.set(HikariCPDataSourceProvider.HIKARI + ".initializationFailTimeout", "-1");
@@ -164,10 +166,5 @@ public void testSetHikariCpBooleanProperty() throws SQLException {
Assert.assertTrue(ds instanceof HikariDataSource);
Assert.assertEquals(false, ((HikariDataSource)ds).isAllowPoolSuspension());
}
- @Test(expected = IllegalArgumentException.class)
- public void testBoneCPConfigCannotBeSet() {
- conf.addToRestrictList(BoneCPDataSourceProvider.BONECP);
- conf.verifyAndSet(BoneCPDataSourceProvider.BONECP + ".disableJMX", "true");
- }
}
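
For reference, a configuration sketch matching what these tests exercise: pool-implementation-specific settings travel in the Configuration under the pool name as a prefix (the property names below are stock HikariCP settings, used illustratively):

import javax.sql.DataSource;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
import org.apache.hadoop.hive.metastore.datasource.DataSourceProvider;
import org.apache.hadoop.hive.metastore.datasource.DataSourceProviderFactory;
import org.apache.hadoop.hive.metastore.datasource.HikariCPDataSourceProvider;

public class PoolConfigExample {
  static DataSource build() throws java.sql.SQLException {
    Configuration conf = MetastoreConf.newMetastoreConf();
    MetastoreConf.setVar(conf, ConfVars.CONNECTION_POOLING_TYPE, HikariCPDataSourceProvider.HIKARI);
    conf.set(HikariCPDataSourceProvider.HIKARI + ".maximumPoolSize", "10");
    // keep HikariCP from eagerly opening a DB connection, as the tests above do
    conf.set(HikariCPDataSourceProvider.HIKARI + ".initializationFailTimeout", "-1");
    DataSourceProvider dsp = DataSourceProviderFactory.getDataSourceProvider(conf);
    return dsp.create(conf);
  }
}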
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandlerNegative.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/txn/TestTxnHandlerNegative.java
similarity index 87%
rename from metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandlerNegative.java
rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/txn/TestTxnHandlerNegative.java
index 639669ebe7..d8640b584b 100644
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandlerNegative.java
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/txn/TestTxnHandlerNegative.java
@@ -1,4 +1,4 @@
-/**
+/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -17,9 +17,10 @@
*/
package org.apache.hadoop.hive.metastore.txn;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import org.apache.hadoop.hive.conf.HiveConf;
import org.junit.Test;
import static junit.framework.Assert.assertNotNull;
@@ -35,8 +36,8 @@
*/
@Test
public void testBadConnection() throws Exception {
- HiveConf conf = new HiveConf();
- conf.setVar(HiveConf.ConfVars.METASTORECONNECTURLKEY, "blah");
+ Configuration conf = MetastoreConf.newMetastoreConf();
+ MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CONNECTURLKEY, "blah");
RuntimeException e = null;
try {
TxnUtils.getTxnStore(conf);
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnUtils.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/txn/TestTxnUtils.java
similarity index 98%
rename from metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnUtils.java
rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/txn/TestTxnUtils.java
index 1497c00e5d..7ff84067fa 100644
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnUtils.java
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/txn/TestTxnUtils.java
@@ -1,4 +1,4 @@
-/**
+/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -17,9 +17,11 @@
*/
package org.apache.hadoop.hive.metastore.txn;
-import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.DatabaseProduct;
import org.apache.hadoop.hive.metastore.tools.SQLGenerator;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
@@ -35,14 +37,14 @@
* Tests for TxnUtils
*/
public class TestTxnUtils {
- private HiveConf conf;
+ private Configuration conf;
public TestTxnUtils() throws Exception {
}
@Test
public void testBuildQueryWithINClause() throws Exception {
- List<String> queries = new ArrayList<String>();
+ List<String> queries = new ArrayList<>();
StringBuilder prefix = new StringBuilder();
StringBuilder suffix = new StringBuilder();
@@ -53,9 +55,9 @@ public void testBuildQueryWithINClause() throws Exception {
// Case 1 - Max in list members: 10; Max query string length: 1KB
// The first query happens to have 2 full batches.
- conf.setIntVar(HiveConf.ConfVars.METASTORE_DIRECT_SQL_MAX_QUERY_LENGTH, 1);
- conf.setIntVar(HiveConf.ConfVars.METASTORE_DIRECT_SQL_MAX_ELEMENTS_IN_CLAUSE, 10);
- List<Long> inList = new ArrayList<Long>();
+ MetastoreConf.setLongVar(conf, ConfVars.DIRECT_SQL_MAX_QUERY_LENGTH, 1);
+ MetastoreConf.setLongVar(conf, ConfVars.DIRECT_SQL_MAX_ELEMENTS_IN_CLAUSE, 10);
+ List<Long> inList = new ArrayList<>();
for (long i = 1; i <= 200; i++) {
inList.add(i);
}
@@ -72,8 +74,8 @@ public void testBuildQueryWithINClause() throws Exception {
runAgainstDerby(queries);
// Case 3.1 - Max in list members: 1000, Max query string length: 1KB, and exact 1000 members in a single IN clause
- conf.setIntVar(HiveConf.ConfVars.METASTORE_DIRECT_SQL_MAX_QUERY_LENGTH, 1);
- conf.setIntVar(HiveConf.ConfVars.METASTORE_DIRECT_SQL_MAX_ELEMENTS_IN_CLAUSE, 1000);
+ MetastoreConf.setLongVar(conf, ConfVars.DIRECT_SQL_MAX_QUERY_LENGTH, 1);
+ MetastoreConf.setLongVar(conf, ConfVars.DIRECT_SQL_MAX_ELEMENTS_IN_CLAUSE, 1000);
queries.clear();
for (long i = 202; i <= 1000; i++) {
inList.add(i);
@@ -83,8 +85,8 @@ public void testBuildQueryWithINClause() throws Exception {
runAgainstDerby(queries);
// Case 3.2 - Max in list members: 1000, Max query string length: 10KB, and exact 1000 members in a single IN clause
- conf.setIntVar(HiveConf.ConfVars.METASTORE_DIRECT_SQL_MAX_QUERY_LENGTH, 10);
- conf.setIntVar(HiveConf.ConfVars.METASTORE_DIRECT_SQL_MAX_ELEMENTS_IN_CLAUSE, 1000);
+ MetastoreConf.setLongVar(conf, ConfVars.DIRECT_SQL_MAX_QUERY_LENGTH, 10);
+ MetastoreConf.setLongVar(conf, ConfVars.DIRECT_SQL_MAX_ELEMENTS_IN_CLAUSE, 1000);
queries.clear();
TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, inList, "TXN_ID", true, false);
Assert.assertEquals(1, queries.size());
@@ -94,12 +96,12 @@ public void testBuildQueryWithINClause() throws Exception {
for (long i = 1001; i <= 2000; i++) {
inList.add(i);
}
- conf.setIntVar(HiveConf.ConfVars.METASTORE_DIRECT_SQL_MAX_QUERY_LENGTH, 1);
+ MetastoreConf.setLongVar(conf, ConfVars.DIRECT_SQL_MAX_QUERY_LENGTH, 1);
queries.clear();
TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, inList, "TXN_ID", true, false);
Assert.assertEquals(2, queries.size());
runAgainstDerby(queries);
- conf.setIntVar(HiveConf.ConfVars.METASTORE_DIRECT_SQL_MAX_QUERY_LENGTH, 10);
+ MetastoreConf.setLongVar(conf, ConfVars.DIRECT_SQL_MAX_QUERY_LENGTH, 10);
queries.clear();
TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, inList, "TXN_ID", true, false);
Assert.assertEquals(1, queries.size());
@@ -165,7 +167,7 @@ public void testSQLGenerator() throws Exception {
Assert.assertEquals("Wrong stmt",
"insert all into colors(name, category) values('yellow', 1) into colors(name, category) values('red', 2) into colors(name, category) values('orange', 3) select * from dual", sql.get(0));
- for(int i = 0; i < conf.getIntVar(HiveConf.ConfVars.METASTORE_DIRECT_SQL_MAX_ELEMENTS_VALUES_CLAUSE); i++) {
+ for(int i = 0; i < MetastoreConf.getIntVar(conf, ConfVars.DIRECT_SQL_MAX_ELEMENTS_VALUES_CLAUSE); i++) {
rows.add("\'G\'," + i);
}
sql = sqlGenerator.createInsertValuesStmt("colors(name, category)", rows);
@@ -185,7 +187,7 @@ public void testSQLGenerator() throws Exception {
sql = sqlGenerator.createInsertValuesStmt("colors(name, category)", rows);
Assert.assertEquals("Number of stmts", 1, sql.size());
Assert.assertEquals("Wrong stmt", "insert into colors(name, category) values('yellow', 1),('red', 2),('orange', 3)", sql.get(0));
- for(int i = 0; i < conf.getIntVar(HiveConf.ConfVars.METASTORE_DIRECT_SQL_MAX_ELEMENTS_VALUES_CLAUSE); i++) {
+ for(int i = 0; i < MetastoreConf.getIntVar(conf, ConfVars.DIRECT_SQL_MAX_ELEMENTS_VALUES_CLAUSE); i++) {
rows.add("\'G\'," + i);
}
sql = sqlGenerator.createInsertValuesStmt("colors(name, category)", rows);
@@ -203,7 +205,7 @@ public void testSQLGenerator() throws Exception {
@Before
public void setUp() throws Exception {
tearDown();
- conf = new HiveConf(this.getClass());
+ conf = MetastoreConf.newMetastoreConf();
TxnDbUtil.setConfValues(conf);
TxnDbUtil.prepDb();
}
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestValidCompactorTxnList.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/txn/TestValidCompactorTxnList.java
similarity index 99%
rename from metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestValidCompactorTxnList.java
rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/txn/TestValidCompactorTxnList.java
index 91d621536e..e7aadfd486 100644
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestValidCompactorTxnList.java
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/txn/TestValidCompactorTxnList.java
@@ -1,4 +1,4 @@
-/**
+/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
diff --git a/storage-api/pom.xml b/storage-api/pom.xml
index d0bf08813e..b16652f6ba 100644
--- a/storage-api/pom.xml
+++ b/storage-api/pom.xml
@@ -31,6 +31,7 @@
<commons-lang.version>2.6</commons-lang.version>
+ <commons-logging.version>1.1.3</commons-logging.version>
<guava.version>14.0.1</guava.version>
<hadoop.version>2.8.0</hadoop.version>
<junit.version>4.11</junit.version>
@@ -115,6 +116,12 @@
<version>${junit.version}</version>
<scope>test</scope>
+