diff --git common/src/java/org/apache/hadoop/hive/common/FileUtils.java common/src/java/org/apache/hadoop/hive/common/FileUtils.java
index ff09dd835c..e0d9785363 100644
--- common/src/java/org/apache/hadoop/hive/common/FileUtils.java
+++ common/src/java/org/apache/hadoop/hive/common/FileUtils.java
@@ -87,27 +87,6 @@ public boolean accept(Path p) {
}
};
- public static final PathFilter SNAPSHOT_DIR_PATH_FILTER = new PathFilter() {
- @Override
- public boolean accept(Path p) {
- return ".snapshot".equalsIgnoreCase(p.getName());
- }
- };
-
- /**
- * Check if the path contains a subdirectory named '.snapshot'
- * @param p path to check
- * @param fs filesystem of the path
- * @return true if p contains a subdirectory named '.snapshot'
- * @throws IOException
- */
- public static boolean pathHasSnapshotSubDir(Path p, FileSystem fs) throws IOException {
- // Hadoop is missing a public API to check for snapshotable directories. Check with the directory name
- // until a more appropriate API is provided by HDFS-12257.
- final FileStatus[] statuses = fs.listStatus(p, FileUtils.SNAPSHOT_DIR_PATH_FILTER);
- return statuses != null && statuses.length != 0;
- }
-
/**
* Variant of Path.makeQualified that qualifies the input path against the default file system
* indicated by the configuration
diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestPartitionExpressionProxyDefault.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestPartitionExpressionProxyDefault.java
new file mode 100644
index 0000000000..e654c02d8e
--- /dev/null
+++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestPartitionExpressionProxyDefault.java
@@ -0,0 +1,40 @@
+package org.apache.hadoop.hive.metastore;
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.ql.optimizer.ppr.PartitionExpressionForMetastore;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Make sure that when HiveMetaStore is instantiated, the proper default PartitionExpressionProxy
+ * instance is instantiated.
+ */
+public class TestPartitionExpressionProxyDefault {
+
+ @Test
+ public void checkPartitionExpressionProxy() throws MetaException {
+ Configuration conf = MetastoreConf.newMetastoreConf();
+ HiveMetaStore.HMSHandler hms = new HiveMetaStore.HMSHandler("for testing", conf, true);
+ Assert.assertEquals(PartitionExpressionForMetastore.class,
+ hms.getExpressionProxy().getClass());
+ }
+}
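For context on the assertion above: HMSHandler picks the proxy class from configuration, so PartitionExpressionForMetastore is only the default when nothing overrides it. A minimal sketch of overriding the proxy through the same MetastoreConf.ConfVars.EXPRESSION_PROXY_CLASS setting that appears in the removed TestObjectStore2 further down; the implementation class name here is hypothetical:

    Configuration conf = MetastoreConf.newMetastoreConf();
    // Swap in a custom PartitionExpressionProxy (hypothetical class name, shown
    // only to illustrate the configuration knob the test relies on).
    MetastoreConf.setVar(conf, MetastoreConf.ConfVars.EXPRESSION_PROXY_CLASS,
        "org.example.MyPartitionExpressionProxy");
    // The handler instantiates the configured class instead of the default.
    HiveMetaStore.HMSHandler hms = new HiveMetaStore.HMSHandler("for testing", conf, true);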
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
index a4917898e4..939ae21e5f 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
@@ -18,15 +18,12 @@
package org.apache.hadoop.hive.metastore;
-import java.io.File;
import java.io.IOException;
import java.lang.reflect.Constructor;
-import java.lang.reflect.InvocationTargetException;
-import java.net.URL;
-import java.net.URLClassLoader;
+import java.net.InetSocketAddress;
+import java.net.ServerSocket;
+import java.net.Socket;
import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
@@ -38,13 +35,8 @@
import java.util.regex.Matcher;
import java.util.regex.Pattern;
-import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.hive.metastore.api.Decimal;
-import org.apache.hadoop.hive.metastore.api.Order;
-import org.apache.hadoop.hive.metastore.api.SkewedInfo;
import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
-import org.apache.hadoop.hive.shims.ShimLoader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
@@ -54,28 +46,18 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hive.common.JavaUtils;
-import org.apache.hadoop.hive.common.StatsSetupConst;
import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
-import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler;
import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.SerDeInfo;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
-import org.apache.hadoop.hive.metastore.columnstats.merge.ColumnStatsMerger;
-import org.apache.hadoop.hive.metastore.columnstats.merge.ColumnStatsMergerFactory;
-import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
import org.apache.hadoop.hive.serde.serdeConstants;
import org.apache.hadoop.hive.serde2.Deserializer;
import org.apache.hadoop.hive.serde2.SerDeException;
import org.apache.hadoop.hive.serde2.SerDeUtils;
-import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
import org.apache.hadoop.hive.serde2.objectinspector.ListObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.MapObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
@@ -83,16 +65,9 @@
import org.apache.hadoop.hive.serde2.objectinspector.StructField;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
-import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
import org.apache.hadoop.security.SaslRpcServer;
-import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;
-import org.apache.hadoop.security.authorize.ProxyUsers;
-import org.apache.hadoop.util.MachineList;
-import org.apache.hive.common.util.HiveStringUtils;
import org.apache.hive.common.util.ReflectionUtil;
-import javax.annotation.Nullable;
-
public class MetaStoreUtils {
private static final Logger LOG = LoggerFactory.getLogger("hive.log");
@@ -105,241 +80,20 @@
// HIVE_SUPPORT_SPECICAL_CHARACTERS_IN_TABLE_NAMES in HiveConf as well.
public static final char[] specialCharactersInTableNames = new char[] { '/' };
- public static Table createColumnsetSchema(String name, List<String> columns,
- List<String> partCols, Configuration conf) throws MetaException {
-
- if (columns == null) {
- throw new MetaException("columns not specified for table " + name);
- }
-
- Table tTable = new Table();
- tTable.setTableName(name);
- tTable.setSd(new StorageDescriptor());
- StorageDescriptor sd = tTable.getSd();
- sd.setSerdeInfo(new SerDeInfo());
- SerDeInfo serdeInfo = sd.getSerdeInfo();
- serdeInfo.setSerializationLib(LazySimpleSerDe.class.getName());
- serdeInfo.setParameters(new HashMap<String, String>());
- serdeInfo.getParameters().put(org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT,
- Warehouse.DEFAULT_SERIALIZATION_FORMAT);
-
- List<FieldSchema> fields = new ArrayList<FieldSchema>(columns.size());
- sd.setCols(fields);
- for (String col : columns) {
- FieldSchema field = new FieldSchema(col,
- org.apache.hadoop.hive.serde.serdeConstants.STRING_TYPE_NAME, "'default'");
- fields.add(field);
- }
-
- tTable.setPartitionKeys(new ArrayList<FieldSchema>());
- for (String partCol : partCols) {
- FieldSchema part = new FieldSchema();
- part.setName(partCol);
- part.setType(org.apache.hadoop.hive.serde.serdeConstants.STRING_TYPE_NAME); // default
- // partition
- // key
- tTable.getPartitionKeys().add(part);
- }
- sd.setNumBuckets(-1);
- return tTable;
- }
-
- /**
- * recursiveDelete
- *
- * just recursively deletes a dir - you'd think Java would have something to
- * do this??
- *
- * @param f
- * - the file/dir to delete
- * @exception IOException
- * propogate f.delete() exceptions
- *
- */
- static public void recursiveDelete(File f) throws IOException {
- if (f.isDirectory()) {
- File fs[] = f.listFiles();
- for (File subf : fs) {
- recursiveDelete(subf);
- }
- }
- if (!f.delete()) {
- throw new IOException("could not delete: " + f.getPath());
- }
- }
-
- /**
- * @param partParams
- * @return True if the passed Parameters Map contains values for all "Fast Stats".
- */
- private static boolean containsAllFastStats(Map<String, String> partParams) {
- for (String stat : StatsSetupConst.fastStats) {
- if (!partParams.containsKey(stat)) {
- return false;
- }
- }
- return true;
- }
-
- static boolean updateTableStatsFast(Database db, Table tbl, Warehouse wh,
- boolean madeDir, EnvironmentContext environmentContext) throws MetaException {
- return updateTableStatsFast(db, tbl, wh, madeDir, false, environmentContext);
- }
-
- private static boolean updateTableStatsFast(Database db, Table tbl, Warehouse wh,
- boolean madeDir, boolean forceRecompute, EnvironmentContext environmentContext) throws MetaException {
- if (tbl.getPartitionKeysSize() == 0) {
- // Update stats only when unpartitioned
- FileStatus[] fileStatuses = wh.getFileStatusesForUnpartitionedTable(db, tbl);
- return updateTableStatsFast(tbl, fileStatuses, madeDir, forceRecompute, environmentContext);
- } else {
- return false;
- }
- }
-
- /**
- * Updates the numFiles and totalSize parameters for the passed Table by querying
- * the warehouse if the passed Table does not already have values for these parameters.
- * @param tbl
- * @param fileStatus
- * @param newDir if true, the directory was just created and can be assumed to be empty
- * @param forceRecompute Recompute stats even if the passed Table already has
- * these parameters set
- * @return true if the stats were updated, false otherwise
- */
- public static boolean updateTableStatsFast(Table tbl, FileStatus[] fileStatus, boolean newDir,
- boolean forceRecompute, EnvironmentContext environmentContext) throws MetaException {
-
- Map<String, String> params = tbl.getParameters();
-
- if ((params!=null) && params.containsKey(StatsSetupConst.DO_NOT_UPDATE_STATS)){
- boolean doNotUpdateStats = Boolean.valueOf(params.get(StatsSetupConst.DO_NOT_UPDATE_STATS));
- params.remove(StatsSetupConst.DO_NOT_UPDATE_STATS);
- tbl.setParameters(params); // to make sure we remove this marker property
- if (doNotUpdateStats){
- return false;
- }
- }
-
- boolean updated = false;
- if (forceRecompute ||
- params == null ||
- !containsAllFastStats(params)) {
- if (params == null) {
- params = new HashMap<String, String>();
- }
- if (!newDir) {
- // The table location already exists and may contain data.
- // Let's try to populate those stats that don't require full scan.
- LOG.info("Updating table stats fast for " + tbl.getTableName());
- populateQuickStats(fileStatus, params);
- LOG.info("Updated size of table " + tbl.getTableName() +" to "+ params.get(StatsSetupConst.TOTAL_SIZE));
- if (environmentContext != null
- && environmentContext.isSetProperties()
- && StatsSetupConst.TASK.equals(environmentContext.getProperties().get(
- StatsSetupConst.STATS_GENERATED))) {
- StatsSetupConst.setBasicStatsState(params, StatsSetupConst.TRUE);
- } else {
- StatsSetupConst.setBasicStatsState(params, StatsSetupConst.FALSE);
- }
- }
- tbl.setParameters(params);
- updated = true;
- }
- return updated;
- }
-
public static void populateQuickStats(FileStatus[] fileStatus, Map<String, String> params) {
- int numFiles = 0;
- long tableSize = 0L;
- String s = "LOG14535 Populating quick stats for: ";
- for (FileStatus status : fileStatus) {
- s += status.getPath() + ", ";
- // don't take directories into account for quick stats
- if (!status.isDir()) {
- tableSize += status.getLen();
- numFiles += 1;
- }
- }
- LOG.info(s/*, new Exception()*/);
- params.put(StatsSetupConst.NUM_FILES, Integer.toString(numFiles));
- params.put(StatsSetupConst.TOTAL_SIZE, Long.toString(tableSize));
+ org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.populateQuickStats(fileStatus, params);
}
- static boolean updatePartitionStatsFast(Partition part, Warehouse wh, EnvironmentContext environmentContext)
- throws MetaException {
- return updatePartitionStatsFast(part, wh, false, false, environmentContext);
+ public static boolean updateTableStatsFast(Table tbl, FileStatus[] fileStatus, boolean newDir,
+ boolean forceRecompute, EnvironmentContext environmentContext) throws MetaException {
+ return org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.updateTableStatsFast(
+ tbl, fileStatus, newDir, forceRecompute, environmentContext);
}
- static boolean updatePartitionStatsFast(Partition part, Warehouse wh, boolean madeDir, EnvironmentContext environmentContext)
+ public static boolean updatePartitionStatsFast(Partition part, Warehouse wh, EnvironmentContext environmentContext)
throws MetaException {
- return updatePartitionStatsFast(part, wh, madeDir, false, environmentContext);
- }
-
- /**
- * Updates the numFiles and totalSize parameters for the passed Partition by querying
- * the warehouse if the passed Partition does not already have values for these parameters.
- * @param part
- * @param wh
- * @param madeDir if true, the directory was just created and can be assumed to be empty
- * @param forceRecompute Recompute stats even if the passed Partition already has
- * these parameters set
- * @return true if the stats were updated, false otherwise
- */
- private static boolean updatePartitionStatsFast(Partition part, Warehouse wh,
- boolean madeDir, boolean forceRecompute, EnvironmentContext environmentContext) throws MetaException {
- return updatePartitionStatsFast(new PartitionSpecProxy.SimplePartitionWrapperIterator(part),
- wh, madeDir, forceRecompute, environmentContext);
- }
-
- /**
- * Updates the numFiles and totalSize parameters for the passed Partition by querying
- * the warehouse if the passed Partition does not already have values for these parameters.
- * @param part
- * @param wh
- * @param madeDir if true, the directory was just created and can be assumed to be empty
- * @param forceRecompute Recompute stats even if the passed Partition already has
- * these parameters set
- * @return true if the stats were updated, false otherwise
- */
- static boolean updatePartitionStatsFast(PartitionSpecProxy.PartitionIterator part, Warehouse wh,
- boolean madeDir, boolean forceRecompute, EnvironmentContext environmentContext) throws MetaException {
- Map<String, String> params = part.getParameters();
- boolean updated = false;
- if (forceRecompute ||
- params == null ||
- !containsAllFastStats(params)) {
- if (params == null) {
- params = new HashMap<String, String>();
- }
- if (!madeDir) {
- // The partition location already existed and may contain data. Lets try to
- // populate those statistics that don't require a full scan of the data.
- LOG.warn("Updating partition stats fast for: " + part.getTableName());
- FileStatus[] fileStatus = wh.getFileStatusesForLocation(part.getLocation());
- populateQuickStats(fileStatus, params);
- LOG.warn("Updated size to " + params.get(StatsSetupConst.TOTAL_SIZE));
- updateBasicState(environmentContext, params);
- }
- part.setParameters(params);
- updated = true;
- }
- return updated;
- }
-
- private static void updateBasicState(EnvironmentContext environmentContext, Map<String, String>
- params) {
- if (params == null) {
- return;
- }
- if (environmentContext != null
- && environmentContext.isSetProperties()
- && StatsSetupConst.TASK.equals(environmentContext.getProperties().get(
- StatsSetupConst.STATS_GENERATED))) {
- StatsSetupConst.setBasicStatsState(params, StatsSetupConst.TRUE);
- } else {
- StatsSetupConst.setBasicStatsState(params, StatsSetupConst.FALSE);
- }
+ return org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.updatePartitionStatsFast(
+ part, wh, environmentContext);
}
/**
@@ -436,53 +190,6 @@ static public Deserializer getDeserializer(Configuration conf,
}
}
- static public void deleteWHDirectory(Path path, Configuration conf,
- boolean use_trash) throws MetaException {
-
- try {
- if (!path.getFileSystem(conf).exists(path)) {
- LOG.warn("drop data called on table/partition with no directory: "
- + path);
- return;
- }
-
- if (use_trash) {
-
- int count = 0;
- Path newPath = new Path("/Trash/Current"
- + path.getParent().toUri().getPath());
-
- if (path.getFileSystem(conf).exists(newPath) == false) {
- path.getFileSystem(conf).mkdirs(newPath);
- }
-
- do {
- newPath = new Path("/Trash/Current" + path.toUri().getPath() + "."
- + count);
- if (path.getFileSystem(conf).exists(newPath)) {
- count++;
- continue;
- }
- if (path.getFileSystem(conf).rename(path, newPath)) {
- break;
- }
- } while (++count < 50);
- if (count >= 50) {
- throw new MetaException("Rename failed due to maxing out retries");
- }
- } else {
- // directly delete it
- path.getFileSystem(conf).delete(path, true);
- }
- } catch (IOException e) {
- LOG.error("Got exception trying to delete data dir: " + e);
- throw new MetaException(e.getMessage());
- } catch (MetaException e) {
- LOG.error("Got exception trying to delete data dir: " + e);
- throw e;
- }
- }
-
/**
* Given a list of partition columns and a partial mapping from
* some partition columns to values the function returns the values
@@ -537,118 +244,12 @@ public static boolean validateColumnName(String name) {
return true;
}
- static public String validateTblColumns(List<FieldSchema> cols) {
- for (FieldSchema fieldSchema : cols) {
- if (!validateColumnName(fieldSchema.getName())) {
- return "name: " + fieldSchema.getName();
- }
- String typeError = validateColumnType(fieldSchema.getType());
- if (typeError != null) {
- return typeError;
- }
- }
- return null;
- }
-
- /**
- * @return true if oldType and newType are compatible.
- * Two types are compatible if we have internal functions to cast one to another.
- */
- static private boolean areColTypesCompatible(String oldType, String newType) {
-
- /*
- * RCFile default serde (ColumnarSerde) serializes the values in such a way that the
- * datatypes can be converted from string to any type. The map is also serialized as
- * a string, which can be read as a string as well. However, with any binary
- * serialization, this is not true.
- *
- * Primitive types like INT, STRING, BIGINT, etc are compatible with each other and are
- * not blocked.
- */
-
- return TypeInfoUtils.implicitConvertible(TypeInfoUtils.getTypeInfoFromTypeString(oldType),
- TypeInfoUtils.getTypeInfoFromTypeString(newType));
- }
-
public static final String TYPE_FROM_DESERIALIZER = "<derived from deserializer>";
- /**
- * validate column type
- *
- * if it is predefined, yes. otherwise no
- * @param type
- * @return
- */
- static public String validateColumnType(String type) {
- if (type.equals(TYPE_FROM_DESERIALIZER)) {
- return null;
- }
- int last = 0;
- boolean lastAlphaDigit = isValidTypeChar(type.charAt(last));
- for (int i = 1; i <= type.length(); i++) {
- if (i == type.length()
- || isValidTypeChar(type.charAt(i)) != lastAlphaDigit) {
- String token = type.substring(last, i);
- last = i;
- if (!hiveThriftTypeMap.contains(token)) {
- return "type: " + type;
- }
- break;
- }
- }
- return null;
- }
-
- private static boolean isValidTypeChar(char c) {
- return Character.isLetterOrDigit(c) || c == '_';
- }
-
- public static String validateSkewedColNames(List<String> cols) {
- if (CollectionUtils.isEmpty(cols)) {
- return null;
- }
- for (String col : cols) {
- if (!validateColumnName(col)) {
- return col;
- }
- }
- return null;
- }
-
- public static String validateSkewedColNamesSubsetCol(List<String> skewedColNames,
- List<FieldSchema> cols) {
- if (CollectionUtils.isEmpty(skewedColNames)) {
- return null;
- }
- List<String> colNames = new ArrayList<String>(cols.size());
- for (FieldSchema fieldSchema : cols) {
- colNames.add(fieldSchema.getName());
- }
- // make a copy
- List<String> copySkewedColNames = new ArrayList<String>(skewedColNames);
- // remove valid columns
- copySkewedColNames.removeAll(colNames);
- if (copySkewedColNames.isEmpty()) {
- return null;
- }
- return copySkewedColNames.toString();
- }
public static String getListType(String t) {
return "array<" + t + ">";
}
- public static String getMapType(String k, String v) {
- return "map<" + k + "," + v + ">";
- }
-
- public static void setSerdeParam(SerDeInfo sdi, Properties schema,
- String param) {
- String val = schema.getProperty(param);
- if (org.apache.commons.lang.StringUtils.isNotBlank(val)) {
- sdi.getParameters().put(param, val);
- }
- }
-
static HashMap<String, String> typeToThriftTypeMap;
static {
typeToThriftTypeMap = new HashMap<String, String>();
@@ -726,42 +327,6 @@ public static String typeToThriftType(String type) {
}
/**
- * Convert FieldSchemas to Thrift DDL + column names and column types
- *
- * @param structName
- * The name of the table
- * @param fieldSchemas
- * List of fields along with their schemas
- * @return String containing "Thrift
- * DDL#comma-separated-column-names#colon-separated-columntypes
- * Example:
- * "struct result { a string, map<int,string> b}#a,b#string:map<int,string>"
- */
- public static String getFullDDLFromFieldSchema(String structName,
- List<FieldSchema> fieldSchemas) {
- StringBuilder ddl = new StringBuilder();
- ddl.append(getDDLFromFieldSchema(structName, fieldSchemas));
- ddl.append('#');
- StringBuilder colnames = new StringBuilder();
- StringBuilder coltypes = new StringBuilder();
- boolean first = true;
- for (FieldSchema col : fieldSchemas) {
- if (first) {
- first = false;
- } else {
- colnames.append(',');
- coltypes.append(':');
- }
- colnames.append(col.getName());
- coltypes.append(col.getType());
- }
- ddl.append(colnames);
- ddl.append('#');
- ddl.append(coltypes);
- return ddl.toString();
- }
-
- /**
* Convert FieldSchemas to Thrift DDL.
*/
public static String getDDLFromFieldSchema(String structName,
@@ -1107,15 +672,131 @@ public static String getColumnCommentsFromFieldSchema(List fieldSch
return sb.toString();
}
- public static void makeDir(Path path, HiveConf hiveConf) throws MetaException {
- FileSystem fs;
+ public static int startMetaStore() throws Exception {
+ return startMetaStore(HadoopThriftAuthBridge.getBridge(), null);
+ }
+
+ public static int startMetaStore(final HadoopThriftAuthBridge bridge, HiveConf conf) throws Exception {
+ int port = findFreePort();
+ startMetaStore(port, bridge, conf);
+ return port;
+ }
+
+ public static int startMetaStore(HiveConf conf) throws Exception {
+ return startMetaStore(HadoopThriftAuthBridge.getBridge(), conf);
+ }
+
+ public static void startMetaStore(final int port, final HadoopThriftAuthBridge bridge) throws Exception {
+ startMetaStore(port, bridge, null);
+ }
+
+ public static void startMetaStore(final int port,
+ final HadoopThriftAuthBridge bridge, HiveConf hiveConf)
+ throws Exception{
+ if (hiveConf == null) {
+ hiveConf = new HiveConf(HMSHandler.class);
+ }
+ final HiveConf finalHiveConf = hiveConf;
+ Thread thread = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ HiveMetaStore.startMetaStore(port, bridge, finalHiveConf);
+ } catch (Throwable e) {
+ LOG.error("Metastore Thrift Server threw an exception...",e);
+ }
+ }
+ });
+ thread.setDaemon(true);
+ thread.start();
+ loopUntilHMSReady(port);
+ }
+
+ /**
+ * A simple connect test to make sure that the metastore is up
+ * @throws Exception
+ */
+ private static void loopUntilHMSReady(int port) throws Exception {
+ int retries = 0;
+ Exception exc = null;
+ while (true) {
+ try {
+ Socket socket = new Socket();
+ socket.connect(new InetSocketAddress(port), 5000);
+ socket.close();
+ return;
+ } catch (Exception e) {
+ if (retries++ > 60) { //give up
+ exc = e;
+ break;
+ }
+ Thread.sleep(1000);
+ }
+ }
+ // something is preventing metastore from starting
+ // print the stack from all threads for debugging purposes
+ LOG.error("Unable to connect to metastore server: " + exc.getMessage());
+ LOG.info("Printing all thread stack traces for debugging before throwing exception.");
+ LOG.info(getAllThreadStacksAsString());
+ throw exc;
+ }
+
+ private static String getAllThreadStacksAsString() {
+ Map<Thread, StackTraceElement[]> threadStacks = Thread.getAllStackTraces();
+ StringBuilder sb = new StringBuilder();
+ for (Map.Entry<Thread, StackTraceElement[]> entry : threadStacks.entrySet()) {
+ Thread t = entry.getKey();
+ sb.append(System.lineSeparator());
+ sb.append("Name: ").append(t.getName()).append(" State: ").append(t.getState());
+ addStackString(entry.getValue(), sb);
+ }
+ return sb.toString();
+ }
+
+ private static void addStackString(StackTraceElement[] stackElems, StringBuilder sb) {
+ sb.append(System.lineSeparator());
+ for (StackTraceElement stackElem : stackElems) {
+ sb.append(stackElem).append(System.lineSeparator());
+ }
+ }
+
+ /**
+ * Finds a free port on the machine.
+ *
+ * @return
+ * @throws IOException
+ */
+ public static int findFreePort() throws IOException {
+ ServerSocket socket= new ServerSocket(0);
+ int port = socket.getLocalPort();
+ socket.close();
+ return port;
+ }
+
+ /**
+ * Finds a free port on the machine, while allowing a specific
+ * port number to be excluded from the result.
+ */
+ public static int findFreePortExcepting(int portToExclude) throws IOException {
+ ServerSocket socket1 = null;
+ ServerSocket socket2 = null;
try {
- fs = path.getFileSystem(hiveConf);
- if (!fs.exists(path)) {
- fs.mkdirs(path);
+ socket1 = new ServerSocket(0);
+ socket2 = new ServerSocket(0);
+ if (socket1.getLocalPort() != portToExclude) {
+ return socket1.getLocalPort();
+ }
+ // If we're here, then socket1.getLocalPort was the port to exclude
+ // Since both sockets were open together at a point in time, we're
+ // guaranteed that socket2.getLocalPort() is not the same.
+ return socket2.getLocalPort();
+ } finally {
+ if (socket1 != null){
+ socket1.close();
+ }
+ if (socket2 != null){
+ socket2.close();
}
- } catch (IOException e) {
- throw new MetaException("Unable to : " + path);
}
}
@@ -1225,52 +906,12 @@ public static boolean isExternalTable(Table table) {
return "TRUE".equalsIgnoreCase(params.get("EXTERNAL"));
}
- /**
- * Determines whether a table is an immutable table.
- * Immutable tables are write-once/replace, and do not support append. Partitioned
- * immutable tables do support additions by way of creation of new partitions, but
- * do not allow the partitions themselves to be appended to. "INSERT INTO" will not
- * work for Immutable tables.
- *
- * @param table table of interest
- *
- * @return true if immutable
- */
- public static boolean isImmutableTable(Table table) {
- if (table == null){
- return false;
- }
- Map<String, String> params = table.getParameters();
- if (params == null) {
- return false;
- }
-
- return "TRUE".equalsIgnoreCase(params.get(hive_metastoreConstants.IS_IMMUTABLE));
- }
-
public static boolean isArchived(
org.apache.hadoop.hive.metastore.api.Partition part) {
Map<String, String> params = part.getParameters();
return "TRUE".equalsIgnoreCase(params.get(hive_metastoreConstants.IS_ARCHIVED));
}
- public static Path getOriginalLocation(
- org.apache.hadoop.hive.metastore.api.Partition part) {
- Map<String, String> params = part.getParameters();
- assert(isArchived(part));
- String originalLocation = params.get(hive_metastoreConstants.ORIGINAL_LOCATION);
- assert( originalLocation != null);
-
- return new Path(originalLocation);
- }
-
- public static boolean isNonNativeTable(Table table) {
- if (table == null || table.getParameters() == null) {
- return false;
- }
- return (table.getParameters().get(hive_metastoreConstants.META_TABLE_STORAGE) != null);
- }
-
/**
* Filter that filters out hidden files
*/
@@ -1301,29 +942,6 @@ public static boolean isDirEmpty(FileSystem fs, Path path) throws IOException {
return true;
}
- /**
- * Returns true if partial has the same values as full for all values that
- * aren't empty in partial.
- */
-
- public static boolean pvalMatches(List<String> partial, List<String> full) {
- if(partial.size() > full.size()) {
- return false;
- }
- Iterator<String> p = partial.iterator();
- Iterator<String> f = full.iterator();
-
- while(p.hasNext()) {
- String pval = p.next();
- String fval = f.next();
-
- if (pval.length() != 0 && !pval.equals(fval)) {
- return false;
- }
- }
- return true;
- }
-
public static String getIndexTableName(String dbName, String baseTblName, String indexName) {
return dbName + "__" + baseTblName + "_" + indexName + "__";
}
@@ -1342,26 +960,6 @@ public static boolean isMaterializedViewTable(Table table) {
return TableType.MATERIALIZED_VIEW.toString().equals(table.getTableType());
}
- /**
- * Given a map of partition column names to values, this creates a filter
- * string that can be used to call the *byFilter methods
- * @param m
- * @return the filter string
- */
- public static String makeFilterStringFromMap(Map<String, String> m) {
- StringBuilder filter = new StringBuilder();
- for (Entry<String, String> e : m.entrySet()) {
- String col = e.getKey();
- String val = e.getValue();
- if (filter.length() == 0) {
- filter.append(col + "=\"" + val + "\"");
- } else {
- filter.append(" and " + col + "=\"" + val + "\"");
- }
- }
- return filter.toString();
- }
-
public static boolean isView(Table table) {
if (table == null) {
return false;
@@ -1369,42 +967,6 @@ public static boolean isView(Table table) {
return TableType.VIRTUAL_VIEW.toString().equals(table.getTableType());
}
- /**
- * create listener instances as per the configuration.
- *
- * @param clazz
- * @param conf
- * @param listenerImplList
- * @return
- * @throws MetaException
- */
- static <T> List<T> getMetaStoreListeners(Class<T> clazz,
- HiveConf conf, String listenerImplList) throws MetaException {
- List<T> listeners = new ArrayList<T>();
-
- if (StringUtils.isBlank(listenerImplList)) {
- return listeners;
- }
-
- String[] listenerImpls = listenerImplList.split(",");
- for (String listenerImpl : listenerImpls) {
- try {
- T listener = (T) Class.forName(
- listenerImpl.trim(), true, JavaUtils.getClassLoader()).getConstructor(
- Configuration.class).newInstance(conf);
- listeners.add(listener);
- } catch (InvocationTargetException ie) {
- throw new MetaException("Failed to instantiate listener named: "+
- listenerImpl + ", reason: " + ie.getCause());
- } catch (Exception e) {
- throw new MetaException("Failed to instantiate listener named: "+
- listenerImpl + ", reason: " + e);
- }
- }
-
- return listeners;
- }
-
@SuppressWarnings("unchecked")
public static Class<? extends RawStore> getClass(String rawStoreClassName)
throws MetaException {
@@ -1448,24 +1010,6 @@ public static boolean isView(Table table) {
}
}
- public static void validatePartitionNameCharacters(List<String> partVals,
- Pattern partitionValidationPattern) throws MetaException {
-
- String invalidPartitionVal =
- HiveStringUtils.getPartitionValWithInvalidCharacter(partVals, partitionValidationPattern);
- if (invalidPartitionVal != null) {
- throw new MetaException("Partition value '" + invalidPartitionVal +
- "' contains a character " + "not matched by whitelist pattern '" +
- partitionValidationPattern.toString() + "'. " + "(configure with " +
- HiveConf.ConfVars.METASTORE_PARTITION_NAME_WHITELIST_PATTERN.varname + ")");
- }
- }
-
- public static boolean partitionNameHasValidCharacters(List<String> partVals,
- Pattern partitionValidationPattern) {
- return HiveStringUtils.getPartitionValWithInvalidCharacter(partVals, partitionValidationPattern) == null;
- }
-
/**
* @param schema1: The first schema to be compared
* @param schema2: The second schema to be compared
@@ -1538,97 +1082,6 @@ public static int getArchivingLevel(Partition part) throws MetaException {
return names;
}
- /**
- * Helper function to transform Nulls to empty strings.
- */
- private static final com.google.common.base.Function<String, String> transFormNullsToEmptyString
- = new com.google.common.base.Function<String, String>() {
- @Override
- public java.lang.String apply(@Nullable java.lang.String string) {
- return StringUtils.defaultString(string);
- }
- };
-
- /**
- * Create a URL from a string representing a path to a local file.
- * The path string can be just a path, or can start with file:/, file:///
- * @param onestr path string
- * @return
- */
- private static URL urlFromPathString(String onestr) {
- URL oneurl = null;
- try {
- if (onestr.startsWith("file:/")) {
- oneurl = new URL(onestr);
- } else {
- oneurl = new File(onestr).toURL();
- }
- } catch (Exception err) {
- LOG.error("Bad URL " + onestr + ", ignoring path");
- }
- return oneurl;
- }
-
- /**
- * Add new elements to the classpath.
- *
- * @param newPaths
- * Array of classpath elements
- */
- public static ClassLoader addToClassPath(ClassLoader cloader, String[] newPaths) throws Exception {
- URLClassLoader loader = (URLClassLoader) cloader;
- List<URL> curPath = Arrays.asList(loader.getURLs());
- ArrayList<URL> newPath = new ArrayList<URL>(curPath.size());
-
- // get a list with the current classpath components
- for (URL onePath : curPath) {
- newPath.add(onePath);
- }
- curPath = newPath;
-
- for (String onestr : newPaths) {
- URL oneurl = urlFromPathString(onestr);
- if (oneurl != null && !curPath.contains(oneurl)) {
- curPath.add(oneurl);
- }
- }
-
- return new URLClassLoader(curPath.toArray(new URL[0]), loader);
- }
-
- // this function will merge csOld into csNew.
- public static void mergeColStats(ColumnStatistics csNew, ColumnStatistics csOld)
- throws InvalidObjectException {
- List<ColumnStatisticsObj> list = new ArrayList<>();
- if (csNew.getStatsObj().size() != csOld.getStatsObjSize()) {
- // Some of the columns' stats are missing
- // This implies partition schema has changed. We will merge columns
- // present in both, overwrite stats for columns absent in metastore and
- // leave alone columns stats missing from stats task. This last case may
- // leave stats in stale state. This will be addressed later.
- LOG.debug("New ColumnStats size is {}, but old ColumnStats size is {}",
- csNew.getStatsObj().size(), csOld.getStatsObjSize());
- }
- // In this case, we have to find out which columns can be merged.
- Map<String, ColumnStatisticsObj> map = new HashMap<>();
- // We build a hash map from colName to object for old ColumnStats.
- for (ColumnStatisticsObj obj : csOld.getStatsObj()) {
- map.put(obj.getColName(), obj);
- }
- for (int index = 0; index < csNew.getStatsObj().size(); index++) {
- ColumnStatisticsObj statsObjNew = csNew.getStatsObj().get(index);
- ColumnStatisticsObj statsObjOld = map.get(statsObjNew.getColName());
- if (statsObjOld != null) {
- // If statsObjOld is found, we can merge.
- ColumnStatsMerger merger = ColumnStatsMergerFactory.getColumnStatsMerger(statsObjNew,
- statsObjOld);
- merger.merge(statsObjNew, statsObjOld);
- }
- list.add(statsObjNew);
- }
- csNew.setStatsObj(list);
- }
-
public static List<String> getColumnNames(List<FieldSchema> schema) {
List<String> cols = new ArrayList<>(schema.size());
for (FieldSchema fs : schema) {
@@ -1636,32 +1089,4 @@ public static void mergeColStats(ColumnStatistics csNew, ColumnStatistics csOld)
}
return cols;
}
-
- /**
- * Verify if the user is allowed to make DB notification related calls.
- * Only the superusers defined in the Hadoop proxy user settings have the permission.
- *
- * @param user the short user name
- * @param conf that contains the proxy user settings
- * @return if the user has the permission
- */
- public static boolean checkUserHasHostProxyPrivileges(String user, Configuration conf, String ipAddress) {
- DefaultImpersonationProvider sip = ProxyUsers.getDefaultImpersonationProvider();
- // Just need to initialize the ProxyUsers for the first time, given that the conf will not change on the fly
- if (sip == null) {
- ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
- sip = ProxyUsers.getDefaultImpersonationProvider();
- }
- Map<String, Collection<String>> proxyHosts = sip.getProxyHosts();
- Collection<String> hostEntries = proxyHosts.get(sip.getProxySuperuserIpConfKey(user));
- MachineList machineList = new MachineList(hostEntries);
- ipAddress = (ipAddress == null) ? StringUtils.EMPTY : ipAddress;
- return machineList.includes(ipAddress);
- }
-
- /** Duplicates AcidUtils; used in a couple places in metastore. */
- public static boolean isInsertOnlyTableParam(Map<String, String> params) {
- String transactionalProp = params.get(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES);
- return (transactionalProp != null && "insert_only".equalsIgnoreCase(transactionalProp));
- }
}
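The test helpers consolidated above (startMetaStore, findFreePort, loopUntilHMSReady) are meant to be driven together from test code. A minimal usage sketch, assuming the caller already has a HiveConf and wants clients pointed at the embedded service via HiveConf.ConfVars.METASTOREURIS:

    // Start an embedded metastore Thrift service for a test; startMetaStore(conf)
    // grabs a free port and blocks (via loopUntilHMSReady) until it accepts connections.
    HiveConf conf = new HiveConf();
    int port = MetaStoreUtils.startMetaStore(conf);
    // Point metastore clients at the freshly started service.
    conf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);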
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/SerDeStorageSchemaReader.java metastore/src/java/org/apache/hadoop/hive/metastore/SerDeStorageSchemaReader.java
new file mode 100644
index 0000000000..80fae281cc
--- /dev/null
+++ metastore/src/java/org/apache/hadoop/hive/metastore/SerDeStorageSchemaReader.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.serde2.Deserializer;
+import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.utils.StringUtils;
+
+import java.util.List;
+
+public class SerDeStorageSchemaReader implements StorageSchemaReader {
+ @Override
+ public List<FieldSchema> readSchema(Table tbl, EnvironmentContext envContext, Configuration conf)
+ throws MetaException {
+ ClassLoader orgHiveLoader = null;
+ try {
+ if (envContext != null) {
+ String addedJars = envContext.getProperties().get("hive.added.jars.path");
+ if (org.apache.commons.lang.StringUtils.isNotBlank(addedJars)) {
+ // for thread safety
+ orgHiveLoader = conf.getClassLoader();
+ ClassLoader loader = org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.addToClassPath(
+ orgHiveLoader, org.apache.commons.lang.StringUtils.split(addedJars, ","));
+ conf.setClassLoader(loader);
+ }
+ }
+
+ Deserializer s = MetaStoreUtils.getDeserializer(conf, tbl, false);
+ return MetaStoreUtils.getFieldsFromDeserializer(tbl.getTableName(), s);
+ } catch (Exception e) {
+ StringUtils.stringifyException(e);
+ throw new MetaException(e.getMessage());
+ } finally {
+ if (orgHiveLoader != null) {
+ conf.setClassLoader(orgHiveLoader);
+ }
+ }
+ }
+}
diff --git metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore2.java metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore2.java
deleted file mode 100644
index fa4e02ac79..0000000000
--- metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore2.java
+++ /dev/null
@@ -1,229 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.metastore;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.NotificationEvent;
-import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
-import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
-import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
-import org.apache.hadoop.hive.metastore.messaging.EventMessage;
-import org.apache.hadoop.hive.metastore.model.MNotificationLog;
-import org.apache.hadoop.hive.metastore.model.MNotificationNextId;
-import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
-import org.junit.Before;
-import org.junit.Ignore;
-import org.junit.Test;
-import org.slf4j.LoggerFactory;
-
-import java.util.List;
-import java.util.concurrent.BrokenBarrierException;
-import java.util.concurrent.CyclicBarrier;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-
-import static org.apache.hadoop.hive.metastore.TestOldSchema.dropAllStoreObjects;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-// Tests from TestObjectStore that can't be moved yet due to references to EventMessage. Once
-// EventMessage has been moved this should be recombined with TestObjectStore.
-
-public class TestObjectStore2 {
- private ObjectStore objectStore = null;
-
- public static class MockPartitionExpressionProxy implements PartitionExpressionProxy {
- @Override
- public String convertExprToFilter(byte[] expr) throws MetaException {
- return null;
- }
-
- @Override
- public boolean filterPartitionsByExpr(List<FieldSchema> partColumns,
- byte[] expr, String defaultPartitionName, List<String> partitionNames)
- throws MetaException {
- return false;
- }
-
- @Override
- public FileMetadataExprType getMetadataType(String inputFormat) {
- return null;
- }
-
- @Override
- public SearchArgument createSarg(byte[] expr) {
- return null;
- }
-
- @Override
- public FileFormatProxy getFileFormatProxy(FileMetadataExprType type) {
- return null;
- }
- }
-
- @Before
- public void setUp() throws Exception {
- Configuration conf = MetastoreConf.newMetastoreConf();
- MetastoreConf.setVar(conf, MetastoreConf.ConfVars.EXPRESSION_PROXY_CLASS,
- MockPartitionExpressionProxy.class.getName());
-
- objectStore = new ObjectStore();
- objectStore.setConf(conf);
- dropAllStoreObjects(objectStore);
- }
-
- /**
- * Test notification operations
- */
- // TODO MS-SPLIT uncomment once we move EventMessage over
- @Test
- public void testNotificationOps() throws InterruptedException {
- final int NO_EVENT_ID = 0;
- final int FIRST_EVENT_ID = 1;
- final int SECOND_EVENT_ID = 2;
-
- NotificationEvent event =
- new NotificationEvent(0, 0, EventMessage.EventType.CREATE_DATABASE.toString(), "");
- NotificationEventResponse eventResponse;
- CurrentNotificationEventId eventId;
-
- // Verify that there is no notifications available yet
- eventId = objectStore.getCurrentNotificationEventId();
- assertEquals(NO_EVENT_ID, eventId.getEventId());
-
- // Verify that addNotificationEvent() updates the NotificationEvent with the new event ID
- objectStore.addNotificationEvent(event);
- assertEquals(FIRST_EVENT_ID, event.getEventId());
- objectStore.addNotificationEvent(event);
- assertEquals(SECOND_EVENT_ID, event.getEventId());
-
- // Verify that objectStore fetches the latest notification event ID
- eventId = objectStore.getCurrentNotificationEventId();
- assertEquals(SECOND_EVENT_ID, eventId.getEventId());
-
- // Verify that getNextNotification() returns all events
- eventResponse = objectStore.getNextNotification(new NotificationEventRequest());
- assertEquals(2, eventResponse.getEventsSize());
- assertEquals(FIRST_EVENT_ID, eventResponse.getEvents().get(0).getEventId());
- assertEquals(SECOND_EVENT_ID, eventResponse.getEvents().get(1).getEventId());
-
- // Verify that getNextNotification(last) returns events after a specified event
- eventResponse = objectStore.getNextNotification(new NotificationEventRequest(FIRST_EVENT_ID));
- assertEquals(1, eventResponse.getEventsSize());
- assertEquals(SECOND_EVENT_ID, eventResponse.getEvents().get(0).getEventId());
-
- // Verify that getNextNotification(last) returns zero events if there are no more notifications available
- eventResponse = objectStore.getNextNotification(new NotificationEventRequest(SECOND_EVENT_ID));
- assertEquals(0, eventResponse.getEventsSize());
-
- // Verify that cleanNotificationEvents() cleans up all old notifications
- Thread.sleep(1);
- objectStore.cleanNotificationEvents(1);
- eventResponse = objectStore.getNextNotification(new NotificationEventRequest());
- assertEquals(0, eventResponse.getEventsSize());
- }
-
- @Ignore(
- "This test is here to allow testing with other databases like mysql / postgres etc\n"
- + " with user changes to the code. This cannot be run on apache derby because of\n"
- + " https://db.apache.org/derby/docs/10.10/devguide/cdevconcepts842385.html"
- )
- @Test
- public void testConcurrentAddNotifications() throws ExecutionException, InterruptedException {
-
- final int NUM_THREADS = 10;
- CyclicBarrier cyclicBarrier = new CyclicBarrier(NUM_THREADS,
- () -> LoggerFactory.getLogger("test")
- .debug(NUM_THREADS + " threads going to add notification"));
-
- Configuration conf = MetastoreConf.newMetastoreConf();
- MetastoreConf.setVar(conf, MetastoreConf.ConfVars.EXPRESSION_PROXY_CLASS,
- MockPartitionExpressionProxy.class.getName());
- /*
- Below are the properties that need to be set based on what database this test is going to be run
- */
-
-// conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_DRIVER, "com.mysql.jdbc.Driver");
-// conf.setVar(HiveConf.ConfVars.METASTORECONNECTURLKEY,
-// "jdbc:mysql://localhost:3306/metastore_db");
-// conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_USER_NAME, "");
-// conf.setVar(HiveConf.ConfVars.METASTOREPWD, "");
-
- /*
- we have to add this one manually as for tests the db is initialized via the metastoreDiretSQL
- and we don't run the schema creation sql that includes the an insert for notification_sequence
- which can be locked. the entry in notification_sequence happens via notification_event insertion.
- */
- objectStore.getPersistenceManager().newQuery(MNotificationLog.class, "eventType==''").execute();
- objectStore.getPersistenceManager().newQuery(MNotificationNextId.class, "nextEventId==-1").execute();
-
- objectStore.addNotificationEvent(
- new NotificationEvent(0, 0,
- EventMessage.EventType.CREATE_DATABASE.toString(),
- "CREATE DATABASE DB initial"));
-
- ExecutorService executorService = Executors.newFixedThreadPool(NUM_THREADS);
- for (int i = 0; i < NUM_THREADS; i++) {
- final int n = i;
-
- executorService.execute(
- () -> {
- ObjectStore store = new ObjectStore();
- store.setConf(conf);
-
- String eventType = EventMessage.EventType.CREATE_DATABASE.toString();
- NotificationEvent dbEvent =
- new NotificationEvent(0, 0, eventType,
- "CREATE DATABASE DB" + n);
- System.out.println("ADDING NOTIFICATION");
-
- try {
- cyclicBarrier.await();
- } catch (InterruptedException | BrokenBarrierException e) {
- throw new RuntimeException(e);
- }
- store.addNotificationEvent(dbEvent);
- System.out.println("FINISH NOTIFICATION");
- });
- }
- executorService.shutdown();
- assertTrue(executorService.awaitTermination(15, TimeUnit.SECONDS));
-
- // we have to setup this again as the underlying PMF keeps getting reinitialized with original
- // reference closed
- ObjectStore store = new ObjectStore();
- store.setConf(conf);
-
- NotificationEventResponse eventResponse = store.getNextNotification(
- new NotificationEventRequest());
- assertEquals(NUM_THREADS + 1, eventResponse.getEventsSize());
- long previousId = 0;
- for (NotificationEvent event : eventResponse.getEvents()) {
- assertTrue("previous:" + previousId + " current:" + event.getEventId(),
- previousId < event.getEventId());
- assertTrue(previousId + 1 == event.getEventId());
- previousId = event.getEventId();
- }
- }
-}
diff --git standalone-metastore/pom.xml standalone-metastore/pom.xml
index a2a34a5c9a..eee652806c 100644
--- standalone-metastore/pom.xml
+++ standalone-metastore/pom.xml
@@ -522,6 +522,7 @@
<groupId>org.antlr</groupId>
<artifactId>antlr3-maven-plugin</artifactId>
+ <version>${antlr.version}</version>
diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/DefaultPartitionExpressionProxy.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/DefaultPartitionExpressionProxy.java
new file mode 100644
index 0000000000..ec543be397
--- /dev/null
+++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/DefaultPartitionExpressionProxy.java
@@ -0,0 +1,57 @@
+package org.apache.hadoop.hive.metastore;
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
+
+import java.util.List;
+
+/**
+ * Default implementation of PartitionExpressionProxy. Eventually this should use the SARGs in
+ * Hive's storage-api. For now it just throws UnsupportedOperationException.
+ */
+public class DefaultPartitionExpressionProxy implements PartitionExpressionProxy {
+ @Override
+ public String convertExprToFilter(byte[] expr) throws MetaException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public boolean filterPartitionsByExpr(List<FieldSchema> partColumns, byte[] expr, String
+ defaultPartitionName, List<String> partitionNames) throws MetaException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public FileMetadataExprType getMetadataType(String inputFormat) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public FileFormatProxy getFileFormatProxy(FileMetadataExprType type) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public SearchArgument createSarg(byte[] expr) {
+ throw new UnsupportedOperationException();
+ }
+}
diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/DefaultStorageSchemaReader.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/DefaultStorageSchemaReader.java
new file mode 100644
index 0000000000..1dbfa4272c
--- /dev/null
+++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/DefaultStorageSchemaReader.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Table;
+
+import java.util.List;
+
+/**
+ * Default StorageSchemaReader. This just throws as the metastore currently doesn't know how to
+ * read schemas from storage.
+ */
+public class DefaultStorageSchemaReader implements StorageSchemaReader {
+ @Override
+ public List<FieldSchema> readSchema(Table tbl, EnvironmentContext envContext,
+ Configuration conf) throws MetaException {
+ throw new UnsupportedOperationException("Storage schema reading not supported");
+ }
+}
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
similarity index 90%
rename from metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index 8a55305647..791f549a1a 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -1,4 +1,4 @@
-/** * Licensed to the Apache Software Foundation (ASF) under one
+/* * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
@@ -20,9 +20,10 @@
import static org.apache.commons.lang.StringUtils.join;
import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_COMMENT;
import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
-import static org.apache.hadoop.hive.metastore.MetaStoreUtils.validateName;
import java.io.IOException;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
import java.nio.ByteBuffer;
import java.security.PrivilegedExceptionAction;
import java.util.AbstractMap;
@@ -66,21 +67,12 @@
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.common.FileUtils;
-import org.apache.hadoop.hive.common.LogUtils;
-import org.apache.hadoop.hive.common.LogUtils.LogInitializationException;
import org.apache.hadoop.hive.common.StatsSetupConst;
-import org.apache.hadoop.hive.common.auth.HiveAuthUtils;
-import org.apache.hadoop.hive.common.classification.InterfaceAudience;
-import org.apache.hadoop.hive.common.classification.InterfaceStability;
-import org.apache.hadoop.hive.common.cli.CommonCliOptions;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.io.HdfsUtils;
import org.apache.hadoop.hive.metastore.api.*;
import org.apache.hadoop.hive.metastore.events.AddForeignKeyEvent;
import org.apache.hadoop.hive.metastore.cache.CachedStore;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
import org.apache.hadoop.hive.metastore.events.AddIndexEvent;
import org.apache.hadoop.hive.metastore.events.AddNotNullConstraintEvent;
import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;
@@ -119,7 +111,6 @@
import org.apache.hadoop.hive.metastore.events.PreLoadPartitionDoneEvent;
import org.apache.hadoop.hive.metastore.events.PreReadDatabaseEvent;
import org.apache.hadoop.hive.metastore.events.PreReadTableEvent;
-import org.apache.hadoop.hive.metastore.filemeta.OrcFileMetadataHandler;
import org.apache.hadoop.hive.metastore.messaging.EventMessage.EventType;
import org.apache.hadoop.hive.metastore.metrics.JvmPauseMonitor;
import org.apache.hadoop.hive.metastore.metrics.Metrics;
@@ -136,15 +127,19 @@
import org.apache.hadoop.hive.metastore.txn.AcidWriteSetService;
import org.apache.hadoop.hive.metastore.txn.TxnStore;
import org.apache.hadoop.hive.metastore.txn.TxnUtils;
-import org.apache.hadoop.hive.serde2.Deserializer;
-import org.apache.hadoop.hive.serde2.SerDeException;
-import org.apache.hadoop.hive.shims.Utils;
import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.hive.metastore.utils.CommonCliOptions;
+import org.apache.hadoop.hive.metastore.utils.FileUtils;
+import org.apache.hadoop.hive.metastore.utils.HdfsUtils;
+import org.apache.hadoop.hive.metastore.utils.JavaUtils;
+import org.apache.hadoop.hive.metastore.utils.LogUtils;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.utils.MetastoreVersionInfo;
+import org.apache.hadoop.hive.metastore.utils.SecurityUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.util.ShutdownHookManager;
import org.apache.hadoop.util.StringUtils;
-import org.apache.hive.common.util.HiveStringUtils;
-import org.apache.hive.common.util.ShutdownHookManager;
import org.apache.thrift.TException;
import org.apache.thrift.TProcessor;
import org.apache.thrift.protocol.TBinaryProtocol;
@@ -189,6 +184,8 @@
@VisibleForTesting
static long TEST_TIMEOUT_VALUE = -1;
+ private static ShutdownHookManager shutdownHookMgr;
+
public static final String ADMIN = "admin";
public static final String PUBLIC = "public";
/** MM write states. */
@@ -198,8 +195,8 @@
private static MetastoreDelegationTokenManager delegationTokenManager;
private static boolean useSasl;
- public static final String NO_FILTER_STRING = "";
- public static final int UNLIMITED_MAX_PARTITIONS = -1;
+ static final String NO_FILTER_STRING = "";
+ static final int UNLIMITED_MAX_PARTITIONS = -1;
private static final class ChainedTTransportFactory extends TTransportFactory {
private final TTransportFactory parentTransFactory;
@@ -220,12 +217,13 @@ public TTransport getTransport(TTransport trans) {
public static class HMSHandler extends FacebookBase implements IHMSHandler {
public static final Logger LOG = HiveMetaStore.LOG;
- private final HiveConf hiveConf; // stores datastore (jpox) properties,
+ private final Configuration conf; // stores datastore (jpox) properties,
// right now they come from jpox.properties
private static String currentUrl;
private FileMetadataManager fileMetadataManager;
private PartitionExpressionProxy expressionProxy;
+ private StorageSchemaReader storageSchemaReader;
// Variables for metrics
// Package visible so that HMSMetricsListener can see them.
@@ -259,7 +257,7 @@ public static RawStore getRawStore() {
return threadLocalMS.get();
}
- public static void removeRawStore() {
+ static void removeRawStore() {
threadLocalMS.remove();
}
@@ -286,17 +284,17 @@ protected Configuration initialValue() {
private static ExecutorService threadPool;
- public static final Logger auditLog = LoggerFactory.getLogger(
+ static final Logger auditLog = LoggerFactory.getLogger(
HiveMetaStore.class.getName() + ".audit");
- private static final void logAuditEvent(String cmd) {
+ private static void logAuditEvent(String cmd) {
if (cmd == null) {
return;
}
UserGroupInformation ugi;
try {
- ugi = Utils.getUGI();
+ ugi = SecurityUtils.getUGI();
} catch (Exception ex) {
throw new RuntimeException(ex);
}
@@ -325,7 +323,7 @@ private static String getIPAddress() {
private static ThreadLocal<Integer> threadLocalId = new ThreadLocal<Integer>() {
@Override
protected Integer initialValue() {
- return new Integer(nextSerialNum++);
+ return nextSerialNum++;
}
};
@@ -388,17 +386,24 @@ private void notifyMetaListenersOnShutDown() {
}
}
- public static void setThreadLocalIpAddress(String ipAddress) {
+ static void setThreadLocalIpAddress(String ipAddress) {
threadLocalIpAddress.set(ipAddress);
}
// This will return null if the metastore is not being accessed from a metastore Thrift server,
// or if the TTransport being used to connect is not an instance of TSocket, or if kerberos
// is used
- public static String getThreadLocalIpAddress() {
+ static String getThreadLocalIpAddress() {
return threadLocalIpAddress.get();
}
+ // Make it possible for tests to check that the right type of PartitionExpressionProxy was
+ // instantiated.
+ @VisibleForTesting
+ PartitionExpressionProxy getExpressionProxy() {
+ return expressionProxy;
+ }
+
/**
* Use {@link #getThreadId()} instead.
* @return thread id
@@ -414,21 +419,20 @@ public int getThreadId() {
}
public HMSHandler(String name) throws MetaException {
- this(name, new HiveConf(HMSHandler.class), true);
+ this(name, MetastoreConf.newMetastoreConf(), true);
}
- public HMSHandler(String name, HiveConf conf) throws MetaException {
+ public HMSHandler(String name, Configuration conf) throws MetaException {
this(name, conf, true);
}
- public HMSHandler(String name, HiveConf conf, boolean init) throws MetaException {
+ public HMSHandler(String name, Configuration conf, boolean init) throws MetaException {
super(name);
- hiveConf = conf;
- isInTest = HiveConf.getBoolVar(hiveConf, ConfVars.HIVE_IN_TEST);
+ this.conf = conf;
+ isInTest = MetastoreConf.getBoolVar(this.conf, ConfVars.HIVE_IN_TEST);
synchronized (HMSHandler.class) {
if (threadPool == null) {
- int numThreads = HiveConf.getIntVar(conf,
- ConfVars.METASTORE_FS_HANDLER_THREADS_COUNT);
+ int numThreads = MetastoreConf.getIntVar(conf, ConfVars.FS_HANDLER_THREADS_COUNT);
threadPool = Executors.newFixedThreadPool(numThreads,
new ThreadFactoryBuilder().setDaemon(true)
.setNameFormat("HMSHandler #%d").build());
@@ -439,8 +443,13 @@ public HMSHandler(String name, HiveConf conf, boolean init) throws MetaException
}
}
- public HiveConf getHiveConf() {
- return hiveConf;
+ /**
+ * Use {@link #getConf()} instead.
+ * @return Configuration object
+ */
+ @Deprecated
+ public Configuration getHiveConf() {
+ return conf;
}
private ClassLoader classLoader;
@@ -468,32 +477,30 @@ public HiveConf getHiveConf() {
@Override
public void init() throws MetaException {
initListeners = MetaStoreUtils.getMetaStoreListeners(
- MetaStoreInitListener.class, hiveConf,
- hiveConf.getVar(HiveConf.ConfVars.METASTORE_INIT_HOOKS));
+ MetaStoreInitListener.class, conf, MetastoreConf.getVar(conf, ConfVars.INIT_HOOKS));
for (MetaStoreInitListener singleInitListener: initListeners) {
MetaStoreInitContext context = new MetaStoreInitContext();
singleInitListener.onInit(context);
}
- String alterHandlerName = hiveConf.get("hive.metastore.alter.impl",
- HiveAlterHandler.class.getName());
- alterHandler = (AlterHandler) ReflectionUtils.newInstance(MetaStoreUtils.getClass(
- alterHandlerName), hiveConf);
- wh = new Warehouse(hiveConf);
+ String alterHandlerName = MetastoreConf.getVar(conf, ConfVars.ALTER_HANDLER);
+ alterHandler = ReflectionUtils.newInstance(JavaUtils.getClass(
+ alterHandlerName, AlterHandler.class), conf);
+ wh = new Warehouse(conf);
synchronized (HMSHandler.class) {
- if (currentUrl == null || !currentUrl.equals(MetaStoreInit.getConnectionURL(hiveConf))) {
+ if (currentUrl == null || !currentUrl.equals(MetaStoreInit.getConnectionURL(conf))) {
createDefaultDB();
createDefaultRoles();
addAdminUsers();
- currentUrl = MetaStoreInit.getConnectionURL(hiveConf);
+ currentUrl = MetaStoreInit.getConnectionURL(conf);
}
}
//Start Metrics
- if (hiveConf.getBoolVar(ConfVars.METASTORE_METRICS)) {
+ if (MetastoreConf.getBoolVar(conf, ConfVars.METRICS_ENABLED)) {
LOG.info("Begin calculating metadata count metrics.");
- Metrics.initialize(hiveConf);
+ Metrics.initialize(conf);
databaseCount = Metrics.getOrCreateGauge(MetricsConstants.TOTAL_DATABASES);
tableCount = Metrics.getOrCreateGauge(MetricsConstants.TOTAL_TABLES);
partCount = Metrics.getOrCreateGauge(MetricsConstants.TOTAL_PARTITIONS);
@@ -502,46 +509,46 @@ public void init() throws MetaException {
}
preListeners = MetaStoreUtils.getMetaStoreListeners(MetaStorePreEventListener.class,
- hiveConf,
- hiveConf.getVar(HiveConf.ConfVars.METASTORE_PRE_EVENT_LISTENERS));
- preListeners.add(0, new TransactionalValidationListener(hiveConf));
- listeners = MetaStoreUtils.getMetaStoreListeners(MetaStoreEventListener.class, hiveConf,
- hiveConf.getVar(HiveConf.ConfVars.METASTORE_EVENT_LISTENERS));
- listeners.add(new SessionPropertiesListener(hiveConf));
- listeners.add(new AcidEventListener(hiveConf));
- transactionalListeners = MetaStoreUtils.getMetaStoreListeners(TransactionalMetaStoreEventListener.class,hiveConf,
- hiveConf.getVar(ConfVars.METASTORE_TRANSACTIONAL_EVENT_LISTENERS));
+ conf, MetastoreConf.getVar(conf, ConfVars.PRE_EVENT_LISTENERS));
+ preListeners.add(0, new TransactionalValidationListener(conf));
+ listeners = MetaStoreUtils.getMetaStoreListeners(MetaStoreEventListener.class, conf,
+ MetastoreConf.getVar(conf, ConfVars.EVENT_LISTENERS));
+ listeners.add(new SessionPropertiesListener(conf));
+ listeners.add(new AcidEventListener(conf));
+ transactionalListeners = MetaStoreUtils.getMetaStoreListeners(TransactionalMetaStoreEventListener.class,
+ conf, MetastoreConf.getVar(conf, ConfVars.TRANSACTIONAL_EVENT_LISTENERS));
if (Metrics.getRegistry() != null) {
- listeners.add(new HMSMetricsListener(hiveConf));
+ listeners.add(new HMSMetricsListener(conf));
}
endFunctionListeners = MetaStoreUtils.getMetaStoreListeners(
- MetaStoreEndFunctionListener.class, hiveConf,
- hiveConf.getVar(HiveConf.ConfVars.METASTORE_END_FUNCTION_LISTENERS));
+ MetaStoreEndFunctionListener.class, conf, MetastoreConf.getVar(conf, ConfVars.END_FUNCTION_LISTENERS));
String partitionValidationRegex =
- hiveConf.getVar(HiveConf.ConfVars.METASTORE_PARTITION_NAME_WHITELIST_PATTERN);
+ MetastoreConf.getVar(conf, ConfVars.PARTITION_NAME_WHITELIST_PATTERN);
if (partitionValidationRegex != null && !partitionValidationRegex.isEmpty()) {
partitionValidationPattern = Pattern.compile(partitionValidationRegex);
} else {
partitionValidationPattern = null;
}
- long cleanFreq = hiveConf.getTimeVar(ConfVars.METASTORE_EVENT_CLEAN_FREQ, TimeUnit.MILLISECONDS);
+ ThreadPool.initialize(conf);
+ long cleanFreq = MetastoreConf.getTimeVar(conf, ConfVars.EVENT_CLEAN_FREQ, TimeUnit.MILLISECONDS);
if (cleanFreq > 0) {
- // In default config, there is no timer.
- Timer cleaner = new Timer("Metastore Events Cleaner Thread", true);
- cleaner.schedule(new EventCleanerTask(this), cleanFreq, cleanFreq);
+ ThreadPool.getPool().scheduleAtFixedRate(new EventCleanerTask(this), cleanFreq,
+ cleanFreq, TimeUnit.MILLISECONDS);
}
- cleanFreq = hiveConf.getTimeVar(ConfVars.REPL_DUMPDIR_CLEAN_FREQ, TimeUnit.MILLISECONDS);
+ cleanFreq = MetastoreConf.getTimeVar(conf, ConfVars.REPL_DUMPDIR_CLEAN_FREQ,
+ TimeUnit.MILLISECONDS);
if (cleanFreq > 0) {
- // In default config, there is no timer.
- Timer cleaner = new Timer("Repl Dump Dir Cleaner Thread", true);
- cleaner.schedule(new DumpDirCleanerTask(hiveConf), cleanFreq, cleanFreq);
+ DumpDirCleanerTask ddc = new DumpDirCleanerTask();
+ ddc.setConf(conf);
+ ThreadPool.getPool().scheduleAtFixedRate(ddc, cleanFreq, cleanFreq,
+ TimeUnit.MILLISECONDS);
}
- expressionProxy = PartFilterExprUtil.createExpressionProxy(hiveConf);
- fileMetadataManager = new FileMetadataManager(this.getMS(), hiveConf);
+ expressionProxy = PartFilterExprUtil.createExpressionProxy(conf);
+ fileMetadataManager = new FileMetadataManager(this.getMS(), conf);
}
private static String addPrefix(String s) {
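A minimal sketch (illustration only, not part of the patch) of the scheduling pattern the hunk above moves to: the per-purpose java.util.Timer instances are replaced by tasks submitted to a shared scheduled pool. Only JDK classes are used here; the pool construction and the task body are stand-ins, not the patch's ThreadPool or EventCleanerTask implementations, and the frequency value is hypothetical (the real one comes from configuration).

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class CleanerSchedulingSketch {
      public static void main(String[] args) {
        // Shared scheduler standing in for ThreadPool.getPool(); daemon-thread setup omitted.
        ScheduledExecutorService pool = Executors.newScheduledThreadPool(1);
        long cleanFreq = 60_000L; // hypothetical period in milliseconds
        pool.scheduleAtFixedRate(
            () -> System.out.println("cleaning expired events"), // stands in for the cleaner task
            cleanFreq, cleanFreq, TimeUnit.MILLISECONDS);
      }
    }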
@@ -569,7 +576,7 @@ public void setConf(Configuration conf) {
public Configuration getConf() {
Configuration conf = threadLocalConf.get();
if (conf == null) {
- conf = new Configuration(hiveConf);
+ conf = new Configuration(this.conf);
threadLocalConf.set(conf);
}
return conf;
@@ -578,7 +585,7 @@ public Configuration getConf() {
private Map<String, String> getModifiedConf() {
Map<String, String> modifiedConf = threadLocalModifiedConfig.get();
if (modifiedConf == null) {
- modifiedConf = new HashMap<String, String>();
+ modifiedConf = new HashMap<>();
threadLocalModifiedConfig.set(modifiedConf);
}
return modifiedConf;
@@ -591,17 +598,18 @@ public Warehouse getWh() {
@Override
public void setMetaConf(String key, String value) throws MetaException {
- ConfVars confVar = HiveConf.getMetaConf(key);
+ ConfVars confVar = MetastoreConf.getMetaConf(key);
if (confVar == null) {
throw new MetaException("Invalid configuration key " + key);
}
- String validate = confVar.validate(value);
- if (validate != null) {
+ try {
+ confVar.validate(value);
+ } catch (IllegalArgumentException e) {
throw new MetaException("Invalid configuration value " + value + " for key " + key +
- " by " + validate);
+ " by " + e.getMessage());
}
Configuration configuration = getConf();
- String oldValue = configuration.get(key);
+ String oldValue = MetastoreConf.get(configuration, key);
// Save prev val of the key on threadLocal
Map<String, String> modifiedConf = getModifiedConf();
if (!modifiedConf.containsKey(key)) {
@@ -616,11 +624,11 @@ public void setMetaConf(String key, String value) throws MetaException {
@Override
public String getMetaConf(String key) throws MetaException {
- ConfVars confVar = HiveConf.getMetaConf(key);
+ ConfVars confVar = MetastoreConf.getMetaConf(key);
if (confVar == null) {
throw new MetaException("Invalid configuration key " + key);
}
- return getConf().get(key, confVar.getDefaultValue());
+ return getConf().get(key, confVar.getDefaultVal().toString());
}
/**
@@ -629,8 +637,6 @@ public String getMetaConf(String key) throws MetaException {
* @return the cached RawStore
* @throws MetaException
*/
- @InterfaceAudience.LimitedPrivate({"HCATALOG"})
- @InterfaceStability.Evolving
@Override
public RawStore getMS() throws MetaException {
Configuration conf = getConf();
@@ -651,30 +657,17 @@ public static RawStore getMSForConf(Configuration conf) throws MetaException {
private TxnStore getTxnHandler() {
TxnStore txn = threadLocalTxn.get();
if (txn == null) {
- txn = TxnUtils.getTxnStore(hiveConf);
+ txn = TxnUtils.getTxnStore(conf);
threadLocalTxn.set(txn);
}
return txn;
}
private static RawStore newRawStoreForConf(Configuration conf) throws MetaException {
- HiveConf hiveConf = new HiveConf(conf, HiveConf.class);
- String rawStoreClassName = hiveConf.getVar(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL);
+ Configuration newConf = new Configuration(conf);
+ String rawStoreClassName = MetastoreConf.getVar(newConf, ConfVars.RAW_STORE_IMPL);
LOG.info(addPrefix("Opening raw store with implementation class:" + rawStoreClassName));
- if (hiveConf.getBoolVar(ConfVars.METASTORE_FASTPATH)) {
- LOG.info("Fastpath, skipping raw store proxy");
- try {
- RawStore rs =
- ((Class<? extends RawStore>) MetaStoreUtils.getClass(rawStoreClassName))
- .newInstance();
- rs.setConf(hiveConf);
- return rs;
- } catch (Exception e) {
- LOG.error("Unable to instantiate raw store directly in fastpath mode", e);
- throw new RuntimeException(e);
- }
- }
- return RawStoreProxy.getProxy(hiveConf, conf, rawStoreClassName, threadLocalId.get());
+ return RawStoreProxy.getProxy(newConf, conf, rawStoreClassName, threadLocalId.get());
}
private void createDefaultDB_core(RawStore ms) throws MetaException, InvalidObjectException {
@@ -791,7 +784,7 @@ private void addAdminUsers() throws MetaException {
private void addAdminUsers_core() throws MetaException {
// now add pre-configured users to admin role
- String userStr = HiveConf.getVar(hiveConf,ConfVars.USERS_IN_ADMIN_ROLE,"").trim();
+ String userStr = MetastoreConf.getVar(conf,ConfVars.USERS_IN_ADMIN_ROLE,"").trim();
if (userStr.isEmpty()) {
LOG.info("No user is added in admin role, since config is empty");
return;
@@ -850,24 +843,23 @@ private String startFunction(String function) {
return startFunction(function, "");
}
- private String startTableFunction(String function, String db, String tbl) {
- return startFunction(function, " : db=" + db + " tbl=" + tbl);
+ private void startTableFunction(String function, String db, String tbl) {
+ startFunction(function, " : db=" + db + " tbl=" + tbl);
}
- private String startMultiTableFunction(String function, String db, List<String> tbls) {
+ private void startMultiTableFunction(String function, String db, List<String> tbls) {
String tableNames = join(tbls, ",");
- return startFunction(function, " : db=" + db + " tbls=" + tableNames);
+ startFunction(function, " : db=" + db + " tbls=" + tableNames);
}
- private String startPartitionFunction(String function, String db, String tbl,
- List<String> partVals) {
- return startFunction(function, " : db=" + db + " tbl=" + tbl
- + "[" + join(partVals, ",") + "]");
+ private void startPartitionFunction(String function, String db, String tbl,
+ List<String> partVals) {
+ startFunction(function, " : db=" + db + " tbl=" + tbl + "[" + join(partVals, ",") + "]");
}
- private String startPartitionFunction(String function, String db, String tbl,
- Map<String, String> partName) {
- return startFunction(function, " : db=" + db + " tbl=" + tbl + "partition=" + partName);
+ private void startPartitionFunction(String function, String db, String tbl,
+ Map<String, String> partName) {
+ startFunction(function, " : db=" + db + " tbl=" + tbl + "partition=" + partName);
}
private void endFunction(String function, boolean successful, Exception e) {
@@ -918,7 +910,7 @@ public void shutdown() {
private void create_database_core(RawStore ms, final Database db)
throws AlreadyExistsException, InvalidObjectException, MetaException {
- if (!validateName(db.getName(), null)) {
+ if (!MetaStoreUtils.validateName(db.getName(), null)) {
throw new InvalidObjectException(db.getName() + " is not a valid database name");
}
@@ -1020,10 +1012,7 @@ public Database get_database(final String name) throws NoSuchObjectException, Me
try {
db = get_database_core(name);
firePreEvent(new PreReadDatabaseEvent(db, this));
- } catch (MetaException e) {
- ex = e;
- throw e;
- } catch (NoSuchObjectException e) {
+ } catch (MetaException|NoSuchObjectException e) {
ex = e;
throw e;
} finally {
@@ -1038,9 +1027,7 @@ public Database get_database_core(final String name) throws NoSuchObjectExceptio
Database db = null;
try {
db = getMS().getDatabase(name);
- } catch (MetaException e) {
- throw e;
- } catch (NoSuchObjectException e) {
+ } catch (MetaException | NoSuchObjectException e) {
throw e;
} catch (Exception e) {
assert (e instanceof RuntimeException);
@@ -1050,8 +1037,7 @@ public Database get_database_core(final String name) throws NoSuchObjectExceptio
}
@Override
- public void alter_database(final String dbName, final Database newDB)
- throws NoSuchObjectException, TException, MetaException {
+ public void alter_database(final String dbName, final Database newDB) throws TException {
startFunction("alter_database" + dbName);
boolean success = false;
Exception ex = null;
@@ -1083,8 +1069,8 @@ private void drop_database_core(RawStore ms,
IOException, InvalidObjectException, InvalidInputException {
boolean success = false;
Database db = null;
- List<Path> tablePaths = new ArrayList<Path>();
- List<Path> partitionPaths = new ArrayList<Path>();
+ List<Path> tablePaths = new ArrayList<>();
+ List<Path> partitionPaths = new ArrayList<>();
Map<String, String> transactionalListenerResponses = Collections.emptyMap();
try {
ms.openTransaction();
@@ -1109,7 +1095,7 @@ private void drop_database_core(RawStore ms,
if (!wh.isWritable(path)) {
throw new MetaException("Database not dropped since " +
path + " is not writable by " +
- hiveConf.getUser());
+ SecurityUtils.getUser());
}
Path databasePath = wh.getDnsPath(wh.getDatabasePath(db));
@@ -1120,15 +1106,15 @@ private void drop_database_core(RawStore ms,
}
// drop tables before dropping db
- int tableBatchSize = HiveConf.getIntVar(hiveConf,
- ConfVars.METASTORE_BATCH_RETRIEVE_MAX);
+ int tableBatchSize = MetastoreConf.getIntVar(conf,
+ ConfVars.BATCH_RETRIEVE_MAX);
int startIndex = 0;
// retrieve the tables from the metastore in batches to alleviate memory constraints
while (startIndex < allTables.size()) {
int endIndex = Math.min(startIndex + tableBatchSize, allTables.size());
- List<Table> tables = null;
+ List<Table> tables;
try {
tables = ms.getTableObjectsByName(name, allTables.subList(startIndex, endIndex));
} catch (UnknownDBException e) {
@@ -1146,7 +1132,7 @@ private void drop_database_core(RawStore ms,
if (!wh.isWritable(tablePath.getParent())) {
throw new MetaException("Database metadata not deleted since table: " +
table.getTableName() + " has a parent location " + tablePath.getParent() +
- " which is not writable by " + hiveConf.getUser());
+ " which is not writable by " + SecurityUtils.getUser());
}
if (!isSubdirectory(databasePath, tablePath)) {
@@ -1409,7 +1395,7 @@ private void create_table_core(final RawStore ms, final Table tbl,
List notNullConstraints)
throws AlreadyExistsException, MetaException,
InvalidObjectException, NoSuchObjectException {
- if (!MetaStoreUtils.validateName(tbl.getTableName(), hiveConf)) {
+ if (!MetaStoreUtils.validateName(tbl.getTableName(), conf)) {
throw new InvalidObjectException(tbl.getTableName()
+ " is not a valid object name");
}
@@ -1479,7 +1465,7 @@ private void create_table_core(final RawStore ms, final Table tbl,
madeDir = true;
}
}
- if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVESTATSAUTOGATHER) &&
+ if (MetastoreConf.getBoolVar(conf, ConfVars.STATS_AUTO_GATHER) &&
!MetaStoreUtils.isView(tbl)) {
MetaStoreUtils.updateTableStatsFast(db, tbl, wh, madeDir, envContext);
}
@@ -1660,7 +1646,7 @@ public void drop_constraint(DropConstraintRequest req)
String dbName = req.getDbname();
String tableName = req.getTablename();
String constraintName = req.getConstraintname();
- startFunction("drop_constraint", ": " + constraintName.toString());
+ startFunction("drop_constraint", ": " + constraintName);
boolean success = false;
Exception ex = null;
RawStore ms = getMS();
@@ -1682,8 +1668,6 @@ public void drop_constraint(DropConstraintRequest req)
ex = e;
if (e instanceof MetaException) {
throw (MetaException) e;
- } else if (e instanceof InvalidObjectException) {
- throw (InvalidObjectException) e;
} else {
throw newMetaException(e);
}
@@ -1965,7 +1949,7 @@ private boolean drop_table_core(final RawStore ms, final String dbname, final St
String target = indexName == null ? "Table" : "Index table";
throw new MetaException(target + " metadata not deleted since " +
tblPath.getParent() + " is not writable by " +
- hiveConf.getUser());
+ SecurityUtils.getUser());
}
}
@@ -2094,13 +2078,13 @@ private void deletePartitionData(List partPaths, boolean ifPurge) {
String tableName, Path tablePath, List partitionKeys, boolean checkLocation)
throws MetaException, IOException, NoSuchObjectException, InvalidObjectException,
InvalidInputException {
- int partitionBatchSize = HiveConf.getIntVar(hiveConf,
- ConfVars.METASTORE_BATCH_RETRIEVE_MAX);
+ int partitionBatchSize = MetastoreConf.getIntVar(conf,
+ ConfVars.BATCH_RETRIEVE_MAX);
Path tableDnsPath = null;
if (tablePath != null) {
tableDnsPath = wh.getDnsPath(tablePath);
}
- List<Path> partPaths = new ArrayList<Path>();
+ List<Path> partPaths = new ArrayList<>();
Table tbl = ms.getTable(dbName, tableName);
// call dropPartition on each of the table's partitions to follow the
@@ -2110,7 +2094,7 @@ private void deletePartitionData(List partPaths, boolean ifPurge) {
if (partsToDelete == null || partsToDelete.isEmpty()) {
break;
}
- List<String> partNames = new ArrayList<String>();
+ List<String> partNames = new ArrayList<>();
for (Partition part : partsToDelete) {
if (checkLocation && part.getSd() != null &&
part.getSd().getLocation() != null) {
@@ -2122,7 +2106,7 @@ private void deletePartitionData(List partPaths, boolean ifPurge) {
throw new MetaException("Table metadata not deleted since the partition " +
Warehouse.makePartName(partitionKeys, part.getValues()) +
" has parent location " + partPath.getParent() + " which is not writable " +
- "by " + hiveConf.getUser());
+ "by " + SecurityUtils.getUser());
}
partPaths.add(partPath);
}
@@ -2255,7 +2239,7 @@ private void alterTableStatsForTruncate(final RawStore ms,
final String tableName,
final Table table,
final List partNames) throws Exception {
- List<Path> locations = new ArrayList<Path>();
+ List<Path> locations = new ArrayList<>();
if (partNames == null) {
if (0 != table.getPartitionKeysSize()) {
for (Partition partition : ms.getPartitions(dbName, tableName, Integer.MAX_VALUE)) {
@@ -2287,15 +2271,15 @@ public void truncate_table(final String dbName, final String tableName, List
- List<Table> tables = new ArrayList<Table>();
+ List<Table> tables = new ArrayList<>();
startMultiTableFunction("get_multi_table", dbName, tableNames);
Exception ex = null;
- int tableBatchSize = HiveConf.getIntVar(hiveConf,
- ConfVars.METASTORE_BATCH_RETRIEVE_MAX);
+ int tableBatchSize = MetastoreConf.getIntVar(conf,
+ ConfVars.BATCH_RETRIEVE_MAX);
try {
if (dbName == null || dbName.isEmpty()) {
@@ -2469,11 +2450,11 @@ public GetTablesResult get_table_objects_by_name_req(GetTablesRequest req) throw
// to break into multiple batches, remove duplicates first.
List<String> distinctTableNames = tableNames;
if (distinctTableNames.size() > tableBatchSize) {
- List<String> lowercaseTableNames = new ArrayList<String>();
+ List<String> lowercaseTableNames = new ArrayList<>();
for (String tableName : tableNames) {
- lowercaseTableNames.add(HiveStringUtils.normalizeIdentifier(tableName));
+ lowercaseTableNames.add(org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier(tableName));
}
- distinctTableNames = new ArrayList<String>(new HashSet<String>(lowercaseTableNames));
+ distinctTableNames = new ArrayList<>(new HashSet<>(lowercaseTableNames));
}
RawStore ms = getMS();
@@ -2512,7 +2493,7 @@ private void assertClientHasCapability(ClientCapabilities client,
ClientCapability value, String what, String call) throws MetaException {
if (!doesClientHaveCapability(client, value)) {
throw new MetaException("Your client does not appear to support " + what + ". To skip"
- + " capability checks, please set " + ConfVars.METASTORE_CAPABILITY_CHECK.varname
+ + " capability checks, please set " + ConfVars.CAPABILITY_CHECK.toString()
+ " to false. This setting can be set globally, or on the client for the current"
+ " metastore session. Note that this may lead to incorrect results, data loss,"
+ " undefined behavior, etc. if your client is actually incompatible. You can also"
@@ -2521,7 +2502,7 @@ private void assertClientHasCapability(ClientCapabilities client,
}
private boolean doesClientHaveCapability(ClientCapabilities client, ClientCapability value) {
- if (!HiveConf.getBoolVar(getConf(), ConfVars.METASTORE_CAPABILITY_CHECK)) return true;
+ if (!MetastoreConf.getBoolVar(getConf(), ConfVars.CAPABILITY_CHECK)) return true;
return (client != null && client.isSetValues() && client.getValues().contains(value));
}
@@ -2591,7 +2572,7 @@ private Partition append_partition_common(RawStore ms, String dbName, String tab
.makePartName(tbl.getPartitionKeys(), part_vals));
part.getSd().setLocation(partLocation.toString());
- Partition old_part = null;
+ Partition old_part;
try {
old_part = ms.getPartition(part.getDbName(), part
.getTableName(), part.getValues());
@@ -2616,7 +2597,7 @@ private Partition append_partition_common(RawStore ms, String dbName, String tab
part.setCreateTime((int) time);
part.putToParameters(hive_metastoreConstants.DDL_TIME, Long.toString(time));
- if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVESTATSAUTOGATHER) &&
+ if (MetastoreConf.getBoolVar(conf, ConfVars.STATS_AUTO_GATHER) &&
!MetaStoreUtils.isView(tbl)) {
MetaStoreUtils.updatePartitionStatsFast(part, wh, madeDir, envContext);
}
@@ -2705,7 +2686,7 @@ public Partition append_partition_with_environment_context(final String dbName,
private static class PartValEqWrapper {
Partition partition;
- public PartValEqWrapper(Partition partition) {
+ PartValEqWrapper(Partition partition) {
this.partition = partition;
}
@@ -2739,7 +2720,7 @@ public boolean equals(Object obj) {
List<String> values;
String location;
- public PartValEqWrapperLite(Partition partition) {
+ PartValEqWrapperLite(Partition partition) {
this.values = partition.isSetValues()? partition.getValues() : null;
this.location = partition.getSd().getLocation();
}
@@ -2783,14 +2764,14 @@ public boolean equals(Object obj) {
private List<Partition> add_partitions_core(final RawStore ms,
String dbName, String tblName, List<Partition> parts, final boolean ifNotExists)
- throws MetaException, InvalidObjectException, AlreadyExistsException, TException {
+ throws TException {
logInfo("add_partitions");
boolean success = false;
// Ensures that the list doesn't have dups, and keeps track of directories we have created.
final Map addedPartitions =
Collections.synchronizedMap(new HashMap());
- final List<Partition> newParts = new ArrayList<Partition>();
- final List<Partition> existingParts = new ArrayList<Partition>();
+ final List<Partition> newParts = new ArrayList<>();
+ final List<Partition> existingParts = new ArrayList<>();
Table tbl = null;
Map<String, String> transactionalListenerResponses = Collections.emptyMap();
@@ -2926,7 +2907,7 @@ public Object run() throws Exception {
@Override
public AddPartitionsResult add_partitions_req(AddPartitionsRequest request)
- throws InvalidObjectException, AlreadyExistsException, MetaException, TException {
+ throws TException {
AddPartitionsResult result = new AddPartitionsResult();
if (request.getParts().isEmpty()) {
return result;
@@ -3110,7 +3091,7 @@ public Partition run() throws Exception {
}
private boolean startAddPartition(
- RawStore ms, Partition part, boolean ifNotExists) throws MetaException, TException {
+ RawStore ms, Partition part, boolean ifNotExists) throws TException {
MetaStoreUtils.validatePartitionNameCharacters(part.getValues(),
partitionValidationPattern);
boolean doesExist = ms.doesPartitionExist(
@@ -3174,7 +3155,7 @@ private void initializeAddedPartition(
private void initializeAddedPartition(
final Table tbl, final PartitionSpecProxy.PartitionIterator part, boolean madeDir) throws MetaException {
- if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVESTATSAUTOGATHER) &&
+ if (MetastoreConf.getBoolVar(conf, ConfVars.STATS_AUTO_GATHER) &&
!MetaStoreUtils.isView(tbl)) {
MetaStoreUtils.updatePartitionStatsFast(part, wh, madeDir, false, null);
}
@@ -3189,10 +3170,10 @@ private void initializeAddedPartition(
// Inherit table properties into partition properties.
Map<String, String> tblParams = tbl.getParameters();
- String inheritProps = hiveConf.getVar(ConfVars.METASTORE_PART_INHERIT_TBL_PROPS).trim();
+ String inheritProps = MetastoreConf.getVar(conf, ConfVars.PART_INHERIT_TBL_PROPS).trim();
// Default value is empty string in which case no properties will be inherited.
// * implies all properties needs to be inherited
- Set<String> inheritKeys = new HashSet<String>(Arrays.asList(inheritProps.split(",")));
+ Set<String> inheritKeys = new HashSet<>(Arrays.asList(inheritProps.split(",")));
if (inheritKeys.contains("*")) {
inheritKeys = tblParams.keySet();
}
@@ -3207,7 +3188,7 @@ private void initializeAddedPartition(
private Partition add_partition_core(final RawStore ms,
final Partition part, final EnvironmentContext envContext)
- throws InvalidObjectException, AlreadyExistsException, MetaException, TException {
+ throws TException {
boolean success = false;
Table tbl = null;
Map<String, String> transactionalListenerResponses = Collections.emptyMap();
@@ -3302,8 +3283,7 @@ public Partition add_partition_with_environment_context(
@Override
public Partition exchange_partition(Map<String, String> partitionSpecs,
String sourceDbName, String sourceTableName, String destDbName,
- String destTableName) throws MetaException, NoSuchObjectException,
- InvalidObjectException, InvalidInputException, TException {
+ String destTableName) throws TException {
exchange_partitions(partitionSpecs, sourceDbName, sourceTableName, destDbName, destTableName);
return new Partition();
}
@@ -3311,8 +3291,7 @@ public Partition exchange_partition(Map partitionSpecs,
@Override
public List<Partition> exchange_partitions(Map<String, String> partitionSpecs,
String sourceDbName, String sourceTableName, String destDbName,
- String destTableName) throws MetaException, NoSuchObjectException,
- InvalidObjectException, InvalidInputException, TException {
+ String destTableName) throws TException {
boolean success = false;
boolean pathCreated = false;
RawStore ms = getMS();
@@ -3321,8 +3300,8 @@ public Partition exchange_partition(Map partitionSpecs,
Table sourceTable = ms.getTable(sourceDbName, sourceTableName);
List<String> partVals = MetaStoreUtils.getPvals(sourceTable.getPartitionKeys(),
partitionSpecs);
- List<String> partValsPresent = new ArrayList<String> ();
- List<FieldSchema> partitionKeysPresent = new ArrayList<FieldSchema> ();
+ List<String> partValsPresent = new ArrayList<> ();
+ List<FieldSchema> partitionKeysPresent = new ArrayList<> ();
int i = 0;
for (FieldSchema fs: sourceTable.getPartitionKeys()) {
String partVal = partVals.get(i);
@@ -3346,7 +3325,7 @@ public Partition exchange_partition(Map partitionSpecs,
Warehouse.makePartName(partitionKeysPresent, partValsPresent));
Path destPath = new Path(destinationTable.getSd().getLocation(),
Warehouse.makePartName(partitionKeysPresent, partValsPresent));
- List<Partition> destPartitions = new ArrayList<Partition>();
+ List<Partition> destPartitions = new ArrayList<>();
Map<String, String> transactionalListenerResponsesForAddPartition = Collections.emptyMap();
List