diff --git common/src/java/org/apache/hadoop/hive/common/FileUtils.java common/src/java/org/apache/hadoop/hive/common/FileUtils.java index ff09dd835c..e0d9785363 100644 --- common/src/java/org/apache/hadoop/hive/common/FileUtils.java +++ common/src/java/org/apache/hadoop/hive/common/FileUtils.java @@ -87,27 +87,6 @@ public boolean accept(Path p) { } }; - public static final PathFilter SNAPSHOT_DIR_PATH_FILTER = new PathFilter() { - @Override - public boolean accept(Path p) { - return ".snapshot".equalsIgnoreCase(p.getName()); - } - }; - - /** - * Check if the path contains a subdirectory named '.snapshot' - * @param p path to check - * @param fs filesystem of the path - * @return true if p contains a subdirectory named '.snapshot' - * @throws IOException - */ - public static boolean pathHasSnapshotSubDir(Path p, FileSystem fs) throws IOException { - // Hadoop is missing a public API to check for snapshotable directories. Check with the directory name - // until a more appropriate API is provided by HDFS-12257. - final FileStatus[] statuses = fs.listStatus(p, FileUtils.SNAPSHOT_DIR_PATH_FILTER); - return statuses != null && statuses.length != 0; - } - /** * Variant of Path.makeQualified that qualifies the input path against the default file system * indicated by the configuration diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestPartitionExpressionProxyDefault.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestPartitionExpressionProxyDefault.java new file mode 100644 index 0000000000..e654c02d8e --- /dev/null +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestPartitionExpressionProxyDefault.java @@ -0,0 +1,40 @@ +package org.apache.hadoop.hive.metastore; +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.ql.optimizer.ppr.PartitionExpressionForMetastore; +import org.junit.Assert; +import org.junit.Test; + +/** + * Make sure that when HiveMetaStore is instantiated, the default proper PartitionExpressionProxy + * instance is instantiated. + */ +public class TestPartitionExpressionProxyDefault { + + @Test + public void checkPartitionExpressionProxy() throws MetaException { + Configuration conf = MetastoreConf.newMetastoreConf(); + HiveMetaStore.HMSHandler hms = new HiveMetaStore.HMSHandler("for testing", conf, true); + Assert.assertEquals(PartitionExpressionForMetastore.class, + hms.getExpressionProxy().getClass()); + } +} diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestMetaStoreLimitPartitionRequest.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestMetaStoreLimitPartitionRequest.java index 191d4a34f1..e78318035a 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestMetaStoreLimitPartitionRequest.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestMetaStoreLimitPartitionRequest.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.HiveMetaStore; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hive.jdbc.miniHS2.MiniHS2; import org.apache.hive.service.cli.HiveSQLException; import org.junit.After; @@ -290,7 +291,7 @@ private void executeQueryExceedPartitionLimit(String query, int expectedPartitio + PARTITION_REQUEST_LIMIT); } catch (HiveSQLException e) { String exceedLimitMsg = String.format(HiveMetaStore.PARTITION_NUMBER_EXCEED_LIMIT_MSG, expectedPartitionNumber, - TABLE_NAME, PARTITION_REQUEST_LIMIT, ConfVars.METASTORE_LIMIT_PARTITION_REQUEST.varname); + TABLE_NAME, PARTITION_REQUEST_LIMIT, MetastoreConf.ConfVars.LIMIT_PARTITION_REQUEST.toString()); assertTrue(getWrongExceptionMessage(exceedLimitMsg, e.getMessage()), e.getMessage().contains(exceedLimitMsg.toString())); } diff --git metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java index a4917898e4..939ae21e5f 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java @@ -18,15 +18,12 @@ package org.apache.hadoop.hive.metastore; -import java.io.File; import java.io.IOException; import java.lang.reflect.Constructor; -import java.lang.reflect.InvocationTargetException; -import java.net.URL; -import java.net.URLClassLoader; +import java.net.InetSocketAddress; +import java.net.ServerSocket; +import java.net.Socket; import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; @@ -38,13 +35,8 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; -import org.apache.commons.collections.CollectionUtils; import 
org.apache.commons.lang.StringUtils; -import org.apache.hadoop.hive.metastore.api.Decimal; -import org.apache.hadoop.hive.metastore.api.Order; -import org.apache.hadoop.hive.metastore.api.SkewedInfo; import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; -import org.apache.hadoop.hive.shims.ShimLoader; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -54,28 +46,18 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.hive.common.JavaUtils; -import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.api.ColumnStatistics; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; -import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler; import org.apache.hadoop.hive.metastore.api.EnvironmentContext; import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.InvalidObjectException; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.hive.metastore.api.SerDeInfo; -import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; -import org.apache.hadoop.hive.metastore.columnstats.merge.ColumnStatsMerger; -import org.apache.hadoop.hive.metastore.columnstats.merge.ColumnStatsMergerFactory; -import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; import org.apache.hadoop.hive.serde.serdeConstants; import org.apache.hadoop.hive.serde2.Deserializer; import org.apache.hadoop.hive.serde2.SerDeException; import org.apache.hadoop.hive.serde2.SerDeUtils; -import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe; import org.apache.hadoop.hive.serde2.objectinspector.ListObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.MapObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; @@ -83,16 +65,9 @@ import org.apache.hadoop.hive.serde2.objectinspector.StructField; import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; -import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; import org.apache.hadoop.security.SaslRpcServer; -import org.apache.hadoop.security.authorize.DefaultImpersonationProvider; -import org.apache.hadoop.security.authorize.ProxyUsers; -import org.apache.hadoop.util.MachineList; -import org.apache.hive.common.util.HiveStringUtils; import org.apache.hive.common.util.ReflectionUtil; -import javax.annotation.Nullable; - public class MetaStoreUtils { private static final Logger LOG = LoggerFactory.getLogger("hive.log"); @@ -105,241 +80,20 @@ // HIVE_SUPPORT_SPECICAL_CHARACTERS_IN_TABLE_NAMES in HiveConf as well. 
public static final char[] specialCharactersInTableNames = new char[] { '/' }; - public static Table createColumnsetSchema(String name, List columns, - List partCols, Configuration conf) throws MetaException { - - if (columns == null) { - throw new MetaException("columns not specified for table " + name); - } - - Table tTable = new Table(); - tTable.setTableName(name); - tTable.setSd(new StorageDescriptor()); - StorageDescriptor sd = tTable.getSd(); - sd.setSerdeInfo(new SerDeInfo()); - SerDeInfo serdeInfo = sd.getSerdeInfo(); - serdeInfo.setSerializationLib(LazySimpleSerDe.class.getName()); - serdeInfo.setParameters(new HashMap()); - serdeInfo.getParameters().put(org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT, - Warehouse.DEFAULT_SERIALIZATION_FORMAT); - - List fields = new ArrayList(columns.size()); - sd.setCols(fields); - for (String col : columns) { - FieldSchema field = new FieldSchema(col, - org.apache.hadoop.hive.serde.serdeConstants.STRING_TYPE_NAME, "'default'"); - fields.add(field); - } - - tTable.setPartitionKeys(new ArrayList()); - for (String partCol : partCols) { - FieldSchema part = new FieldSchema(); - part.setName(partCol); - part.setType(org.apache.hadoop.hive.serde.serdeConstants.STRING_TYPE_NAME); // default - // partition - // key - tTable.getPartitionKeys().add(part); - } - sd.setNumBuckets(-1); - return tTable; - } - - /** - * recursiveDelete - * - * just recursively deletes a dir - you'd think Java would have something to - * do this?? - * - * @param f - * - the file/dir to delete - * @exception IOException - * propogate f.delete() exceptions - * - */ - static public void recursiveDelete(File f) throws IOException { - if (f.isDirectory()) { - File fs[] = f.listFiles(); - for (File subf : fs) { - recursiveDelete(subf); - } - } - if (!f.delete()) { - throw new IOException("could not delete: " + f.getPath()); - } - } - - /** - * @param partParams - * @return True if the passed Parameters Map contains values for all "Fast Stats". - */ - private static boolean containsAllFastStats(Map partParams) { - for (String stat : StatsSetupConst.fastStats) { - if (!partParams.containsKey(stat)) { - return false; - } - } - return true; - } - - static boolean updateTableStatsFast(Database db, Table tbl, Warehouse wh, - boolean madeDir, EnvironmentContext environmentContext) throws MetaException { - return updateTableStatsFast(db, tbl, wh, madeDir, false, environmentContext); - } - - private static boolean updateTableStatsFast(Database db, Table tbl, Warehouse wh, - boolean madeDir, boolean forceRecompute, EnvironmentContext environmentContext) throws MetaException { - if (tbl.getPartitionKeysSize() == 0) { - // Update stats only when unpartitioned - FileStatus[] fileStatuses = wh.getFileStatusesForUnpartitionedTable(db, tbl); - return updateTableStatsFast(tbl, fileStatuses, madeDir, forceRecompute, environmentContext); - } else { - return false; - } - } - - /** - * Updates the numFiles and totalSize parameters for the passed Table by querying - * the warehouse if the passed Table does not already have values for these parameters. 
- * @param tbl - * @param fileStatus - * @param newDir if true, the directory was just created and can be assumed to be empty - * @param forceRecompute Recompute stats even if the passed Table already has - * these parameters set - * @return true if the stats were updated, false otherwise - */ - public static boolean updateTableStatsFast(Table tbl, FileStatus[] fileStatus, boolean newDir, - boolean forceRecompute, EnvironmentContext environmentContext) throws MetaException { - - Map params = tbl.getParameters(); - - if ((params!=null) && params.containsKey(StatsSetupConst.DO_NOT_UPDATE_STATS)){ - boolean doNotUpdateStats = Boolean.valueOf(params.get(StatsSetupConst.DO_NOT_UPDATE_STATS)); - params.remove(StatsSetupConst.DO_NOT_UPDATE_STATS); - tbl.setParameters(params); // to make sure we remove this marker property - if (doNotUpdateStats){ - return false; - } - } - - boolean updated = false; - if (forceRecompute || - params == null || - !containsAllFastStats(params)) { - if (params == null) { - params = new HashMap(); - } - if (!newDir) { - // The table location already exists and may contain data. - // Let's try to populate those stats that don't require full scan. - LOG.info("Updating table stats fast for " + tbl.getTableName()); - populateQuickStats(fileStatus, params); - LOG.info("Updated size of table " + tbl.getTableName() +" to "+ params.get(StatsSetupConst.TOTAL_SIZE)); - if (environmentContext != null - && environmentContext.isSetProperties() - && StatsSetupConst.TASK.equals(environmentContext.getProperties().get( - StatsSetupConst.STATS_GENERATED))) { - StatsSetupConst.setBasicStatsState(params, StatsSetupConst.TRUE); - } else { - StatsSetupConst.setBasicStatsState(params, StatsSetupConst.FALSE); - } - } - tbl.setParameters(params); - updated = true; - } - return updated; - } - public static void populateQuickStats(FileStatus[] fileStatus, Map params) { - int numFiles = 0; - long tableSize = 0L; - String s = "LOG14535 Populating quick stats for: "; - for (FileStatus status : fileStatus) { - s += status.getPath() + ", "; - // don't take directories into account for quick stats - if (!status.isDir()) { - tableSize += status.getLen(); - numFiles += 1; - } - } - LOG.info(s/*, new Exception()*/); - params.put(StatsSetupConst.NUM_FILES, Integer.toString(numFiles)); - params.put(StatsSetupConst.TOTAL_SIZE, Long.toString(tableSize)); + org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.populateQuickStats(fileStatus, params); } - static boolean updatePartitionStatsFast(Partition part, Warehouse wh, EnvironmentContext environmentContext) - throws MetaException { - return updatePartitionStatsFast(part, wh, false, false, environmentContext); + public static boolean updateTableStatsFast(Table tbl, FileStatus[] fileStatus, boolean newDir, + boolean forceRecompute, EnvironmentContext environmentContext) throws MetaException { + return org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.updateTableStatsFast( + tbl, fileStatus, newDir, forceRecompute, environmentContext); } - static boolean updatePartitionStatsFast(Partition part, Warehouse wh, boolean madeDir, EnvironmentContext environmentContext) + public static boolean updatePartitionStatsFast(Partition part, Warehouse wh, EnvironmentContext environmentContext) throws MetaException { - return updatePartitionStatsFast(part, wh, madeDir, false, environmentContext); - } - - /** - * Updates the numFiles and totalSize parameters for the passed Partition by querying - * the warehouse if the passed Partition does not already have values for 
these parameters. - * @param part - * @param wh - * @param madeDir if true, the directory was just created and can be assumed to be empty - * @param forceRecompute Recompute stats even if the passed Partition already has - * these parameters set - * @return true if the stats were updated, false otherwise - */ - private static boolean updatePartitionStatsFast(Partition part, Warehouse wh, - boolean madeDir, boolean forceRecompute, EnvironmentContext environmentContext) throws MetaException { - return updatePartitionStatsFast(new PartitionSpecProxy.SimplePartitionWrapperIterator(part), - wh, madeDir, forceRecompute, environmentContext); - } - - /** - * Updates the numFiles and totalSize parameters for the passed Partition by querying - * the warehouse if the passed Partition does not already have values for these parameters. - * @param part - * @param wh - * @param madeDir if true, the directory was just created and can be assumed to be empty - * @param forceRecompute Recompute stats even if the passed Partition already has - * these parameters set - * @return true if the stats were updated, false otherwise - */ - static boolean updatePartitionStatsFast(PartitionSpecProxy.PartitionIterator part, Warehouse wh, - boolean madeDir, boolean forceRecompute, EnvironmentContext environmentContext) throws MetaException { - Map params = part.getParameters(); - boolean updated = false; - if (forceRecompute || - params == null || - !containsAllFastStats(params)) { - if (params == null) { - params = new HashMap(); - } - if (!madeDir) { - // The partition location already existed and may contain data. Lets try to - // populate those statistics that don't require a full scan of the data. - LOG.warn("Updating partition stats fast for: " + part.getTableName()); - FileStatus[] fileStatus = wh.getFileStatusesForLocation(part.getLocation()); - populateQuickStats(fileStatus, params); - LOG.warn("Updated size to " + params.get(StatsSetupConst.TOTAL_SIZE)); - updateBasicState(environmentContext, params); - } - part.setParameters(params); - updated = true; - } - return updated; - } - - private static void updateBasicState(EnvironmentContext environmentContext, Map - params) { - if (params == null) { - return; - } - if (environmentContext != null - && environmentContext.isSetProperties() - && StatsSetupConst.TASK.equals(environmentContext.getProperties().get( - StatsSetupConst.STATS_GENERATED))) { - StatsSetupConst.setBasicStatsState(params, StatsSetupConst.TRUE); - } else { - StatsSetupConst.setBasicStatsState(params, StatsSetupConst.FALSE); - } + return org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.updatePartitionStatsFast( + part, wh, environmentContext); } /** @@ -436,53 +190,6 @@ static public Deserializer getDeserializer(Configuration conf, } } - static public void deleteWHDirectory(Path path, Configuration conf, - boolean use_trash) throws MetaException { - - try { - if (!path.getFileSystem(conf).exists(path)) { - LOG.warn("drop data called on table/partition with no directory: " - + path); - return; - } - - if (use_trash) { - - int count = 0; - Path newPath = new Path("/Trash/Current" - + path.getParent().toUri().getPath()); - - if (path.getFileSystem(conf).exists(newPath) == false) { - path.getFileSystem(conf).mkdirs(newPath); - } - - do { - newPath = new Path("/Trash/Current" + path.toUri().getPath() + "." 
- + count); - if (path.getFileSystem(conf).exists(newPath)) { - count++; - continue; - } - if (path.getFileSystem(conf).rename(path, newPath)) { - break; - } - } while (++count < 50); - if (count >= 50) { - throw new MetaException("Rename failed due to maxing out retries"); - } - } else { - // directly delete it - path.getFileSystem(conf).delete(path, true); - } - } catch (IOException e) { - LOG.error("Got exception trying to delete data dir: " + e); - throw new MetaException(e.getMessage()); - } catch (MetaException e) { - LOG.error("Got exception trying to delete data dir: " + e); - throw e; - } - } - /** * Given a list of partition columns and a partial mapping from * some partition columns to values the function returns the values @@ -537,118 +244,12 @@ public static boolean validateColumnName(String name) { return true; } - static public String validateTblColumns(List cols) { - for (FieldSchema fieldSchema : cols) { - if (!validateColumnName(fieldSchema.getName())) { - return "name: " + fieldSchema.getName(); - } - String typeError = validateColumnType(fieldSchema.getType()); - if (typeError != null) { - return typeError; - } - } - return null; - } - - /** - * @return true if oldType and newType are compatible. - * Two types are compatible if we have internal functions to cast one to another. - */ - static private boolean areColTypesCompatible(String oldType, String newType) { - - /* - * RCFile default serde (ColumnarSerde) serializes the values in such a way that the - * datatypes can be converted from string to any type. The map is also serialized as - * a string, which can be read as a string as well. However, with any binary - * serialization, this is not true. - * - * Primitive types like INT, STRING, BIGINT, etc are compatible with each other and are - * not blocked. - */ - - return TypeInfoUtils.implicitConvertible(TypeInfoUtils.getTypeInfoFromTypeString(oldType), - TypeInfoUtils.getTypeInfoFromTypeString(newType)); - } - public static final String TYPE_FROM_DESERIALIZER = ""; - /** - * validate column type - * - * if it is predefined, yes. 
otherwise no - * @param type - * @return - */ - static public String validateColumnType(String type) { - if (type.equals(TYPE_FROM_DESERIALIZER)) { - return null; - } - int last = 0; - boolean lastAlphaDigit = isValidTypeChar(type.charAt(last)); - for (int i = 1; i <= type.length(); i++) { - if (i == type.length() - || isValidTypeChar(type.charAt(i)) != lastAlphaDigit) { - String token = type.substring(last, i); - last = i; - if (!hiveThriftTypeMap.contains(token)) { - return "type: " + type; - } - break; - } - } - return null; - } - - private static boolean isValidTypeChar(char c) { - return Character.isLetterOrDigit(c) || c == '_'; - } - - public static String validateSkewedColNames(List cols) { - if (CollectionUtils.isEmpty(cols)) { - return null; - } - for (String col : cols) { - if (!validateColumnName(col)) { - return col; - } - } - return null; - } - - public static String validateSkewedColNamesSubsetCol(List skewedColNames, - List cols) { - if (CollectionUtils.isEmpty(skewedColNames)) { - return null; - } - List colNames = new ArrayList(cols.size()); - for (FieldSchema fieldSchema : cols) { - colNames.add(fieldSchema.getName()); - } - // make a copy - List copySkewedColNames = new ArrayList(skewedColNames); - // remove valid columns - copySkewedColNames.removeAll(colNames); - if (copySkewedColNames.isEmpty()) { - return null; - } - return copySkewedColNames.toString(); - } public static String getListType(String t) { return "array<" + t + ">"; } - public static String getMapType(String k, String v) { - return "map<" + k + "," + v + ">"; - } - - public static void setSerdeParam(SerDeInfo sdi, Properties schema, - String param) { - String val = schema.getProperty(param); - if (org.apache.commons.lang.StringUtils.isNotBlank(val)) { - sdi.getParameters().put(param, val); - } - } - static HashMap typeToThriftTypeMap; static { typeToThriftTypeMap = new HashMap(); @@ -726,42 +327,6 @@ public static String typeToThriftType(String type) { } /** - * Convert FieldSchemas to Thrift DDL + column names and column types - * - * @param structName - * The name of the table - * @param fieldSchemas - * List of fields along with their schemas - * @return String containing "Thrift - * DDL#comma-separated-column-names#colon-separated-columntypes - * Example: - * "struct result { a string, map<int,string> b}#a,b#string:map<int,string>" - */ - public static String getFullDDLFromFieldSchema(String structName, - List fieldSchemas) { - StringBuilder ddl = new StringBuilder(); - ddl.append(getDDLFromFieldSchema(structName, fieldSchemas)); - ddl.append('#'); - StringBuilder colnames = new StringBuilder(); - StringBuilder coltypes = new StringBuilder(); - boolean first = true; - for (FieldSchema col : fieldSchemas) { - if (first) { - first = false; - } else { - colnames.append(','); - coltypes.append(':'); - } - colnames.append(col.getName()); - coltypes.append(col.getType()); - } - ddl.append(colnames); - ddl.append('#'); - ddl.append(coltypes); - return ddl.toString(); - } - - /** * Convert FieldSchemas to Thrift DDL. 
*/ public static String getDDLFromFieldSchema(String structName, @@ -1107,15 +672,131 @@ public static String getColumnCommentsFromFieldSchema(List fieldSch return sb.toString(); } - public static void makeDir(Path path, HiveConf hiveConf) throws MetaException { - FileSystem fs; + public static int startMetaStore() throws Exception { + return startMetaStore(HadoopThriftAuthBridge.getBridge(), null); + } + + public static int startMetaStore(final HadoopThriftAuthBridge bridge, HiveConf conf) throws Exception { + int port = findFreePort(); + startMetaStore(port, bridge, conf); + return port; + } + + public static int startMetaStore(HiveConf conf) throws Exception { + return startMetaStore(HadoopThriftAuthBridge.getBridge(), conf); + } + + public static void startMetaStore(final int port, final HadoopThriftAuthBridge bridge) throws Exception { + startMetaStore(port, bridge, null); + } + + public static void startMetaStore(final int port, + final HadoopThriftAuthBridge bridge, HiveConf hiveConf) + throws Exception{ + if (hiveConf == null) { + hiveConf = new HiveConf(HMSHandler.class); + } + final HiveConf finalHiveConf = hiveConf; + Thread thread = new Thread(new Runnable() { + @Override + public void run() { + try { + HiveMetaStore.startMetaStore(port, bridge, finalHiveConf); + } catch (Throwable e) { + LOG.error("Metastore Thrift Server threw an exception...",e); + } + } + }); + thread.setDaemon(true); + thread.start(); + loopUntilHMSReady(port); + } + + /** + * A simple connect test to make sure that the metastore is up + * @throws Exception + */ + private static void loopUntilHMSReady(int port) throws Exception { + int retries = 0; + Exception exc = null; + while (true) { + try { + Socket socket = new Socket(); + socket.connect(new InetSocketAddress(port), 5000); + socket.close(); + return; + } catch (Exception e) { + if (retries++ > 60) { //give up + exc = e; + break; + } + Thread.sleep(1000); + } + } + // something is preventing metastore from starting + // print the stack from all threads for debugging purposes + LOG.error("Unable to connect to metastore server: " + exc.getMessage()); + LOG.info("Printing all thread stack traces for debugging before throwing exception."); + LOG.info(getAllThreadStacksAsString()); + throw exc; + } + + private static String getAllThreadStacksAsString() { + Map threadStacks = Thread.getAllStackTraces(); + StringBuilder sb = new StringBuilder(); + for (Map.Entry entry : threadStacks.entrySet()) { + Thread t = entry.getKey(); + sb.append(System.lineSeparator()); + sb.append("Name: ").append(t.getName()).append(" State: ").append(t.getState()); + addStackString(entry.getValue(), sb); + } + return sb.toString(); + } + + private static void addStackString(StackTraceElement[] stackElems, StringBuilder sb) { + sb.append(System.lineSeparator()); + for (StackTraceElement stackElem : stackElems) { + sb.append(stackElem).append(System.lineSeparator()); + } + } + + /** + * Finds a free port on the machine. + * + * @return + * @throws IOException + */ + public static int findFreePort() throws IOException { + ServerSocket socket= new ServerSocket(0); + int port = socket.getLocalPort(); + socket.close(); + return port; + } + + /** + * Finds a free port on the machine, but allow the + * ability to specify a port number to not use, no matter what. 
+ */ + public static int findFreePortExcepting(int portToExclude) throws IOException { + ServerSocket socket1 = null; + ServerSocket socket2 = null; try { - fs = path.getFileSystem(hiveConf); - if (!fs.exists(path)) { - fs.mkdirs(path); + socket1 = new ServerSocket(0); + socket2 = new ServerSocket(0); + if (socket1.getLocalPort() != portToExclude) { + return socket1.getLocalPort(); + } + // If we're here, then socket1.getLocalPort was the port to exclude + // Since both sockets were open together at a point in time, we're + // guaranteed that socket2.getLocalPort() is not the same. + return socket2.getLocalPort(); + } finally { + if (socket1 != null){ + socket1.close(); + } + if (socket2 != null){ + socket2.close(); } - } catch (IOException e) { - throw new MetaException("Unable to : " + path); } } @@ -1225,52 +906,12 @@ public static boolean isExternalTable(Table table) { return "TRUE".equalsIgnoreCase(params.get("EXTERNAL")); } - /** - * Determines whether a table is an immutable table. - * Immutable tables are write-once/replace, and do not support append. Partitioned - * immutable tables do support additions by way of creation of new partitions, but - * do not allow the partitions themselves to be appended to. "INSERT INTO" will not - * work for Immutable tables. - * - * @param table table of interest - * - * @return true if immutable - */ - public static boolean isImmutableTable(Table table) { - if (table == null){ - return false; - } - Map params = table.getParameters(); - if (params == null) { - return false; - } - - return "TRUE".equalsIgnoreCase(params.get(hive_metastoreConstants.IS_IMMUTABLE)); - } - public static boolean isArchived( org.apache.hadoop.hive.metastore.api.Partition part) { Map params = part.getParameters(); return "TRUE".equalsIgnoreCase(params.get(hive_metastoreConstants.IS_ARCHIVED)); } - public static Path getOriginalLocation( - org.apache.hadoop.hive.metastore.api.Partition part) { - Map params = part.getParameters(); - assert(isArchived(part)); - String originalLocation = params.get(hive_metastoreConstants.ORIGINAL_LOCATION); - assert( originalLocation != null); - - return new Path(originalLocation); - } - - public static boolean isNonNativeTable(Table table) { - if (table == null || table.getParameters() == null) { - return false; - } - return (table.getParameters().get(hive_metastoreConstants.META_TABLE_STORAGE) != null); - } - /** * Filter that filters out hidden files */ @@ -1301,29 +942,6 @@ public static boolean isDirEmpty(FileSystem fs, Path path) throws IOException { return true; } - /** - * Returns true if partial has the same values as full for all values that - * aren't empty in partial. 
- */ - - public static boolean pvalMatches(List partial, List full) { - if(partial.size() > full.size()) { - return false; - } - Iterator p = partial.iterator(); - Iterator f = full.iterator(); - - while(p.hasNext()) { - String pval = p.next(); - String fval = f.next(); - - if (pval.length() != 0 && !pval.equals(fval)) { - return false; - } - } - return true; - } - public static String getIndexTableName(String dbName, String baseTblName, String indexName) { return dbName + "__" + baseTblName + "_" + indexName + "__"; } @@ -1342,26 +960,6 @@ public static boolean isMaterializedViewTable(Table table) { return TableType.MATERIALIZED_VIEW.toString().equals(table.getTableType()); } - /** - * Given a map of partition column names to values, this creates a filter - * string that can be used to call the *byFilter methods - * @param m - * @return the filter string - */ - public static String makeFilterStringFromMap(Map m) { - StringBuilder filter = new StringBuilder(); - for (Entry e : m.entrySet()) { - String col = e.getKey(); - String val = e.getValue(); - if (filter.length() == 0) { - filter.append(col + "=\"" + val + "\""); - } else { - filter.append(" and " + col + "=\"" + val + "\""); - } - } - return filter.toString(); - } - public static boolean isView(Table table) { if (table == null) { return false; @@ -1369,42 +967,6 @@ public static boolean isView(Table table) { return TableType.VIRTUAL_VIEW.toString().equals(table.getTableType()); } - /** - * create listener instances as per the configuration. - * - * @param clazz - * @param conf - * @param listenerImplList - * @return - * @throws MetaException - */ - static List getMetaStoreListeners(Class clazz, - HiveConf conf, String listenerImplList) throws MetaException { - List listeners = new ArrayList(); - - if (StringUtils.isBlank(listenerImplList)) { - return listeners; - } - - String[] listenerImpls = listenerImplList.split(","); - for (String listenerImpl : listenerImpls) { - try { - T listener = (T) Class.forName( - listenerImpl.trim(), true, JavaUtils.getClassLoader()).getConstructor( - Configuration.class).newInstance(conf); - listeners.add(listener); - } catch (InvocationTargetException ie) { - throw new MetaException("Failed to instantiate listener named: "+ - listenerImpl + ", reason: " + ie.getCause()); - } catch (Exception e) { - throw new MetaException("Failed to instantiate listener named: "+ - listenerImpl + ", reason: " + e); - } - } - - return listeners; - } - @SuppressWarnings("unchecked") public static Class getClass(String rawStoreClassName) throws MetaException { @@ -1448,24 +1010,6 @@ public static boolean isView(Table table) { } } - public static void validatePartitionNameCharacters(List partVals, - Pattern partitionValidationPattern) throws MetaException { - - String invalidPartitionVal = - HiveStringUtils.getPartitionValWithInvalidCharacter(partVals, partitionValidationPattern); - if (invalidPartitionVal != null) { - throw new MetaException("Partition value '" + invalidPartitionVal + - "' contains a character " + "not matched by whitelist pattern '" + - partitionValidationPattern.toString() + "'. 
" + "(configure with " + - HiveConf.ConfVars.METASTORE_PARTITION_NAME_WHITELIST_PATTERN.varname + ")"); - } - } - - public static boolean partitionNameHasValidCharacters(List partVals, - Pattern partitionValidationPattern) { - return HiveStringUtils.getPartitionValWithInvalidCharacter(partVals, partitionValidationPattern) == null; - } - /** * @param schema1: The first schema to be compared * @param schema2: The second schema to be compared @@ -1538,97 +1082,6 @@ public static int getArchivingLevel(Partition part) throws MetaException { return names; } - /** - * Helper function to transform Nulls to empty strings. - */ - private static final com.google.common.base.Function transFormNullsToEmptyString - = new com.google.common.base.Function() { - @Override - public java.lang.String apply(@Nullable java.lang.String string) { - return StringUtils.defaultString(string); - } - }; - - /** - * Create a URL from a string representing a path to a local file. - * The path string can be just a path, or can start with file:/, file:/// - * @param onestr path string - * @return - */ - private static URL urlFromPathString(String onestr) { - URL oneurl = null; - try { - if (onestr.startsWith("file:/")) { - oneurl = new URL(onestr); - } else { - oneurl = new File(onestr).toURL(); - } - } catch (Exception err) { - LOG.error("Bad URL " + onestr + ", ignoring path"); - } - return oneurl; - } - - /** - * Add new elements to the classpath. - * - * @param newPaths - * Array of classpath elements - */ - public static ClassLoader addToClassPath(ClassLoader cloader, String[] newPaths) throws Exception { - URLClassLoader loader = (URLClassLoader) cloader; - List curPath = Arrays.asList(loader.getURLs()); - ArrayList newPath = new ArrayList(curPath.size()); - - // get a list with the current classpath components - for (URL onePath : curPath) { - newPath.add(onePath); - } - curPath = newPath; - - for (String onestr : newPaths) { - URL oneurl = urlFromPathString(onestr); - if (oneurl != null && !curPath.contains(oneurl)) { - curPath.add(oneurl); - } - } - - return new URLClassLoader(curPath.toArray(new URL[0]), loader); - } - - // this function will merge csOld into csNew. - public static void mergeColStats(ColumnStatistics csNew, ColumnStatistics csOld) - throws InvalidObjectException { - List list = new ArrayList<>(); - if (csNew.getStatsObj().size() != csOld.getStatsObjSize()) { - // Some of the columns' stats are missing - // This implies partition schema has changed. We will merge columns - // present in both, overwrite stats for columns absent in metastore and - // leave alone columns stats missing from stats task. This last case may - // leave stats in stale state. This will be addressed later. - LOG.debug("New ColumnStats size is {}, but old ColumnStats size is {}", - csNew.getStatsObj().size(), csOld.getStatsObjSize()); - } - // In this case, we have to find out which columns can be merged. - Map map = new HashMap<>(); - // We build a hash map from colName to object for old ColumnStats. - for (ColumnStatisticsObj obj : csOld.getStatsObj()) { - map.put(obj.getColName(), obj); - } - for (int index = 0; index < csNew.getStatsObj().size(); index++) { - ColumnStatisticsObj statsObjNew = csNew.getStatsObj().get(index); - ColumnStatisticsObj statsObjOld = map.get(statsObjNew.getColName()); - if (statsObjOld != null) { - // If statsObjOld is found, we can merge. 
- ColumnStatsMerger merger = ColumnStatsMergerFactory.getColumnStatsMerger(statsObjNew, - statsObjOld); - merger.merge(statsObjNew, statsObjOld); - } - list.add(statsObjNew); - } - csNew.setStatsObj(list); - } - public static List getColumnNames(List schema) { List cols = new ArrayList<>(schema.size()); for (FieldSchema fs : schema) { @@ -1636,32 +1089,4 @@ public static void mergeColStats(ColumnStatistics csNew, ColumnStatistics csOld) } return cols; } - - /** - * Verify if the user is allowed to make DB notification related calls. - * Only the superusers defined in the Hadoop proxy user settings have the permission. - * - * @param user the short user name - * @param conf that contains the proxy user settings - * @return if the user has the permission - */ - public static boolean checkUserHasHostProxyPrivileges(String user, Configuration conf, String ipAddress) { - DefaultImpersonationProvider sip = ProxyUsers.getDefaultImpersonationProvider(); - // Just need to initialize the ProxyUsers for the first time, given that the conf will not change on the fly - if (sip == null) { - ProxyUsers.refreshSuperUserGroupsConfiguration(conf); - sip = ProxyUsers.getDefaultImpersonationProvider(); - } - Map> proxyHosts = sip.getProxyHosts(); - Collection hostEntries = proxyHosts.get(sip.getProxySuperuserIpConfKey(user)); - MachineList machineList = new MachineList(hostEntries); - ipAddress = (ipAddress == null) ? StringUtils.EMPTY : ipAddress; - return machineList.includes(ipAddress); - } - - /** Duplicates AcidUtils; used in a couple places in metastore. */ - public static boolean isInsertOnlyTableParam(Map params) { - String transactionalProp = params.get(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES); - return (transactionalProp != null && "insert_only".equalsIgnoreCase(transactionalProp)); - } } diff --git metastore/src/java/org/apache/hadoop/hive/metastore/SerDeStorageSchemaReader.java metastore/src/java/org/apache/hadoop/hive/metastore/SerDeStorageSchemaReader.java new file mode 100644 index 0000000000..80fae281cc --- /dev/null +++ metastore/src/java/org/apache/hadoop/hive/metastore/SerDeStorageSchemaReader.java @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.serde2.Deserializer; +import org.apache.hadoop.hive.metastore.api.EnvironmentContext; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.utils.StringUtils; + +import java.util.List; + +public class SerDeStorageSchemaReader implements StorageSchemaReader { + @Override + public List readSchema(Table tbl, EnvironmentContext envContext, Configuration conf) + throws MetaException { + ClassLoader orgHiveLoader = null; + try { + if (envContext != null) { + String addedJars = envContext.getProperties().get("hive.added.jars.path"); + if (org.apache.commons.lang.StringUtils.isNotBlank(addedJars)) { + //for thread safe + orgHiveLoader = conf.getClassLoader(); + ClassLoader loader = org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.addToClassPath( + orgHiveLoader, org.apache.commons.lang.StringUtils.split(addedJars, ",")); + conf.setClassLoader(loader); + } + } + + Deserializer s = MetaStoreUtils.getDeserializer(conf, tbl, false); + return MetaStoreUtils.getFieldsFromDeserializer(tbl.getTableName(), s); + } catch (Exception e) { + StringUtils.stringifyException(e); + throw new MetaException(e.getMessage()); + } finally { + if (orgHiveLoader != null) { + conf.setClassLoader(orgHiveLoader); + } + } + } +} diff --git metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore2.java metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore2.java deleted file mode 100644 index fa4e02ac79..0000000000 --- metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore2.java +++ /dev/null @@ -1,229 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hive.metastore; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId; -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.FileMetadataExprType; -import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.metastore.api.NotificationEvent; -import org.apache.hadoop.hive.metastore.api.NotificationEventRequest; -import org.apache.hadoop.hive.metastore.api.NotificationEventResponse; -import org.apache.hadoop.hive.metastore.conf.MetastoreConf; -import org.apache.hadoop.hive.metastore.messaging.EventMessage; -import org.apache.hadoop.hive.metastore.model.MNotificationLog; -import org.apache.hadoop.hive.metastore.model.MNotificationNextId; -import org.apache.hadoop.hive.ql.io.sarg.SearchArgument; -import org.junit.Before; -import org.junit.Ignore; -import org.junit.Test; -import org.slf4j.LoggerFactory; - -import java.util.List; -import java.util.concurrent.BrokenBarrierException; -import java.util.concurrent.CyclicBarrier; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.TimeUnit; - -import static org.apache.hadoop.hive.metastore.TestOldSchema.dropAllStoreObjects; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - -// Tests from TestObjectStore that can't be moved yet due to references to EventMessage. Once -// EventMessage has been moved this should be recombined with TestObjectStore. 
- -public class TestObjectStore2 { - private ObjectStore objectStore = null; - - public static class MockPartitionExpressionProxy implements PartitionExpressionProxy { - @Override - public String convertExprToFilter(byte[] expr) throws MetaException { - return null; - } - - @Override - public boolean filterPartitionsByExpr(List partColumns, - byte[] expr, String defaultPartitionName, List partitionNames) - throws MetaException { - return false; - } - - @Override - public FileMetadataExprType getMetadataType(String inputFormat) { - return null; - } - - @Override - public SearchArgument createSarg(byte[] expr) { - return null; - } - - @Override - public FileFormatProxy getFileFormatProxy(FileMetadataExprType type) { - return null; - } - } - - @Before - public void setUp() throws Exception { - Configuration conf = MetastoreConf.newMetastoreConf(); - MetastoreConf.setVar(conf, MetastoreConf.ConfVars.EXPRESSION_PROXY_CLASS, - MockPartitionExpressionProxy.class.getName()); - - objectStore = new ObjectStore(); - objectStore.setConf(conf); - dropAllStoreObjects(objectStore); - } - - /** - * Test notification operations - */ - // TODO MS-SPLIT uncomment once we move EventMessage over - @Test - public void testNotificationOps() throws InterruptedException { - final int NO_EVENT_ID = 0; - final int FIRST_EVENT_ID = 1; - final int SECOND_EVENT_ID = 2; - - NotificationEvent event = - new NotificationEvent(0, 0, EventMessage.EventType.CREATE_DATABASE.toString(), ""); - NotificationEventResponse eventResponse; - CurrentNotificationEventId eventId; - - // Verify that there is no notifications available yet - eventId = objectStore.getCurrentNotificationEventId(); - assertEquals(NO_EVENT_ID, eventId.getEventId()); - - // Verify that addNotificationEvent() updates the NotificationEvent with the new event ID - objectStore.addNotificationEvent(event); - assertEquals(FIRST_EVENT_ID, event.getEventId()); - objectStore.addNotificationEvent(event); - assertEquals(SECOND_EVENT_ID, event.getEventId()); - - // Verify that objectStore fetches the latest notification event ID - eventId = objectStore.getCurrentNotificationEventId(); - assertEquals(SECOND_EVENT_ID, eventId.getEventId()); - - // Verify that getNextNotification() returns all events - eventResponse = objectStore.getNextNotification(new NotificationEventRequest()); - assertEquals(2, eventResponse.getEventsSize()); - assertEquals(FIRST_EVENT_ID, eventResponse.getEvents().get(0).getEventId()); - assertEquals(SECOND_EVENT_ID, eventResponse.getEvents().get(1).getEventId()); - - // Verify that getNextNotification(last) returns events after a specified event - eventResponse = objectStore.getNextNotification(new NotificationEventRequest(FIRST_EVENT_ID)); - assertEquals(1, eventResponse.getEventsSize()); - assertEquals(SECOND_EVENT_ID, eventResponse.getEvents().get(0).getEventId()); - - // Verify that getNextNotification(last) returns zero events if there are no more notifications available - eventResponse = objectStore.getNextNotification(new NotificationEventRequest(SECOND_EVENT_ID)); - assertEquals(0, eventResponse.getEventsSize()); - - // Verify that cleanNotificationEvents() cleans up all old notifications - Thread.sleep(1); - objectStore.cleanNotificationEvents(1); - eventResponse = objectStore.getNextNotification(new NotificationEventRequest()); - assertEquals(0, eventResponse.getEventsSize()); - } - - @Ignore( - "This test is here to allow testing with other databases like mysql / postgres etc\n" - + " with user changes to the code. 
This cannot be run on apache derby because of\n" - + " https://db.apache.org/derby/docs/10.10/devguide/cdevconcepts842385.html" - ) - @Test - public void testConcurrentAddNotifications() throws ExecutionException, InterruptedException { - - final int NUM_THREADS = 10; - CyclicBarrier cyclicBarrier = new CyclicBarrier(NUM_THREADS, - () -> LoggerFactory.getLogger("test") - .debug(NUM_THREADS + " threads going to add notification")); - - Configuration conf = MetastoreConf.newMetastoreConf(); - MetastoreConf.setVar(conf, MetastoreConf.ConfVars.EXPRESSION_PROXY_CLASS, - MockPartitionExpressionProxy.class.getName()); - /* - Below are the properties that need to be set based on what database this test is going to be run - */ - -// conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_DRIVER, "com.mysql.jdbc.Driver"); -// conf.setVar(HiveConf.ConfVars.METASTORECONNECTURLKEY, -// "jdbc:mysql://localhost:3306/metastore_db"); -// conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_USER_NAME, ""); -// conf.setVar(HiveConf.ConfVars.METASTOREPWD, ""); - - /* - we have to add this one manually as for tests the db is initialized via the metastoreDiretSQL - and we don't run the schema creation sql that includes the an insert for notification_sequence - which can be locked. the entry in notification_sequence happens via notification_event insertion. - */ - objectStore.getPersistenceManager().newQuery(MNotificationLog.class, "eventType==''").execute(); - objectStore.getPersistenceManager().newQuery(MNotificationNextId.class, "nextEventId==-1").execute(); - - objectStore.addNotificationEvent( - new NotificationEvent(0, 0, - EventMessage.EventType.CREATE_DATABASE.toString(), - "CREATE DATABASE DB initial")); - - ExecutorService executorService = Executors.newFixedThreadPool(NUM_THREADS); - for (int i = 0; i < NUM_THREADS; i++) { - final int n = i; - - executorService.execute( - () -> { - ObjectStore store = new ObjectStore(); - store.setConf(conf); - - String eventType = EventMessage.EventType.CREATE_DATABASE.toString(); - NotificationEvent dbEvent = - new NotificationEvent(0, 0, eventType, - "CREATE DATABASE DB" + n); - System.out.println("ADDING NOTIFICATION"); - - try { - cyclicBarrier.await(); - } catch (InterruptedException | BrokenBarrierException e) { - throw new RuntimeException(e); - } - store.addNotificationEvent(dbEvent); - System.out.println("FINISH NOTIFICATION"); - }); - } - executorService.shutdown(); - assertTrue(executorService.awaitTermination(15, TimeUnit.SECONDS)); - - // we have to setup this again as the underlying PMF keeps getting reinitialized with original - // reference closed - ObjectStore store = new ObjectStore(); - store.setConf(conf); - - NotificationEventResponse eventResponse = store.getNextNotification( - new NotificationEventRequest()); - assertEquals(NUM_THREADS + 1, eventResponse.getEventsSize()); - long previousId = 0; - for (NotificationEvent event : eventResponse.getEvents()) { - assertTrue("previous:" + previousId + " current:" + event.getEventId(), - previousId < event.getEventId()); - assertTrue(previousId + 1 == event.getEventId()); - previousId = event.getEventId(); - } - } -} diff --git standalone-metastore/pom.xml standalone-metastore/pom.xml index a2a34a5c9a..eee652806c 100644 --- standalone-metastore/pom.xml +++ standalone-metastore/pom.xml @@ -522,6 +522,7 @@ org.antlr antlr3-maven-plugin + ${antlr.version} diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/DefaultPartitionExpressionProxy.java 
standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/DefaultPartitionExpressionProxy.java new file mode 100644 index 0000000000..ec543be397 --- /dev/null +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/DefaultPartitionExpressionProxy.java @@ -0,0 +1,57 @@ +package org.apache.hadoop.hive.metastore; +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.FileMetadataExprType; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.ql.io.sarg.SearchArgument; + +import java.util.List; + +/** + * Default implementation of PartitionExpressionProxy. Eventually this should use the SARGs in + * Hive's storage-api. For now it just throws UnsupportedOperationException. + */ +public class DefaultPartitionExpressionProxy implements PartitionExpressionProxy { + @Override + public String convertExprToFilter(byte[] expr) throws MetaException { + throw new UnsupportedOperationException(); + } + + @Override + public boolean filterPartitionsByExpr(List partColumns, byte[] expr, String + defaultPartitionName, List partitionNames) throws MetaException { + throw new UnsupportedOperationException(); + } + + @Override + public FileMetadataExprType getMetadataType(String inputFormat) { + throw new UnsupportedOperationException(); + } + + @Override + public FileFormatProxy getFileFormatProxy(FileMetadataExprType type) { + throw new UnsupportedOperationException(); + } + + @Override + public SearchArgument createSarg(byte[] expr) { + throw new UnsupportedOperationException(); + } +} diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/DefaultStorageSchemaReader.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/DefaultStorageSchemaReader.java new file mode 100644 index 0000000000..1dbfa4272c --- /dev/null +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/DefaultStorageSchemaReader.java @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.api.EnvironmentContext; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.Table; + +import java.util.List; + +/** + * Default StorageSchemaReader. This just throws as the metastore currently doesn't know how to + * read schemas from storage. + */ +public class DefaultStorageSchemaReader implements StorageSchemaReader { + @Override + public List readSchema(Table tbl, EnvironmentContext envContext, + Configuration conf) throws MetaException { + throw new UnsupportedOperationException("Storage schema reading not supported"); + } +} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java similarity index 90% rename from metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java index 8a55305647..791f549a1a 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java @@ -1,4 +1,4 @@ -/** * Licensed to the Apache Software Foundation (ASF) under one +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. 
The ASF licenses this file @@ -20,9 +20,10 @@ import static org.apache.commons.lang.StringUtils.join; import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_COMMENT; import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; -import static org.apache.hadoop.hive.metastore.MetaStoreUtils.validateName; import java.io.IOException; +import java.net.InetAddress; +import java.net.UnknownHostException; import java.nio.ByteBuffer; import java.security.PrivilegedExceptionAction; import java.util.AbstractMap; @@ -66,21 +67,12 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.common.FileUtils; -import org.apache.hadoop.hive.common.LogUtils; -import org.apache.hadoop.hive.common.LogUtils.LogInitializationException; import org.apache.hadoop.hive.common.StatsSetupConst; -import org.apache.hadoop.hive.common.auth.HiveAuthUtils; -import org.apache.hadoop.hive.common.classification.InterfaceAudience; -import org.apache.hadoop.hive.common.classification.InterfaceStability; -import org.apache.hadoop.hive.common.cli.CommonCliOptions; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.conf.HiveConf.ConfVars; -import org.apache.hadoop.hive.io.HdfsUtils; import org.apache.hadoop.hive.metastore.api.*; import org.apache.hadoop.hive.metastore.events.AddForeignKeyEvent; import org.apache.hadoop.hive.metastore.cache.CachedStore; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.apache.hadoop.hive.metastore.events.AddIndexEvent; import org.apache.hadoop.hive.metastore.events.AddNotNullConstraintEvent; import org.apache.hadoop.hive.metastore.events.AddPartitionEvent; @@ -119,7 +111,6 @@ import org.apache.hadoop.hive.metastore.events.PreLoadPartitionDoneEvent; import org.apache.hadoop.hive.metastore.events.PreReadDatabaseEvent; import org.apache.hadoop.hive.metastore.events.PreReadTableEvent; -import org.apache.hadoop.hive.metastore.filemeta.OrcFileMetadataHandler; import org.apache.hadoop.hive.metastore.messaging.EventMessage.EventType; import org.apache.hadoop.hive.metastore.metrics.JvmPauseMonitor; import org.apache.hadoop.hive.metastore.metrics.Metrics; @@ -136,15 +127,19 @@ import org.apache.hadoop.hive.metastore.txn.AcidWriteSetService; import org.apache.hadoop.hive.metastore.txn.TxnStore; import org.apache.hadoop.hive.metastore.txn.TxnUtils; -import org.apache.hadoop.hive.serde2.Deserializer; -import org.apache.hadoop.hive.serde2.SerDeException; -import org.apache.hadoop.hive.shims.Utils; import org.apache.hadoop.security.SecurityUtil; +import org.apache.hadoop.hive.metastore.utils.CommonCliOptions; +import org.apache.hadoop.hive.metastore.utils.FileUtils; +import org.apache.hadoop.hive.metastore.utils.HdfsUtils; +import org.apache.hadoop.hive.metastore.utils.JavaUtils; +import org.apache.hadoop.hive.metastore.utils.LogUtils; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; +import org.apache.hadoop.hive.metastore.utils.MetastoreVersionInfo; +import org.apache.hadoop.hive.metastore.utils.SecurityUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.util.ShutdownHookManager; import org.apache.hadoop.util.StringUtils; -import org.apache.hive.common.util.HiveStringUtils; -import org.apache.hive.common.util.ShutdownHookManager; import org.apache.thrift.TException; import 
org.apache.thrift.TProcessor; import org.apache.thrift.protocol.TBinaryProtocol; @@ -189,6 +184,8 @@ @VisibleForTesting static long TEST_TIMEOUT_VALUE = -1; + private static ShutdownHookManager shutdownHookMgr; + public static final String ADMIN = "admin"; public static final String PUBLIC = "public"; /** MM write states. */ @@ -198,8 +195,8 @@ private static MetastoreDelegationTokenManager delegationTokenManager; private static boolean useSasl; - public static final String NO_FILTER_STRING = ""; - public static final int UNLIMITED_MAX_PARTITIONS = -1; + static final String NO_FILTER_STRING = ""; + static final int UNLIMITED_MAX_PARTITIONS = -1; private static final class ChainedTTransportFactory extends TTransportFactory { private final TTransportFactory parentTransFactory; @@ -220,12 +217,13 @@ public TTransport getTransport(TTransport trans) { public static class HMSHandler extends FacebookBase implements IHMSHandler { public static final Logger LOG = HiveMetaStore.LOG; - private final HiveConf hiveConf; // stores datastore (jpox) properties, + private final Configuration conf; // stores datastore (jpox) properties, // right now they come from jpox.properties private static String currentUrl; private FileMetadataManager fileMetadataManager; private PartitionExpressionProxy expressionProxy; + private StorageSchemaReader storageSchemaReader; // Variables for metrics // Package visible so that HMSMetricsListener can see them. @@ -259,7 +257,7 @@ public static RawStore getRawStore() { return threadLocalMS.get(); } - public static void removeRawStore() { + static void removeRawStore() { threadLocalMS.remove(); } @@ -286,17 +284,17 @@ protected Configuration initialValue() { private static ExecutorService threadPool; - public static final Logger auditLog = LoggerFactory.getLogger( + static final Logger auditLog = LoggerFactory.getLogger( HiveMetaStore.class.getName() + ".audit"); - private static final void logAuditEvent(String cmd) { + private static void logAuditEvent(String cmd) { if (cmd == null) { return; } UserGroupInformation ugi; try { - ugi = Utils.getUGI(); + ugi = SecurityUtils.getUGI(); } catch (Exception ex) { throw new RuntimeException(ex); } @@ -325,7 +323,7 @@ private static String getIPAddress() { private static ThreadLocal threadLocalId = new ThreadLocal() { @Override protected Integer initialValue() { - return new Integer(nextSerialNum++); + return nextSerialNum++; } }; @@ -388,17 +386,24 @@ private void notifyMetaListenersOnShutDown() { } } - public static void setThreadLocalIpAddress(String ipAddress) { + static void setThreadLocalIpAddress(String ipAddress) { threadLocalIpAddress.set(ipAddress); } // This will return null if the metastore is not being accessed from a metastore Thrift server, // or if the TTransport being used to connect is not an instance of TSocket, or if kereberos // is used - public static String getThreadLocalIpAddress() { + static String getThreadLocalIpAddress() { return threadLocalIpAddress.get(); } + // Make it possible for tests to check that the right type of PartitionExpressionProxy was + // instantiated. + @VisibleForTesting + PartitionExpressionProxy getExpressionProxy() { + return expressionProxy; + } + /** * Use {@link #getThreadId()} instead. 
* @return thread id @@ -414,21 +419,20 @@ public int getThreadId() { } public HMSHandler(String name) throws MetaException { - this(name, new HiveConf(HMSHandler.class), true); + this(name, MetastoreConf.newMetastoreConf(), true); } - public HMSHandler(String name, HiveConf conf) throws MetaException { + public HMSHandler(String name, Configuration conf) throws MetaException { this(name, conf, true); } - public HMSHandler(String name, HiveConf conf, boolean init) throws MetaException { + public HMSHandler(String name, Configuration conf, boolean init) throws MetaException { super(name); - hiveConf = conf; - isInTest = HiveConf.getBoolVar(hiveConf, ConfVars.HIVE_IN_TEST); + this.conf = conf; + isInTest = MetastoreConf.getBoolVar(this.conf, ConfVars.HIVE_IN_TEST); synchronized (HMSHandler.class) { if (threadPool == null) { - int numThreads = HiveConf.getIntVar(conf, - ConfVars.METASTORE_FS_HANDLER_THREADS_COUNT); + int numThreads = MetastoreConf.getIntVar(conf, ConfVars.FS_HANDLER_THREADS_COUNT); threadPool = Executors.newFixedThreadPool(numThreads, new ThreadFactoryBuilder().setDaemon(true) .setNameFormat("HMSHandler #%d").build()); @@ -439,8 +443,13 @@ public HMSHandler(String name, HiveConf conf, boolean init) throws MetaException } } - public HiveConf getHiveConf() { - return hiveConf; + /** + * Use {@link #getConf()} instead. + * @return Configuration object + */ + @Deprecated + public Configuration getHiveConf() { + return conf; } private ClassLoader classLoader; @@ -468,32 +477,30 @@ public HiveConf getHiveConf() { @Override public void init() throws MetaException { initListeners = MetaStoreUtils.getMetaStoreListeners( - MetaStoreInitListener.class, hiveConf, - hiveConf.getVar(HiveConf.ConfVars.METASTORE_INIT_HOOKS)); + MetaStoreInitListener.class, conf, MetastoreConf.getVar(conf, ConfVars.INIT_HOOKS)); for (MetaStoreInitListener singleInitListener: initListeners) { MetaStoreInitContext context = new MetaStoreInitContext(); singleInitListener.onInit(context); } - String alterHandlerName = hiveConf.get("hive.metastore.alter.impl", - HiveAlterHandler.class.getName()); - alterHandler = (AlterHandler) ReflectionUtils.newInstance(MetaStoreUtils.getClass( - alterHandlerName), hiveConf); - wh = new Warehouse(hiveConf); + String alterHandlerName = MetastoreConf.getVar(conf, ConfVars.ALTER_HANDLER); + alterHandler = ReflectionUtils.newInstance(JavaUtils.getClass( + alterHandlerName, AlterHandler.class), conf); + wh = new Warehouse(conf); synchronized (HMSHandler.class) { - if (currentUrl == null || !currentUrl.equals(MetaStoreInit.getConnectionURL(hiveConf))) { + if (currentUrl == null || !currentUrl.equals(MetaStoreInit.getConnectionURL(conf))) { createDefaultDB(); createDefaultRoles(); addAdminUsers(); - currentUrl = MetaStoreInit.getConnectionURL(hiveConf); + currentUrl = MetaStoreInit.getConnectionURL(conf); } } //Start Metrics - if (hiveConf.getBoolVar(ConfVars.METASTORE_METRICS)) { + if (MetastoreConf.getBoolVar(conf, ConfVars.METRICS_ENABLED)) { LOG.info("Begin calculating metadata count metrics."); - Metrics.initialize(hiveConf); + Metrics.initialize(conf); databaseCount = Metrics.getOrCreateGauge(MetricsConstants.TOTAL_DATABASES); tableCount = Metrics.getOrCreateGauge(MetricsConstants.TOTAL_TABLES); partCount = Metrics.getOrCreateGauge(MetricsConstants.TOTAL_PARTITIONS); @@ -502,46 +509,46 @@ public void init() throws MetaException { } preListeners = MetaStoreUtils.getMetaStoreListeners(MetaStorePreEventListener.class, - hiveConf, - 
hiveConf.getVar(HiveConf.ConfVars.METASTORE_PRE_EVENT_LISTENERS)); - preListeners.add(0, new TransactionalValidationListener(hiveConf)); - listeners = MetaStoreUtils.getMetaStoreListeners(MetaStoreEventListener.class, hiveConf, - hiveConf.getVar(HiveConf.ConfVars.METASTORE_EVENT_LISTENERS)); - listeners.add(new SessionPropertiesListener(hiveConf)); - listeners.add(new AcidEventListener(hiveConf)); - transactionalListeners = MetaStoreUtils.getMetaStoreListeners(TransactionalMetaStoreEventListener.class,hiveConf, - hiveConf.getVar(ConfVars.METASTORE_TRANSACTIONAL_EVENT_LISTENERS)); + conf, MetastoreConf.getVar(conf, ConfVars.PRE_EVENT_LISTENERS)); + preListeners.add(0, new TransactionalValidationListener(conf)); + listeners = MetaStoreUtils.getMetaStoreListeners(MetaStoreEventListener.class, conf, + MetastoreConf.getVar(conf, ConfVars.EVENT_LISTENERS)); + listeners.add(new SessionPropertiesListener(conf)); + listeners.add(new AcidEventListener(conf)); + transactionalListeners = MetaStoreUtils.getMetaStoreListeners(TransactionalMetaStoreEventListener.class, + conf, MetastoreConf.getVar(conf, ConfVars.TRANSACTIONAL_EVENT_LISTENERS)); if (Metrics.getRegistry() != null) { - listeners.add(new HMSMetricsListener(hiveConf)); + listeners.add(new HMSMetricsListener(conf)); } endFunctionListeners = MetaStoreUtils.getMetaStoreListeners( - MetaStoreEndFunctionListener.class, hiveConf, - hiveConf.getVar(HiveConf.ConfVars.METASTORE_END_FUNCTION_LISTENERS)); + MetaStoreEndFunctionListener.class, conf, MetastoreConf.getVar(conf, ConfVars.END_FUNCTION_LISTENERS)); String partitionValidationRegex = - hiveConf.getVar(HiveConf.ConfVars.METASTORE_PARTITION_NAME_WHITELIST_PATTERN); + MetastoreConf.getVar(conf, ConfVars.PARTITION_NAME_WHITELIST_PATTERN); if (partitionValidationRegex != null && !partitionValidationRegex.isEmpty()) { partitionValidationPattern = Pattern.compile(partitionValidationRegex); } else { partitionValidationPattern = null; } - long cleanFreq = hiveConf.getTimeVar(ConfVars.METASTORE_EVENT_CLEAN_FREQ, TimeUnit.MILLISECONDS); + ThreadPool.initialize(conf); + long cleanFreq = MetastoreConf.getTimeVar(conf, ConfVars.EVENT_CLEAN_FREQ, TimeUnit.MILLISECONDS); if (cleanFreq > 0) { - // In default config, there is no timer. - Timer cleaner = new Timer("Metastore Events Cleaner Thread", true); - cleaner.schedule(new EventCleanerTask(this), cleanFreq, cleanFreq); + ThreadPool.getPool().scheduleAtFixedRate(new EventCleanerTask(this), cleanFreq, + cleanFreq, TimeUnit.MILLISECONDS); } - cleanFreq = hiveConf.getTimeVar(ConfVars.REPL_DUMPDIR_CLEAN_FREQ, TimeUnit.MILLISECONDS); + cleanFreq = MetastoreConf.getTimeVar(conf, ConfVars.REPL_DUMPDIR_CLEAN_FREQ, + TimeUnit.MILLISECONDS); if (cleanFreq > 0) { - // In default config, there is no timer. 
- Timer cleaner = new Timer("Repl Dump Dir Cleaner Thread", true); - cleaner.schedule(new DumpDirCleanerTask(hiveConf), cleanFreq, cleanFreq); + DumpDirCleanerTask ddc = new DumpDirCleanerTask(); + ddc.setConf(conf); + ThreadPool.getPool().scheduleAtFixedRate(ddc, cleanFreq, cleanFreq, + TimeUnit.MILLISECONDS); } - expressionProxy = PartFilterExprUtil.createExpressionProxy(hiveConf); - fileMetadataManager = new FileMetadataManager(this.getMS(), hiveConf); + expressionProxy = PartFilterExprUtil.createExpressionProxy(conf); + fileMetadataManager = new FileMetadataManager(this.getMS(), conf); } private static String addPrefix(String s) { @@ -569,7 +576,7 @@ public void setConf(Configuration conf) { public Configuration getConf() { Configuration conf = threadLocalConf.get(); if (conf == null) { - conf = new Configuration(hiveConf); + conf = new Configuration(this.conf); threadLocalConf.set(conf); } return conf; @@ -578,7 +585,7 @@ public Configuration getConf() { private Map getModifiedConf() { Map modifiedConf = threadLocalModifiedConfig.get(); if (modifiedConf == null) { - modifiedConf = new HashMap(); + modifiedConf = new HashMap<>(); threadLocalModifiedConfig.set(modifiedConf); } return modifiedConf; @@ -591,17 +598,18 @@ public Warehouse getWh() { @Override public void setMetaConf(String key, String value) throws MetaException { - ConfVars confVar = HiveConf.getMetaConf(key); + ConfVars confVar = MetastoreConf.getMetaConf(key); if (confVar == null) { throw new MetaException("Invalid configuration key " + key); } - String validate = confVar.validate(value); - if (validate != null) { + try { + confVar.validate(value); + } catch (IllegalArgumentException e) { throw new MetaException("Invalid configuration value " + value + " for key " + key + - " by " + validate); + " by " + e.getMessage()); } Configuration configuration = getConf(); - String oldValue = configuration.get(key); + String oldValue = MetastoreConf.get(configuration, key); // Save prev val of the key on threadLocal Map modifiedConf = getModifiedConf(); if (!modifiedConf.containsKey(key)) { @@ -616,11 +624,11 @@ public void setMetaConf(String key, String value) throws MetaException { @Override public String getMetaConf(String key) throws MetaException { - ConfVars confVar = HiveConf.getMetaConf(key); + ConfVars confVar = MetastoreConf.getMetaConf(key); if (confVar == null) { throw new MetaException("Invalid configuration key " + key); } - return getConf().get(key, confVar.getDefaultValue()); + return getConf().get(key, confVar.getDefaultVal().toString()); } /** @@ -629,8 +637,6 @@ public String getMetaConf(String key) throws MetaException { * @return the cached RawStore * @throws MetaException */ - @InterfaceAudience.LimitedPrivate({"HCATALOG"}) - @InterfaceStability.Evolving @Override public RawStore getMS() throws MetaException { Configuration conf = getConf(); @@ -651,30 +657,17 @@ public static RawStore getMSForConf(Configuration conf) throws MetaException { private TxnStore getTxnHandler() { TxnStore txn = threadLocalTxn.get(); if (txn == null) { - txn = TxnUtils.getTxnStore(hiveConf); + txn = TxnUtils.getTxnStore(conf); threadLocalTxn.set(txn); } return txn; } private static RawStore newRawStoreForConf(Configuration conf) throws MetaException { - HiveConf hiveConf = new HiveConf(conf, HiveConf.class); - String rawStoreClassName = hiveConf.getVar(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL); + Configuration newConf = new Configuration(conf); + String rawStoreClassName = MetastoreConf.getVar(newConf, ConfVars.RAW_STORE_IMPL); 
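The recurring change in this file is the switch from HiveConf getters on METASTORE_* variables to MetastoreConf getters on the renamed ConfVars, operating on a plain Hadoop Configuration. Below is a minimal, self-contained sketch of that lookup pattern; the class name ConfLookupExample is invented, while the constants and getters are the ones used in the hunks above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;

public class ConfLookupExample {
  public static void main(String[] args) {
    // newMetastoreConf() takes the place of constructing a HiveConf.
    Configuration conf = MetastoreConf.newMetastoreConf();
    // Typed getters take the Configuration plus a renamed ConfVars constant.
    int batchSize = MetastoreConf.getIntVar(conf, ConfVars.BATCH_RETRIEVE_MAX);
    String rawStoreImpl = MetastoreConf.getVar(conf, ConfVars.RAW_STORE_IMPL);
    boolean metricsEnabled = MetastoreConf.getBoolVar(conf, ConfVars.METRICS_ENABLED);
    System.out.println(rawStoreImpl + ", batch=" + batchSize + ", metrics=" + metricsEnabled);
  }
}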
LOG.info(addPrefix("Opening raw store with implementation class:" + rawStoreClassName)); - if (hiveConf.getBoolVar(ConfVars.METASTORE_FASTPATH)) { - LOG.info("Fastpath, skipping raw store proxy"); - try { - RawStore rs = - ((Class) MetaStoreUtils.getClass(rawStoreClassName)) - .newInstance(); - rs.setConf(hiveConf); - return rs; - } catch (Exception e) { - LOG.error("Unable to instantiate raw store directly in fastpath mode", e); - throw new RuntimeException(e); - } - } - return RawStoreProxy.getProxy(hiveConf, conf, rawStoreClassName, threadLocalId.get()); + return RawStoreProxy.getProxy(newConf, conf, rawStoreClassName, threadLocalId.get()); } private void createDefaultDB_core(RawStore ms) throws MetaException, InvalidObjectException { @@ -791,7 +784,7 @@ private void addAdminUsers() throws MetaException { private void addAdminUsers_core() throws MetaException { // now add pre-configured users to admin role - String userStr = HiveConf.getVar(hiveConf,ConfVars.USERS_IN_ADMIN_ROLE,"").trim(); + String userStr = MetastoreConf.getVar(conf,ConfVars.USERS_IN_ADMIN_ROLE,"").trim(); if (userStr.isEmpty()) { LOG.info("No user is added in admin role, since config is empty"); return; @@ -850,24 +843,23 @@ private String startFunction(String function) { return startFunction(function, ""); } - private String startTableFunction(String function, String db, String tbl) { - return startFunction(function, " : db=" + db + " tbl=" + tbl); + private void startTableFunction(String function, String db, String tbl) { + startFunction(function, " : db=" + db + " tbl=" + tbl); } - private String startMultiTableFunction(String function, String db, List tbls) { + private void startMultiTableFunction(String function, String db, List tbls) { String tableNames = join(tbls, ","); - return startFunction(function, " : db=" + db + " tbls=" + tableNames); + startFunction(function, " : db=" + db + " tbls=" + tableNames); } - private String startPartitionFunction(String function, String db, String tbl, - List partVals) { - return startFunction(function, " : db=" + db + " tbl=" + tbl - + "[" + join(partVals, ",") + "]"); + private void startPartitionFunction(String function, String db, String tbl, + List partVals) { + startFunction(function, " : db=" + db + " tbl=" + tbl + "[" + join(partVals, ",") + "]"); } - private String startPartitionFunction(String function, String db, String tbl, - Map partName) { - return startFunction(function, " : db=" + db + " tbl=" + tbl + "partition=" + partName); + private void startPartitionFunction(String function, String db, String tbl, + Map partName) { + startFunction(function, " : db=" + db + " tbl=" + tbl + "partition=" + partName); } private void endFunction(String function, boolean successful, Exception e) { @@ -918,7 +910,7 @@ public void shutdown() { private void create_database_core(RawStore ms, final Database db) throws AlreadyExistsException, InvalidObjectException, MetaException { - if (!validateName(db.getName(), null)) { + if (!MetaStoreUtils.validateName(db.getName(), null)) { throw new InvalidObjectException(db.getName() + " is not a valid database name"); } @@ -1020,10 +1012,7 @@ public Database get_database(final String name) throws NoSuchObjectException, Me try { db = get_database_core(name); firePreEvent(new PreReadDatabaseEvent(db, this)); - } catch (MetaException e) { - ex = e; - throw e; - } catch (NoSuchObjectException e) { + } catch (MetaException|NoSuchObjectException e) { ex = e; throw e; } finally { @@ -1038,9 +1027,7 @@ public Database get_database_core(final 
String name) throws NoSuchObjectExceptio Database db = null; try { db = getMS().getDatabase(name); - } catch (MetaException e) { - throw e; - } catch (NoSuchObjectException e) { + } catch (MetaException | NoSuchObjectException e) { throw e; } catch (Exception e) { assert (e instanceof RuntimeException); @@ -1050,8 +1037,7 @@ public Database get_database_core(final String name) throws NoSuchObjectExceptio } @Override - public void alter_database(final String dbName, final Database newDB) - throws NoSuchObjectException, TException, MetaException { + public void alter_database(final String dbName, final Database newDB) throws TException { startFunction("alter_database" + dbName); boolean success = false; Exception ex = null; @@ -1083,8 +1069,8 @@ private void drop_database_core(RawStore ms, IOException, InvalidObjectException, InvalidInputException { boolean success = false; Database db = null; - List tablePaths = new ArrayList(); - List partitionPaths = new ArrayList(); + List tablePaths = new ArrayList<>(); + List partitionPaths = new ArrayList<>(); Map transactionalListenerResponses = Collections.emptyMap(); try { ms.openTransaction(); @@ -1109,7 +1095,7 @@ private void drop_database_core(RawStore ms, if (!wh.isWritable(path)) { throw new MetaException("Database not dropped since " + path + " is not writable by " + - hiveConf.getUser()); + SecurityUtils.getUser()); } Path databasePath = wh.getDnsPath(wh.getDatabasePath(db)); @@ -1120,15 +1106,15 @@ private void drop_database_core(RawStore ms, } // drop tables before dropping db - int tableBatchSize = HiveConf.getIntVar(hiveConf, - ConfVars.METASTORE_BATCH_RETRIEVE_MAX); + int tableBatchSize = MetastoreConf.getIntVar(conf, + ConfVars.BATCH_RETRIEVE_MAX); int startIndex = 0; // retrieve the tables from the metastore in batches to alleviate memory constraints while (startIndex < allTables.size()) { int endIndex = Math.min(startIndex + tableBatchSize, allTables.size()); - List tables = null; + List
tables; try { tables = ms.getTableObjectsByName(name, allTables.subList(startIndex, endIndex)); } catch (UnknownDBException e) { @@ -1146,7 +1132,7 @@ private void drop_database_core(RawStore ms, if (!wh.isWritable(tablePath.getParent())) { throw new MetaException("Database metadata not deleted since table: " + table.getTableName() + " has a parent location " + tablePath.getParent() + - " which is not writable by " + hiveConf.getUser()); + " which is not writable by " + SecurityUtils.getUser()); } if (!isSubdirectory(databasePath, tablePath)) { @@ -1409,7 +1395,7 @@ private void create_table_core(final RawStore ms, final Table tbl, List notNullConstraints) throws AlreadyExistsException, MetaException, InvalidObjectException, NoSuchObjectException { - if (!MetaStoreUtils.validateName(tbl.getTableName(), hiveConf)) { + if (!MetaStoreUtils.validateName(tbl.getTableName(), conf)) { throw new InvalidObjectException(tbl.getTableName() + " is not a valid object name"); } @@ -1479,7 +1465,7 @@ private void create_table_core(final RawStore ms, final Table tbl, madeDir = true; } } - if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVESTATSAUTOGATHER) && + if (MetastoreConf.getBoolVar(conf, ConfVars.STATS_AUTO_GATHER) && !MetaStoreUtils.isView(tbl)) { MetaStoreUtils.updateTableStatsFast(db, tbl, wh, madeDir, envContext); } @@ -1660,7 +1646,7 @@ public void drop_constraint(DropConstraintRequest req) String dbName = req.getDbname(); String tableName = req.getTablename(); String constraintName = req.getConstraintname(); - startFunction("drop_constraint", ": " + constraintName.toString()); + startFunction("drop_constraint", ": " + constraintName); boolean success = false; Exception ex = null; RawStore ms = getMS(); @@ -1682,8 +1668,6 @@ public void drop_constraint(DropConstraintRequest req) ex = e; if (e instanceof MetaException) { throw (MetaException) e; - } else if (e instanceof InvalidObjectException) { - throw (InvalidObjectException) e; } else { throw newMetaException(e); } @@ -1965,7 +1949,7 @@ private boolean drop_table_core(final RawStore ms, final String dbname, final St String target = indexName == null ? 
"Table" : "Index table"; throw new MetaException(target + " metadata not deleted since " + tblPath.getParent() + " is not writable by " + - hiveConf.getUser()); + SecurityUtils.getUser()); } } @@ -2094,13 +2078,13 @@ private void deletePartitionData(List partPaths, boolean ifPurge) { String tableName, Path tablePath, List partitionKeys, boolean checkLocation) throws MetaException, IOException, NoSuchObjectException, InvalidObjectException, InvalidInputException { - int partitionBatchSize = HiveConf.getIntVar(hiveConf, - ConfVars.METASTORE_BATCH_RETRIEVE_MAX); + int partitionBatchSize = MetastoreConf.getIntVar(conf, + ConfVars.BATCH_RETRIEVE_MAX); Path tableDnsPath = null; if (tablePath != null) { tableDnsPath = wh.getDnsPath(tablePath); } - List partPaths = new ArrayList(); + List partPaths = new ArrayList<>(); Table tbl = ms.getTable(dbName, tableName); // call dropPartition on each of the table's partitions to follow the @@ -2110,7 +2094,7 @@ private void deletePartitionData(List partPaths, boolean ifPurge) { if (partsToDelete == null || partsToDelete.isEmpty()) { break; } - List partNames = new ArrayList(); + List partNames = new ArrayList<>(); for (Partition part : partsToDelete) { if (checkLocation && part.getSd() != null && part.getSd().getLocation() != null) { @@ -2122,7 +2106,7 @@ private void deletePartitionData(List partPaths, boolean ifPurge) { throw new MetaException("Table metadata not deleted since the partition " + Warehouse.makePartName(partitionKeys, part.getValues()) + " has parent location " + partPath.getParent() + " which is not writable " + - "by " + hiveConf.getUser()); + "by " + SecurityUtils.getUser()); } partPaths.add(partPath); } @@ -2255,7 +2239,7 @@ private void alterTableStatsForTruncate(final RawStore ms, final String tableName, final Table table, final List partNames) throws Exception { - List locations = new ArrayList(); + List locations = new ArrayList<>(); if (partNames == null) { if (0 != table.getPartitionKeysSize()) { for (Partition partition : ms.getPartitions(dbName, tableName, Integer.MAX_VALUE)) { @@ -2287,15 +2271,15 @@ public void truncate_table(final String dbName, final String tableName, List tables = new ArrayList
<Table>(); + List<Table>
tables = new ArrayList<>(); startMultiTableFunction("get_multi_table", dbName, tableNames); Exception ex = null; - int tableBatchSize = HiveConf.getIntVar(hiveConf, - ConfVars.METASTORE_BATCH_RETRIEVE_MAX); + int tableBatchSize = MetastoreConf.getIntVar(conf, + ConfVars.BATCH_RETRIEVE_MAX); try { if (dbName == null || dbName.isEmpty()) { @@ -2469,11 +2450,11 @@ public GetTablesResult get_table_objects_by_name_req(GetTablesRequest req) throw // to break into multiple batches, remove duplicates first. List distinctTableNames = tableNames; if (distinctTableNames.size() > tableBatchSize) { - List lowercaseTableNames = new ArrayList(); + List lowercaseTableNames = new ArrayList<>(); for (String tableName : tableNames) { - lowercaseTableNames.add(HiveStringUtils.normalizeIdentifier(tableName)); + lowercaseTableNames.add(org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier(tableName)); } - distinctTableNames = new ArrayList(new HashSet(lowercaseTableNames)); + distinctTableNames = new ArrayList<>(new HashSet<>(lowercaseTableNames)); } RawStore ms = getMS(); @@ -2512,7 +2493,7 @@ private void assertClientHasCapability(ClientCapabilities client, ClientCapability value, String what, String call) throws MetaException { if (!doesClientHaveCapability(client, value)) { throw new MetaException("Your client does not appear to support " + what + ". To skip" - + " capability checks, please set " + ConfVars.METASTORE_CAPABILITY_CHECK.varname + + " capability checks, please set " + ConfVars.CAPABILITY_CHECK.toString() + " to false. This setting can be set globally, or on the client for the current" + " metastore session. Note that this may lead to incorrect results, data loss," + " undefined behavior, etc. if your client is actually incompatible. You can also" @@ -2521,7 +2502,7 @@ private void assertClientHasCapability(ClientCapabilities client, } private boolean doesClientHaveCapability(ClientCapabilities client, ClientCapability value) { - if (!HiveConf.getBoolVar(getConf(), ConfVars.METASTORE_CAPABILITY_CHECK)) return true; + if (!MetastoreConf.getBoolVar(getConf(), ConfVars.CAPABILITY_CHECK)) return true; return (client != null && client.isSetValues() && client.getValues().contains(value)); } @@ -2591,7 +2572,7 @@ private Partition append_partition_common(RawStore ms, String dbName, String tab .makePartName(tbl.getPartitionKeys(), part_vals)); part.getSd().setLocation(partLocation.toString()); - Partition old_part = null; + Partition old_part; try { old_part = ms.getPartition(part.getDbName(), part .getTableName(), part.getValues()); @@ -2616,7 +2597,7 @@ private Partition append_partition_common(RawStore ms, String dbName, String tab part.setCreateTime((int) time); part.putToParameters(hive_metastoreConstants.DDL_TIME, Long.toString(time)); - if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVESTATSAUTOGATHER) && + if (MetastoreConf.getBoolVar(conf, ConfVars.STATS_AUTO_GATHER) && !MetaStoreUtils.isView(tbl)) { MetaStoreUtils.updatePartitionStatsFast(part, wh, madeDir, envContext); } @@ -2705,7 +2686,7 @@ public Partition append_partition_with_environment_context(final String dbName, private static class PartValEqWrapper { Partition partition; - public PartValEqWrapper(Partition partition) { + PartValEqWrapper(Partition partition) { this.partition = partition; } @@ -2739,7 +2720,7 @@ public boolean equals(Object obj) { List values; String location; - public PartValEqWrapperLite(Partition partition) { + PartValEqWrapperLite(Partition partition) { this.values = 
partition.isSetValues()? partition.getValues() : null; this.location = partition.getSd().getLocation(); } @@ -2783,14 +2764,14 @@ public boolean equals(Object obj) { private List add_partitions_core(final RawStore ms, String dbName, String tblName, List parts, final boolean ifNotExists) - throws MetaException, InvalidObjectException, AlreadyExistsException, TException { + throws TException { logInfo("add_partitions"); boolean success = false; // Ensures that the list doesn't have dups, and keeps track of directories we have created. final Map addedPartitions = Collections.synchronizedMap(new HashMap()); - final List newParts = new ArrayList(); - final List existingParts = new ArrayList(); + final List newParts = new ArrayList<>(); + final List existingParts = new ArrayList<>(); Table tbl = null; Map transactionalListenerResponses = Collections.emptyMap(); @@ -2926,7 +2907,7 @@ public Object run() throws Exception { @Override public AddPartitionsResult add_partitions_req(AddPartitionsRequest request) - throws InvalidObjectException, AlreadyExistsException, MetaException, TException { + throws TException { AddPartitionsResult result = new AddPartitionsResult(); if (request.getParts().isEmpty()) { return result; @@ -3110,7 +3091,7 @@ public Partition run() throws Exception { } private boolean startAddPartition( - RawStore ms, Partition part, boolean ifNotExists) throws MetaException, TException { + RawStore ms, Partition part, boolean ifNotExists) throws TException { MetaStoreUtils.validatePartitionNameCharacters(part.getValues(), partitionValidationPattern); boolean doesExist = ms.doesPartitionExist( @@ -3174,7 +3155,7 @@ private void initializeAddedPartition( private void initializeAddedPartition( final Table tbl, final PartitionSpecProxy.PartitionIterator part, boolean madeDir) throws MetaException { - if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVESTATSAUTOGATHER) && + if (MetastoreConf.getBoolVar(conf, ConfVars.STATS_AUTO_GATHER) && !MetaStoreUtils.isView(tbl)) { MetaStoreUtils.updatePartitionStatsFast(part, wh, madeDir, false, null); } @@ -3189,10 +3170,10 @@ private void initializeAddedPartition( // Inherit table properties into partition properties. Map tblParams = tbl.getParameters(); - String inheritProps = hiveConf.getVar(ConfVars.METASTORE_PART_INHERIT_TBL_PROPS).trim(); + String inheritProps = MetastoreConf.getVar(conf, ConfVars.PART_INHERIT_TBL_PROPS).trim(); // Default value is empty string in which case no properties will be inherited. 
// * implies all properties needs to be inherited - Set inheritKeys = new HashSet(Arrays.asList(inheritProps.split(","))); + Set inheritKeys = new HashSet<>(Arrays.asList(inheritProps.split(","))); if (inheritKeys.contains("*")) { inheritKeys = tblParams.keySet(); } @@ -3207,7 +3188,7 @@ private void initializeAddedPartition( private Partition add_partition_core(final RawStore ms, final Partition part, final EnvironmentContext envContext) - throws InvalidObjectException, AlreadyExistsException, MetaException, TException { + throws TException { boolean success = false; Table tbl = null; Map transactionalListenerResponses = Collections.emptyMap(); @@ -3302,8 +3283,7 @@ public Partition add_partition_with_environment_context( @Override public Partition exchange_partition(Map partitionSpecs, String sourceDbName, String sourceTableName, String destDbName, - String destTableName) throws MetaException, NoSuchObjectException, - InvalidObjectException, InvalidInputException, TException { + String destTableName) throws TException { exchange_partitions(partitionSpecs, sourceDbName, sourceTableName, destDbName, destTableName); return new Partition(); } @@ -3311,8 +3291,7 @@ public Partition exchange_partition(Map partitionSpecs, @Override public List exchange_partitions(Map partitionSpecs, String sourceDbName, String sourceTableName, String destDbName, - String destTableName) throws MetaException, NoSuchObjectException, - InvalidObjectException, InvalidInputException, TException { + String destTableName) throws TException { boolean success = false; boolean pathCreated = false; RawStore ms = getMS(); @@ -3321,8 +3300,8 @@ public Partition exchange_partition(Map partitionSpecs, Table sourceTable = ms.getTable(sourceDbName, sourceTableName); List partVals = MetaStoreUtils.getPvals(sourceTable.getPartitionKeys(), partitionSpecs); - List partValsPresent = new ArrayList (); - List partitionKeysPresent = new ArrayList (); + List partValsPresent = new ArrayList<> (); + List partitionKeysPresent = new ArrayList<> (); int i = 0; for (FieldSchema fs: sourceTable.getPartitionKeys()) { String partVal = partVals.get(i); @@ -3346,7 +3325,7 @@ public Partition exchange_partition(Map partitionSpecs, Warehouse.makePartName(partitionKeysPresent, partValsPresent)); Path destPath = new Path(destinationTable.getSd().getLocation(), Warehouse.makePartName(partitionKeysPresent, partValsPresent)); - List destPartitions = new ArrayList(); + List destPartitions = new ArrayList<>(); Map transactionalListenerResponsesForAddPartition = Collections.emptyMap(); List> transactionalListenerResponsesForDropPartition = @@ -3371,7 +3350,7 @@ public Partition exchange_partition(Map partitionSpecs, throw new MetaException("Unable to create path " + destParentPath); } } - /** + /* * TODO: Use the hard link feature of hdfs * once https://issues.apache.org/jira/browse/HDFS-3370 is done */ @@ -3544,31 +3523,31 @@ private void deleteParentRecursive(Path parent, int depth, boolean mustPurge) th @Override public boolean drop_partition(final String db_name, final String tbl_name, final List part_vals, final boolean deleteData) - throws NoSuchObjectException, MetaException, TException { + throws TException { return drop_partition_with_environment_context(db_name, tbl_name, part_vals, deleteData, null); } private static class PathAndPartValSize { - public PathAndPartValSize(Path path, int partValSize) { + PathAndPartValSize(Path path, int partValSize) { this.path = path; this.partValSize = partValSize; } public Path path; - public int partValSize; + 
int partValSize; } @Override public DropPartitionsResult drop_partitions_req( - DropPartitionsRequest request) throws MetaException, NoSuchObjectException, TException { + DropPartitionsRequest request) throws TException { RawStore ms = getMS(); String dbName = request.getDbName(), tblName = request.getTblName(); boolean ifExists = request.isSetIfExists() && request.isIfExists(); boolean deleteData = request.isSetDeleteData() && request.isDeleteData(); boolean ignoreProtection = request.isSetIgnoreProtection() && request.isIgnoreProtection(); boolean needResult = !request.isSetNeedResult() || request.isNeedResult(); - List dirsToDelete = new ArrayList(); - List archToDelete = new ArrayList(); + List dirsToDelete = new ArrayList<>(); + List archToDelete = new ArrayList<>(); EnvironmentContext envContext = request.isSetEnvironmentContext() ? request.getEnvironmentContext() : null; @@ -3590,10 +3569,10 @@ public DropPartitionsResult drop_partitions_req( List partNames = null; if (spec.isSetExprs()) { // Dropping by expressions. - parts = new ArrayList(spec.getExprs().size()); + parts = new ArrayList<>(spec.getExprs().size()); for (DropPartitionsExpr expr : spec.getExprs()) { ++minCount; // At least one partition per expression, if not ifExists - List result = new ArrayList(); + List result = new ArrayList<>(); boolean hasUnknown = ms.getPartitionsByExpr( dbName, tblName, expr.getExpr(), null, (short)-1, result); if (hasUnknown) { @@ -3627,8 +3606,8 @@ public DropPartitionsResult drop_partitions_req( List colNames = null; if (partNames == null) { - partNames = new ArrayList(parts.size()); - colNames = new ArrayList(tbl.getPartitionKeys().size()); + partNames = new ArrayList<>(parts.size()); + colNames = new ArrayList<>(tbl.getPartitionKeys().size()); for (FieldSchema col : tbl.getPartitionKeys()) { colNames.add(col.getName()); } @@ -3721,7 +3700,7 @@ private void verifyIsWritablePath(Path dir) throws MetaException { try { if (!wh.isWritable(dir.getParent())) { throw new MetaException("Table partition not deleted since " + dir.getParent() - + " is not writable by " + hiveConf.getUser()); + + " is not writable by " + SecurityUtils.getUser()); } } catch (IOException ex) { LOG.warn("Error from isWritable", ex); @@ -3734,7 +3713,7 @@ private void verifyIsWritablePath(Path dir) throws MetaException { public boolean drop_partition_with_environment_context(final String db_name, final String tbl_name, final List part_vals, final boolean deleteData, final EnvironmentContext envContext) - throws NoSuchObjectException, MetaException, TException { + throws TException { startPartitionFunction("drop_partition", db_name, tbl_name, part_vals); LOG.info("Partition values:" + part_vals); @@ -3800,7 +3779,7 @@ private void fireReadTablePreEvent(String dbName, String tblName) throws MetaExc public Partition get_partition_with_auth(final String db_name, final String tbl_name, final List part_vals, final String user_name, final List group_names) - throws MetaException, NoSuchObjectException, TException { + throws TException { startPartitionFunction("get_partition_with_auth", db_name, tbl_name, part_vals); fireReadTablePreEvent(db_name, tbl_name); @@ -3844,8 +3823,7 @@ public Partition get_partition_with_auth(final String db_name, @Override public List get_partitions_with_auth(final String dbName, final String tblName, final short maxParts, final String userName, - final List groupNames) throws NoSuchObjectException, - MetaException, TException { + final List groupNames) throws TException { 
startTableFunction("get_partitions_with_auth", dbName, tblName); List ret = null; @@ -3880,16 +3858,16 @@ private void checkLimitNumberOfPartitionsByExpr(String dbName, String tblName, b } private boolean isPartitionLimitEnabled() { - int partitionLimit = HiveConf.getIntVar(hiveConf, HiveConf.ConfVars.METASTORE_LIMIT_PARTITION_REQUEST); + int partitionLimit = MetastoreConf.getIntVar(conf, ConfVars.LIMIT_PARTITION_REQUEST); return partitionLimit > -1; } private void checkLimitNumberOfPartitions(String tblName, int numPartitions, int maxToFetch) throws MetaException { if (isPartitionLimitEnabled()) { - int partitionLimit = HiveConf.getIntVar(hiveConf, HiveConf.ConfVars.METASTORE_LIMIT_PARTITION_REQUEST); + int partitionLimit = MetastoreConf.getIntVar(conf, ConfVars.LIMIT_PARTITION_REQUEST); int partitionRequest = (maxToFetch < 0) ? numPartitions : maxToFetch; if (partitionRequest > partitionLimit) { - String configName = ConfVars.METASTORE_LIMIT_PARTITION_REQUEST.varname; + String configName = ConfVars.LIMIT_PARTITION_REQUEST.toString(); throw new MetaException(String.format(PARTITION_NUMBER_EXCEED_LIMIT_MSG, partitionRequest, tblName, partitionLimit, configName)); } @@ -3979,13 +3957,13 @@ public Boolean apply(Partition input) { } }); - List partSpecs = new ArrayList(); + List partSpecs = new ArrayList<>(); // Classify partitions within the table directory into groups, // based on shared SD properties. Map> sdToPartList - = new HashMap>(); + = new HashMap<>(); if (partitionsWithinTableDirectory.containsKey(true)) { @@ -4000,7 +3978,7 @@ public Boolean apply(Partition input) { StorageDescriptorKey sdKey = new StorageDescriptorKey(partition.getSd()); if (!sdToPartList.containsKey(sdKey)) { - sdToPartList.put(sdKey, new ArrayList()); + sdToPartList.put(sdKey, new ArrayList<>()); } sdToPartList.get(sdKey).add(partitionWithoutSD); @@ -4087,7 +4065,7 @@ public PartitionValuesResponse get_partition_values(PartitionValuesRequest reque @Override public void alter_partition(final String db_name, final String tbl_name, final Partition new_part) - throws InvalidOperationException, MetaException, TException { + throws TException { rename_partition(db_name, tbl_name, null, new_part); } @@ -4095,7 +4073,7 @@ public void alter_partition(final String db_name, final String tbl_name, public void alter_partition_with_environment_context(final String dbName, final String tableName, final Partition newPartition, final EnvironmentContext envContext) - throws InvalidOperationException, MetaException, TException { + throws TException { rename_partition(dbName, tableName, null, newPartition, envContext); } @@ -4103,7 +4081,7 @@ public void alter_partition_with_environment_context(final String dbName, @Override public void rename_partition(final String db_name, final String tbl_name, final List part_vals, final Partition new_part) - throws InvalidOperationException, MetaException, TException { + throws TException { // Call rename_partition without an environment context. 
rename_partition(db_name, tbl_name, part_vals, new_part, null); } @@ -4111,7 +4089,7 @@ public void rename_partition(final String db_name, final String tbl_name, private void rename_partition(final String db_name, final String tbl_name, final List part_vals, final Partition new_part, final EnvironmentContext envContext) - throws InvalidOperationException, MetaException, TException { + throws TException { startTableFunction("alter_partition", db_name, tbl_name); if (LOG.isInfoEnabled()) { @@ -4166,8 +4144,6 @@ private void rename_partition(final String db_name, final String tbl_name, throw (MetaException) e; } else if (e instanceof InvalidOperationException) { throw (InvalidOperationException) e; - } else if (e instanceof TException) { - throw (TException) e; } else { throw newMetaException(e); } @@ -4179,14 +4155,14 @@ private void rename_partition(final String db_name, final String tbl_name, @Override public void alter_partitions(final String db_name, final String tbl_name, final List new_parts) - throws InvalidOperationException, MetaException, TException { + throws TException { alter_partitions_with_environment_context(db_name, tbl_name, new_parts, null); } @Override public void alter_partitions_with_environment_context(final String db_name, final String tbl_name, final List new_parts, EnvironmentContext environmentContext) - throws InvalidOperationException, MetaException, TException { + throws TException { startTableFunction("alter_partitions", db_name, tbl_name); @@ -4209,7 +4185,7 @@ public void alter_partitions_with_environment_context(final String db_name, fina // Only fetch the table if we have a listener that needs it. Table table = null; for (Partition tmpPart : new_parts) { - Partition oldTmpPart = null; + Partition oldTmpPart; if (olditr.hasNext()) { oldTmpPart = olditr.next(); } @@ -4239,8 +4215,6 @@ public void alter_partitions_with_environment_context(final String db_name, fina throw (MetaException) e; } else if (e instanceof InvalidOperationException) { throw (InvalidOperationException) e; - } else if (e instanceof TException) { - throw (TException) e; } else { throw newMetaException(e); } @@ -4493,7 +4467,6 @@ private void alter_table_core(final String dbname, final String name, final Tabl List ret = null; Exception ex = null; ClassLoader orgHiveLoader = null; - Configuration curConf = hiveConf; try { try { tbl = get_table_core(db, base_table_name); @@ -4501,34 +4474,17 @@ private void alter_table_core(final String dbname, final String name, final Tabl throw new UnknownTableException(e.getMessage()); } if (null == tbl.getSd().getSerdeInfo().getSerializationLib() || - hiveConf.getStringCollection(ConfVars.SERDESUSINGMETASTOREFORSCHEMA.varname).contains - (tbl.getSd().getSerdeInfo().getSerializationLib())) { + MetastoreConf.getStringCollection(conf, + ConfVars.SERDES_USING_METASTORE_FOR_SCHEMA).contains( + tbl.getSd().getSerdeInfo().getSerializationLib())) { ret = tbl.getSd().getCols(); } else { - try { - if (envContext != null) { - String addedJars = envContext.getProperties().get("hive.added.jars.path"); - if (org.apache.commons.lang.StringUtils.isNotBlank(addedJars)) { - //for thread safe - curConf = getConf(); - orgHiveLoader = curConf.getClassLoader(); - ClassLoader loader = MetaStoreUtils.addToClassPath(orgHiveLoader, org.apache.commons.lang.StringUtils.split(addedJars, ",")); - curConf.setClassLoader(loader); - } - } - - Deserializer s = MetaStoreUtils.getDeserializer(curConf, tbl, false); - ret = MetaStoreUtils.getFieldsFromDeserializer(tableName, s); - } catch 
(SerDeException e) { - StringUtils.stringifyException(e); - throw new MetaException(e.getMessage()); - } + StorageSchemaReader schemaReader = getStorageSchemaReader(); + ret = schemaReader.readSchema(tbl, envContext, getConf()); } } catch (Exception e) { ex = e; - if (e instanceof UnknownDBException) { - throw (UnknownDBException) e; - } else if (e instanceof UnknownTableException) { + if (e instanceof UnknownTableException) { throw (UnknownTableException) e; } else if (e instanceof MetaException) { throw (MetaException) e; @@ -4537,7 +4493,7 @@ private void alter_table_core(final String dbname, final String name, final Tabl } } finally { if (orgHiveLoader != null) { - curConf.setClassLoader(orgHiveLoader); + conf.setClassLoader(orgHiveLoader); } endFunction("get_fields_with_environment_context", ret != null, ex, tableName); } @@ -4545,6 +4501,22 @@ private void alter_table_core(final String dbname, final String name, final Tabl return ret; } + private StorageSchemaReader getStorageSchemaReader() throws MetaException { + if (storageSchemaReader == null) { + String className = + MetastoreConf.getVar(conf, MetastoreConf.ConfVars.STORAGE_SCHEMA_READER_IMPL); + Class readerClass = + JavaUtils.getClass(className, StorageSchemaReader.class); + try { + storageSchemaReader = readerClass.newInstance(); + } catch (InstantiationException|IllegalAccessException e) { + LOG.error("Unable to instantiate class " + className, e); + throw new MetaException(e.getMessage()); + } + } + return storageSchemaReader; + } + /** * Return the schema of the table. This function includes partition columns * in addition to the regular columns. @@ -4641,7 +4613,7 @@ public String getCpuProfile(int profileDurationInSec) throws TException { */ @Override public String get_config_value(String name, String defaultValue) - throws TException, ConfigValSecurityException { + throws TException { startFunction("get_config_value", ": name=" + name + " defaultValue=" + defaultValue); boolean success = false; @@ -4653,14 +4625,15 @@ public String get_config_value(String name, String defaultValue) } // Allow only keys that start with hive.*, hdfs.*, mapred.* for security // i.e. 
don't allow access to db password - if (!Pattern.matches("(hive|hdfs|mapred).*", name)) { + if (!Pattern.matches("(hive|hdfs|mapred|metastore).*", name)) { throw new ConfigValSecurityException("For security reasons, the " + "config key " + name + " cannot be accessed"); } String toReturn = defaultValue; try { - toReturn = hiveConf.get(name, defaultValue); + toReturn = MetastoreConf.get(conf, name); + if (toReturn == null) toReturn = defaultValue; } catch (RuntimeException e) { LOG.error(threadLocalId.get().toString() + ": " + "RuntimeException thrown in get_config_value - msg: " @@ -4672,12 +4645,8 @@ public String get_config_value(String name, String defaultValue) ex = e; if (e instanceof ConfigValSecurityException) { throw (ConfigValSecurityException) e; - } else if (e instanceof TException) { - throw (TException) e; } else { - TException te = new TException(e.toString()); - te.initCause(e); - throw te; + throw new TException(e); } } finally { endFunction("get_config_value", success, ex); @@ -4690,7 +4659,7 @@ public String get_config_value(String name, String defaultValue) // Unescape the partition name LinkedHashMap hm = Warehouse.makeSpecFromName(partName); - List partVals = new ArrayList(); + List partVals = new ArrayList<>(); for (FieldSchema field : t.getPartitionKeys()) { String key = field.getName(); String val = hm.get(key); @@ -4714,9 +4683,9 @@ public String get_config_value(String name, String defaultValue) private Partition get_partition_by_name_core(final RawStore ms, final String db_name, final String tbl_name, final String part_name) - throws MetaException, NoSuchObjectException, TException { + throws TException { fireReadTablePreEvent(db_name, tbl_name); - List partVals = null; + List partVals; try { partVals = getPartValsFromName(ms, db_name, tbl_name, part_name); } catch (InvalidObjectException e) { @@ -4733,7 +4702,7 @@ private Partition get_partition_by_name_core(final RawStore ms, final String db_ @Override public Partition get_partition_by_name(final String db_name, final String tbl_name, - final String part_name) throws MetaException, NoSuchObjectException, TException { + final String part_name) throws TException { startFunction("get_partition_by_name", ": db=" + db_name + " tbl=" + tbl_name + " part=" + part_name); @@ -4752,15 +4721,14 @@ public Partition get_partition_by_name(final String db_name, final String tbl_na @Override public Partition append_partition_by_name(final String db_name, final String tbl_name, - final String part_name) throws InvalidObjectException, - AlreadyExistsException, MetaException, TException { + final String part_name) throws TException { return append_partition_by_name_with_environment_context(db_name, tbl_name, part_name, null); } @Override public Partition append_partition_by_name_with_environment_context(final String db_name, final String tbl_name, final String part_name, final EnvironmentContext env_context) - throws InvalidObjectException, AlreadyExistsException, MetaException, TException { + throws TException { startFunction("append_partition_by_name", ": db=" + db_name + " tbl=" + tbl_name + " part=" + part_name); @@ -4778,8 +4746,6 @@ public Partition append_partition_by_name_with_environment_context(final String throw (AlreadyExistsException) e; } else if (e instanceof MetaException) { throw (MetaException) e; - } else if (e instanceof TException) { - throw (TException) e; } else { throw newMetaException(e); } @@ -4791,10 +4757,9 @@ public Partition append_partition_by_name_with_environment_context(final String private 
boolean drop_partition_by_name_core(final RawStore ms, final String db_name, final String tbl_name, final String part_name, final boolean deleteData, - final EnvironmentContext envContext) throws NoSuchObjectException, MetaException, - TException, IOException, InvalidObjectException, InvalidInputException { + final EnvironmentContext envContext) throws TException, IOException { - List partVals = null; + List partVals; try { partVals = getPartValsFromName(ms, db_name, tbl_name, part_name); } catch (InvalidObjectException e) { @@ -4806,8 +4771,7 @@ private boolean drop_partition_by_name_core(final RawStore ms, final String db_n @Override public boolean drop_partition_by_name(final String db_name, final String tbl_name, - final String part_name, final boolean deleteData) throws NoSuchObjectException, - MetaException, TException { + final String part_name, final boolean deleteData) throws TException { return drop_partition_by_name_with_environment_context(db_name, tbl_name, part_name, deleteData, null); } @@ -4815,8 +4779,7 @@ public boolean drop_partition_by_name(final String db_name, final String tbl_nam @Override public boolean drop_partition_by_name_with_environment_context(final String db_name, final String tbl_name, final String part_name, final boolean deleteData, - final EnvironmentContext envContext) throws NoSuchObjectException, - MetaException, TException { + final EnvironmentContext envContext) throws TException { startFunction("drop_partition_by_name", ": db=" + db_name + " tbl=" + tbl_name + " part=" + part_name); @@ -4841,7 +4804,7 @@ public boolean drop_partition_by_name_with_environment_context(final String db_n @Override public List get_partitions_ps(final String db_name, final String tbl_name, final List part_vals, - final short max_parts) throws MetaException, TException, NoSuchObjectException { + final short max_parts) throws TException { startPartitionFunction("get_partitions_ps", db_name, tbl_name, part_vals); List ret = null; @@ -4863,7 +4826,7 @@ public boolean drop_partition_by_name_with_environment_context(final String db_n public List get_partitions_ps_with_auth(final String db_name, final String tbl_name, final List part_vals, final short max_parts, final String userName, - final List groupNames) throws MetaException, TException, NoSuchObjectException { + final List groupNames) throws TException { startPartitionFunction("get_partitions_ps_with_auth", db_name, tbl_name, part_vals); fireReadTablePreEvent(db_name, tbl_name); @@ -4887,7 +4850,7 @@ public boolean drop_partition_by_name_with_environment_context(final String db_n @Override public List get_partition_names_ps(final String db_name, final String tbl_name, final List part_vals, final short max_parts) - throws MetaException, TException, NoSuchObjectException { + throws TException { startPartitionFunction("get_partitions_names_ps", db_name, tbl_name, part_vals); fireReadTablePreEvent(db_name, tbl_name); List ret = null; @@ -4904,29 +4867,26 @@ public boolean drop_partition_by_name_with_environment_context(final String db_n } @Override - public List partition_name_to_vals(String part_name) - throws MetaException, TException { + public List partition_name_to_vals(String part_name) throws TException { if (part_name.length() == 0) { - return new ArrayList(); + return new ArrayList<>(); } LinkedHashMap map = Warehouse.makeSpecFromName(part_name); - List part_vals = new ArrayList(); + List part_vals = new ArrayList<>(); part_vals.addAll(map.values()); return part_vals; } @Override - public Map 
partition_name_to_spec(String part_name) throws MetaException, - TException { + public Map partition_name_to_spec(String part_name) throws TException { if (part_name.length() == 0) { - return new HashMap(); + return new HashMap<>(); } return Warehouse.makeSpecFromName(part_name); } @Override - public Index add_index(final Index newIndex, final Table indexTable) - throws InvalidObjectException, AlreadyExistsException, MetaException, TException { + public Index add_index(final Index newIndex, final Table indexTable) throws TException { String tableName = indexTable != null ? indexTable.getTableName() : ""; startFunction("add_index", ": " + newIndex.toString() + " " + tableName); Index ret = null; @@ -4941,8 +4901,6 @@ public Index add_index(final Index newIndex, final Table indexTable) throw (AlreadyExistsException) e; } else if (e instanceof MetaException) { throw (MetaException) e; - } else if (e instanceof TException) { - throw (TException) e; } else { throw newMetaException(e); } @@ -5028,8 +4986,7 @@ private Index add_index_core(final RawStore ms, final Index index, final Table i @Override public boolean drop_index_by_name(final String dbName, final String tblName, - final String indexName, final boolean deleteData) throws NoSuchObjectException, - MetaException, TException { + final String indexName, final boolean deleteData) throws TException { startFunction("drop_index_by_name", ": db=" + dbName + " tbl=" + tblName + " index=" + indexName); @@ -5053,8 +5010,7 @@ public boolean drop_index_by_name(final String dbName, final String tblName, private boolean drop_index_by_name_core(final RawStore ms, final String dbName, final String tblName, - final String indexName, final boolean deleteData) throws NoSuchObjectException, - MetaException, TException, IOException, InvalidObjectException, InvalidInputException { + final String indexName, final boolean deleteData) throws TException, IOException { boolean success = false; Index index = null; Path tblPath = null; @@ -5079,7 +5035,7 @@ private boolean drop_index_by_name_core(final RawStore ms, if (!wh.isWritable(tblPath.getParent())) { throw new MetaException("Index table metadata not deleted since " + tblPath.getParent() + " is not writable by " + - hiveConf.getUser()); + SecurityUtils.getUser()); } } @@ -5122,8 +5078,7 @@ private boolean drop_index_by_name_core(final RawStore ms, @Override public Index get_index_by_name(final String dbName, final String tblName, - final String indexName) throws MetaException, NoSuchObjectException, - TException { + final String indexName) throws TException { startFunction("get_index_by_name", ": db=" + dbName + " tbl=" + tblName + " index=" + indexName); @@ -5142,8 +5097,7 @@ public Index get_index_by_name(final String dbName, final String tblName, } private Index get_index_by_name_core(final RawStore ms, final String db_name, - final String tbl_name, final String index_name) - throws MetaException, NoSuchObjectException, TException { + final String tbl_name, final String index_name) throws TException { Index index = ms.getIndex(db_name, tbl_name, index_name); if (index == null) { @@ -5155,7 +5109,7 @@ private Index get_index_by_name_core(final RawStore ms, final String db_name, @Override public List get_index_names(final String dbName, final String tblName, - final short maxIndexes) throws MetaException, TException { + final short maxIndexes) throws TException { startTableFunction("get_index_names", dbName, tblName); List ret = null; @@ -5166,8 +5120,6 @@ private Index get_index_by_name_core(final RawStore 
ms, final String db_name, ex = e; if (e instanceof MetaException) { throw (MetaException) e; - } else if (e instanceof TException) { - throw (TException) e; } else { throw newMetaException(e); } @@ -5179,8 +5131,7 @@ private Index get_index_by_name_core(final RawStore ms, final String db_name, @Override public List get_indexes(final String dbName, final String tblName, - final short maxIndexes) throws NoSuchObjectException, MetaException, - TException { + final short maxIndexes) throws TException { startTableFunction("get_indexes", dbName, tblName); List ret = null; @@ -5217,9 +5168,7 @@ private String lowerCaseConvertPartName(String partName) throws MetaException { @Override public ColumnStatistics get_table_column_statistics(String dbName, String tableName, - String colName) throws NoSuchObjectException, MetaException, TException, - InvalidInputException, InvalidObjectException - { + String colName) throws TException { dbName = dbName.toLowerCase(); tableName = tableName.toLowerCase(); colName = colName.toLowerCase(); @@ -5239,20 +5188,19 @@ public ColumnStatistics get_table_column_statistics(String dbName, String tableN } @Override - public TableStatsResult get_table_statistics_req(TableStatsRequest request) - throws MetaException, NoSuchObjectException, TException { + public TableStatsResult get_table_statistics_req(TableStatsRequest request) throws TException { String dbName = request.getDbName().toLowerCase(); String tblName = request.getTblName().toLowerCase(); startFunction("get_table_statistics_req", ": db=" + dbName + " table=" + tblName); TableStatsResult result = null; - List lowerCaseColNames = new ArrayList(request.getColNames().size()); + List lowerCaseColNames = new ArrayList<>(request.getColNames().size()); for (String colName : request.getColNames()) { lowerCaseColNames.add(colName.toLowerCase()); } try { ColumnStatistics cs = getMS().getTableColumnStatistics(dbName, tblName, lowerCaseColNames); result = new TableStatsResult((cs == null || cs.getStatsObj() == null) - ? Lists.newArrayList() : cs.getStatsObj()); + ? 
Lists.newArrayList() : cs.getStatsObj()); } finally { endFunction("get_table_statistics_req", result == null, null, tblName); } @@ -5261,8 +5209,7 @@ public TableStatsResult get_table_statistics_req(TableStatsRequest request) @Override public ColumnStatistics get_partition_column_statistics(String dbName, String tableName, - String partName, String colName) throws NoSuchObjectException, MetaException, - InvalidInputException, TException, InvalidObjectException { + String partName, String colName) throws TException { dbName = dbName.toLowerCase(); tableName = tableName.toLowerCase(); colName = colName.toLowerCase(); @@ -5288,25 +5235,24 @@ public ColumnStatistics get_partition_column_statistics(String dbName, String ta @Override public PartitionsStatsResult get_partitions_statistics_req(PartitionsStatsRequest request) - throws MetaException, NoSuchObjectException, TException { + throws TException { String dbName = request.getDbName().toLowerCase(); String tblName = request.getTblName().toLowerCase(); startFunction("get_partitions_statistics_req", ": db=" + dbName + " table=" + tblName); PartitionsStatsResult result = null; - List lowerCaseColNames = new ArrayList(request.getColNames().size()); + List lowerCaseColNames = new ArrayList<>(request.getColNames().size()); for (String colName : request.getColNames()) { lowerCaseColNames.add(colName.toLowerCase()); } - List lowerCasePartNames = new ArrayList(request.getPartNames().size()); + List lowerCasePartNames = new ArrayList<>(request.getPartNames().size()); for (String partName : request.getPartNames()) { lowerCasePartNames.add(lowerCaseConvertPartName(partName)); } try { List stats = getMS().getPartitionColumnStatistics( dbName, tblName, lowerCasePartNames, lowerCaseColNames); - Map> map = - new HashMap>(); + Map> map = new HashMap<>(); for (ColumnStatistics stat : stats) { map.put(stat.getStatsDesc().getPartName(), stat.getStatsObj()); } @@ -5318,13 +5264,10 @@ public PartitionsStatsResult get_partitions_statistics_req(PartitionsStatsReques } @Override - public boolean update_table_column_statistics(ColumnStatistics colStats) - throws NoSuchObjectException,InvalidObjectException,MetaException,TException, - InvalidInputException - { - String dbName = null; - String tableName = null; - String colName = null; + public boolean update_table_column_statistics(ColumnStatistics colStats) throws TException { + String dbName; + String tableName; + String colName; ColumnStatisticsDesc statsDesc = colStats.getStatsDesc(); dbName = statsDesc.getDbName().toLowerCase(); tableName = statsDesc.getTableName().toLowerCase(); @@ -5359,10 +5302,10 @@ public boolean update_table_column_statistics(ColumnStatistics colStats) private boolean updatePartitonColStats(Table tbl, ColumnStatistics colStats) throws MetaException, InvalidObjectException, NoSuchObjectException, InvalidInputException { - String dbName = null; - String tableName = null; - String partName = null; - String colName = null; + String dbName; + String tableName; + String partName; + String colName; ColumnStatisticsDesc statsDesc = colStats.getStatsDesc(); dbName = statsDesc.getDbName().toLowerCase(); @@ -5405,17 +5348,13 @@ private boolean updatePartitonColStats(Table tbl, ColumnStatistics colStats) } @Override - public boolean update_partition_column_statistics(ColumnStatistics colStats) - throws NoSuchObjectException,InvalidObjectException,MetaException,TException, - InvalidInputException { + public boolean update_partition_column_statistics(ColumnStatistics colStats) throws TException { return 
updatePartitonColStats(null, colStats); } @Override public boolean delete_partition_column_statistics(String dbName, String tableName, - String partName, String colName) throws NoSuchObjectException, MetaException, - InvalidObjectException, TException, InvalidInputException - { + String partName, String colName) throws TException { dbName = dbName.toLowerCase(); tableName = tableName.toLowerCase(); if (colName != null) { @@ -5439,8 +5378,7 @@ public boolean delete_partition_column_statistics(String dbName, String tableNam @Override public boolean delete_table_column_statistics(String dbName, String tableName, String colName) - throws NoSuchObjectException, MetaException, InvalidObjectException, TException, - InvalidInputException { + throws TException { dbName = dbName.toLowerCase(); tableName = tableName.toLowerCase(); @@ -5460,9 +5398,9 @@ public boolean delete_table_column_statistics(String dbName, String tableName, S } @Override - public List get_partitions_by_filter(final String dbName, - final String tblName, final String filter, final short maxParts) - throws MetaException, NoSuchObjectException, TException { + public List get_partitions_by_filter(final String dbName, final String tblName, + final String filter, final short maxParts) + throws TException { startTableFunction("get_partitions_by_filter", dbName, tblName); fireReadTablePreEvent(dbName, tblName); List ret = null; @@ -5480,9 +5418,9 @@ public boolean delete_table_column_statistics(String dbName, String tableName, S } @Override - public List get_part_specs_by_filter(final String dbName, - final String tblName, final String filter, final int maxParts) - throws MetaException, NoSuchObjectException, TException { + public List get_part_specs_by_filter(final String dbName, final String tblName, + final String filter, final int maxParts) + throws TException { startTableFunction("get_partitions_by_filter_pspec", dbName, tblName); @@ -5520,7 +5458,7 @@ public PartitionsByExprResult get_partitions_by_expr( Exception ex = null; try { checkLimitNumberOfPartitionsByExpr(dbName, tblName, req.getExpr(), UNLIMITED_MAX_PARTITIONS); - List partitions = new LinkedList(); + List partitions = new LinkedList<>(); boolean hasUnknownPartitions = getMS().getPartitionsByExpr(dbName, tblName, req.getExpr(), req.getDefaultPartitionName(), req.getMaxParts(), partitions); ret = new PartitionsByExprResult(partitions, hasUnknownPartitions); @@ -5533,8 +5471,7 @@ public PartitionsByExprResult get_partitions_by_expr( return ret; } - private void rethrowException(Exception e) - throws MetaException, NoSuchObjectException, TException { + private void rethrowException(Exception e) throws TException { // TODO: Both of these are TException, why do we need these separate clauses? 
if (e instanceof MetaException) { throw (MetaException) e; @@ -5565,7 +5502,7 @@ public int get_num_partitions_by_filter(final String dbName, return ret; } - public int get_num_partitions_by_expr(final String dbName, + int get_num_partitions_by_expr(final String dbName, final String tblName, final byte[] expr) throws TException { startTableFunction("get_num_partitions_by_expr", dbName, tblName); @@ -5584,9 +5521,8 @@ public int get_num_partitions_by_expr(final String dbName, } @Override - public List get_partitions_by_names(final String dbName, - final String tblName, final List partNames) - throws MetaException, NoSuchObjectException, TException { + public List get_partitions_by_names(final String dbName, final String tblName, + final List partNames) throws TException { startTableFunction("get_partitions_by_names", dbName, tblName); fireReadTablePreEvent(dbName, tblName); @@ -5604,9 +5540,8 @@ public int get_num_partitions_by_expr(final String dbName, } @Override - public PrincipalPrivilegeSet get_privilege_set(HiveObjectRef hiveObject, - String userName, List groupNames) throws MetaException, - TException { + public PrincipalPrivilegeSet get_privilege_set(HiveObjectRef hiveObject, String userName, + List groupNames) throws TException { firePreEvent(new PreAuthorizationCallEvent(this)); if (hiveObject.getObjectType() == HiveObjectType.COLUMN) { String partName = getPartName(hiveObject); @@ -5647,11 +5582,10 @@ private String getPartName(HiveObjectRef hiveObject) throws MetaException { private PrincipalPrivilegeSet get_column_privilege_set(final String dbName, final String tableName, final String partName, final String columnName, - final String userName, final List groupNames) throws MetaException, - TException { + final String userName, final List groupNames) throws TException { incrementCounter("get_column_privilege_set"); - PrincipalPrivilegeSet ret = null; + PrincipalPrivilegeSet ret; try { ret = getMS().getColumnPrivilegeSet( dbName, tableName, partName, columnName, userName, groupNames); @@ -5664,11 +5598,10 @@ private PrincipalPrivilegeSet get_column_privilege_set(final String dbName, } private PrincipalPrivilegeSet get_db_privilege_set(final String dbName, - final String userName, final List groupNames) throws MetaException, - TException { + final String userName, final List groupNames) throws TException { incrementCounter("get_db_privilege_set"); - PrincipalPrivilegeSet ret = null; + PrincipalPrivilegeSet ret; try { ret = getMS().getDBPrivilegeSet(dbName, userName, groupNames); } catch (MetaException e) { @@ -5682,10 +5615,10 @@ private PrincipalPrivilegeSet get_db_privilege_set(final String dbName, private PrincipalPrivilegeSet get_partition_privilege_set( final String dbName, final String tableName, final String partName, final String userName, final List groupNames) - throws MetaException, TException { + throws TException { incrementCounter("get_partition_privilege_set"); - PrincipalPrivilegeSet ret = null; + PrincipalPrivilegeSet ret; try { ret = getMS().getPartitionPrivilegeSet(dbName, tableName, partName, userName, groupNames); @@ -5699,10 +5632,10 @@ private PrincipalPrivilegeSet get_partition_privilege_set( private PrincipalPrivilegeSet get_table_privilege_set(final String dbName, final String tableName, final String userName, - final List groupNames) throws MetaException, TException { + final List groupNames) throws TException { incrementCounter("get_table_privilege_set"); - PrincipalPrivilegeSet ret = null; + PrincipalPrivilegeSet ret; try { ret = 
getMS().getTablePrivilegeSet(dbName, tableName, userName, groupNames); @@ -5718,14 +5651,14 @@ private PrincipalPrivilegeSet get_table_privilege_set(final String dbName, public boolean grant_role(final String roleName, final String principalName, final PrincipalType principalType, final String grantor, final PrincipalType grantorType, final boolean grantOption) - throws MetaException, TException { + throws TException { incrementCounter("add_role_member"); firePreEvent(new PreAuthorizationCallEvent(this)); if (PUBLIC.equals(roleName)) { throw new MetaException("No user can be added to " + PUBLIC +". Since all users implictly" + " belong to " + PUBLIC + " role."); } - Boolean ret = null; + Boolean ret; try { RawStore ms = getMS(); Role role = ms.getRole(roleName); @@ -5771,21 +5704,20 @@ private boolean isNewRoleAParent(String newRole, String curRole) throws MetaExce @Override public List list_roles(final String principalName, - final PrincipalType principalType) throws MetaException, TException { + final PrincipalType principalType) throws TException { incrementCounter("list_roles"); firePreEvent(new PreAuthorizationCallEvent(this)); return getMS().listRoles(principalName, principalType); } @Override - public boolean create_role(final Role role) - throws MetaException, TException { + public boolean create_role(final Role role) throws TException { incrementCounter("create_role"); firePreEvent(new PreAuthorizationCallEvent(this)); if (PUBLIC.equals(role.getRoleName())) { throw new MetaException(PUBLIC + " role implictly exists. It can't be created."); } - Boolean ret = null; + Boolean ret; try { ret = getMS().addRole(role.getRoleName(), role.getOwnerName()); } catch (MetaException e) { @@ -5797,14 +5729,13 @@ public boolean create_role(final Role role) } @Override - public boolean drop_role(final String roleName) - throws MetaException, TException { + public boolean drop_role(final String roleName) throws TException { incrementCounter("drop_role"); firePreEvent(new PreAuthorizationCallEvent(this)); if (ADMIN.equals(roleName) || PUBLIC.equals(roleName)) { throw new MetaException(PUBLIC + "," + ADMIN + " roles can't be dropped."); } - Boolean ret = null; + Boolean ret; try { ret = getMS().removeRole(roleName); } catch (MetaException e) { @@ -5816,10 +5747,10 @@ public boolean drop_role(final String roleName) } @Override - public List get_role_names() throws MetaException, TException { + public List get_role_names() throws TException { incrementCounter("get_role_names"); firePreEvent(new PreAuthorizationCallEvent(this)); - List ret = null; + List ret; try { ret = getMS().listRoleNames(); return ret; @@ -5831,11 +5762,10 @@ public boolean drop_role(final String roleName) } @Override - public boolean grant_privileges(final PrivilegeBag privileges) throws MetaException, - TException { + public boolean grant_privileges(final PrivilegeBag privileges) throws TException { incrementCounter("grant_privileges"); firePreEvent(new PreAuthorizationCallEvent(this)); - Boolean ret = null; + Boolean ret; try { ret = getMS().grantPrivileges(privileges); } catch (MetaException e) { @@ -5848,18 +5778,18 @@ public boolean grant_privileges(final PrivilegeBag privileges) throws MetaExcept @Override public boolean revoke_role(final String roleName, final String userName, - final PrincipalType principalType) throws MetaException, TException { + final PrincipalType principalType) throws TException { return revoke_role(roleName, userName, principalType, false); } private boolean revoke_role(final String roleName, final 
String userName, - final PrincipalType principalType, boolean grantOption) throws MetaException, TException { + final PrincipalType principalType, boolean grantOption) throws TException { incrementCounter("remove_role_member"); firePreEvent(new PreAuthorizationCallEvent(this)); if (PUBLIC.equals(roleName)) { throw new MetaException(PUBLIC + " role can't be revoked."); } - Boolean ret = null; + Boolean ret; try { RawStore ms = getMS(); Role mRole = ms.getRole(roleName); @@ -5874,7 +5804,7 @@ private boolean revoke_role(final String roleName, final String userName, @Override public GrantRevokeRoleResponse grant_revoke_role(GrantRevokeRoleRequest request) - throws MetaException, org.apache.thrift.TException { + throws TException { GrantRevokeRoleResponse response = new GrantRevokeRoleResponse(); boolean grantOption = false; if (request.isSetGrantOption()) { @@ -5903,7 +5833,7 @@ public GrantRevokeRoleResponse grant_revoke_role(GrantRevokeRoleRequest request) @Override public GrantRevokePrivilegeResponse grant_revoke_privileges(GrantRevokePrivilegeRequest request) - throws MetaException, org.apache.thrift.TException { + throws TException { GrantRevokePrivilegeResponse response = new GrantRevokePrivilegeResponse(); switch (request.getRequestType()) { case GRANT: { @@ -5928,16 +5858,15 @@ public GrantRevokePrivilegeResponse grant_revoke_privileges(GrantRevokePrivilege } @Override - public boolean revoke_privileges(final PrivilegeBag privileges) - throws MetaException, TException { + public boolean revoke_privileges(final PrivilegeBag privileges) throws TException { return revoke_privileges(privileges, false); } public boolean revoke_privileges(final PrivilegeBag privileges, boolean grantOption) - throws MetaException, TException { + throws TException { incrementCounter("revoke_privileges"); firePreEvent(new PreAuthorizationCallEvent(this)); - Boolean ret = null; + Boolean ret; try { ret = getMS().revokePrivileges(privileges, grantOption); } catch (MetaException e) { @@ -5949,9 +5878,9 @@ public boolean revoke_privileges(final PrivilegeBag privileges, boolean grantOpt } private PrincipalPrivilegeSet get_user_privilege_set(final String userName, - final List groupNames) throws MetaException, TException { + final List groupNames) throws TException { incrementCounter("get_user_privilege_set"); - PrincipalPrivilegeSet ret = null; + PrincipalPrivilegeSet ret; try { ret = getMS().getUserPrivilegeSet(userName, groupNames); } catch (MetaException e) { @@ -5965,7 +5894,7 @@ private PrincipalPrivilegeSet get_user_privilege_set(final String userName, @Override public List list_privileges(String principalName, PrincipalType principalType, HiveObjectRef hiveObject) - throws MetaException, TException { + throws TException { firePreEvent(new PreAuthorizationCallEvent(this)); if (hiveObject.getObjectType() == null) { return getAllPrivileges(principalName, principalType); @@ -6000,7 +5929,7 @@ private PrincipalPrivilegeSet get_user_privilege_set(final String userName, private List getAllPrivileges(String principalName, PrincipalType principalType) throws TException { - List privs = new ArrayList(); + List privs = new ArrayList<>(); privs.addAll(list_global_privileges(principalName, principalType)); privs.addAll(list_db_privileges(principalName, principalType, null)); privs.addAll(list_table_privileges(principalName, principalType, null, null)); @@ -6013,8 +5942,7 @@ private PrincipalPrivilegeSet get_user_privilege_set(final String userName, private List list_table_column_privileges( final String principalName, 
final PrincipalType principalType, - final String dbName, final String tableName, final String columnName) - throws MetaException, TException { + final String dbName, final String tableName, final String columnName) throws TException { incrementCounter("list_table_column_privileges"); try { @@ -6024,10 +5952,8 @@ private PrincipalPrivilegeSet get_user_privilege_set(final String userName, if (principalName == null) { return getMS().listTableColumnGrantsAll(dbName, tableName, columnName); } - List result = getMS() - .listPrincipalTableColumnGrants(principalName, principalType, + return getMS().listPrincipalTableColumnGrants(principalName, principalType, dbName, tableName, columnName); - return result; } catch (MetaException e) { throw e; } catch (Exception e) { @@ -6038,7 +5964,7 @@ private PrincipalPrivilegeSet get_user_privilege_set(final String userName, private List list_partition_column_privileges( final String principalName, final PrincipalType principalType, final String dbName, final String tableName, final List partValues, - final String columnName) throws MetaException, TException { + final String columnName) throws TException { incrementCounter("list_partition_column_privileges"); try { @@ -6051,11 +5977,8 @@ private PrincipalPrivilegeSet get_user_privilege_set(final String userName, return getMS().listPartitionColumnGrantsAll(dbName, tableName, partName, columnName); } - List result = - getMS().listPrincipalPartitionColumnGrants(principalName, principalType, dbName, + return getMS().listPrincipalPartitionColumnGrants(principalName, principalType, dbName, tableName, partValues, partName, columnName); - - return result; } catch (MetaException e) { throw e; } catch (Exception e) { @@ -6064,8 +5987,7 @@ private PrincipalPrivilegeSet get_user_privilege_set(final String userName, } private List list_db_privileges(final String principalName, - final PrincipalType principalType, final String dbName) - throws MetaException, TException { + final PrincipalType principalType, final String dbName) throws TException { incrementCounter("list_security_db_grant"); try { @@ -6087,7 +6009,7 @@ private PrincipalPrivilegeSet get_user_privilege_set(final String userName, private List list_partition_privileges( final String principalName, final PrincipalType principalType, final String dbName, final String tableName, final List partValues) - throws MetaException, TException { + throws TException { incrementCounter("list_security_partition_grant"); try { @@ -6099,10 +6021,8 @@ private PrincipalPrivilegeSet get_user_privilege_set(final String userName, if (principalName == null) { return getMS().listPartitionGrantsAll(dbName, tableName, partName); } - List result = getMS().listPrincipalPartitionGrants( + return getMS().listPrincipalPartitionGrants( principalName, principalType, dbName, tableName, partValues, partName); - - return result; } catch (MetaException e) { throw e; } catch (Exception e) { @@ -6112,8 +6032,7 @@ private PrincipalPrivilegeSet get_user_privilege_set(final String userName, private List list_table_privileges( final String principalName, final PrincipalType principalType, - final String dbName, final String tableName) throws MetaException, - TException { + final String dbName, final String tableName) throws TException { incrementCounter("list_security_table_grant"); try { @@ -6123,10 +6042,7 @@ private PrincipalPrivilegeSet get_user_privilege_set(final String userName, if (principalName == null) { return getMS().listTableGrantsAll(dbName, tableName); } - List result = getMS() - 
.listAllTableGrants(principalName, principalType, dbName, tableName); - - return result; + return getMS().listAllTableGrants(principalName, principalType, dbName, tableName); } catch (MetaException e) { throw e; } catch (Exception e) { @@ -6135,18 +6051,14 @@ private PrincipalPrivilegeSet get_user_privilege_set(final String userName, } private List list_global_privileges( - final String principalName, final PrincipalType principalType) - throws MetaException, TException { + final String principalName, final PrincipalType principalType) throws TException { incrementCounter("list_security_user_grant"); try { if (principalName == null) { return getMS().listGlobalGrantsAll(); } - List result = getMS().listPrincipalGlobalGrants( - principalName, principalType); - - return result; + return getMS().listPrincipalGlobalGrants(principalName, principalType); } catch (MetaException e) { throw e; } catch (Exception e) { @@ -6155,8 +6067,7 @@ private PrincipalPrivilegeSet get_user_privilege_set(final String userName, } @Override - public void cancel_delegation_token(String token_str_form) - throws MetaException, TException { + public void cancel_delegation_token(String token_str_form) throws TException { startFunction("cancel_delegation_token"); boolean success = false; Exception ex = null; @@ -6168,21 +6079,14 @@ public void cancel_delegation_token(String token_str_form) throw new MetaException(e.getMessage()); } catch (Exception e) { ex = e; - if (e instanceof MetaException) { - throw (MetaException) e; - } else if (e instanceof TException) { - throw (TException) e; - } else { - throw newMetaException(e); - } + throw newMetaException(e); } finally { endFunction("cancel_delegation_token", success, ex); } } @Override - public long renew_delegation_token(String token_str_form) - throws MetaException, TException { + public long renew_delegation_token(String token_str_form) throws TException { startFunction("renew_delegation_token"); Long ret = null; Exception ex = null; @@ -6193,13 +6097,7 @@ public long renew_delegation_token(String token_str_form) throw new MetaException(e.getMessage()); } catch (Exception e) { ex = e; - if (e instanceof MetaException) { - throw (MetaException) e; - } else if (e instanceof TException) { - throw (TException) e; - } else { - throw newMetaException(e); - } + throw newMetaException(e); } finally { endFunction("renew_delegation_token", ret != null, ex); } @@ -6207,9 +6105,8 @@ public long renew_delegation_token(String token_str_form) } @Override - public String get_delegation_token(String token_owner, - String renewer_kerberos_principal_name) - throws MetaException, TException { + public String get_delegation_token(String token_owner, String renewer_kerberos_principal_name) + throws TException { startFunction("get_delegation_token"); String ret = null; Exception ex = null; @@ -6217,21 +6114,12 @@ public String get_delegation_token(String token_owner, ret = HiveMetaStore.getDelegationToken(token_owner, renewer_kerberos_principal_name, getIPAddress()); - } catch (IOException e) { - ex = e; - throw new MetaException(e.getMessage()); - } catch (InterruptedException e) { + } catch (IOException | InterruptedException e) { ex = e; throw new MetaException(e.getMessage()); } catch (Exception e) { ex = e; - if (e instanceof MetaException) { - throw (MetaException) e; - } else if (e instanceof TException) { - throw (TException) e; - } else { - throw newMetaException(e); - } + throw newMetaException(e); } finally { endFunction("get_delegation_token", ret != null, ex); } @@ -6302,7 
+6190,7 @@ public String get_token(String token_identifier) throws TException { @Override public List get_all_token_identifiers() throws TException { startFunction("get_all_token_identifiers."); - List ret = null; + List ret; Exception ex = null; try { ret = getMS().getAllTokenIdentifiers(); @@ -6320,9 +6208,9 @@ public String get_token(String token_identifier) throws TException { } @Override - public int add_master_key(String key) throws MetaException, TException { + public int add_master_key(String key) throws TException { startFunction("add_master_key."); - int ret = -1; + int ret; Exception ex = null; try { ret = getMS().addMasterKey(key); @@ -6340,8 +6228,7 @@ public int add_master_key(String key) throws MetaException, TException { } @Override - public void update_master_key(int seq_number, String key) throws NoSuchObjectException, - MetaException, TException { + public void update_master_key(int seq_number, String key) throws TException { startFunction("update_master_key."); Exception ex = null; try { @@ -6400,10 +6287,7 @@ public boolean remove_master_key(int key_seq) throws TException { @Override public void markPartitionForEvent(final String db_name, final String tbl_name, - final Map partName, final PartitionEventType evtType) throws - MetaException, TException, NoSuchObjectException, UnknownDBException, - UnknownTableException, - InvalidPartitionException, UnknownPartitionException { + final Map partName, final PartitionEventType evtType) throws TException { Table tbl = null; Exception ex = null; @@ -6432,12 +6316,8 @@ public void markPartitionForEvent(final String db_name, final String tbl_name, } catch (Exception original) { ex = original; LOG.error("Exception caught in mark partition event ", original); - if (original instanceof NoSuchObjectException) { - throw (NoSuchObjectException) original; - } else if (original instanceof UnknownTableException) { + if (original instanceof UnknownTableException) { throw (UnknownTableException) original; - } else if (original instanceof UnknownDBException) { - throw (UnknownDBException) original; } else if (original instanceof UnknownPartitionException) { throw (UnknownPartitionException) original; } else if (original instanceof InvalidPartitionException) { @@ -6458,9 +6338,7 @@ public void markPartitionForEvent(final String db_name, final String tbl_name, @Override public boolean isPartitionMarkedForEvent(final String db_name, final String tbl_name, - final Map partName, final PartitionEventType evtType) throws - MetaException, NoSuchObjectException, UnknownDBException, UnknownTableException, - TException, UnknownPartitionException, InvalidPartitionException { + final Map partName, final PartitionEventType evtType) throws TException { startPartitionFunction("isPartitionMarkedForEvent", db_name, tbl_name, partName); Boolean ret = null; @@ -6470,12 +6348,8 @@ public boolean isPartitionMarkedForEvent(final String db_name, final String tbl_ } catch (Exception original) { LOG.error("Exception caught for isPartitionMarkedForEvent ",original); ex = original; - if (original instanceof NoSuchObjectException) { - throw (NoSuchObjectException) original; - } else if (original instanceof UnknownTableException) { + if (original instanceof UnknownTableException) { throw (UnknownTableException) original; - } else if (original instanceof UnknownDBException) { - throw (UnknownDBException) original; } else if (original instanceof UnknownPartitionException) { throw (UnknownPartitionException) original; } else if (original instanceof 
InvalidPartitionException) { @@ -6493,17 +6367,16 @@ public boolean isPartitionMarkedForEvent(final String db_name, final String tbl_ } @Override - public List set_ugi(String username, List groupNames) throws MetaException, - TException { + public List set_ugi(String username, List groupNames) throws TException { Collections.addAll(groupNames, username); return groupNames; } @Override public boolean partition_name_has_valid_characters(List part_vals, - boolean throw_exception) throws TException, MetaException { + boolean throw_exception) throws TException { startFunction("partition_name_has_valid_characters"); - boolean ret = false; + boolean ret; Exception ex = null; try { if (throw_exception) { @@ -6514,10 +6387,10 @@ public boolean partition_name_has_valid_characters(List part_vals, partitionValidationPattern); } } catch (Exception e) { + ex = e; if (e instanceof MetaException) { throw (MetaException)e; } else { - ex = e; throw newMetaException(e); } } @@ -6545,9 +6418,7 @@ private void validateFunctionInfo(Function func) throws InvalidObjectException, } @Override - public void create_function(Function func) throws AlreadyExistsException, - InvalidObjectException, MetaException, NoSuchObjectException, - TException { + public void create_function(Function func) throws TException { validateFunctionInfo(func); boolean success = false; RawStore ms = getMS(); @@ -6643,8 +6514,7 @@ public void drop_function(String dbName, String funcName) } @Override - public void alter_function(String dbName, String funcName, Function newFunc) - throws InvalidOperationException, MetaException, TException { + public void alter_function(String dbName, String funcName, Function newFunc) throws TException { validateFunctionInfo(newFunc); boolean success = false; RawStore ms = getMS(); @@ -6701,8 +6571,7 @@ public GetAllFunctionsResponse get_all_functions() } @Override - public Function get_function(String dbName, String funcName) - throws MetaException, NoSuchObjectException, TException { + public Function get_function(String dbName, String funcName) throws TException { startFunction("get_function", ": " + dbName + "." 
+ funcName); RawStore ms = getMS(); @@ -6746,36 +6615,32 @@ public OpenTxnsResponse open_txns(OpenTxnRequest rqst) throws TException { } @Override - public void abort_txn(AbortTxnRequest rqst) throws NoSuchTxnException, TException { + public void abort_txn(AbortTxnRequest rqst) throws TException { getTxnHandler().abortTxn(rqst); } @Override - public void abort_txns(AbortTxnsRequest rqst) throws NoSuchTxnException, TException { + public void abort_txns(AbortTxnsRequest rqst) throws TException { getTxnHandler().abortTxns(rqst); } @Override - public void commit_txn(CommitTxnRequest rqst) - throws NoSuchTxnException, TxnAbortedException, TException { + public void commit_txn(CommitTxnRequest rqst) throws TException { getTxnHandler().commitTxn(rqst); } @Override - public LockResponse lock(LockRequest rqst) - throws NoSuchTxnException, TxnAbortedException, TException { + public LockResponse lock(LockRequest rqst) throws TException { return getTxnHandler().lock(rqst); } @Override - public LockResponse check_lock(CheckLockRequest rqst) - throws NoSuchTxnException, TxnAbortedException, NoSuchLockException, TException { + public LockResponse check_lock(CheckLockRequest rqst) throws TException { return getTxnHandler().checkLock(rqst); } @Override - public void unlock(UnlockRequest rqst) - throws NoSuchLockException, TxnOpenException, TException { + public void unlock(UnlockRequest rqst) throws TException { getTxnHandler().unlock(rqst); } @@ -6785,8 +6650,7 @@ public ShowLocksResponse show_locks(ShowLocksRequest rqst) throws TException { } @Override - public void heartbeat(HeartbeatRequest ids) - throws NoSuchLockException, NoSuchTxnException, TxnAbortedException, TException { + public void heartbeat(HeartbeatRequest ids) throws TException { getTxnHandler().heartbeat(ids); } @@ -6816,14 +6680,13 @@ public void flushCache() throws TException { } @Override - public void add_dynamic_partitions(AddDynamicPartitions rqst) - throws NoSuchTxnException, TxnAbortedException, TException { + public void add_dynamic_partitions(AddDynamicPartitions rqst) throws TException { getTxnHandler().addDynamicPartitions(rqst); } @Override public GetPrincipalsInRoleResponse get_principals_in_role(GetPrincipalsInRoleRequest request) - throws MetaException, TException { + throws TException { incrementCounter("get_principals_in_role"); firePreEvent(new PreAuthorizationCallEvent(this)); @@ -6844,7 +6707,7 @@ public GetPrincipalsInRoleResponse get_principals_in_role(GetPrincipalsInRoleReq @Override public GetRoleGrantsForPrincipalResponse get_role_grants_for_principal( - GetRoleGrantsForPrincipalRequest request) throws MetaException, TException { + GetRoleGrantsForPrincipalRequest request) throws TException { incrementCounter("get_role_grants_for_principal"); firePreEvent(new PreAuthorizationCallEvent(this)); @@ -6866,18 +6729,17 @@ public GetRoleGrantsForPrincipalResponse get_role_grants_for_principal( } @Override - public AggrStats get_aggr_stats_for(PartitionsStatsRequest request) - throws NoSuchObjectException, MetaException, TException { + public AggrStats get_aggr_stats_for(PartitionsStatsRequest request) throws TException { String dbName = request.getDbName().toLowerCase(); String tblName = request.getTblName().toLowerCase(); startFunction("get_aggr_stats_for", ": db=" + request.getDbName() + " table=" + request.getTblName()); - List lowerCaseColNames = new ArrayList(request.getColNames().size()); + List lowerCaseColNames = new ArrayList<>(request.getColNames().size()); for (String colName : request.getColNames()) { 
lowerCaseColNames.add(colName.toLowerCase()); } - List lowerCasePartNames = new ArrayList(request.getPartNames().size()); + List lowerCasePartNames = new ArrayList<>(request.getPartNames().size()); for (String partName : request.getPartNames()) { lowerCasePartNames.add(lowerCaseConvertPartName(partName)); } @@ -6894,9 +6756,7 @@ public AggrStats get_aggr_stats_for(PartitionsStatsRequest request) } @Override - public boolean set_aggr_stats_for(SetPartitionsStatsRequest request) - throws NoSuchObjectException, InvalidObjectException, MetaException, InvalidInputException, - TException { + public boolean set_aggr_stats_for(SetPartitionsStatsRequest request) throws TException { boolean ret = true; List csNews = request.getColStats(); if (csNews == null || csNews.isEmpty()) { @@ -6976,7 +6836,7 @@ public NotificationEventResponse get_next_notification(NotificationEventRequest authorizeProxyPrivilege(); } catch (Exception ex) { LOG.error("Not authorized to make the get_next_notification call. You can try to disable " + - HiveConf.ConfVars.METASTORE_EVENT_DB_NOTIFICATION_API_AUTH.varname, ex); + ConfVars.EVENT_DB_NOTIFICATION_API_AUTH.toString(), ex); throw new TException(ex); } @@ -6990,7 +6850,7 @@ public CurrentNotificationEventId get_current_notificationEventId() throws TExce authorizeProxyPrivilege(); } catch (Exception ex) { LOG.error("Not authorized to make the get_current_notificationEventId call. You can try to disable " + - HiveConf.ConfVars.METASTORE_EVENT_DB_NOTIFICATION_API_AUTH.varname, ex); + ConfVars.EVENT_DB_NOTIFICATION_API_AUTH.toString(), ex); throw new TException(ex); } @@ -7005,7 +6865,7 @@ public NotificationEventsCountResponse get_notification_events_count(Notificatio authorizeProxyPrivilege(); } catch (Exception ex) { LOG.error("Not authorized to make the get_notification_events_count call. 
You can try to disable " + - HiveConf.ConfVars.METASTORE_EVENT_DB_NOTIFICATION_API_AUTH.varname, ex); + ConfVars.EVENT_DB_NOTIFICATION_API_AUTH.toString(), ex); throw new TException(ex); } @@ -7015,17 +6875,18 @@ public NotificationEventsCountResponse get_notification_events_count(Notificatio private void authorizeProxyPrivilege() throws Exception { // Skip the auth in embedded mode or if the auth is disabled - if (!isMetaStoreRemote() || !hiveConf.getBoolVar(HiveConf.ConfVars.METASTORE_EVENT_DB_NOTIFICATION_API_AUTH)) { + if (!isMetaStoreRemote() || + !MetastoreConf.getBoolVar(conf, ConfVars.EVENT_DB_NOTIFICATION_API_AUTH)) { return; } String user = null; try { - user = Utils.getUGI().getShortUserName(); + user = SecurityUtils.getUGI().getShortUserName(); } catch (Exception ex) { LOG.error("Cannot obtain username", ex); throw ex; } - if (!MetaStoreUtils.checkUserHasHostProxyPrivileges(user, hiveConf, getIPAddress())) { + if (!MetaStoreUtils.checkUserHasHostProxyPrivileges(user, conf, getIPAddress())) { throw new MetaException("User " + user + " is not allowed to perform this API call"); } } @@ -7092,9 +6953,8 @@ public GetFileMetadataByExprResult get_file_metadata_by_expr(GetFileMetadataByEx return result; } - private final static Map EMPTY_MAP_FM1 = new HashMap(1); - private final static Map EMPTY_MAP_FM2 = - new HashMap(1); + private final static Map EMPTY_MAP_FM1 = new HashMap<>(1); + private final static Map EMPTY_MAP_FM2 = new HashMap<>(1); @Override public GetFileMetadataResult get_file_metadata(GetFileMetadataRequest req) throws TException { @@ -7182,7 +7042,7 @@ public CacheFileMetadataResult cache_file_metadata( fileMetadataManager.queueCacheMetadata(tbl.getSd().getLocation(), type); success = true; } else { - List partNames = null; + List partNames; if (partName != null) { partNames = Lists.newArrayList(partName); } else if (isAllPart) { @@ -7190,8 +7050,8 @@ public CacheFileMetadataResult cache_file_metadata( } else { throw new MetaException("Table is partitioned"); } - int batchSize = HiveConf.getIntVar( - hiveConf, ConfVars.METASTORE_BATCH_RETRIEVE_OBJECTS_MAX); + int batchSize = MetastoreConf.getIntVar( + conf, ConfVars.BATCH_RETRIEVE_OBJECTS_MAX); int index = 0; int successCount = 0, failCount = 0; HashSet failFormats = null; @@ -7248,7 +7108,7 @@ public CacheFileMetadataResult cache_file_metadata( } @VisibleForTesting - public void updateMetrics() throws MetaException { + void updateMetrics() throws MetaException { if (databaseCount != null) { tableCount.set(getMS().getTableCount()); partCount.set(getMS().getPartitionCount()); @@ -7257,8 +7117,7 @@ public void updateMetrics() throws MetaException { } @Override - public PrimaryKeysResponse get_primary_keys(PrimaryKeysRequest request) - throws MetaException, NoSuchObjectException, TException { + public PrimaryKeysResponse get_primary_keys(PrimaryKeysRequest request) throws TException { String db_name = request.getDb_name(); String tbl_name = request.getTbl_name(); startTableFunction("get_primary_keys", db_name, tbl_name); @@ -7276,8 +7135,7 @@ public PrimaryKeysResponse get_primary_keys(PrimaryKeysRequest request) } @Override - public ForeignKeysResponse get_foreign_keys(ForeignKeysRequest request) throws MetaException, - NoSuchObjectException, TException { + public ForeignKeysResponse get_foreign_keys(ForeignKeysRequest request) throws TException { String parent_db_name = request.getParent_db_name(); String parent_tbl_name = request.getParent_tbl_name(); String foreign_db_name = request.getForeign_db_name(); @@ -7312,7 
+7170,7 @@ private void throwMetaException(Exception e) throws MetaException, @Override public UniqueConstraintsResponse get_unique_constraints(UniqueConstraintsRequest request) - throws MetaException, NoSuchObjectException, TException { + throws TException { String db_name = request.getDb_name(); String tbl_name = request.getTbl_name(); startTableFunction("get_unique_constraints", db_name, tbl_name); @@ -7324,8 +7182,6 @@ public UniqueConstraintsResponse get_unique_constraints(UniqueConstraintsRequest ex = e; if (e instanceof MetaException) { throw (MetaException) e; - } else if (e instanceof NoSuchObjectException) { - throw (NoSuchObjectException) e; } else { throw newMetaException(e); } @@ -7337,7 +7193,7 @@ public UniqueConstraintsResponse get_unique_constraints(UniqueConstraintsRequest @Override public NotNullConstraintsResponse get_not_null_constraints(NotNullConstraintsRequest request) - throws MetaException, NoSuchObjectException, TException { + throws TException { String db_name = request.getDb_name(); String tbl_name = request.getTbl_name(); startTableFunction("get_not_null_constraints", db_name, tbl_name); @@ -7349,8 +7205,6 @@ public NotNullConstraintsResponse get_not_null_constraints(NotNullConstraintsReq ex = e; if (e instanceof MetaException) { throw (MetaException) e; - } else if (e instanceof NoSuchObjectException) { - throw (NoSuchObjectException) e; } else { throw newMetaException(e); } @@ -7361,7 +7215,7 @@ public NotNullConstraintsResponse get_not_null_constraints(NotNullConstraintsReq } @Override - public String get_metastore_db_uuid() throws MetaException, TException { + public String get_metastore_db_uuid() throws TException { try { return getMS().getMetastoreDbUuid(); } catch (MetaException e) { @@ -7500,17 +7354,17 @@ public WMGetTriggersForResourePlanResponse get_triggers_for_resourceplan( } } - public static IHMSHandler newRetryingHMSHandler(IHMSHandler baseHandler, HiveConf hiveConf) + private static IHMSHandler newRetryingHMSHandler(IHMSHandler baseHandler, Configuration conf) throws MetaException { - return newRetryingHMSHandler(baseHandler, hiveConf, false); + return newRetryingHMSHandler(baseHandler, conf, false); } - public static IHMSHandler newRetryingHMSHandler(IHMSHandler baseHandler, HiveConf hiveConf, + private static IHMSHandler newRetryingHMSHandler(IHMSHandler baseHandler, Configuration conf, boolean local) throws MetaException { - return RetryingHMSHandler.getProxy(hiveConf, baseHandler, local); + return RetryingHMSHandler.getProxy(conf, baseHandler, local); } - public static Iface newRetryingHMSHandler(String name, HiveConf conf, boolean local) + static Iface newRetryingHMSHandler(String name, Configuration conf, boolean local) throws MetaException { HMSHandler baseHandler = new HiveMetaStore.HMSHandler(name, conf, false); return RetryingHMSHandler.getProxy(conf, baseHandler, local); @@ -7564,9 +7418,9 @@ public static long renewDelegationToken(String tokenStrForm private int port; @SuppressWarnings("static-access") - public HiveMetastoreCli(Configuration configuration) { + HiveMetastoreCli(Configuration configuration) { super("hivemetastore", true); - this.port = HiveConf.getIntVar(configuration, HiveConf.ConfVars.METASTORE_SERVER_PORT); + this.port = MetastoreConf.getIntVar(configuration, ConfVars.SERVER_PORT); // -p port OPTIONS.addOption(OptionBuilder @@ -7616,8 +7470,8 @@ public int getPort() { * @param args */ public static void main(String[] args) throws Throwable { - HiveConf.setLoadMetastoreConfig(true); - final HiveConf conf = new 
HiveConf(HMSHandler.class); + final Configuration conf = MetastoreConf.newMetastoreConf(); + shutdownHookMgr = ShutdownHookManager.get(); HiveMetastoreCli cli = new HiveMetastoreCli(conf); cli.parse(args); @@ -7632,12 +7486,12 @@ public static void main(String[] args) throws Throwable { // NOTE: It is critical to do this here so that log4j is reinitialized // before any of the other core hive classes are loaded try { - LogUtils.initHiveLog4j(); - } catch (LogInitializationException e) { + LogUtils.initHiveLog4j(conf); + } catch (LogUtils.LogInitializationException e) { HMSHandler.LOG.warn(e.getMessage()); } } - HiveStringUtils.startupShutdownMessage(HiveMetaStore.class, args, LOG); + startupShutdownMessage(HiveMetaStore.class, args, LOG); try { String msg = "Starting hive metastore on port " + cli.port; @@ -7653,31 +7507,28 @@ public static void main(String[] args) throws Throwable { } // Add shutdown hook. - ShutdownHookManager.addShutdownHook(new Runnable() { - @Override - public void run() { - String shutdownMsg = "Shutting down hive metastore."; - HMSHandler.LOG.info(shutdownMsg); - if (isCliVerbose) { - System.err.println(shutdownMsg); - } - if (conf.getBoolVar(ConfVars.METASTORE_METRICS)) { - try { - Metrics.shutdown(); - } catch (Exception e) { - LOG.error("error in Metrics deinit: " + e.getClass().getName() + " " + shutdownHookMgr.addShutdownHook(() -> { + String shutdownMsg = "Shutting down hive metastore."; + HMSHandler.LOG.info(shutdownMsg); + if (isCliVerbose) { + System.err.println(shutdownMsg); + } + if (MetastoreConf.getBoolVar(conf, ConfVars.METRICS_ENABLED)) { + try { + Metrics.shutdown(); + } catch (Exception e) { + LOG.error("error in Metrics deinit: " + e.getClass().getName() + " " + e.getMessage(), e); - } } - ThreadPool.shutdown(); } - }); + ThreadPool.shutdown(); + }, 10); // This will only initialize the cache if configured. CachedStore.initSharedCacheAsync(conf); //Start Metrics for Standalone (Remote) Mode - if (conf.getBoolVar(ConfVars.METASTORE_METRICS)) { + if (MetastoreConf.getBoolVar(conf, ConfVars.METRICS_ENABLED)) { try { Metrics.initialize(conf); } catch (Exception e) { @@ -7712,7 +7563,7 @@ public void run() { */ public static void startMetaStore(int port, HadoopThriftAuthBridge bridge) throws Throwable { - startMetaStore(port, bridge, new HiveConf(HMSHandler.class), null, null, null); + startMetaStore(port, bridge, MetastoreConf.newMetastoreConf(), null, null, null); } /** @@ -7723,7 +7574,7 @@ public static void startMetaStore(int port, HadoopThriftAuthBridge bridge) * @throws Throwable */ public static void startMetaStore(int port, HadoopThriftAuthBridge bridge, - HiveConf conf) throws Throwable { + Configuration conf) throws Throwable { startMetaStore(port, bridge, conf, null, null, null); } @@ -7737,27 +7588,27 @@ public static void startMetaStore(int port, HadoopThriftAuthBridge bridge, * @throws Throwable */ public static void startMetaStore(int port, HadoopThriftAuthBridge bridge, - HiveConf conf, Lock startLock, Condition startCondition, + Configuration conf, Lock startLock, Condition startCondition, AtomicBoolean startedServing) throws Throwable { try { isMetaStoreRemote = true; // Server will create new threads up to max as necessary. After an idle // period, it will destroy threads to keep the number of threads in the // pool to min. 
- long maxMessageSize = conf.getLongVar(HiveConf.ConfVars.METASTORESERVERMAXMESSAGESIZE); - int minWorkerThreads = conf.getIntVar(HiveConf.ConfVars.METASTORESERVERMINTHREADS); - int maxWorkerThreads = conf.getIntVar(HiveConf.ConfVars.METASTORESERVERMAXTHREADS); - boolean tcpKeepAlive = conf.getBoolVar(HiveConf.ConfVars.METASTORE_TCP_KEEP_ALIVE); - boolean useFramedTransport = conf.getBoolVar(ConfVars.METASTORE_USE_THRIFT_FRAMED_TRANSPORT); - boolean useCompactProtocol = conf.getBoolVar(ConfVars.METASTORE_USE_THRIFT_COMPACT_PROTOCOL); - boolean useSSL = conf.getBoolVar(ConfVars.HIVE_METASTORE_USE_SSL); - useSasl = conf.getBoolVar(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL); + long maxMessageSize = MetastoreConf.getLongVar(conf, ConfVars.SERVER_MAX_MESSAGE_SIZE); + int minWorkerThreads = MetastoreConf.getIntVar(conf, ConfVars.SERVER_MIN_THREADS); + int maxWorkerThreads = MetastoreConf.getIntVar(conf, ConfVars.SERVER_MAX_THREADS); + boolean tcpKeepAlive = MetastoreConf.getBoolVar(conf, ConfVars.TCP_KEEP_ALIVE); + boolean useFramedTransport = MetastoreConf.getBoolVar(conf, ConfVars.USE_THRIFT_FRAMED_TRANSPORT); + boolean useCompactProtocol = MetastoreConf.getBoolVar(conf, ConfVars.USE_THRIFT_COMPACT_PROTOCOL); + boolean useSSL = MetastoreConf.getBoolVar(conf, ConfVars.USE_SSL); + useSasl = MetastoreConf.getBoolVar(conf, ConfVars.USE_THRIFT_SASL); if (useSasl) { // we are in secure mode. Login using keytab String kerberosName = SecurityUtil - .getServerPrincipal(conf.getVar(ConfVars.METASTORE_KERBEROS_PRINCIPAL), "0.0.0.0"); - String keyTabFile = conf.getVar(ConfVars.METASTORE_KERBEROS_KEYTAB_FILE); + .getServerPrincipal(MetastoreConf.getVar(conf, ConfVars.KERBEROS_PRINCIPAL), "0.0.0.0"); + String keyTabFile = MetastoreConf.getVar(conf, ConfVars.KERBEROS_KEYTAB_FILE); UserGroupInformation.loginUserFromKeytab(kerberosName, keyTabFile); } @@ -7775,7 +7626,7 @@ public static void startMetaStore(int port, HadoopThriftAuthBridge bridge, HMSHandler baseHandler = new HiveMetaStore.HMSHandler("new db based metaserver", conf, false); IHMSHandler handler = newRetryingHMSHandler(baseHandler, conf); - TServerSocket serverSocket = null; + TServerSocket serverSocket; if (useSasl) { // we are in secure mode. @@ -7783,9 +7634,9 @@ public static void startMetaStore(int port, HadoopThriftAuthBridge bridge, throw new HiveMetaException("Framed transport is not supported with SASL enabled."); } saslServer = bridge.createServer( - conf.getVar(HiveConf.ConfVars.METASTORE_KERBEROS_KEYTAB_FILE), - conf.getVar(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL), - conf.getVar(HiveConf.ConfVars.METASTORE_CLIENT_KERBEROS_PRINCIPAL)); + MetastoreConf.getVar(conf, ConfVars.KERBEROS_KEYTAB_FILE), + MetastoreConf.getVar(conf, ConfVars.KERBEROS_PRINCIPAL), + MetastoreConf.getVar(conf, ConfVars.CLIENT_KERBEROS_PRINCIPAL)); // Start delegation token manager delegationTokenManager = new MetastoreDelegationTokenManager(); delegationTokenManager.startDelegationTokenSecretManager(conf, baseHandler, HadoopThriftAuthBridge.Server.ServerMode.METASTORE); @@ -7793,45 +7644,45 @@ public static void startMetaStore(int port, HadoopThriftAuthBridge bridge, transFactory = saslServer.createTransportFactory( MetaStoreUtils.getMetaStoreSaslProperties(conf, useSSL)); processor = saslServer.wrapProcessor( - new ThriftHiveMetastore.Processor(handler)); + new ThriftHiveMetastore.Processor<>(handler)); LOG.info("Starting DB backed MetaStore Server in Secure Mode"); } else { // we are in unsecure mode. 
- if (conf.getBoolVar(ConfVars.METASTORE_EXECUTE_SET_UGI)) { + if (MetastoreConf.getBoolVar(conf, ConfVars.EXECUTE_SET_UGI)) { transFactory = useFramedTransport ? new ChainedTTransportFactory(new TFramedTransport.Factory(), new TUGIContainingTransport.Factory()) : new TUGIContainingTransport.Factory(); - processor = new TUGIBasedProcessor(handler); + processor = new TUGIBasedProcessor<>(handler); LOG.info("Starting DB backed MetaStore Server with SetUGI enabled"); } else { transFactory = useFramedTransport ? new TFramedTransport.Factory() : new TTransportFactory(); - processor = new TSetIpAddressProcessor(handler); + processor = new TSetIpAddressProcessor<>(handler); LOG.info("Starting DB backed MetaStore Server"); } } if (!useSSL) { - serverSocket = HiveAuthUtils.getServerSocket(null, port); + serverSocket = SecurityUtils.getServerSocket(null, port); } else { - String keyStorePath = conf.getVar(ConfVars.HIVE_METASTORE_SSL_KEYSTORE_PATH).trim(); + String keyStorePath = MetastoreConf.getVar(conf, ConfVars.SSL_KEYSTORE_PATH).trim(); if (keyStorePath.isEmpty()) { - throw new IllegalArgumentException(ConfVars.HIVE_METASTORE_SSL_KEYSTORE_PATH.varname + throw new IllegalArgumentException(ConfVars.SSL_KEYSTORE_PATH.toString() + " Not configured for SSL connection"); } String keyStorePassword = MetastoreConf.getPassword(conf, MetastoreConf.ConfVars.SSL_KEYSTORE_PASSWORD); // enable SSL support for HMS - List sslVersionBlacklist = new ArrayList(); - for (String sslVersion : conf.getVar(ConfVars.HIVE_SSL_PROTOCOL_BLACKLIST).split(",")) { + List sslVersionBlacklist = new ArrayList<>(); + for (String sslVersion : MetastoreConf.getVar(conf, ConfVars.SSL_PROTOCOL_BLACKLIST).split(",")) { sslVersionBlacklist.add(sslVersion); } - serverSocket = HiveAuthUtils.getServerSSLSocket(null, port, keyStorePath, + serverSocket = SecurityUtils.getServerSSLSocket(null, port, keyStorePath, keyStorePassword, sslVersionBlacklist); } @@ -7948,7 +7799,7 @@ public void run() { * Start threads outside of the thrift service, such as the compactor threads. * @param conf Hive configuration object */ - private static void startMetaStoreThreads(final HiveConf conf, final Lock startLock, + private static void startMetaStoreThreads(final Configuration conf, final Lock startLock, final Condition startCondition, final AtomicBoolean startedServing) { // A thread is spun up to start these other threads. 
That's because we can't start them @@ -7996,16 +7847,16 @@ public void run() { t.start(); } - private static void startCompactorInitiator(HiveConf conf) throws Exception { - if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_COMPACTOR_INITIATOR_ON)) { + private static void startCompactorInitiator(Configuration conf) throws Exception { + if (MetastoreConf.getBoolVar(conf, ConfVars.COMPACTOR_INITIATOR_ON)) { MetaStoreThread initiator = instantiateThread("org.apache.hadoop.hive.ql.txn.compactor.Initiator"); initializeAndStartThread(initiator, conf); } } - private static void startCompactorWorkers(HiveConf conf) throws Exception { - int numWorkers = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVE_COMPACTOR_WORKER_THREADS); + private static void startCompactorWorkers(Configuration conf) throws Exception { + int numWorkers = MetastoreConf.getIntVar(conf, ConfVars.COMPACTOR_WORKER_THREADS); for (int i = 0; i < numWorkers; i++) { MetaStoreThread worker = instantiateThread("org.apache.hadoop.hive.ql.txn.compactor.Worker"); @@ -8013,8 +7864,8 @@ private static void startCompactorWorkers(HiveConf conf) throws Exception { } } - private static void startCompactorCleaner(HiveConf conf) throws Exception { - if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_COMPACTOR_INITIATOR_ON)) { + private static void startCompactorCleaner(Configuration conf) throws Exception { + if (MetastoreConf.getBoolVar(conf, ConfVars.COMPACTOR_INITIATOR_ON)) { MetaStoreThread cleaner = instantiateThread("org.apache.hadoop.hive.ql.txn.compactor.Cleaner"); initializeAndStartThread(cleaner, conf); @@ -8035,7 +7886,7 @@ private static MetaStoreThread instantiateThread(String classname) throws Except private static int nextThreadId = 1000000; - private static void initializeAndStartThread(MetaStoreThread thread, HiveConf conf) throws + private static void initializeAndStartThread(MetaStoreThread thread, Configuration conf) throws MetaException { LOG.info("Starting metastore thread of type " + thread.getClass().getName()); thread.setConf(conf); @@ -8043,9 +7894,8 @@ private static void initializeAndStartThread(MetaStoreThread thread, HiveConf co thread.init(new AtomicBoolean(), new AtomicBoolean()); thread.start(); } - - private static void startHouseKeeperService(HiveConf conf) throws Exception { - if(!HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_COMPACTOR_INITIATOR_ON)) { + private static void startHouseKeeperService(Configuration conf) throws Exception { + if(!MetastoreConf.getBoolVar(conf, ConfVars.COMPACTOR_INITIATOR_ON)) { return; } @@ -8070,17 +7920,60 @@ private static void startOneHouseKeeperService(RunnableConfigurable rc, Configur ThreadPool.getPool().scheduleAtFixedRate(rc, 0, interval, TimeUnit.MILLISECONDS); } - static Map createHandlerMap() { - Map fmHandlers = new HashMap<>(); - for (FileMetadataExprType v : FileMetadataExprType.values()) { - switch (v) { - case ORC_SARG: - fmHandlers.put(v, new OrcFileMetadataHandler()); - break; - default: - throw new AssertionError("Unsupported type " + v); - } - } - return fmHandlers; + /** + * Print a log message for starting up and shutting down + * @param clazz the class of the server + * @param args arguments + * @param LOG the target log object + */ + private static void startupShutdownMessage(Class clazz, String[] args, + final org.slf4j.Logger LOG) { + final String hostname = getHostname(); + final String classname = clazz.getSimpleName(); + LOG.info( + toStartupShutdownString("STARTUP_MSG: ", new String[] { + "Starting " + classname, + " host = " + hostname, + " 
args = " + Arrays.asList(args), + " version = " + MetastoreVersionInfo.getVersion(), + " classpath = " + System.getProperty("java.class.path"), + " build = " + MetastoreVersionInfo.getUrl() + " -r " + + MetastoreVersionInfo.getRevision() + + "; compiled by '" + MetastoreVersionInfo.getUser() + + "' on " + MetastoreVersionInfo.getDate()} + ) + ); + + shutdownHookMgr.addShutdownHook( + () -> LOG.info(toStartupShutdownString("SHUTDOWN_MSG: ", new String[]{ + "Shutting down " + classname + " at " + hostname})), 0); + + } + + /** + * Return a message for logging. + * @param prefix prefix keyword for the message + * @param msg content of the message + * @return a message for logging + */ + private static String toStartupShutdownString(String prefix, String [] msg) { + StringBuilder b = new StringBuilder(prefix); + b.append("\n/************************************************************"); + for(String s : msg) { + b.append("\n") + .append(prefix) + .append(s); + } + b.append("\n************************************************************/"); + return b.toString(); + } + + /** + * Return hostname without throwing exception. + * @return hostname + */ + private static String getHostname() { + try {return "" + InetAddress.getLocalHost();} + catch(UnknownHostException uhe) {return "" + uhe;} } } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreInit.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreInit.java index d3eee8548e..c880a9a417 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreInit.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreInit.java @@ -60,7 +60,7 @@ static boolean updateConnectionURL(Configuration originalConf, Configuration act try { // We always call init because the hook name in the configuration could // have changed. - MetaStoreInit.initConnectionUrlHook(originalConf, updateData); + initConnectionUrlHook(originalConf, updateData); if (updateData.urlHook != null) { if (badUrl != null) { updateData.urlHook.notifyBadConnectionUrl(badUrl); diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/PartFilterExprUtil.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/PartFilterExprUtil.java index 2671c1fc57..2fcc162a90 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/PartFilterExprUtil.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/PartFilterExprUtil.java @@ -80,10 +80,18 @@ public static PartitionExpressionProxy createExpressionProxy(Configuration conf) try { @SuppressWarnings("unchecked") Class clazz = - JavaUtils.getClass(className, PartitionExpressionProxy.class); + JavaUtils.getClass(className, PartitionExpressionProxy.class); return JavaUtils.newInstance( clazz, new Class[0], new Object[0]); } catch (MetaException e) { + if (e.getMessage().matches(".* class not found")) { + // TODO MS-SPLIT For now if we cannot load the default PartitionExpressionForMetastore + // class (since it's from ql) load the DefaultPartitionExpressionProxy, which just throws + // UnsupportedOperationExceptions. This allows existing Hive instances to work but also + // allows us to instantiate the metastore stand alone for testing. Not sure if this is + // the best long term solution. 
+ return new DefaultPartitionExpressionProxy(); + } LOG.error("Error loading PartitionExpressionProxy", e); throw new RuntimeException("Error loading PartitionExpressionProxy: " + e.getMessage()); } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/StorageSchemaReader.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/StorageSchemaReader.java new file mode 100644 index 0000000000..6251e23991 --- /dev/null +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/StorageSchemaReader.java @@ -0,0 +1,46 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.api.EnvironmentContext; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.Table; + +import java.util.List; + +/** + * An interface to implement reading schemas from stored data. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +interface StorageSchemaReader { + /** + * Read the schema from the storage representation of the table. + * @param tbl metastore table object + * @param envContext environment context + * @param conf current configuration file + * @return list of field schemas + * @throws MetaException if the table storage could not be read + */ + List readSchema(Table tbl, EnvironmentContext envContext, Configuration conf) + throws MetaException; +} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/TSetIpAddressProcessor.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TSetIpAddressProcessor.java similarity index 99% rename from metastore/src/java/org/apache/hadoop/hive/metastore/TSetIpAddressProcessor.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TSetIpAddressProcessor.java index 38b087533b..c0c960458e 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/TSetIpAddressProcessor.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TSetIpAddressProcessor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git metastore/src/java/org/apache/hadoop/hive/metastore/TUGIBasedProcessor.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TUGIBasedProcessor.java similarity index 96% rename from metastore/src/java/org/apache/hadoop/hive/metastore/TUGIBasedProcessor.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TUGIBasedProcessor.java index 64f0b96b84..5285b54f0f 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/TUGIBasedProcessor.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TUGIBasedProcessor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -25,6 +25,7 @@ import java.util.List; import java.util.Map; +import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore; import org.apache.hadoop.hive.metastore.security.TUGIContainingTransport; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -89,7 +90,7 @@ public boolean process(final TProtocol in, final TProtocol out) throws TExceptio // Store ugi in transport if the rpc is set_ugi if (msg.name.equalsIgnoreCase("set_ugi")){ try { - handleSetUGI(ugiTrans, (set_ugi)fn, msg, in, out); + handleSetUGI(ugiTrans, (ThriftHiveMetastore.Processor.set_ugi)fn, msg, in, out); } catch (TException e) { throw e; } catch (Exception e) { @@ -137,7 +138,7 @@ public Void run() { } private void handleSetUGI(TUGIContainingTransport ugiTrans, - set_ugi fn, TMessage msg, TProtocol iprot, TProtocol oprot) + ThriftHiveMetastore.Processor.set_ugi fn, TMessage msg, TProtocol iprot, TProtocol oprot) throws TException, SecurityException, NoSuchMethodException, IllegalArgumentException, IllegalAccessException, InvocationTargetException{ diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java index f8ebc124c4..eb33d1f1c1 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java @@ -19,6 +19,8 @@ import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.DefaultStorageSchemaReader; +import org.apache.hadoop.hive.metastore.HiveAlterHandler; import org.apache.hadoop.hive.metastore.security.MetastoreDelegationTokenManager; import org.apache.hadoop.hive.metastore.utils.StringUtils; import org.slf4j.Logger; @@ -99,6 +101,7 @@ public String toString() { */ public static final MetastoreConf.ConfVars[] metaVars = { ConfVars.WAREHOUSE, + ConfVars.REPLDIR, ConfVars.THRIFT_URIS, ConfVars.SERVER_PORT, ConfVars.THRIFT_CONNECTION_RETRIES, @@ -239,7 +242,7 @@ public static ConfVars getMetaConf(String name) { "hive.metastore.aggregate.stats.cache.ttl", 600, TimeUnit.SECONDS, "Number of seconds for a cached node to be active in the cache before they become stale."), ALTER_HANDLER("metastore.alter.handler", "hive.metastore.alter.impl", - "org.apache.hadoop.hive.metastore.HiveAlterHandler", + HiveAlterHandler.class.getName(), "Alter handler. For now defaults to the Hive one. Really need a better default option"), ASYNC_LOG_ENABLED("metastore.async.log.enabled", "hive.async.log.enabled", true, "Whether to enable Log4j2's asynchronous logging. Asynchronous logging can give\n" + @@ -279,6 +282,10 @@ public static ConfVars getMetaConf(String name) { CLIENT_CONNECT_RETRY_DELAY("metastore.client.connect.retry.delay", "hive.metastore.client.connect.retry.delay", 1, TimeUnit.SECONDS, "Number of seconds for the client to wait between consecutive connection attempts"), + CLIENT_KERBEROS_PRINCIPAL("metastore.client.kerberos.principal", + "hive.metastore.client.kerberos.principal", + "", // E.g. "hive-metastore/_HOST@EXAMPLE.COM". + "The Kerberos principal associated with the HA cluster of hcat_servers."), CLIENT_SOCKET_LIFETIME("metastore.client.socket.lifetime", "hive.metastore.client.socket.lifetime", 0, TimeUnit.SECONDS, "MetaStore Client socket lifetime in seconds. 
After this time is exceeded, client\n" + @@ -439,6 +446,10 @@ public static ConfVars getMetaConf(String name) { "hive.metastore.event.message.factory", "org.apache.hadoop.hive.metastore.messaging.json.JSONMessageFactory", "Factory class for making encoding and decoding messages in the events generated."), + EVENT_DB_NOTIFICATION_API_AUTH("metastore.metastore.event.db.notification.api.auth", + "hive.metastore.event.db.notification.api.auth", true, + "Should metastore do authorization against database notification related APIs such as get_next_notification.\n" + + "If set to true, then only the superusers in proxy settings have the permission"), EXECUTE_SET_UGI("metastore.execute.setugi", "hive.metastore.execute.setugi", true, "In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using \n" + "the client's reported user and group permissions. Note that this property must be set on \n" + @@ -576,6 +587,8 @@ public static ConfVars getMetaConf(String name) { "Inteval for cmroot cleanup thread."), REPLCMENABLED("metastore.repl.cm.enabled", "hive.repl.cm.enabled", false, "Turn on ChangeManager, so delete files will go to cmrootdir."), + REPLDIR("metastore.repl.rootdir", "hive.repl.rootdir", "/user/hive/repl/", + "HDFS root dir for all replication dumps."), REPL_COPYFILE_MAXNUMFILES("metastore.repl.copyfile.maxnumfiles", "hive.exec.copyfile.maxnumfiles", 1L, "Maximum number of files Hive uses to do sequential HDFS copies between directories." + @@ -584,6 +597,10 @@ public static ConfVars getMetaConf(String name) { "hive.exec.copyfile.maxsize", 32L * 1024 * 1024 /*32M*/, "Maximum file size (in bytes) that Hive uses to do single HDFS copies between directories." + "Distributed copies (distcp) will be used instead for bigger files so that copies can be done faster."), + REPL_DUMPDIR_CLEAN_FREQ("metastore.repl.dumpdir.clean.freq", "hive.repl.dumpdir.clean.freq", + 0, TimeUnit.SECONDS, "Frequency at which timer task runs to purge expired dump dirs."), + REPL_DUMPDIR_TTL("metastore.repl.dumpdir.ttl", "hive.repl.dumpdir.ttl", 7, TimeUnit.DAYS, + "TTL of dump dirs before cleanup."), SCHEMA_INFO_CLASS("metastore.schema.info.class", "hive.metastore.schema.info.class", "org.apache.hadoop.hive.metastore.MetaStoreSchemaInfo", "Fully qualified class name for the metastore schema information class \n" @@ -646,6 +663,10 @@ public static ConfVars getMetaConf(String name) { "The Java class (implementing the StatsAggregator interface) that is used by default if hive.stats.dbclass is custom type."), STATS_DEFAULT_PUBLISHER("metastore.stats.default.publisher", "hive.stats.default.publisher", "", "The Java class (implementing the StatsPublisher interface) that is used by default if hive.stats.dbclass is custom type."), + STORAGE_SCHEMA_READER_IMPL("metastore.storage.schema.reader.impl", NO_SUCH_KEY, + DefaultStorageSchemaReader.class.getName(), + "The class to use to read schemas from storage. 
It must implement " + + "org.apache.hadoop.hive.metastore.StorageSchemaReader"), STORE_MANAGER_TYPE("datanucleus.storeManagerType", "datanucleus.storeManagerType", "rdbms", "metadata store type"), SUPPORT_SPECICAL_CHARACTERS_IN_TABLE_NAMES("metastore.support.special.characters.tablename", "hive.support.special.characters.tablename", true, @@ -920,6 +941,10 @@ public String getHiveName() { return hiveName; } + public Object getDefaultVal() { + return defaultVal; + } + @Override public String toString() { return varname; diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/EventCleanerTask.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/EventCleanerTask.java index 230c0d3b16..cc9cc9d08e 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/EventCleanerTask.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/EventCleanerTask.java @@ -18,14 +18,12 @@ package org.apache.hadoop.hive.metastore.events; -import java.util.TimerTask; - import org.apache.hadoop.hive.metastore.IHMSHandler; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.hive.metastore.RawStore; -public class EventCleanerTask extends TimerTask{ +public class EventCleanerTask implements Runnable { public static final Logger LOG = LoggerFactory.getLogger(EventCleanerTask.class); private final IHMSHandler handler; diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/ListenerEvent.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/ListenerEvent.java index e031dbb236..56eb9ed73a 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/ListenerEvent.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/ListenerEvent.java @@ -20,6 +20,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hive.metastore.HiveMetaStore; import org.apache.hadoop.hive.metastore.IHMSHandler; import org.apache.hadoop.hive.metastore.api.EnvironmentContext; @@ -100,6 +101,15 @@ public EnvironmentContext getEnvironmentContext() { } /** + * You should use {@link #getIHMSHandler()} instead. + * @return handler. + */ + @Deprecated + public HiveMetaStore.HMSHandler getHandler() { + return (HiveMetaStore.HMSHandler)handler; + } + + /** * @return the handler */ public IHMSHandler getIHMSHandler() { diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/hooks/JDOConnectionURLHook.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/hooks/JDOConnectionURLHook.java index 65084bd7b6..eefb5056a1 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/hooks/JDOConnectionURLHook.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/hooks/JDOConnectionURLHook.java @@ -40,8 +40,7 @@ * @return the connection URL * @throws Exception */ - public String getJdoConnectionUrl(Configuration conf) - throws Exception; + String getJdoConnectionUrl(Configuration conf) throws Exception; /** * Alerts this that the connection URL was bad. 
Can be used to collect stats, @@ -49,5 +48,5 @@ public String getJdoConnectionUrl(Configuration conf) * * @param url */ - public void notifyBadConnectionUrl(String url); + void notifyBadConnectionUrl(String url); } diff --git metastore/src/java/org/apache/hadoop/hive/metastore/repl/DumpDirCleanerTask.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/repl/DumpDirCleanerTask.java similarity index 72% rename from metastore/src/java/org/apache/hadoop/hive/metastore/repl/DumpDirCleanerTask.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/repl/DumpDirCleanerTask.java index 3c72c9c5a1..46abb4b273 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/repl/DumpDirCleanerTask.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/repl/DumpDirCleanerTask.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,28 +17,35 @@ */ package org.apache.hadoop.hive.metastore.repl; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.conf.HiveConf.ConfVars; +import org.apache.hadoop.hive.metastore.RunnableConfigurable; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; -import java.util.TimerTask; import java.util.concurrent.TimeUnit; -public class DumpDirCleanerTask extends TimerTask { +public class DumpDirCleanerTask implements RunnableConfigurable { public static final Logger LOG = LoggerFactory.getLogger(DumpDirCleanerTask.class); - private final HiveConf conf; - private final Path dumpRoot; - private final long ttl; + private Configuration conf; + private Path dumpRoot; + private long ttl; - public DumpDirCleanerTask(HiveConf conf) { + @Override + public void setConf(Configuration conf) { this.conf = conf; - dumpRoot = new Path(conf.getVar(HiveConf.ConfVars.REPLDIR)); - ttl = conf.getTimeVar(ConfVars.REPL_DUMPDIR_TTL, TimeUnit.MILLISECONDS); + dumpRoot = new Path(MetastoreConf.getVar(conf, ConfVars.REPLDIR)); + ttl = MetastoreConf.getTimeVar(conf, ConfVars.REPL_DUMPDIR_TTL, TimeUnit.MILLISECONDS); + } + + @Override + public Configuration getConf() { + return conf; } @Override diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/CommonCliOptions.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/CommonCliOptions.java new file mode 100644 index 0000000000..24e4ebea87 --- /dev/null +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/CommonCliOptions.java @@ -0,0 +1,160 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.utils; + +import java.util.Properties; + +import org.apache.commons.cli.CommandLine; +import org.apache.commons.cli.GnuParser; +import org.apache.commons.cli.HelpFormatter; +import org.apache.commons.cli.Option; +import org.apache.commons.cli.OptionBuilder; +import org.apache.commons.cli.Options; +import org.apache.commons.cli.ParseException; +import org.apache.logging.log4j.Level; + +/** + * Reusable code for Hive Cli's. + *

+ * Basic usage is: create an instance (usually a subclass if you want to + * add all your own options or processing instructions), parse, and then use + * the resulting information. + *

+ * See org.apache.hadoop.hive.service.HiveServer or + * org.apache.hadoop.hive.metastore.HiveMetaStore + * for examples of use. + * + */ +public class CommonCliOptions { + /** + * Options for parsing the command line. + */ + protected final Options OPTIONS = new Options(); + + protected CommandLine commandLine; + + /** + * The name of this cli. + */ + protected final String cliname; + + private boolean verbose = false; + + /** + * Create an instance with common options (help, verbose, etc...). + * + * @param cliname the name of the command + * @param includeHiveConf include "hiveconf" as an option if true + */ + @SuppressWarnings("static-access") + public CommonCliOptions(String cliname, boolean includeHiveConf) { + this.cliname = cliname; + + // [-v|--verbose] + OPTIONS.addOption(new Option("v", "verbose", false, "Verbose mode")); + + // [-h|--help] + OPTIONS.addOption(new Option("h", "help", false, "Print help information")); + + if (includeHiveConf) { + OPTIONS.addOption(OptionBuilder + .withValueSeparator() + .hasArgs(2) + .withArgName("property=value") + .withLongOpt("hiveconf") + .withDescription("Use value for given property") + .create()); + } + } + + /** + * Add the hiveconf properties to the Java system properties, override + * anything therein. + * + * @return a copy of the properties specified in hiveconf + */ + public Properties addHiveconfToSystemProperties() { + Properties confProps = commandLine.getOptionProperties("hiveconf"); + for (String propKey : confProps.stringPropertyNames()) { + if (verbose) { + System.err.println( + "hiveconf: " + propKey + "=" + confProps.getProperty(propKey)); + } + if (propKey.equalsIgnoreCase("hive.root.logger")) { + splitAndSetLogger(propKey, confProps); + } else { + System.setProperty(propKey, confProps.getProperty(propKey)); + } + } + return confProps; + } + + public static void splitAndSetLogger(final String propKey, final Properties confProps) { + String propVal = confProps.getProperty(propKey); + if (propVal.contains(",")) { + String[] tokens = propVal.split(","); + for (String token : tokens) { + if (Level.getLevel(token) == null) { + System.setProperty("hive.root.logger", token); + } else { + System.setProperty("hive.log.level", token); + } + } + } else { + System.setProperty(propKey, confProps.getProperty(propKey)); + } + } + + /** + * Print usage information for the CLI. + */ + public void printUsage() { + new HelpFormatter().printHelp(cliname, OPTIONS); + } + + /** + * Parse the arguments. + * @param args + */ + public void parse(String[] args) { + try { + commandLine = new GnuParser().parse(OPTIONS, args); + + if (commandLine.hasOption('h')) { + printUsage(); + System.exit(1); + } + if (commandLine.hasOption('v')) { + verbose = true; + } + } catch (ParseException e) { + System.err.println(e.getMessage()); + printUsage(); + System.exit(1); + } + + } + + /** + * Should the client be verbose. 
+ */ + public boolean isVerbose() { + return verbose; + } + +} diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/FileUtils.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/FileUtils.java index 2dac899d91..c9deccc6c1 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/FileUtils.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/FileUtils.java @@ -25,6 +25,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.fs.Trash; +import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.slf4j.Logger; @@ -37,6 +38,12 @@ import java.util.List; public class FileUtils { + private static final PathFilter SNAPSHOT_DIR_PATH_FILTER = new PathFilter() { + @Override + public boolean accept(Path p) { + return ".snapshot".equalsIgnoreCase(p.getName()); + } + }; private static final Logger LOG = LoggerFactory.getLogger(FileUtils.class); public static final PathFilter HIDDEN_FILES_PATH_FILTER = new PathFilter() { @@ -379,4 +386,30 @@ public static boolean equalsFileSystem(FileSystem fs1, FileSystem fs2) { //Once equality has been added in HDFS-9159, we should make use of it return fs1.getUri().equals(fs2.getUri()); } + + /** + * Check if the path contains a subdirectory named '.snapshot' + * @param p path to check + * @param fs filesystem of the path + * @return true if p contains a subdirectory named '.snapshot' + * @throws IOException + */ + public static boolean pathHasSnapshotSubDir(Path p, FileSystem fs) throws IOException { + // Hadoop is missing a public API to check for snapshotable directories. Check with the directory name + // until a more appropriate API is provided by HDFS-12257. 
+ final FileStatus[] statuses = fs.listStatus(p, FileUtils.SNAPSHOT_DIR_PATH_FILTER); + return statuses != null && statuses.length != 0; + } + + public static void makeDir(Path path, Configuration conf) throws MetaException { + FileSystem fs; + try { + fs = path.getFileSystem(conf); + if (!fs.exists(path)) { + fs.mkdirs(path); + } + } catch (IOException e) { + throw new MetaException("Unable to : " + path); + } + } } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/HdfsUtils.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/HdfsUtils.java index ecbddc3e7f..f494a8eef8 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/HdfsUtils.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/HdfsUtils.java @@ -18,10 +18,20 @@ package org.apache.hadoop.hive.metastore.utils; import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Joiner; +import com.google.common.base.Objects; +import com.google.common.base.Predicate; +import com.google.common.collect.Iterables; +import org.apache.commons.lang.ArrayUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FsShell; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.AclEntryScope; +import org.apache.hadoop.fs.permission.AclEntryType; +import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DistributedFileSystem; @@ -39,6 +49,7 @@ import java.net.URI; import java.security.PrivilegedExceptionAction; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.Map; @@ -218,4 +229,166 @@ private static DistributedFileSystem ensureDfs(FileSystem fs) { } return (DistributedFileSystem)fs; } + + public static class HadoopFileStatus { + + private final FileStatus fileStatus; + private final AclStatus aclStatus; + + public HadoopFileStatus(Configuration conf, FileSystem fs, Path file) throws IOException { + + FileStatus fileStatus = fs.getFileStatus(file); + AclStatus aclStatus = null; + if (Objects.equal(conf.get("dfs.namenode.acls.enabled"), "true")) { + //Attempt extended Acl operations only if its enabled, but don't fail the operation regardless. + try { + aclStatus = fs.getAclStatus(file); + } catch (Exception e) { + LOG.info("Skipping ACL inheritance: File system for path " + file + " " + + "does not support ACLs but dfs.namenode.acls.enabled is set to true. "); + LOG.debug("The details are: " + e, e); + } + }this.fileStatus = fileStatus; + this.aclStatus = aclStatus; + } + + public FileStatus getFileStatus() { + return fileStatus; + } + + List getAclEntries() { + return aclStatus == null ? null : Collections.unmodifiableList(aclStatus.getEntries()); + } + + @VisibleForTesting + AclStatus getAclStatus() { + return this.aclStatus; + } + } + + /** + * Copy the permissions, group, and ACLs from a source {@link HadoopFileStatus} to a target {@link Path}. This method + * will only log a warning if permissions cannot be set, no exception will be thrown. 
+ * + * @param conf the {@link Configuration} used when setting permissions and ACLs + * @param sourceStatus the source {@link HadoopFileStatus} to copy permissions and ACLs from + * @param targetGroup the group of the target {@link Path}, if this is set and it is equal to the source group, an + * extra set group operation is avoided + * @param fs the {@link FileSystem} that contains the target {@link Path} + * @param target the {@link Path} to copy permissions, group, and ACLs to + * @param recursion recursively set permissions and ACLs on the target {@link Path} + */ + public static void setFullFileStatus(Configuration conf, HdfsUtils.HadoopFileStatus sourceStatus, + String targetGroup, FileSystem fs, Path target, boolean recursion) { + setFullFileStatus(conf, sourceStatus, targetGroup, fs, target, recursion, recursion ? new FsShell() : null); + } + + @VisibleForTesting + static void setFullFileStatus(Configuration conf, HdfsUtils.HadoopFileStatus sourceStatus, + String targetGroup, FileSystem fs, Path target, boolean recursion, FsShell fsShell) { + try { + FileStatus fStatus = sourceStatus.getFileStatus(); + String group = fStatus.getGroup(); + boolean aclEnabled = Objects.equal(conf.get("dfs.namenode.acls.enabled"), "true"); + FsPermission sourcePerm = fStatus.getPermission(); + List aclEntries = null; + if (aclEnabled) { + if (sourceStatus.getAclEntries() != null) { + LOG.trace(sourceStatus.getAclStatus().toString()); + aclEntries = new ArrayList<>(sourceStatus.getAclEntries()); + removeBaseAclEntries(aclEntries); + + //the ACL api's also expect the tradition user/group/other permission in the form of ACL + aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.USER, sourcePerm.getUserAction())); + aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.GROUP, sourcePerm.getGroupAction())); + aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.OTHER, sourcePerm.getOtherAction())); + } + } + + if (recursion) { + //use FsShell to change group, permissions, and extended ACL's recursively + fsShell.setConf(conf); + //If there is no group of a file, no need to call chgrp + if (group != null && !group.isEmpty()) { + run(fsShell, new String[]{"-chgrp", "-R", group, target.toString()}); + } + if (aclEnabled) { + if (null != aclEntries) { + //Attempt extended Acl operations only if its enabled, but don't fail the operation regardless. + try { + //construct the -setfacl command + String aclEntry = Joiner.on(",").join(aclEntries); + run(fsShell, new String[]{"-setfacl", "-R", "--set", aclEntry, target.toString()}); + + } catch (Exception e) { + LOG.info("Skipping ACL inheritance: File system for path " + target + " " + + "does not support ACLs but dfs.namenode.acls.enabled is set to true. 
"); + LOG.debug("The details are: " + e, e); + } + } + } else { + String permission = Integer.toString(sourcePerm.toShort(), 8); + run(fsShell, new String[]{"-chmod", "-R", permission, target.toString()}); + } + } else { + if (group != null && !group.isEmpty()) { + if (targetGroup == null || + !group.equals(targetGroup)) { + fs.setOwner(target, null, group); + } + } + if (aclEnabled) { + if (null != aclEntries) { + fs.setAcl(target, aclEntries); + } + } else { + fs.setPermission(target, sourcePerm); + } + } + } catch (Exception e) { + LOG.warn( + "Unable to inherit permissions for file " + target + " from file " + sourceStatus.getFileStatus().getPath(), + e.getMessage()); + LOG.debug("Exception while inheriting permissions", e); + } + } + + /** + * Removes basic permission acls (unamed acls) from the list of acl entries + * @param entries acl entries to remove from. + */ + private static void removeBaseAclEntries(List entries) { + Iterables.removeIf(entries, new Predicate() { + @Override + public boolean apply(AclEntry input) { + if (input.getName() == null) { + return true; + } + return false; + } + }); + } + + /** + * Create a new AclEntry with scope, type and permission (no name). + * + * @param scope + * AclEntryScope scope of the ACL entry + * @param type + * AclEntryType ACL entry type + * @param permission + * FsAction set of permissions in the ACL entry + * @return AclEntry new AclEntry + */ + private static AclEntry newAclEntry(AclEntryScope scope, AclEntryType type, + FsAction permission) { + return new AclEntry.Builder().setScope(scope).setType(type) + .setPermission(permission).build(); + } + + private static void run(FsShell shell, String[] command) throws Exception { + LOG.debug(ArrayUtils.toString(command)); + int retval = shell.run(command); + LOG.debug("Return value is :" + retval); + } } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/LogUtils.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/LogUtils.java new file mode 100644 index 0000000000..06fe6cb18f --- /dev/null +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/LogUtils.java @@ -0,0 +1,140 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.metastore.utils; + +import java.io.File; +import java.net.URL; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.core.config.Configurator; +import org.apache.logging.log4j.core.impl.Log4jContextFactory; +import org.apache.logging.log4j.spi.DefaultThreadContextMap; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Utilities common to logging operations. + */ +public class LogUtils { + + private static final String HIVE_L4J = "hive-log4j2.properties"; + private static final Logger l4j = LoggerFactory.getLogger(LogUtils.class); + + @SuppressWarnings("serial") + public static class LogInitializationException extends Exception { + LogInitializationException(String msg) { + super(msg); + } + } + + /** + * Initialize log4j. + * + * @return a message suitable for display to the user + * @throws LogInitializationException if log4j fails to initialize correctly + */ + public static String initHiveLog4j(Configuration conf) + throws LogInitializationException { + return initHiveLog4jCommon(conf, MetastoreConf.ConfVars.LOG4J_FILE); + } + + private static String initHiveLog4jCommon(Configuration conf, ConfVars confVarName) + throws LogInitializationException { + if (MetastoreConf.getVar(conf, confVarName).equals("")) { + // if log4j configuration file not set, or could not be found, use default setting + return initHiveLog4jDefault(conf, "", confVarName); + } else { + // if log4j configuration file found successfully, use the configured property value + String log4jFileName = MetastoreConf.getVar(conf, confVarName); + File log4jConfigFile = new File(log4jFileName); + boolean fileExists = log4jConfigFile.exists(); + if (!fileExists) { + // if property specified file not found in local file system + // use default setting + return initHiveLog4jDefault( + conf, "Not able to find conf file: " + log4jConfigFile, confVarName); + } else { + // property specified file found in local file system + // use the specified file + final boolean async = checkAndSetAsyncLogging(conf); + // required for MDC based routing appender so that child threads can inherit the MDC context + System.setProperty(DefaultThreadContextMap.INHERITABLE_MAP, "true"); + Configurator.initialize(null, log4jFileName); + logConfigLocation(); + return "Logging initialized using configuration in " + log4jConfigFile + " Async: " + async; + } + } + } + + private static boolean checkAndSetAsyncLogging(final Configuration conf) { + final boolean asyncLogging = MetastoreConf.getBoolVar(conf, ConfVars.ASYNC_LOG_ENABLED); + if (asyncLogging) { + System.setProperty("Log4jContextSelector", + "org.apache.logging.log4j.core.async.AsyncLoggerContextSelector"); + // default is ClassLoaderContextSelector which is created during automatic logging + // initialization in a static initialization block. + // Changing ContextSelector at runtime requires creating new context factory which will + // internally create new context selector based on system property. 
+ LogManager.setFactory(new Log4jContextFactory()); + } + return asyncLogging; + } + + private static String initHiveLog4jDefault(Configuration conf, String logMessage, ConfVars confVarName) + throws LogInitializationException { + URL hive_l4j = null; + switch (confVarName) { + case LOG4J_FILE: + hive_l4j = LogUtils.class.getClassLoader().getResource(HIVE_L4J); + break; + default: + break; + } + if (hive_l4j != null) { + final boolean async = checkAndSetAsyncLogging(conf); + System.setProperty(DefaultThreadContextMap.INHERITABLE_MAP, "true"); + Configurator.initialize(null, hive_l4j.toString()); + logConfigLocation(); + return (logMessage + "\n" + "Logging initialized using configuration in " + hive_l4j + + " Async: " + async); + } else { + throw new LogInitializationException( + logMessage + "Unable to initialize logging using " + + LogUtils.HIVE_L4J + ", not found on CLASSPATH!"); + } + } + + private static void logConfigLocation() throws LogInitializationException { + // Log a warning if hive-default.xml is found on the classpath + if (MetastoreConf.getHiveDefaultLocation() != null) { + l4j.warn("DEPRECATED: Ignoring hive-default.xml found on the CLASSPATH at " + + MetastoreConf.getHiveDefaultLocation().getPath()); + } + // Look for hive-site.xml on the CLASSPATH and log its location if found. + if (MetastoreConf.getHiveSiteLocation() == null) { + l4j.warn("hive-site.xml not found on CLASSPATH"); + } else { + l4j.debug("Using hive-site.xml found on CLASSPATH at " + + MetastoreConf.getHiveSiteLocation().getPath()); + } + } +} diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java index 50e4244857..bf25e50781 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java @@ -22,9 +22,12 @@ import com.google.common.collect.Maps; import com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.commons.collections.ListUtils; -import org.apache.commons.lang.*; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; +import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.metastore.ColumnType; import org.apache.hadoop.hive.metastore.TableType; @@ -35,6 +38,7 @@ import org.apache.hadoop.hive.metastore.api.Decimal; import org.apache.hadoop.hive.metastore.api.EnvironmentContext; import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.InvalidObjectException; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.Order; import org.apache.hadoop.hive.metastore.api.Partition; @@ -45,26 +49,37 @@ import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import org.apache.hadoop.hive.metastore.columnstats.aggr.ColumnStatsAggregator; import org.apache.hadoop.hive.metastore.columnstats.aggr.ColumnStatsAggregatorFactory; +import org.apache.hadoop.hive.metastore.columnstats.merge.ColumnStatsMerger; +import org.apache.hadoop.hive.metastore.columnstats.merge.ColumnStatsMergerFactory; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import 
org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; +import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; +import org.apache.hadoop.security.SaslRpcServer; +import org.apache.hadoop.security.authorize.DefaultImpersonationProvider; +import org.apache.hadoop.security.authorize.ProxyUsers; +import org.apache.hadoop.util.MachineList; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import javax.annotation.Nullable; +import java.io.File; +import java.lang.reflect.InvocationTargetException; import java.math.BigDecimal; import java.math.BigInteger; +import java.net.URL; +import java.net.URLClassLoader; import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; import java.security.MessageDigest; import java.text.DateFormat; import java.text.SimpleDateFormat; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.HashMap; -import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; -import java.util.Set; import java.util.SortedMap; import java.util.SortedSet; import java.util.TreeMap; @@ -206,12 +221,12 @@ public static MetaException newMetaException(String errorMessage, Exception e) { singleObj.add(obj); ColumnStatistics singleCS = new ColumnStatistics(css.getStatsDesc(), singleObj); if (!map.containsKey(obj.getColName())) { - map.put(obj.getColName(), new ArrayList()); + map.put(obj.getColName(), new ArrayList<>()); } map.get(obj.getColName()).add(singleCS); } } - return MetaStoreUtils.aggrPartitionStats(map,dbName,tableName,partNames,colNames,useDensityFunctionForNDVEstimation, ndvTuner); + return aggrPartitionStats(map,dbName,tableName,partNames,colNames,useDensityFunctionForNDVEstimation, ndvTuner); } public static List aggrPartitionStats( @@ -401,7 +416,7 @@ public static String getPartitionValWithInvalidCharacter(List partVals, * if it doesn't match the pattern. */ public static boolean validateName(String name, Configuration conf) { - Pattern tpat = null; + Pattern tpat; String allowedCharacters = "\\w_"; if (conf != null && MetastoreConf.getBoolVar(conf, @@ -490,7 +505,7 @@ public static boolean requireCalStats(Configuration hiveConf, Partition oldPart, return false; } - if (MetaStoreUtils.isView(tbl)) { + if (isView(tbl)) { return false; } @@ -602,7 +617,7 @@ public static boolean updateTableStatsFast(Table tbl, FileStatus[] fileStatus, b params == null || !containsAllFastStats(params)) { if (params == null) { - params = new HashMap(); + params = new HashMap<>(); } if (!newDir) { // The table location already exists and may contain data. @@ -701,7 +716,7 @@ public static boolean updatePartitionStatsFast(PartitionSpecProxy.PartitionItera params == null || !containsAllFastStats(params)) { if (params == null) { - params = new HashMap(); + params = new HashMap<>(); } if (!madeDir) { // The partition location already existed and may contain data. 
Lets try to @@ -728,7 +743,7 @@ public static boolean columnsIncludedByNameType(List oldCols, return false; } - Map columnNameTypePairMap = new HashMap(newCols.size()); + Map columnNameTypePairMap = new HashMap<>(newCols.size()); for (FieldSchema newCol : newCols) { columnNameTypePairMap.put(newCol.getName().toLowerCase(), newCol.getType()); } @@ -747,4 +762,288 @@ public static boolean isInsertOnlyTableParam(Map params) { String transactionalProp = params.get(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES); return (transactionalProp != null && "insert_only".equalsIgnoreCase(transactionalProp)); } + + /** + * create listener instances as per the configuration. + * + * @param clazz Class of the listener + * @param conf configuration object + * @param listenerImplList Implementation class name + * @return instance of the listener + * @throws MetaException if there is any failure instantiating the class + */ + public static List getMetaStoreListeners(Class clazz, + Configuration conf, String listenerImplList) throws MetaException { + List listeners = new ArrayList(); + + if (StringUtils.isBlank(listenerImplList)) { + return listeners; + } + + String[] listenerImpls = listenerImplList.split(","); + for (String listenerImpl : listenerImpls) { + try { + T listener = (T) Class.forName( + listenerImpl.trim(), true, JavaUtils.getClassLoader()).getConstructor( + Configuration.class).newInstance(conf); + listeners.add(listener); + } catch (InvocationTargetException ie) { + throw new MetaException("Failed to instantiate listener named: "+ + listenerImpl + ", reason: " + ie.getCause()); + } catch (Exception e) { + throw new MetaException("Failed to instantiate listener named: "+ + listenerImpl + ", reason: " + e); + } + } + + return listeners; + } + + public static String validateSkewedColNames(List cols) { + if (CollectionUtils.isEmpty(cols)) { + return null; + } + for (String col : cols) { + if (!validateColumnName(col)) { + return col; + } + } + return null; + } + + public static String validateSkewedColNamesSubsetCol(List skewedColNames, + List cols) { + if (CollectionUtils.isEmpty(skewedColNames)) { + return null; + } + List colNames = new ArrayList<>(cols.size()); + for (FieldSchema fieldSchema : cols) { + colNames.add(fieldSchema.getName()); + } + // make a copy + List copySkewedColNames = new ArrayList<>(skewedColNames); + // remove valid columns + copySkewedColNames.removeAll(colNames); + if (copySkewedColNames.isEmpty()) { + return null; + } + return copySkewedColNames.toString(); + } + + public static boolean isNonNativeTable(Table table) { + if (table == null || table.getParameters() == null) { + return false; + } + return (table.getParameters().get(hive_metastoreConstants.META_TABLE_STORAGE) != null); + } + + public static boolean isIndexTable(Table table) { + if (table == null) { + return false; + } + return TableType.INDEX_TABLE.toString().equals(table.getTableType()); + } + + /** + * Given a list of partition columns and a partial mapping from + * some partition columns to values the function returns the values + * for the column. 
+ * @param partCols the list of table partition columns + * @param partSpec the partial mapping from partition column to values + * @return list of values of for given partition columns, any missing + * values in partSpec is replaced by an empty string + */ + public static List getPvals(List partCols, + Map partSpec) { + List pvals = new ArrayList<>(partCols.size()); + for (FieldSchema field : partCols) { + String val = StringUtils.defaultString(partSpec.get(field.getName())); + pvals.add(val); + } + return pvals; + } + + /** + * @param schema1: The first schema to be compared + * @param schema2: The second schema to be compared + * @return true if the two schemas are the same else false + * for comparing a field we ignore the comment it has + */ + public static boolean compareFieldColumns(List schema1, List schema2) { + if (schema1.size() != schema2.size()) { + return false; + } + Iterator its1 = schema1.iterator(); + Iterator its2 = schema2.iterator(); + while (its1.hasNext()) { + FieldSchema f1 = its1.next(); + FieldSchema f2 = its2.next(); + // The default equals provided by thrift compares the comments too for + // equality, thus we need to compare the relevant fields here. + if (!StringUtils.equals(f1.getName(), f2.getName()) || + !StringUtils.equals(f1.getType(), f2.getType())) { + return false; + } + } + return true; + } + + public static boolean isArchived(Partition part) { + Map params = part.getParameters(); + return "TRUE".equalsIgnoreCase(params.get(hive_metastoreConstants.IS_ARCHIVED)); + } + + public static Path getOriginalLocation(Partition part) { + Map params = part.getParameters(); + assert(isArchived(part)); + String originalLocation = params.get(hive_metastoreConstants.ORIGINAL_LOCATION); + assert( originalLocation != null); + + return new Path(originalLocation); + } + + private static String ARCHIVING_LEVEL = "archiving_level"; + public static int getArchivingLevel(Partition part) throws MetaException { + if (!isArchived(part)) { + throw new MetaException("Getting level of unarchived partition"); + } + + String lv = part.getParameters().get(ARCHIVING_LEVEL); + if (lv != null) { + return Integer.parseInt(lv); + } + // partitions archived before introducing multiple archiving + return part.getValues().size(); + } + + public static boolean partitionNameHasValidCharacters(List partVals, + Pattern partitionValidationPattern) { + return getPartitionValWithInvalidCharacter(partVals, partitionValidationPattern) == null; + } + + // this function will merge csOld into csNew. + public static void mergeColStats(ColumnStatistics csNew, ColumnStatistics csOld) + throws InvalidObjectException { + List list = new ArrayList<>(); + if (csNew.getStatsObj().size() != csOld.getStatsObjSize()) { + // Some of the columns' stats are missing + // This implies partition schema has changed. We will merge columns + // present in both, overwrite stats for columns absent in metastore and + // leave alone columns stats missing from stats task. This last case may + // leave stats in stale state. This will be addressed later. + LOG.debug("New ColumnStats size is {}, but old ColumnStats size is {}", + csNew.getStatsObj().size(), csOld.getStatsObjSize()); + } + // In this case, we have to find out which columns can be merged. + Map map = new HashMap<>(); + // We build a hash map from colName to object for old ColumnStats. 
+ for (ColumnStatisticsObj obj : csOld.getStatsObj()) { + map.put(obj.getColName(), obj); + } + for (int index = 0; index < csNew.getStatsObj().size(); index++) { + ColumnStatisticsObj statsObjNew = csNew.getStatsObj().get(index); + ColumnStatisticsObj statsObjOld = map.get(statsObjNew.getColName()); + if (statsObjOld != null) { + // If statsObjOld is found, we can merge. + ColumnStatsMerger merger = ColumnStatsMergerFactory.getColumnStatsMerger(statsObjNew, + statsObjOld); + merger.merge(statsObjNew, statsObjOld); + } + list.add(statsObjNew); + } + csNew.setStatsObj(list); + } + + /** + * Read and return the meta store Sasl configuration. Currently it uses the default + * Hadoop SASL configuration and can be configured using "hadoop.rpc.protection" + * HADOOP-10211, made a backward incompatible change due to which this call doesn't + * work with Hadoop 2.4.0 and later. + * @param conf + * @return The SASL configuration + */ + public static Map getMetaStoreSaslProperties(Configuration conf, boolean useSSL) { + // As of now Hive Meta Store uses the same configuration as Hadoop SASL configuration + + // If SSL is enabled, override the given value of "hadoop.rpc.protection" and set it to "authentication" + // This disables any encryption provided by SASL, since SSL already provides it + String hadoopRpcProtectionVal = conf.get(CommonConfigurationKeysPublic.HADOOP_RPC_PROTECTION); + String hadoopRpcProtectionAuth = SaslRpcServer.QualityOfProtection.AUTHENTICATION.toString(); + + if (useSSL && hadoopRpcProtectionVal != null && !hadoopRpcProtectionVal.equals(hadoopRpcProtectionAuth)) { + LOG.warn("Overriding value of " + CommonConfigurationKeysPublic.HADOOP_RPC_PROTECTION + " setting it from " + + hadoopRpcProtectionVal + " to " + hadoopRpcProtectionAuth + " because SSL is enabled"); + conf.set(CommonConfigurationKeysPublic.HADOOP_RPC_PROTECTION, hadoopRpcProtectionAuth); + } + return HadoopThriftAuthBridge.getBridge().getHadoopSaslProperties(conf); + } + + /** + * Add new elements to the classpath. + * + * @param newPaths + * Array of classpath elements + */ + public static ClassLoader addToClassPath(ClassLoader cloader, String[] newPaths) throws Exception { + URLClassLoader loader = (URLClassLoader) cloader; + List curPath = Arrays.asList(loader.getURLs()); + ArrayList newPath = new ArrayList<>(curPath.size()); + + // get a list with the current classpath components + for (URL onePath : curPath) { + newPath.add(onePath); + } + curPath = newPath; + + for (String onestr : newPaths) { + URL oneurl = urlFromPathString(onestr); + if (oneurl != null && !curPath.contains(oneurl)) { + curPath.add(oneurl); + } + } + + return new URLClassLoader(curPath.toArray(new URL[0]), loader); + } + + /** + * Create a URL from a string representing a path to a local file. + * The path string can be just a path, or can start with file:/, file:/// + * @param onestr path string + * @return + */ + private static URL urlFromPathString(String onestr) { + URL oneurl = null; + try { + if (onestr.startsWith("file:/")) { + oneurl = new URL(onestr); + } else { + oneurl = new File(onestr).toURL(); + } + } catch (Exception err) { + LOG.error("Bad URL " + onestr + ", ignoring path"); + } + return oneurl; + } + + /** + * Verify if the user is allowed to make DB notification related calls. + * Only the superusers defined in the Hadoop proxy user settings have the permission. 
+ * + * @param user the short user name + * @param conf that contains the proxy user settings + * @return if the user has the permission + */ + public static boolean checkUserHasHostProxyPrivileges(String user, Configuration conf, String ipAddress) { + DefaultImpersonationProvider sip = ProxyUsers.getDefaultImpersonationProvider(); + // Just need to initialize the ProxyUsers for the first time, given that the conf will not change on the fly + if (sip == null) { + ProxyUsers.refreshSuperUserGroupsConfiguration(conf); + sip = ProxyUsers.getDefaultImpersonationProvider(); + } + Map> proxyHosts = sip.getProxyHosts(); + Collection hostEntries = proxyHosts.get(sip.getProxySuperuserIpConfKey(user)); + MachineList machineList = new MachineList(hostEntries); + ipAddress = (ipAddress == null) ? StringUtils.EMPTY : ipAddress; + return machineList.includes(ipAddress); + } } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/SecurityUtils.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/SecurityUtils.java index b05c995ba0..41a18cb19c 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/SecurityUtils.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/SecurityUtils.java @@ -34,14 +34,28 @@ import org.apache.zookeeper.client.ZooKeeperSaslClient; import javax.security.auth.login.AppConfigurationEntry; +import org.apache.thrift.transport.TSSLTransportFactory; +import org.apache.thrift.transport.TServerSocket; +import org.apache.thrift.transport.TTransportException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.net.ssl.SSLServerSocket; import javax.security.auth.login.LoginException; import javax.security.auth.login.AppConfigurationEntry.LoginModuleControlFlag; import java.io.IOException; import java.util.HashMap; import java.util.Map; +import java.net.InetSocketAddress; +import java.net.UnknownHostException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; public class SecurityUtils { + private static final Logger LOG = LoggerFactory.getLogger(SecurityUtils.class); + public static UserGroupInformation getUGI() throws LoginException, IOException { String doAs = System.getenv("HADOOP_USER_NAME"); if (doAs != null && doAs.length() > 0) { @@ -209,4 +223,65 @@ public static String getTokenStoreClassName(Configuration conf) { return tokenStoreClass; } } + + + /** + * @return the user name set in hadoop.job.ugi param or the current user from System + * @throws IOException if underlying Hadoop call throws LoginException + */ + public static String getUser() throws IOException { + try { + UserGroupInformation ugi = getUGI(); + return ugi.getUserName(); + } catch (LoginException le) { + throw new IOException(le); + } + } + + public static TServerSocket getServerSocket(String hiveHost, int portNum) throws TTransportException { + InetSocketAddress serverAddress; + if (hiveHost == null || hiveHost.isEmpty()) { + // Wildcard bind + serverAddress = new InetSocketAddress(portNum); + } else { + serverAddress = new InetSocketAddress(hiveHost, portNum); + } + return new TServerSocket(serverAddress); + } + + public static TServerSocket getServerSSLSocket(String hiveHost, int portNum, String keyStorePath, + String keyStorePassWord, List sslVersionBlacklist) throws TTransportException, + UnknownHostException { + TSSLTransportFactory.TSSLTransportParameters params = + new TSSLTransportFactory.TSSLTransportParameters(); + 
params.setKeyStore(keyStorePath, keyStorePassWord); + InetSocketAddress serverAddress; + if (hiveHost == null || hiveHost.isEmpty()) { + // Wildcard bind + serverAddress = new InetSocketAddress(portNum); + } else { + serverAddress = new InetSocketAddress(hiveHost, portNum); + } + TServerSocket thriftServerSocket = + TSSLTransportFactory.getServerSocket(portNum, 0, serverAddress.getAddress(), params); + if (thriftServerSocket.getServerSocket() instanceof SSLServerSocket) { + List sslVersionBlacklistLocal = new ArrayList<>(); + for (String sslVersion : sslVersionBlacklist) { + sslVersionBlacklistLocal.add(sslVersion.trim().toLowerCase()); + } + SSLServerSocket sslServerSocket = (SSLServerSocket) thriftServerSocket.getServerSocket(); + List enabledProtocols = new ArrayList<>(); + for (String protocol : sslServerSocket.getEnabledProtocols()) { + if (sslVersionBlacklistLocal.contains(protocol.toLowerCase())) { + LOG.debug("Disabling SSL Protocol: " + protocol); + } else { + enabledProtocols.add(protocol); + } + } + sslServerSocket.setEnabledProtocols(enabledProtocols.toArray(new String[0])); + LOG.info("SSL Server Socket Enabled Protocols: " + + Arrays.toString(sslServerSocket.getEnabledProtocols())); + } + return thriftServerSocket; + } } diff --git metastore/src/test/org/apache/hadoop/hive/metastore/DummyJdoConnectionUrlHook.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyJdoConnectionUrlHook.java similarity index 99% rename from metastore/src/test/org/apache/hadoop/hive/metastore/DummyJdoConnectionUrlHook.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyJdoConnectionUrlHook.java index 64cdfe063b..b1cd7db1aa 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/DummyJdoConnectionUrlHook.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyJdoConnectionUrlHook.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java similarity index 99% rename from metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java index 84b70d88b0..3c61ff6772 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -23,10 +23,7 @@ import java.util.List; import java.util.Map; -import org.junit.Assert; - import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.AggrStats; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; @@ -68,8 +65,10 @@ import org.apache.hadoop.hive.metastore.api.UnknownDBException; import org.apache.hadoop.hive.metastore.api.UnknownPartitionException; import org.apache.hadoop.hive.metastore.api.UnknownTableException; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; import org.apache.thrift.TException; +import org.junit.Assert; /** * @@ -92,7 +91,7 @@ public Configuration getConf() { @Override public void setConf(Configuration arg0) { String expected = DummyJdoConnectionUrlHook.newUrl; - String actual = arg0.get(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname); + String actual = MetastoreConf.getVar(arg0, MetastoreConf.ConfVars.CONNECTURLKEY); Assert.assertEquals("The expected URL used by JDO to connect to the metastore: " + expected + " did not match the actual value when the Raw Store was initialized: " + actual, diff --git metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetastoreCli.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetastoreCli.java similarity index 72% rename from metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetastoreCli.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetastoreCli.java index f581c7d258..f1a08dd9e6 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetastoreCli.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetastoreCli.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,9 @@ package org.apache.hadoop.hive.metastore; -import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.junit.Test; public class TestHiveMetastoreCli { @@ -26,14 +28,14 @@ @Test public void testDefaultCliPortValue() { - HiveConf configuration = new HiveConf(); + Configuration configuration = MetastoreConf.newMetastoreConf(); HiveMetaStore.HiveMetastoreCli cli = new HiveMetaStore.HiveMetastoreCli(configuration); - assert (cli.getPort() == HiveConf.getIntVar(configuration, HiveConf.ConfVars.METASTORE_SERVER_PORT)); + assert (cli.getPort() == MetastoreConf.getIntVar(configuration, ConfVars.SERVER_PORT)); } @Test public void testOverriddenCliPortValue() { - HiveConf configuration = new HiveConf(); + Configuration configuration = MetastoreConf.newMetastoreConf(); HiveMetaStore.HiveMetastoreCli cli = new HiveMetaStore.HiveMetastoreCli(configuration); cli.parse(TestHiveMetastoreCli.CLI_ARGUMENTS); @@ -42,8 +44,8 @@ public void testOverriddenCliPortValue() { @Test public void testOverriddenMetastoreServerPortValue() { - HiveConf configuration = new HiveConf(); - HiveConf.setIntVar(configuration, HiveConf.ConfVars.METASTORE_SERVER_PORT, 12345); + Configuration configuration = MetastoreConf.newMetastoreConf(); + MetastoreConf.setLongVar(configuration, ConfVars.SERVER_PORT, 12345); HiveMetaStore.HiveMetastoreCli cli = new HiveMetaStore.HiveMetastoreCli(configuration); @@ -52,8 +54,8 @@ public void testOverriddenMetastoreServerPortValue() { @Test public void testCliOverridesConfiguration() { - HiveConf configuration = new HiveConf(); - HiveConf.setIntVar(configuration, HiveConf.ConfVars.METASTORE_SERVER_PORT, 12345); + Configuration configuration = MetastoreConf.newMetastoreConf(); + MetastoreConf.setLongVar(configuration, ConfVars.SERVER_PORT, 12345); HiveMetaStore.HiveMetastoreCli cli = new HiveMetaStore.HiveMetastoreCli(configuration); cli.parse(CLI_ARGUMENTS); diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreConnectionUrlHook.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreConnectionUrlHook.java similarity index 56% rename from itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreConnectionUrlHook.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreConnectionUrlHook.java index 91a28884bf..cfcd959ad9 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreConnectionUrlHook.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreConnectionUrlHook.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,45 +18,28 @@ package org.apache.hadoop.hive.metastore; -import junit.framework.TestCase; - -import org.apache.hadoop.hive.cli.CliSessionState; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.ql.session.SessionState; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; +import org.junit.Test; /** * TestMetaStoreConnectionUrlHook * Verifies that when an instance of an implementation of RawStore is initialized, the connection * URL has already been updated by any metastore connect URL hooks. */ -public class TestMetaStoreConnectionUrlHook extends TestCase { - private HiveConf hiveConf; - - @Override - protected void setUp() throws Exception { - - super.setUp(); - } - - @Override - protected void tearDown() throws Exception { - super.tearDown(); - } +public class TestMetaStoreConnectionUrlHook { + @Test public void testUrlHook() throws Exception { - hiveConf = new HiveConf(this.getClass()); - hiveConf.setVar(HiveConf.ConfVars.METASTORECONNECTURLHOOK, - DummyJdoConnectionUrlHook.class.getName()); - hiveConf.setVar(HiveConf.ConfVars.METASTORECONNECTURLKEY, - DummyJdoConnectionUrlHook.initialUrl); - hiveConf.setVar(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL, - DummyRawStoreForJdoConnection.class.getName()); - hiveConf.setBoolean("hive.metastore.checkForDefaultDb", true); - SessionState.start(new CliSessionState(hiveConf)); + Configuration conf = MetastoreConf.newMetastoreConf(); + MetastoreConf.setVar(conf, ConfVars.CONNECTURLHOOK, DummyJdoConnectionUrlHook.class.getName()); + MetastoreConf.setVar(conf, ConfVars.CONNECTURLKEY, DummyJdoConnectionUrlHook.initialUrl); + MetastoreConf.setVar(conf, ConfVars.RAW_STORE_IMPL, DummyRawStoreForJdoConnection.class.getName()); // Instantiating the HMSHandler with hive.metastore.checkForDefaultDb will cause it to // initialize an instance of the DummyRawStoreForJdoConnection HiveMetaStore.HMSHandler hms = new HiveMetaStore.HMSHandler( - "test_metastore_connection_url_hook_hms_handler", hiveConf); + "test_metastore_connection_url_hook_hms_handler", conf); } } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java index 5c73d256c3..6a39538045 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java @@ -23,6 +23,7 @@ import com.google.common.collect.ImmutableList; import org.apache.hadoop.hive.metastore.ObjectStore.RetryingExecutor; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.FileMetadataExprType; @@ -32,6 +33,9 @@ import org.apache.hadoop.hive.metastore.api.InvalidObjectException; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.NotificationEvent; +import org.apache.hadoop.hive.metastore.api.NotificationEventRequest; +import org.apache.hadoop.hive.metastore.api.NotificationEventResponse; import 
org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.PrincipalType; import org.apache.hadoop.hive.metastore.api.Role; @@ -41,8 +45,11 @@ import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.messaging.EventMessage; import org.apache.hadoop.hive.metastore.metrics.Metrics; import org.apache.hadoop.hive.metastore.metrics.MetricsConstants; +import org.apache.hadoop.hive.metastore.model.MNotificationLog; +import org.apache.hadoop.hive.metastore.model.MNotificationNextId; import org.apache.hadoop.hive.ql.io.sarg.SearchArgument; import org.junit.Assert; import org.junit.Assume; @@ -59,6 +66,12 @@ import java.util.HashSet; import java.util.List; import java.util.Set; +import java.util.concurrent.BrokenBarrierException; +import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; public class TestObjectStore { private ObjectStore objectStore = null; @@ -458,5 +471,139 @@ public void testNonConfDatanucleusValueSet() { Assert.assertEquals(value, objectStore.getProp().getProperty(key)); Assert.assertNull(objectStore.getProp().getProperty(key1)); } + + /** + * Test notification operations + */ + // TODO MS-SPLIT uncomment once we move EventMessage over + @Test + public void testNotificationOps() throws InterruptedException { + final int NO_EVENT_ID = 0; + final int FIRST_EVENT_ID = 1; + final int SECOND_EVENT_ID = 2; + + NotificationEvent event = + new NotificationEvent(0, 0, EventMessage.EventType.CREATE_DATABASE.toString(), ""); + NotificationEventResponse eventResponse; + CurrentNotificationEventId eventId; + + // Verify that there are no notifications available yet + eventId = objectStore.getCurrentNotificationEventId(); + Assert.assertEquals(NO_EVENT_ID, eventId.getEventId()); + + // Verify that addNotificationEvent() updates the NotificationEvent with the new event ID + objectStore.addNotificationEvent(event); + Assert.assertEquals(FIRST_EVENT_ID, event.getEventId()); + objectStore.addNotificationEvent(event); + Assert.assertEquals(SECOND_EVENT_ID, event.getEventId()); + + // Verify that objectStore fetches the latest notification event ID + eventId = objectStore.getCurrentNotificationEventId(); + Assert.assertEquals(SECOND_EVENT_ID, eventId.getEventId()); + + // Verify that getNextNotification() returns all events + eventResponse = objectStore.getNextNotification(new NotificationEventRequest()); + Assert.assertEquals(2, eventResponse.getEventsSize()); + Assert.assertEquals(FIRST_EVENT_ID, eventResponse.getEvents().get(0).getEventId()); + Assert.assertEquals(SECOND_EVENT_ID, eventResponse.getEvents().get(1).getEventId()); + + // Verify that getNextNotification(last) returns events after a specified event + eventResponse = objectStore.getNextNotification(new NotificationEventRequest(FIRST_EVENT_ID)); + Assert.assertEquals(1, eventResponse.getEventsSize()); + Assert.assertEquals(SECOND_EVENT_ID, eventResponse.getEvents().get(0).getEventId()); + + // Verify that getNextNotification(last) returns zero events if there are no more notifications available + eventResponse = objectStore.getNextNotification(new NotificationEventRequest(SECOND_EVENT_ID)); + Assert.assertEquals(0, eventResponse.getEventsSize()); + + // Verify that cleanNotificationEvents() 
cleans up all old notifications + Thread.sleep(1); + objectStore.cleanNotificationEvents(1); + eventResponse = objectStore.getNextNotification(new NotificationEventRequest()); + Assert.assertEquals(0, eventResponse.getEventsSize()); + } + + @Ignore( + "This test is here to allow testing with other databases like mysql / postgres etc\n" + + " with user changes to the code. This cannot be run on apache derby because of\n" + + " https://db.apache.org/derby/docs/10.10/devguide/cdevconcepts842385.html" + ) + @Test + public void testConcurrentAddNotifications() throws ExecutionException, InterruptedException { + + final int NUM_THREADS = 10; + CyclicBarrier cyclicBarrier = new CyclicBarrier(NUM_THREADS, + () -> LoggerFactory.getLogger("test") + .debug(NUM_THREADS + " threads going to add notification")); + + Configuration conf = MetastoreConf.newMetastoreConf(); + MetastoreConf.setVar(conf, MetastoreConf.ConfVars.EXPRESSION_PROXY_CLASS, + MockPartitionExpressionProxy.class.getName()); + /* + Below are the properties that need to be set based on which database this test is going to be run against + */ + +// conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_DRIVER, "com.mysql.jdbc.Driver"); +// conf.setVar(HiveConf.ConfVars.METASTORECONNECTURLKEY, +// "jdbc:mysql://localhost:3306/metastore_db"); +// conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_USER_NAME, ""); +// conf.setVar(HiveConf.ConfVars.METASTOREPWD, ""); + + /* + We have to add this entry manually because, for tests, the db is initialized via metastore directSQL + and we don't run the schema creation SQL, which includes an insert for notification_sequence + that can be locked. The entry in notification_sequence happens via notification_event insertion. + */ + objectStore.getPersistenceManager().newQuery(MNotificationLog.class, "eventType==''").execute(); + objectStore.getPersistenceManager().newQuery(MNotificationNextId.class, "nextEventId==-1").execute(); + + objectStore.addNotificationEvent( + new NotificationEvent(0, 0, + EventMessage.EventType.CREATE_DATABASE.toString(), + "CREATE DATABASE DB initial")); + + ExecutorService executorService = Executors.newFixedThreadPool(NUM_THREADS); + for (int i = 0; i < NUM_THREADS; i++) { + final int n = i; + + executorService.execute( + () -> { + ObjectStore store = new ObjectStore(); + store.setConf(conf); + + String eventType = EventMessage.EventType.CREATE_DATABASE.toString(); + NotificationEvent dbEvent = + new NotificationEvent(0, 0, eventType, + "CREATE DATABASE DB" + n); + System.out.println("ADDING NOTIFICATION"); + + try { + cyclicBarrier.await(); + } catch (InterruptedException | BrokenBarrierException e) { + throw new RuntimeException(e); + } + store.addNotificationEvent(dbEvent); + System.out.println("FINISH NOTIFICATION"); + }); + } + executorService.shutdown(); + Assert.assertTrue(executorService.awaitTermination(15, TimeUnit.SECONDS)); + + // We have to set this up again as the underlying PMF keeps getting reinitialized, with the original + // reference closed + ObjectStore store = new ObjectStore(); + store.setConf(conf); + + NotificationEventResponse eventResponse = store.getNextNotification( + new NotificationEventRequest()); + Assert.assertEquals(NUM_THREADS + 1, eventResponse.getEventsSize()); + long previousId = 0; + for (NotificationEvent event : eventResponse.getEvents()) { + Assert.assertTrue("previous:" + previousId + " current:" + event.getEventId(), + previousId < event.getEventId()); + Assert.assertTrue(previousId + 1 == event.getEventId()); + previousId = event.getEventId(); + } + } 
} diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/utils/TestHdfsUtils.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/utils/TestHdfsUtils.java index b5f37ebd74..f91b062aab 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/utils/TestHdfsUtils.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/utils/TestHdfsUtils.java @@ -19,8 +19,11 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FsShell; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.security.AccessControlException; @@ -30,8 +33,16 @@ import javax.security.auth.login.LoginException; import java.io.IOException; +import java.util.ArrayList; +import java.util.List; import java.util.Random; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + public class TestHdfsUtils { private Random rand = new Random(); @@ -190,4 +201,145 @@ public void rootReadWriteExecute() throws IOException, LoginException { } } + /** + * Tests that {@link HdfsUtils#setFullFileStatus(Configuration, HdfsUtils.HadoopFileStatus, String, FileSystem, Path, boolean)} + * does not throw an exception when setting the group and without recursion. + */ + @Test + public void testSetFullFileStatusFailInheritGroup() throws IOException { + Configuration conf = new Configuration(); + conf.set("dfs.namenode.acls.enabled", "false"); + + HdfsUtils.HadoopFileStatus mockHadoopFileStatus = mock(HdfsUtils.HadoopFileStatus.class); + FileStatus mockSourceStatus = mock(FileStatus.class); + FileSystem fs = mock(FileSystem.class); + + when(mockSourceStatus.getGroup()).thenReturn("fakeGroup1"); + when(mockHadoopFileStatus.getFileStatus()).thenReturn(mockSourceStatus); + doThrow(RuntimeException.class).when(fs).setOwner(any(Path.class), any(String.class), any(String.class)); + + HdfsUtils.setFullFileStatus(conf, mockHadoopFileStatus, "fakeGroup2", fs, new Path("fakePath"), false); + verify(fs).setOwner(any(Path.class), any(String.class), any(String.class)); + } + + /** + * Tests that {@link HdfsUtils#setFullFileStatus} + * does not throw an exception when setting ACLs and without recursion. 
+ */ + @Test + public void testSetFullFileStatusFailInheritAcls() throws IOException { + Configuration conf = new Configuration(); + conf.set("dfs.namenode.acls.enabled", "true"); + + HdfsUtils.HadoopFileStatus mockHadoopFileStatus = mock(HdfsUtils.HadoopFileStatus.class); + FileStatus mockSourceStatus = mock(FileStatus.class); + AclStatus mockAclStatus = mock(AclStatus.class); + FileSystem mockFs = mock(FileSystem.class); + + when(mockSourceStatus.getPermission()).thenReturn(new FsPermission((short) 777)); + when(mockAclStatus.toString()).thenReturn(""); + when(mockHadoopFileStatus.getFileStatus()).thenReturn(mockSourceStatus); + when(mockHadoopFileStatus.getAclEntries()).thenReturn(new ArrayList<>()); + when(mockHadoopFileStatus.getAclStatus()).thenReturn(mockAclStatus); + doThrow(RuntimeException.class).when(mockFs).setAcl(any(Path.class), any(List.class)); + + HdfsUtils.setFullFileStatus(conf, mockHadoopFileStatus, null, mockFs, new Path("fakePath"), false); + verify(mockFs).setAcl(any(Path.class), any(List.class)); + } + + /** + * Tests that {@link HdfsUtils#setFullFileStatus} + * does not throw an exception when setting permissions and without recursion. + */ + @Test + public void testSetFullFileStatusFailInheritPerms() throws IOException { + Configuration conf = new Configuration(); + conf.set("dfs.namenode.acls.enabled", "false"); + + HdfsUtils.HadoopFileStatus mockHadoopFileStatus = mock(HdfsUtils.HadoopFileStatus.class); + FileStatus mockSourceStatus = mock(FileStatus.class); + FileSystem mockFs = mock(FileSystem.class); + + when(mockSourceStatus.getPermission()).thenReturn(new FsPermission((short) 777)); + when(mockHadoopFileStatus.getFileStatus()).thenReturn(mockSourceStatus); + doThrow(RuntimeException.class).when(mockFs).setPermission(any(Path.class), any(FsPermission.class)); + + HdfsUtils.setFullFileStatus(conf, mockHadoopFileStatus, null, mockFs, new Path("fakePath"), + false); + verify(mockFs).setPermission(any(Path.class), any(FsPermission.class)); + } + + /** + * Tests that {@link HdfsUtils#setFullFileStatus(Configuration, HdfsUtils.HadoopFileStatus, String, FileSystem, Path, boolean)} + * does not throw an exception when setting the group and with recursion. + */ + @Test + public void testSetFullFileStatusFailInheritGroupRecursive() throws Exception { + Configuration conf = new Configuration(); + conf.set("dfs.namenode.acls.enabled", "false"); + + String fakeSourceGroup = "fakeGroup1"; + String fakeTargetGroup = "fakeGroup2"; + Path fakeTarget = new Path("fakePath"); + HdfsUtils.HadoopFileStatus mockHadoopFileStatus = mock(HdfsUtils.HadoopFileStatus.class); + FileStatus mockSourceStatus = mock(FileStatus.class); + FsShell mockFsShell = mock(FsShell.class); + + when(mockSourceStatus.getGroup()).thenReturn(fakeSourceGroup); + when(mockHadoopFileStatus.getFileStatus()).thenReturn(mockSourceStatus); + doThrow(RuntimeException.class).when(mockFsShell).run(any(String[].class)); + + HdfsUtils.setFullFileStatus(conf, mockHadoopFileStatus, fakeTargetGroup, mock(FileSystem.class), fakeTarget, + true, mockFsShell); + verify(mockFsShell).run(new String[]{"-chgrp", "-R", fakeSourceGroup, fakeTarget.toString()}); + } + + /** + * Tests that {@link HdfsUtils#setFullFileStatus} + * does not throw an exception when setting ACLs and with recursion. 
+ */ + @Test + public void testSetFullFileStatusFailInheritAclsRecursive() throws Exception { + Configuration conf = new Configuration(); + conf.set("dfs.namenode.acls.enabled", "true"); + + Path fakeTarget = new Path("fakePath"); + HdfsUtils.HadoopFileStatus mockHadoopFileStatus = mock(HdfsUtils.HadoopFileStatus.class); + FileStatus mockSourceStatus = mock(FileStatus.class); + FsShell mockFsShell = mock(FsShell.class); + AclStatus mockAclStatus = mock(AclStatus.class); + + when(mockSourceStatus.getPermission()).thenReturn(new FsPermission((short) 777)); + when(mockAclStatus.toString()).thenReturn(""); + when(mockHadoopFileStatus.getFileStatus()).thenReturn(mockSourceStatus); + when(mockHadoopFileStatus.getAclEntries()).thenReturn(new ArrayList<>()); + when(mockHadoopFileStatus.getAclStatus()).thenReturn(mockAclStatus); + doThrow(RuntimeException.class).when(mockFsShell).run(any(String[].class)); + + HdfsUtils.setFullFileStatus(conf, mockHadoopFileStatus, "", mock(FileSystem.class), fakeTarget, true, mockFsShell); + verify(mockFsShell).run(new String[]{"-setfacl", "-R", "--set", any(String.class), fakeTarget.toString()}); + } + + /** + * Tests that {@link HdfsUtils#setFullFileStatus} + * does not throw an exception when setting permissions and with recursion. + */ + @Test + public void testSetFullFileStatusFailInheritPermsRecursive() throws Exception { + Configuration conf = new Configuration(); + conf.set("dfs.namenode.acls.enabled", "false"); + + Path fakeTarget = new Path("fakePath"); + HdfsUtils.HadoopFileStatus mockHadoopFileStatus = mock(HdfsUtils.HadoopFileStatus.class); + FileStatus mockSourceStatus = mock(FileStatus.class); + FsShell mockFsShell = mock(FsShell.class); + + when(mockSourceStatus.getPermission()).thenReturn(new FsPermission((short) 777)); + when(mockHadoopFileStatus.getFileStatus()).thenReturn(mockSourceStatus); + doThrow(RuntimeException.class).when(mockFsShell).run(any(String[].class)); + + HdfsUtils.setFullFileStatus(conf, mockHadoopFileStatus, "", mock(FileSystem.class), fakeTarget, + true, mockFsShell); + verify(mockFsShell).run(new String[]{"-chmod", "-R", any(String.class), fakeTarget.toString()}); + } }
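
As a quick orientation for reviewers, the sketch below (not part of the patch) shows how the two SecurityUtils socket helpers added above might be driven from a MetastoreConf-backed configuration. It relies only on calls visible in this diff: MetastoreConf.newMetastoreConf(), MetastoreConf.getIntVar() with ConfVars.SERVER_PORT, and the new getServerSocket()/getServerSSLSocket() signatures. The class name MetastoreSocketSketch, the keystore path and password, and the blacklist entries are hypothetical placeholders; a real JKS keystore would be needed for the SSL call to succeed.

// Hypothetical usage sketch, not part of the patch.
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.metastore.utils.SecurityUtils;
import org.apache.thrift.transport.TServerSocket;

public class MetastoreSocketSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = MetastoreConf.newMetastoreConf();
    int port = MetastoreConf.getIntVar(conf, MetastoreConf.ConfVars.SERVER_PORT);

    // Plain Thrift server socket; a null or empty host means a wildcard bind.
    TServerSocket plain = SecurityUtils.getServerSocket(null, port);
    plain.close();

    // SSL server socket; protocols named in the blacklist are removed from the
    // enabled protocols of the underlying SSLServerSocket.
    List<String> sslVersionBlacklist = Arrays.asList("SSLv2", "SSLv3");
    // Keystore path and password below are placeholders, not values from the patch.
    TServerSocket ssl = SecurityUtils.getServerSSLSocket(
        "localhost", port + 1, "/path/to/keystore.jks", "changeit", sslVersionBlacklist);
    ssl.close();
  }
}

Note that getServerSSLSocket() trims and lower-cases each blacklist entry before comparing it against the socket's enabled protocols, so the blacklist match is effectively case-insensitive.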