commit 275ed18c23a76a30c3dec00940e5afac7eb0e08c
Author: Alan Gates
Date:   Wed Sep 6 19:00:14 2017 -0700

    HIVE-17980 Moved HiveMetaStoreClient plus a few remaining classes.

diff --git accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloStorageHandler.java accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloStorageHandler.java
index 5391a99a16..872f12bb2e 100644
--- accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloStorageHandler.java
+++ accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloStorageHandler.java
@@ -32,10 +32,10 @@ import org.apache.hadoop.hive.accumulo.serde.AccumuloSerDe;
 import org.apache.hadoop.hive.accumulo.serde.AccumuloSerDeParameters;
 import org.apache.hadoop.hive.metastore.HiveMetaHook;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.metadata.DefaultStorageHandler;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.HiveStoragePredicateHandler;
diff --git druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandler.java druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandler.java
index 8117633a13..fe66a443d1 100644
--- druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandler.java
+++ druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandler.java
@@ -42,10 +42,10 @@ import org.apache.hadoop.hive.druid.serde.DruidSerDe;
 import org.apache.hadoop.hive.metastore.DefaultHiveMetaHook;
 import org.apache.hadoop.hive.metastore.HiveMetaHook;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler;
 import org.apache.hadoop.hive.ql.plan.TableDesc;
diff --git hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseMetaHook.java hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseMetaHook.java
index 9fe07afa28..cd19a4f9a2 100644
--- hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseMetaHook.java
+++ hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseMetaHook.java
@@ -28,9 +28,9 @@ import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hive.metastore.HiveMetaHook;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.util.StringUtils;
 import java.io.Closeable;
diff --git hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseStorageHandler.java hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseStorageHandler.java
index 0a3788fc69..276ffcf336 100644
--- hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseStorageHandler.java
+++ hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseStorageHandler.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hive.metastore.HiveMetaHook;
 import
org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.exec.FunctionRegistry; import org.apache.hadoop.hive.ql.index.IndexPredicateAnalyzer; import org.apache.hadoop.hive.ql.index.IndexSearchCondition; diff --git hcatalog/core/pom.xml hcatalog/core/pom.xml index 94e9fbe771..560e438c26 100644 --- hcatalog/core/pom.xml +++ hcatalog/core/pom.xml @@ -69,7 +69,7 @@ org.apache.hive - hive-metastore + hive-standalone-metastore ${project.version} test-jar test diff --git hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HiveClientCache.java hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HiveClientCache.java index c1b63d9310..3569ffcfa7 100644 --- hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HiveClientCache.java +++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HiveClientCache.java @@ -76,7 +76,7 @@ // Since HiveMetaStoreClient is not threadsafe, hive clients are not shared across threads. // Thread local variable containing each thread's unique ID, is used as one of the keys for the cache - // causing each thread to get a different client even if the hiveConf is same. + // causing each thread to get a different client even if the conf is same. private static final ThreadLocal threadId = new ThreadLocal() { @Override diff --git hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java index 04d78eed05..39a4d2d97a 100644 --- hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java +++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java @@ -38,7 +38,6 @@ import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.IMetaStoreClient; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.InvalidOperationException; @@ -610,7 +609,8 @@ private void moveTaskOutputs(FileSystem fs, Path file, Path srcDir, } } else { - if(immutable && fs.exists(finalOutputPath) && !MetaStoreUtils.isDirEmpty(fs, finalOutputPath)) { + if(immutable && fs.exists(finalOutputPath) && + !org.apache.hadoop.hive.metastore.utils.FileUtils.isDirEmpty(fs, finalOutputPath)) { throw new HCatException(ErrorType.ERROR_DUPLICATE_PARTITION, "Data already exists in " + finalOutputPath + ", duplicate publish not possible."); diff --git hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputFormatContainer.java hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputFormatContainer.java index 3ecb6080e6..10eda77f50 100644 --- hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputFormatContainer.java +++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputFormatContainer.java @@ -23,7 +23,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.IMetaStoreClient; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; +import org.apache.hadoop.hive.metastore.utils.FileUtils; import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler; import 
org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.MetaException; @@ -202,7 +202,7 @@ private static void handleDuplicatePublish(JobContext context, OutputJobInfo out Path tablePath = new Path(table.getTTable().getSd().getLocation()); FileSystem fs = tablePath.getFileSystem(context.getConfiguration()); - if (!MetaStoreUtils.isDirEmpty(fs,tablePath)){ + if (!FileUtils.isDirEmpty(fs,tablePath)){ throw new HCatException(ErrorType.ERROR_NON_EMPTY_TABLE, table.getDbName() + "." + table.getTableName()); } diff --git hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/InternalUtil.java hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/InternalUtil.java index 8fd676fba1..6c4c5e1bbc 100644 --- hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/InternalUtil.java +++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/InternalUtil.java @@ -20,10 +20,10 @@ package org.apache.hive.hcatalog.mapreduce; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.serde.serdeConstants; import org.apache.hadoop.hive.serde2.Deserializer; diff --git hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java index e5aa2b8d7a..bce246f7fb 100644 --- hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java +++ hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java @@ -32,7 +32,6 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; import org.apache.hadoop.hive.metastore.api.Database; diff --git hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java index 6711a3edec..091efb61d4 100644 --- hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java +++ hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java @@ -37,7 +37,6 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.SerDeInfo; diff --git hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java index ec5d1c4d6d..67fc34d7ac 100644 --- hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java +++ hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java @@ 
-27,7 +27,6 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; -import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler; import org.apache.hadoop.hive.metastore.MetaStoreEventListenerConstants; import org.apache.hadoop.hive.metastore.RawStore; @@ -46,6 +45,8 @@ import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.apache.hadoop.hive.metastore.events.AddForeignKeyEvent; import org.apache.hadoop.hive.metastore.events.AddIndexEvent; import org.apache.hadoop.hive.metastore.events.AddNotNullConstraintEvent; @@ -92,27 +93,22 @@ private static final Logger LOG = LoggerFactory.getLogger(DbNotificationListener.class.getName()); private static CleanerThread cleaner = null; - // This is the same object as super.conf, but it's convenient to keep a copy of it as a - // HiveConf rather than a Configuration. - private HiveConf hiveConf; + private Configuration conf; private MessageFactory msgFactory; - private synchronized void init(HiveConf conf) throws MetaException { + private synchronized void init(Configuration conf) throws MetaException { if (cleaner == null) { cleaner = new CleanerThread(conf, RawStoreProxy.getProxy(conf, conf, - conf.getVar(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL), 999999)); + MetastoreConf.getVar(conf, ConfVars.RAW_STORE_IMPL), 999999)); cleaner.start(); } } public DbNotificationListener(Configuration config) throws MetaException { super(config); - // The code in MetastoreUtils.getMetaStoreListeners() that calls this looks for a constructor - // with a Configuration parameter, so we have to declare config as Configuration. But it - // actually passes a HiveConf, which we need. So we'll do this ugly down cast. - hiveConf = (HiveConf)config; - init(hiveConf); + conf = config; + init(conf); msgFactory = MessageFactory.getInstance(); } @@ -123,14 +119,17 @@ public DbNotificationListener(Configuration config) throws MetaException { @Override public void onConfigChange(ConfigChangeEvent tableEvent) throws MetaException { String key = tableEvent.getKey(); - if (key.equals(HiveConf.ConfVars.METASTORE_EVENT_DB_LISTENER_TTL.toString())) { - // This weirdness of setting it in our hiveConf and then reading back does two things. + if (key.equals(ConfVars.EVENT_DB_LISTENER_TTL.toString()) || + key.equals(ConfVars.EVENT_DB_LISTENER_TTL.getHiveName())) { + // This weirdness of setting it in our conf and then reading back does two things. // One, it handles the conversion of the TimeUnit. Two, it keeps the value around for // later in case we need it again. 
- hiveConf.set(HiveConf.ConfVars.METASTORE_EVENT_DB_LISTENER_TTL.name(), - tableEvent.getNewValue()); - cleaner.setTimeToLive(hiveConf.getTimeVar(HiveConf.ConfVars.METASTORE_EVENT_DB_LISTENER_TTL, - TimeUnit.SECONDS)); + long time = MetastoreConf.convertTimeStr(tableEvent.getNewValue(), TimeUnit.SECONDS, + TimeUnit.SECONDS); + MetastoreConf.setTimeVar(getConf(), MetastoreConf.ConfVars.EVENT_DB_LISTENER_TTL, time, + TimeUnit.SECONDS); + cleaner.setTimeToLive(MetastoreConf.getTimeVar(getConf(), + MetastoreConf.ConfVars.EVENT_DB_LISTENER_TTL, TimeUnit.SECONDS)); } } @@ -205,7 +204,7 @@ public boolean accept(Path p) { try { if (locString != null) { Path loc = new Path(locString); - fs = loc.getFileSystem(hiveConf); + fs = loc.getFileSystem(conf); files = fs.listStatus(loc, VALID_FILES_FILTER); } } catch (IOException e) { @@ -573,7 +572,7 @@ private void process(NotificationEvent event, ListenerEvent listenerEvent) throw event.setMessageFormat(msgFactory.getMessageFormat()); LOG.debug("DbNotificationListener: Processing : {}:{}", event.getEventId(), event.getMessage()); - HMSHandler.getMSForConf(hiveConf).addNotificationEvent(event); + HMSHandler.getMSForConf(conf).addNotificationEvent(event); // Set the DB_NOTIFICATION_EVENT_ID for future reference by other listeners. if (event.isSetEventId()) { @@ -588,10 +587,10 @@ private void process(NotificationEvent event, ListenerEvent listenerEvent) throw private int ttl; static private long sleepTime = 60000; - CleanerThread(HiveConf conf, RawStore rs) { + CleanerThread(Configuration conf, RawStore rs) { super("CleanerThread"); this.rs = rs; - setTimeToLive(conf.getTimeVar(HiveConf.ConfVars.METASTORE_EVENT_DB_LISTENER_TTL, + setTimeToLive(MetastoreConf.getTimeVar(conf, ConfVars.EVENT_DB_LISTENER_TTL, TimeUnit.SECONDS)); setDaemon(true); } diff --git hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/DelimitedInputWriter.java hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/DelimitedInputWriter.java index 58fba4f6a9..bd73f29201 100644 --- hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/DelimitedInputWriter.java +++ hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/DelimitedInputWriter.java @@ -20,11 +20,10 @@ import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.serde2.AbstractSerDe; diff --git hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StrictJsonWriter.java hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StrictJsonWriter.java index 13756e281d..7cfddfa3f0 100644 --- hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StrictJsonWriter.java +++ hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StrictJsonWriter.java @@ -19,8 +19,8 @@ package org.apache.hive.hcatalog.streaming; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.serde2.AbstractSerDe; import org.apache.hadoop.hive.serde2.SerDeException; import 
org.apache.hadoop.hive.serde2.SerDeUtils; diff --git hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StrictRegexWriter.java hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StrictRegexWriter.java index 78987ab6e9..736893b769 100644 --- hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StrictRegexWriter.java +++ hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StrictRegexWriter.java @@ -25,9 +25,9 @@ import org.apache.commons.lang.StringUtils; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.serde.serdeConstants; import org.apache.hadoop.hive.serde2.AbstractSerDe; import org.apache.hadoop.hive.serde2.RegexSerDe; diff --git hcatalog/webhcat/java-client/pom.xml hcatalog/webhcat/java-client/pom.xml index ea518549df..5693157edd 100644 --- hcatalog/webhcat/java-client/pom.xml +++ hcatalog/webhcat/java-client/pom.xml @@ -76,7 +76,7 @@ org.apache.hive - hive-metastore + hive-standalone-metastore ${project.version} test-jar test diff --git hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java index 17b9d03a21..5f7128dd46 100644 --- hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java +++ hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java @@ -29,7 +29,6 @@ import org.apache.commons.lang.StringUtils; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hive.common.ObjectPair; import org.apache.hadoop.hive.common.classification.InterfaceAudience; import org.apache.hadoop.hive.common.classification.InterfaceStability; import org.apache.hadoop.hive.conf.HiveConf; @@ -52,6 +51,7 @@ import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.api.UnknownDBException; import org.apache.hadoop.hive.metastore.api.UnknownTableException; +import org.apache.hadoop.hive.metastore.utils.ObjectPair; import org.apache.hadoop.hive.ql.exec.FunctionRegistry; import org.apache.hadoop.hive.ql.exec.SerializationUtilities; import org.apache.hadoop.hive.ql.parse.SemanticException; @@ -584,8 +584,7 @@ private void dropPartitionsUsingExpressions(Table table, Map par throws SemanticException, TException { LOG.info("HCatClient: Dropping partitions using partition-predicate Expressions."); ExprNodeGenericFuncDesc partitionExpression = new ExpressionBuilder(table, partitionSpec).build(); - ObjectPair serializedPartitionExpression = - new ObjectPair(partitionSpec.size(), + ObjectPair serializedPartitionExpression = new ObjectPair<>(partitionSpec.size(), SerializationUtilities.serializeExpressionToKryo(partitionExpression)); hmsClient.dropPartitions(table.getDbName(), table.getTableName(), Arrays.asList(serializedPartitionExpression), deleteData && !isExternal(table), // Delete data? 
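The recurring change in the handler and HCatalog hunks above is a package relocation rather than a behaviour change: the helpers now ship with hive-standalone-metastore under org.apache.hadoop.hive.metastore.utils (MetaStoreUtils, FileUtils, ObjectPair), so callers swap imports and, for the empty-directory check, the owning class. A minimal post-patch sketch follows; the wrapper class DuplicatePublishCheck and its method are illustrative only, while FileUtils.isDirEmpty and the error text mirror the FileOutputCommitterContainer hunk above.

    import java.io.IOException;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    // Previously: import org.apache.hadoop.hive.metastore.MetaStoreUtils; (isDirEmpty lived there)
    import org.apache.hadoop.hive.metastore.utils.FileUtils;

    public class DuplicatePublishCheck {
      /**
       * Refuses to publish into an immutable target directory that already holds data,
       * following the check added to FileOutputCommitterContainer.moveTaskOutputs().
       */
      public static void assertTargetEmpty(FileSystem fs, Path finalOutputPath) throws IOException {
        if (fs.exists(finalOutputPath) && !FileUtils.isDirEmpty(fs, finalOutputPath)) {
          throw new IOException("Data already exists in " + finalOutputPath
              + ", duplicate publish not possible.");
        }
      }
    }

The accompanying pom.xml edits that replace hive-metastore with hive-standalone-metastore are what make this utils package available to the affected modules.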
diff --git hcatalog/webhcat/svr/pom.xml hcatalog/webhcat/svr/pom.xml index 6bceee1880..a22bc22430 100644 --- hcatalog/webhcat/svr/pom.xml +++ hcatalog/webhcat/svr/pom.xml @@ -197,7 +197,7 @@ org.apache.hive - hive-metastore + hive-standalone-metastore ${project.version} test-jar test diff --git itests/hcatalog-unit/pom.xml itests/hcatalog-unit/pom.xml index 2a2f8457ff..433147a247 100644 --- itests/hcatalog-unit/pom.xml +++ itests/hcatalog-unit/pom.xml @@ -98,7 +98,7 @@ org.apache.hive - hive-metastore + hive-standalone-metastore ${project.version} test diff --git itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/hbase/ManyMiniCluster.java itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/hbase/ManyMiniCluster.java index ad44bc2d62..2fe4045835 100644 --- itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/hbase/ManyMiniCluster.java +++ itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/hbase/ManyMiniCluster.java @@ -310,8 +310,8 @@ private void setUpMetastore() throws Exception { System.setProperty("derby.stream.error.file", derbyLogFile.getPath()); -// Driver driver = new Driver(hiveConf); -// SessionState.start(new CliSessionState(hiveConf)); +// Driver driver = new Driver(conf); +// SessionState.start(new CliSessionState(conf)); hiveMetaStoreClient = new HiveMetaStoreClient(hiveConf); } diff --git itests/hive-blobstore/pom.xml itests/hive-blobstore/pom.xml index d1c732d821..fadb4f2dba 100644 --- itests/hive-blobstore/pom.xml +++ itests/hive-blobstore/pom.xml @@ -55,13 +55,13 @@ org.apache.hive - hive-metastore + hive-standalone-metastore ${project.version} test org.apache.hive - hive-metastore + hive-standalone-metastore ${project.version} tests test diff --git itests/hive-minikdc/pom.xml itests/hive-minikdc/pom.xml index 689e67955d..337535aaea 100644 --- itests/hive-minikdc/pom.xml +++ itests/hive-minikdc/pom.xml @@ -74,13 +74,13 @@ org.apache.hive - hive-metastore + hive-standalone-metastore ${project.version} test org.apache.hive - hive-metastore + hive-standalone-metastore ${project.version} test tests diff --git itests/hive-unit-hadoop2/pom.xml itests/hive-unit-hadoop2/pom.xml index f862dac5f6..ebf81c902b 100644 --- itests/hive-unit-hadoop2/pom.xml +++ itests/hive-unit-hadoop2/pom.xml @@ -76,7 +76,7 @@ org.apache.hive - hive-metastore + hive-standalone-metastore ${project.version} tests diff --git itests/hive-unit/pom.xml itests/hive-unit/pom.xml index 3a435a8a52..73a454f8aa 100644 --- itests/hive-unit/pom.xml +++ itests/hive-unit/pom.xml @@ -161,7 +161,7 @@ org.apache.hive - hive-metastore + hive-standalone-metastore ${project.version} tests diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/AbstractTestAuthorizationApiAuthorizer.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/AbstractTestAuthorizationApiAuthorizer.java index bd8332c49c..f8ed4196bc 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/AbstractTestAuthorizationApiAuthorizer.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/AbstractTestAuthorizationApiAuthorizer.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hive.metastore.api.PrivilegeBag; import org.apache.hadoop.hive.metastore.api.Role; import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.security.authorization.MetaStoreAuthzAPIAuthorizerEmbedOnly; import 
org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener; import org.junit.Test; diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java index 8366fabf8a..91fc706d51 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hive.metastore.api.PartitionSpec; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.Driver; import org.apache.hadoop.hive.ql.session.SessionState; import org.junit.AfterClass; @@ -44,8 +45,11 @@ import org.junit.Test; import com.google.common.collect.Lists; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class TestFilterHooks { + private static final Logger LOG = LoggerFactory.getLogger(TestFilterHooks.class); public static class DummyMetaStoreFilterHookImpl extends DefaultMetaStoreFilterHookImpl { public static boolean blockResults = false; diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java index 98dad7a093..f344c47443 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java @@ -81,6 +81,7 @@ import org.apache.hadoop.hive.metastore.api.Type; import org.apache.hadoop.hive.metastore.api.UnknownDBException; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.io.HiveInputFormat; import org.apache.hadoop.hive.ql.io.HiveOutputFormat; @@ -1270,7 +1271,7 @@ public void testComplexTypeApi() throws Exception { new FieldSchema("name", serdeConstants.STRING_TYPE_NAME, "")); fam.getFields().add( new FieldSchema("members", - MetaStoreUtils.getListType(typ1.getName()), "")); + ColumnType.getListType(typ1.getName()), "")); ret = client.createType(fam); assertTrue("Unable to create type " + fam.getName(), ret); diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java index 5267dcaa5f..c29a34dc37 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java @@ -43,6 +43,7 @@ import org.apache.hadoop.hive.metastore.events.DropTableEvent; import org.apache.hadoop.hive.metastore.events.ListenerEvent; import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.io.HiveInputFormat; import org.apache.hadoop.hive.ql.io.HiveOutputFormat; import org.apache.hadoop.hive.ql.session.SessionState; diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartitionRemote.java 
itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartitionRemote.java index e3c9a4e5a9..c541193658 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartitionRemote.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartitionRemote.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hive.metastore; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; public class TestMarkPartitionRemote extends TestMarkPartition { diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreAuthorization.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreAuthorization.java index 1bf76b5199..e7b899b8d1 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreAuthorization.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreAuthorization.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; public class TestMetaStoreAuthorization extends TestCase { diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java index 3bb340e157..e44cfca8ee 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; import org.apache.hadoop.hive.ql.Driver; import org.apache.hadoop.hive.ql.session.SessionState; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; /** * TestMetaStoreEventListener. 
Test case for diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java index 7219774560..e8031066c2 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java @@ -64,6 +64,7 @@ import org.apache.hadoop.hive.metastore.events.PreEventContext; import org.apache.hadoop.hive.metastore.events.PreLoadPartitionDoneEvent; import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.Driver; import org.apache.hadoop.hive.ql.processors.SetProcessor; import org.apache.hadoop.hive.ql.session.SessionState; diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java index a9053241cc..9623fedff9 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.events.ListenerEvent; import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.Driver; import org.apache.hadoop.hive.ql.session.SessionState; diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreInitListener.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreInitListener.java index 3d58819b1a..49823134a7 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreInitListener.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreInitListener.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hive.cli.CliSessionState; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.Driver; import org.apache.hadoop.hive.ql.session.SessionState; diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreListenersError.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreListenersError.java index b4d258ca3e..11ebf4d961 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreListenersError.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreListenersError.java @@ -23,6 +23,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; /** * Test for unwrapping InvocationTargetException, which is thrown from diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMetrics.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMetrics.java index 9d241a89c5..6f5a963e16 100644 --- 
itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMetrics.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMetrics.java @@ -22,6 +22,7 @@ import org.apache.hadoop.hive.metastore.metrics.Metrics; import org.apache.hadoop.hive.metastore.metrics.MetricsConstants; import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.Driver; import org.apache.hadoop.hive.ql.session.SessionState; import org.junit.Assert; diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStore.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStore.java index d4a809f142..ec84e66b8e 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStore.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStore.java @@ -21,6 +21,7 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; public class TestRemoteHiveMetaStore extends TestHiveMetaStore { diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStoreIpAddress.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStoreIpAddress.java index 9ba1d75303..c7c35f3756 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStoreIpAddress.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStoreIpAddress.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.util.StringUtils; /** diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRetryingHMSHandler.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRetryingHMSHandler.java index 2b7c5a1213..dfd80bc235 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRetryingHMSHandler.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRetryingHMSHandler.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.io.HiveInputFormat; import org.apache.hadoop.hive.ql.io.HiveOutputFormat; import org.apache.hadoop.hive.serde.serdeConstants; diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java index 585411664c..55acd1df36 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java @@ -48,6 +48,7 @@ import org.apache.hadoop.hive.metastore.messaging.event.filters.DatabaseAndTableFilter; import 
org.apache.hadoop.hive.metastore.messaging.event.filters.EventBoundaryFilter; import org.apache.hadoop.hive.metastore.messaging.event.filters.MessageFormatFilter; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.CommandNeedRetryException; import org.apache.hadoop.hive.ql.Driver; import org.apache.hadoop.hive.ql.exec.repl.ReplDumpWork; diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreAuthorizationProvider.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreAuthorizationProvider.java index 871cf3db76..edb46fd979 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreAuthorizationProvider.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreAuthorizationProvider.java @@ -48,6 +48,8 @@ import org.apache.hadoop.hive.serde.serdeConstants; import org.apache.hadoop.hive.shims.Utils; import org.apache.hadoop.security.UserGroupInformation; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * TestHiveMetastoreAuthorizationProvider. Test case for @@ -65,6 +67,8 @@ * authorization providers like StorageBasedAuthorizationProvider */ public class TestMetastoreAuthorizationProvider extends TestCase { + private static final Logger LOG = LoggerFactory.getLogger(TestMetastoreAuthorizationProvider.class); + protected HiveConf clientHiveConf; protected HiveMetaStoreClient msc; protected Driver driver; diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/serde2/TestSerdeWithFieldComments.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/serde2/TestSerdeWithFieldComments.java index bb96a89e30..63f48f0596 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/serde2/TestSerdeWithFieldComments.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/serde2/TestSerdeWithFieldComments.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hive.serde2; import junit.framework.TestCase; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; +import org.apache.hadoop.hive.metastore.HiveMetaStoreUtils; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; @@ -61,7 +61,7 @@ public void testFieldComments() throws MetaException, SerDeException { Deserializer mockDe = mock(Deserializer.class); when(mockDe.getObjectInspector()).thenReturn(mockSOI); List result = - MetaStoreUtils.getFieldsFromDeserializer("testTable", mockDe); + HiveMetaStoreUtils.getFieldsFromDeserializer("testTable", mockDe); assertEquals(2, result.size()); assertEquals("first", result.get(0).getName()); diff --git itests/hive-unit/src/test/java/org/apache/hive/service/cli/thrift/TestThriftCliServiceMessageSize.java itests/hive-unit/src/test/java/org/apache/hive/service/cli/thrift/TestThriftCliServiceMessageSize.java index 0948efc30f..147f53bdf1 100644 --- itests/hive-unit/src/test/java/org/apache/hive/service/cli/thrift/TestThriftCliServiceMessageSize.java +++ itests/hive-unit/src/test/java/org/apache/hive/service/cli/thrift/TestThriftCliServiceMessageSize.java @@ -17,6 +17,13 @@ */ package org.apache.hive.service.cli.thrift; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.fail; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.Statement; + import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; 
import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; @@ -28,13 +35,6 @@ import org.junit.BeforeClass; import org.junit.Test; -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.Statement; - -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.fail; - public class TestThriftCliServiceMessageSize { protected static int port; protected static String host = "localhost"; diff --git itests/qtest-accumulo/pom.xml itests/qtest-accumulo/pom.xml index 29ea4283a0..eae436b740 100644 --- itests/qtest-accumulo/pom.xml +++ itests/qtest-accumulo/pom.xml @@ -67,13 +67,13 @@ org.apache.hive - hive-metastore + hive-standalone-metastore ${project.version} test org.apache.hive - hive-metastore + hive-standalone-metastore ${project.version} tests test diff --git itests/qtest-spark/pom.xml itests/qtest-spark/pom.xml index b9b17b66a5..4b25223911 100644 --- itests/qtest-spark/pom.xml +++ itests/qtest-spark/pom.xml @@ -121,13 +121,13 @@ org.apache.hive - hive-metastore + hive-standalone-metastore ${project.version} test org.apache.hive - hive-metastore + hive-standalone-metastore ${project.version} tests test diff --git itests/qtest/pom.xml itests/qtest/pom.xml index 7f7d5f3ddf..1ac6b30bf0 100644 --- itests/qtest/pom.xml +++ itests/qtest/pom.xml @@ -58,13 +58,13 @@ org.apache.hive - hive-metastore + hive-standalone-metastore ${project.version} test org.apache.hive - hive-metastore + hive-standalone-metastore ${project.version} tests test diff --git itests/util/pom.xml itests/util/pom.xml index 16118b52cf..d311507cb4 100644 --- itests/util/pom.xml +++ itests/util/pom.xml @@ -107,7 +107,7 @@ org.apache.hive - hive-metastore + hive-standalone-metastore ${project.version} test-jar diff --git llap-server/pom.xml llap-server/pom.xml index 176110d014..61ccbd4339 100644 --- llap-server/pom.xml +++ llap-server/pom.xml @@ -268,7 +268,7 @@ org.apache.hive - hive-metastore + hive-standalone-metastore ${project.version} test-jar test diff --git metastore/pom.xml metastore/pom.xml index 12c01b6f3a..21c607f086 100644 --- metastore/pom.xml +++ metastore/pom.xml @@ -268,17 +268,6 @@ - org.apache.maven.plugins - maven-jar-plugin - - - - test-jar - - - - - org.codehaus.mojo build-helper-maven-plugin diff --git metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreUtils.java metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreUtils.java new file mode 100644 index 0000000000..a66c13507a --- /dev/null +++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreUtils.java @@ -0,0 +1,213 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.metastore; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.serde2.Deserializer; +import org.apache.hadoop.hive.serde2.SerDeException; +import org.apache.hadoop.hive.serde2.SerDeUtils; +import org.apache.hadoop.hive.serde2.objectinspector.ListObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.MapObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category; +import org.apache.hadoop.hive.serde2.objectinspector.StructField; +import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; +import org.apache.hive.common.util.ReflectionUtil; + +public class HiveMetaStoreUtils { + + protected static final Logger LOG = LoggerFactory.getLogger("hive.log"); + + /** + * getDeserializer + * + * Get the Deserializer for a table. + * + * @param conf + * - hadoop config + * @param table + * the table + * @return + * Returns instantiated deserializer by looking up class name of deserializer stored in + * storage descriptor of passed in table. Also, initializes the deserializer with schema + * of table. + * @exception MetaException + * if any problems instantiating the Deserializer + * + * todo - this should move somewhere into serde.jar + * + */ + static public Deserializer getDeserializer(Configuration conf, + org.apache.hadoop.hive.metastore.api.Table table, boolean skipConfError) throws + MetaException { + String lib = table.getSd().getSerdeInfo().getSerializationLib(); + if (lib == null) { + return null; + } + return getDeserializer(conf, table, skipConfError, lib); + } + + public static Deserializer getDeserializer(Configuration conf, + org.apache.hadoop.hive.metastore.api.Table table, boolean skipConfError, + String lib) throws MetaException { + try { + Deserializer deserializer = ReflectionUtil.newInstance(conf.getClassByName(lib). + asSubclass(Deserializer.class), conf); + if (skipConfError) { + SerDeUtils.initializeSerDeWithoutErrorCheck(deserializer, conf, + MetaStoreUtils.getTableMetadata(table), null); + } else { + SerDeUtils.initializeSerDe(deserializer, conf, MetaStoreUtils.getTableMetadata(table), null); + } + return deserializer; + } catch (RuntimeException e) { + throw e; + } catch (Throwable e) { + LOG.error("error in initSerDe: " + e.getClass().getName() + " " + + e.getMessage(), e); + throw new MetaException(e.getClass().getName() + " " + e.getMessage()); + } + } + + public static Class getDeserializerClass( + Configuration conf, org.apache.hadoop.hive.metastore.api.Table table) throws Exception { + String lib = table.getSd().getSerdeInfo().getSerializationLib(); + return lib == null ? null : conf.getClassByName(lib).asSubclass(Deserializer.class); + } + + /** + * getDeserializer + * + * Get the Deserializer for a partition. + * + * @param conf + * - hadoop config + * @param part + * the partition + * @param table the table + * @return + * Returns instantiated deserializer by looking up class name of deserializer stored in + * storage descriptor of passed in partition. Also, initializes the deserializer with + * schema of partition. 
+ * @exception MetaException + * if any problems instantiating the Deserializer + * + */ + static public Deserializer getDeserializer(Configuration conf, + org.apache.hadoop.hive.metastore.api.Partition part, + org.apache.hadoop.hive.metastore.api.Table table) throws MetaException { + String lib = part.getSd().getSerdeInfo().getSerializationLib(); + try { + Deserializer deserializer = ReflectionUtil.newInstance(conf.getClassByName(lib). + asSubclass(Deserializer.class), conf); + SerDeUtils.initializeSerDe(deserializer, conf, MetaStoreUtils.getTableMetadata(table), + MetaStoreUtils.getPartitionMetadata(part, table)); + return deserializer; + } catch (RuntimeException e) { + throw e; + } catch (Throwable e) { + LOG.error("error in initSerDe: " + e.getClass().getName() + " " + + e.getMessage(), e); + throw new MetaException(e.getClass().getName() + " " + e.getMessage()); + } + } + + /** + * @param tableName name of the table + * @param deserializer deserializer to use + * @return the list of fields + * @throws SerDeException if the serde throws an exception + * @throws MetaException if one of the fields or types in the table is invalid + */ + public static List getFieldsFromDeserializer(String tableName, + Deserializer deserializer) throws SerDeException, MetaException { + ObjectInspector oi = deserializer.getObjectInspector(); + String[] names = tableName.split("\\."); + String last_name = names[names.length - 1]; + for (int i = 1; i < names.length; i++) { + + if (oi instanceof StructObjectInspector) { + StructObjectInspector soi = (StructObjectInspector) oi; + StructField sf = soi.getStructFieldRef(names[i]); + if (sf == null) { + throw new MetaException("Invalid Field " + names[i]); + } else { + oi = sf.getFieldObjectInspector(); + } + } else if (oi instanceof ListObjectInspector + && names[i].equalsIgnoreCase("$elem$")) { + ListObjectInspector loi = (ListObjectInspector) oi; + oi = loi.getListElementObjectInspector(); + } else if (oi instanceof MapObjectInspector + && names[i].equalsIgnoreCase("$key$")) { + MapObjectInspector moi = (MapObjectInspector) oi; + oi = moi.getMapKeyObjectInspector(); + } else if (oi instanceof MapObjectInspector + && names[i].equalsIgnoreCase("$value$")) { + MapObjectInspector moi = (MapObjectInspector) oi; + oi = moi.getMapValueObjectInspector(); + } else { + throw new MetaException("Unknown type for " + names[i]); + } + } + + ArrayList str_fields = new ArrayList<>(); + // rules on how to recurse the ObjectInspector based on its type + if (oi.getCategory() != Category.STRUCT) { + str_fields.add(new FieldSchema(last_name, oi.getTypeName(), + FROM_SERIALIZER)); + } else { + List fields = ((StructObjectInspector) oi) + .getAllStructFieldRefs(); + for (int i = 0; i < fields.size(); i++) { + StructField structField = fields.get(i); + String fieldName = structField.getFieldName(); + String fieldTypeName = structField.getFieldObjectInspector().getTypeName(); + String fieldComment = determineFieldComment(structField.getFieldComment()); + + str_fields.add(new FieldSchema(fieldName, fieldTypeName, fieldComment)); + } + } + return str_fields; + } + + private static final String FROM_SERIALIZER = "from deserializer"; + private static String determineFieldComment(String comment) { + return (comment == null) ? FROM_SERIALIZER : comment; + } + + /** + * Convert TypeInfo to FieldSchema. 
+ */ + public static FieldSchema getFieldSchemaFromTypeInfo(String fieldName, + TypeInfo typeInfo) { + return new FieldSchema(fieldName, typeInfo.getTypeName(), + "generated by TypeInfoUtils.getFieldSchemaFromTypeInfo"); + } + +} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java deleted file mode 100644 index 15bd803540..0000000000 --- metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java +++ /dev/null @@ -1,1095 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.metastore; - -import java.io.IOException; -import java.lang.reflect.Constructor; -import java.net.InetSocketAddress; -import java.net.ServerSocket; -import java.net.Socket; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Properties; -import java.util.Set; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -import com.google.common.base.Predicates; -import com.google.common.collect.Maps; -import org.apache.commons.collections.CollectionUtils; -import org.apache.commons.lang.StringUtils; -import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.CommonConfigurationKeysPublic; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.PathFilter; -import org.apache.hadoop.hive.common.JavaUtils; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler; -import org.apache.hadoop.hive.metastore.api.EnvironmentContext; -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; -import org.apache.hadoop.hive.serde.serdeConstants; -import org.apache.hadoop.hive.serde2.Deserializer; -import org.apache.hadoop.hive.serde2.SerDeException; -import org.apache.hadoop.hive.serde2.SerDeUtils; -import org.apache.hadoop.hive.serde2.objectinspector.ListObjectInspector; -import org.apache.hadoop.hive.serde2.objectinspector.MapObjectInspector; -import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; -import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category; -import 
org.apache.hadoop.hive.serde2.objectinspector.StructField; -import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; -import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; -import org.apache.hadoop.security.SaslRpcServer; -import org.apache.hive.common.util.ReflectionUtil; - -public class MetaStoreUtils { - - private static final Logger LOG = LoggerFactory.getLogger("hive.log"); - - // Right now we only support one special character '/'. - // More special characters can be added accordingly in the future. - // NOTE: - // If the following array is updated, please also be sure to update the - // configuration parameter documentation - // HIVE_SUPPORT_SPECICAL_CHARACTERS_IN_TABLE_NAMES in HiveConf as well. - public static final char[] specialCharactersInTableNames = new char[] { '/' }; - - public static void populateQuickStats(FileStatus[] fileStatus, Map params) { - org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.populateQuickStats(fileStatus, params); - } - - public static boolean updateTableStatsFast(Table tbl, FileStatus[] fileStatus, boolean newDir, - boolean forceRecompute, EnvironmentContext environmentContext) throws MetaException { - return org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.updateTableStatsFast( - tbl, fileStatus, newDir, forceRecompute, environmentContext); - } - - public static boolean updatePartitionStatsFast(Partition part, Warehouse wh, EnvironmentContext environmentContext) - throws MetaException { - return org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.updatePartitionStatsFast( - part, wh, environmentContext); - } - - /** - * getDeserializer - * - * Get the Deserializer for a table. - * - * @param conf - * - hadoop config - * @param table - * the table - * @return - * Returns instantiated deserializer by looking up class name of deserializer stored in - * storage descriptor of passed in table. Also, initializes the deserializer with schema - * of table. - * @exception MetaException - * if any problems instantiating the Deserializer - * - * todo - this should move somewhere into serde.jar - * - */ - static public Deserializer getDeserializer(Configuration conf, - org.apache.hadoop.hive.metastore.api.Table table, boolean skipConfError) throws - MetaException { - String lib = table.getSd().getSerdeInfo().getSerializationLib(); - if (lib == null) { - return null; - } - return getDeserializer(conf, table, skipConfError, lib); - } - - public static Deserializer getDeserializer(Configuration conf, - org.apache.hadoop.hive.metastore.api.Table table, boolean skipConfError, - String lib) throws MetaException { - try { - Deserializer deserializer = ReflectionUtil.newInstance(conf.getClassByName(lib). - asSubclass(Deserializer.class), conf); - if (skipConfError) { - SerDeUtils.initializeSerDeWithoutErrorCheck(deserializer, conf, - MetaStoreUtils.getTableMetadata(table), null); - } else { - SerDeUtils.initializeSerDe(deserializer, conf, MetaStoreUtils.getTableMetadata(table), null); - } - return deserializer; - } catch (RuntimeException e) { - throw e; - } catch (Throwable e) { - LOG.error("error in initSerDe: " + e.getClass().getName() + " " - + e.getMessage(), e); - throw new MetaException(e.getClass().getName() + " " + e.getMessage()); - } - } - - public static Class getDeserializerClass( - Configuration conf, org.apache.hadoop.hive.metastore.api.Table table) throws Exception { - String lib = table.getSd().getSerdeInfo().getSerializationLib(); - return lib == null ? 
null : conf.getClassByName(lib).asSubclass(Deserializer.class); - } - - /** - * getDeserializer - * - * Get the Deserializer for a partition. - * - * @param conf - * - hadoop config - * @param part - * the partition - * @param table the table - * @return - * Returns instantiated deserializer by looking up class name of deserializer stored in - * storage descriptor of passed in partition. Also, initializes the deserializer with - * schema of partition. - * @exception MetaException - * if any problems instantiating the Deserializer - * - */ - static public Deserializer getDeserializer(Configuration conf, - org.apache.hadoop.hive.metastore.api.Partition part, - org.apache.hadoop.hive.metastore.api.Table table) throws MetaException { - String lib = part.getSd().getSerdeInfo().getSerializationLib(); - try { - Deserializer deserializer = ReflectionUtil.newInstance(conf.getClassByName(lib). - asSubclass(Deserializer.class), conf); - SerDeUtils.initializeSerDe(deserializer, conf, MetaStoreUtils.getTableMetadata(table), - MetaStoreUtils.getPartitionMetadata(part, table)); - return deserializer; - } catch (RuntimeException e) { - throw e; - } catch (Throwable e) { - LOG.error("error in initSerDe: " + e.getClass().getName() + " " - + e.getMessage(), e); - throw new MetaException(e.getClass().getName() + " " + e.getMessage()); - } - } - - /** - * Given a list of partition columns and a partial mapping from - * some partition columns to values the function returns the values - * for the column. - * @param partCols the list of table partition columns - * @param partSpec the partial mapping from partition column to values - * @return list of values of for given partition columns, any missing - * values in partSpec is replaced by an empty string - */ - public static List getPvals(List partCols, - Map partSpec) { - List pvals = new ArrayList(partCols.size()); - for (FieldSchema field : partCols) { - String val = StringUtils.defaultString(partSpec.get(field.getName())); - pvals.add(val); - } - return pvals; - } - - /** - * validateName - * - * Checks the name conforms to our standars which are: "[a-zA-z_0-9]+". checks - * this is just characters and numbers and _ - * - * @param name - * the name to validate - * @param conf - * hive configuration - * @return true or false depending on conformance - * if it doesn't match the pattern. - */ - static public boolean validateName(String name, Configuration conf) { - Pattern tpat = null; - String allowedCharacters = "\\w_"; - if (conf != null - && HiveConf.getBoolVar(conf, - HiveConf.ConfVars.HIVE_SUPPORT_SPECICAL_CHARACTERS_IN_TABLE_NAMES)) { - for (Character c : specialCharactersInTableNames) { - allowedCharacters += c; - } - } - tpat = Pattern.compile("[" + allowedCharacters + "]+"); - Matcher m = tpat.matcher(name); - return m.matches(); - } - - /* - * At the Metadata level there are no restrictions on Column Names. 
- */ - public static boolean validateColumnName(String name) { - return true; - } - - public static final String TYPE_FROM_DESERIALIZER = ""; - - public static String getListType(String t) { - return "array<" + t + ">"; - } - - static HashMap typeToThriftTypeMap; - static { - typeToThriftTypeMap = new HashMap(); - typeToThriftTypeMap.put( - org.apache.hadoop.hive.serde.serdeConstants.BOOLEAN_TYPE_NAME, "bool"); - typeToThriftTypeMap.put( - org.apache.hadoop.hive.serde.serdeConstants.TINYINT_TYPE_NAME, "byte"); - typeToThriftTypeMap.put( - org.apache.hadoop.hive.serde.serdeConstants.SMALLINT_TYPE_NAME, "i16"); - typeToThriftTypeMap.put( - org.apache.hadoop.hive.serde.serdeConstants.INT_TYPE_NAME, "i32"); - typeToThriftTypeMap.put( - org.apache.hadoop.hive.serde.serdeConstants.BIGINT_TYPE_NAME, "i64"); - typeToThriftTypeMap.put( - org.apache.hadoop.hive.serde.serdeConstants.DOUBLE_TYPE_NAME, "double"); - typeToThriftTypeMap.put( - org.apache.hadoop.hive.serde.serdeConstants.FLOAT_TYPE_NAME, "float"); - typeToThriftTypeMap.put( - org.apache.hadoop.hive.serde.serdeConstants.LIST_TYPE_NAME, "list"); - typeToThriftTypeMap.put( - org.apache.hadoop.hive.serde.serdeConstants.MAP_TYPE_NAME, "map"); - typeToThriftTypeMap.put( - org.apache.hadoop.hive.serde.serdeConstants.STRING_TYPE_NAME, "string"); - typeToThriftTypeMap.put( - org.apache.hadoop.hive.serde.serdeConstants.BINARY_TYPE_NAME, "binary"); - // These 4 types are not supported yet. - // We should define a complex type date in thrift that contains a single int - // member, and DynamicSerDe - // should convert it to date type at runtime. - typeToThriftTypeMap.put( - org.apache.hadoop.hive.serde.serdeConstants.DATE_TYPE_NAME, "date"); - typeToThriftTypeMap.put( - org.apache.hadoop.hive.serde.serdeConstants.DATETIME_TYPE_NAME, "datetime"); - typeToThriftTypeMap - .put(org.apache.hadoop.hive.serde.serdeConstants.TIMESTAMP_TYPE_NAME, - "timestamp"); - typeToThriftTypeMap.put( - org.apache.hadoop.hive.serde.serdeConstants.DECIMAL_TYPE_NAME, "decimal"); - typeToThriftTypeMap.put( - org.apache.hadoop.hive.serde.serdeConstants.INTERVAL_YEAR_MONTH_TYPE_NAME, - org.apache.hadoop.hive.serde.serdeConstants.INTERVAL_YEAR_MONTH_TYPE_NAME); - typeToThriftTypeMap.put( - org.apache.hadoop.hive.serde.serdeConstants.INTERVAL_DAY_TIME_TYPE_NAME, - org.apache.hadoop.hive.serde.serdeConstants.INTERVAL_DAY_TIME_TYPE_NAME); - } - - private static Set hiveThriftTypeMap; //for validation - static { - hiveThriftTypeMap = new HashSet(); - hiveThriftTypeMap.addAll(serdeConstants.PrimitiveTypes); - hiveThriftTypeMap.addAll(org.apache.hadoop.hive.serde.serdeConstants.CollectionTypes); - hiveThriftTypeMap.add(org.apache.hadoop.hive.serde.serdeConstants.UNION_TYPE_NAME); - hiveThriftTypeMap.add(org.apache.hadoop.hive.serde.serdeConstants.STRUCT_TYPE_NAME); - } - - /** - * Convert type to ThriftType. We do that by tokenizing the type and convert - * each token. - */ - public static String typeToThriftType(String type) { - StringBuilder thriftType = new StringBuilder(); - int last = 0; - boolean lastAlphaDigit = Character.isLetterOrDigit(type.charAt(last)); - for (int i = 1; i <= type.length(); i++) { - if (i == type.length() - || Character.isLetterOrDigit(type.charAt(i)) != lastAlphaDigit) { - String token = type.substring(last, i); - last = i; - String thriftToken = typeToThriftTypeMap.get(token); - thriftType.append(thriftToken == null ? 
token : thriftToken); - lastAlphaDigit = !lastAlphaDigit; - } - } - return thriftType.toString(); - } - - /** - * Convert FieldSchemas to Thrift DDL. - */ - public static String getDDLFromFieldSchema(String structName, - List fieldSchemas) { - StringBuilder ddl = new StringBuilder(); - ddl.append("struct "); - ddl.append(structName); - ddl.append(" { "); - boolean first = true; - for (FieldSchema col : fieldSchemas) { - if (first) { - first = false; - } else { - ddl.append(", "); - } - ddl.append(typeToThriftType(col.getType())); - ddl.append(' '); - ddl.append(col.getName()); - } - ddl.append("}"); - - LOG.trace("DDL: {}", ddl); - return ddl.toString(); - } - - public static Properties getTableMetadata( - org.apache.hadoop.hive.metastore.api.Table table) { - return MetaStoreUtils.getSchema(table.getSd(), table.getSd(), table - .getParameters(), table.getDbName(), table.getTableName(), table.getPartitionKeys()); - } - - public static Properties getPartitionMetadata( - org.apache.hadoop.hive.metastore.api.Partition partition, - org.apache.hadoop.hive.metastore.api.Table table) { - return MetaStoreUtils - .getSchema(partition.getSd(), partition.getSd(), partition - .getParameters(), table.getDbName(), table.getTableName(), - table.getPartitionKeys()); - } - - public static Properties getSchema( - org.apache.hadoop.hive.metastore.api.Partition part, - org.apache.hadoop.hive.metastore.api.Table table) { - return MetaStoreUtils.getSchema(part.getSd(), table.getSd(), table - .getParameters(), table.getDbName(), table.getTableName(), table.getPartitionKeys()); - } - - /** - * Get partition level schema from table level schema. - * This function will use the same column names, column types and partition keys for - * each partition Properties. Their values are copied from the table Properties. This - * is mainly to save CPU and memory. CPU is saved because the first time the - * StorageDescriptor column names are accessed, JDO needs to execute a SQL query to - * retrieve the data. If we know the data will be the same as the table level schema - * and they are immutable, we should just reuse the table level schema objects. - * - * @param sd The Partition level Storage Descriptor. - * @param tblsd The Table level Storage Descriptor. - * @param parameters partition level parameters - * @param databaseName DB name - * @param tableName table name - * @param partitionKeys partition columns - * @param tblSchema The table level schema from which this partition should be copied. - * @return the properties - */ - public static Properties getPartSchemaFromTableSchema( - org.apache.hadoop.hive.metastore.api.StorageDescriptor sd, - org.apache.hadoop.hive.metastore.api.StorageDescriptor tblsd, - Map parameters, String databaseName, String tableName, - List partitionKeys, - Properties tblSchema) { - - // Inherent most properties from table level schema and overwrite some properties - // in the following code. - // This is mainly for saving CPU and memory to reuse the column names, types and - // partition columns in the table level schema. 
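Illustrative sketch (editor's note, not part of the patch): the comment above describes the intended call pattern, in which the table-level Properties are built once and each partition's Properties are derived from that shared copy so the column name/type strings are only materialized a single time. The helper method and variable names below are hypothetical; the two MetaStoreUtils calls are the ones defined in this (pre-move) class.

  private static List<Properties> partSchemas(Table tbl, List<Partition> parts) {
    // Build the table-level schema once; JDO only has to fetch the column metadata here.
    Properties tblSchema = MetaStoreUtils.getTableMetadata(tbl);
    List<Properties> result = new ArrayList<>(parts.size());
    for (Partition p : parts) {
      // Each partition's Properties is a clone of tblSchema with only the
      // partition-specific entries (location, formats, serde params) overwritten.
      result.add(MetaStoreUtils.getPartSchemaFromTableSchema(
          p.getSd(), tbl.getSd(), p.getParameters(),
          tbl.getDbName(), tbl.getTableName(), tbl.getPartitionKeys(), tblSchema));
    }
    return result;
  }
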
- Properties schema = (Properties) tblSchema.clone(); - - // InputFormat - String inputFormat = sd.getInputFormat(); - if (inputFormat == null || inputFormat.length() == 0) { - String tblInput = - schema.getProperty(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.FILE_INPUT_FORMAT); - if (tblInput == null) { - inputFormat = org.apache.hadoop.mapred.SequenceFileInputFormat.class.getName(); - } else { - inputFormat = tblInput; - } - } - schema.setProperty(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.FILE_INPUT_FORMAT, - inputFormat); - - // OutputFormat - String outputFormat = sd.getOutputFormat(); - if (outputFormat == null || outputFormat.length() == 0) { - String tblOutput = - schema.getProperty(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.FILE_OUTPUT_FORMAT); - if (tblOutput == null) { - outputFormat = org.apache.hadoop.mapred.SequenceFileOutputFormat.class.getName(); - } else { - outputFormat = tblOutput; - } - } - schema.setProperty(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.FILE_OUTPUT_FORMAT, - outputFormat); - - // Location - if (sd.getLocation() != null) { - schema.setProperty(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_LOCATION, - sd.getLocation()); - } - - // Bucket count - schema.setProperty(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.BUCKET_COUNT, - Integer.toString(sd.getNumBuckets())); - - if (sd.getBucketCols() != null && sd.getBucketCols().size() > 0) { - schema.setProperty(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.BUCKET_FIELD_NAME, - sd.getBucketCols().get(0)); - } - - // SerdeInfo - if (sd.getSerdeInfo() != null) { - - // We should not update the following 3 values if SerDeInfo contains these. - // This is to keep backward compatible with getSchema(), where these 3 keys - // are updated after SerDeInfo properties got copied. - String cols = org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_COLUMNS; - String colTypes = org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_COLUMN_TYPES; - String parts = org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS; - - for (Map.Entry param : sd.getSerdeInfo().getParameters().entrySet()) { - String key = param.getKey(); - if (schema.get(key) != null && - (key.equals(cols) || key.equals(colTypes) || key.equals(parts))) { - continue; - } - schema.put(key, (param.getValue() != null) ? 
param.getValue() : StringUtils.EMPTY); - } - - if (sd.getSerdeInfo().getSerializationLib() != null) { - schema.setProperty(org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_LIB, - sd.getSerdeInfo().getSerializationLib()); - } - } - - // skipping columns since partition level field schemas are the same as table level's - // skipping partition keys since it is the same as table level partition keys - - if (parameters != null) { - for (Entry e : parameters.entrySet()) { - schema.setProperty(e.getKey(), e.getValue()); - } - } - - return schema; - } - - public static Properties addCols(Properties schema, List cols) { - - StringBuilder colNameBuf = new StringBuilder(); - StringBuilder colTypeBuf = new StringBuilder(); - StringBuilder colComment = new StringBuilder(); - - boolean first = true; - String columnNameDelimiter = getColumnNameDelimiter(cols); - for (FieldSchema col : cols) { - if (!first) { - colNameBuf.append(columnNameDelimiter); - colTypeBuf.append(":"); - colComment.append('\0'); - } - colNameBuf.append(col.getName()); - colTypeBuf.append(col.getType()); - colComment.append((null != col.getComment()) ? col.getComment() : StringUtils.EMPTY); - first = false; - } - schema.setProperty( - org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_COLUMNS, - colNameBuf.toString()); - schema.setProperty(serdeConstants.COLUMN_NAME_DELIMITER, columnNameDelimiter); - String colTypes = colTypeBuf.toString(); - schema.setProperty( - org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_COLUMN_TYPES, - colTypes); - schema.setProperty("columns.comments", colComment.toString()); - - return schema; - - } - - public static Properties getSchemaWithoutCols(org.apache.hadoop.hive.metastore.api.StorageDescriptor sd, - org.apache.hadoop.hive.metastore.api.StorageDescriptor tblsd, - Map parameters, String databaseName, String tableName, - List partitionKeys) { - Properties schema = new Properties(); - String inputFormat = sd.getInputFormat(); - if (inputFormat == null || inputFormat.length() == 0) { - inputFormat = org.apache.hadoop.mapred.SequenceFileInputFormat.class - .getName(); - } - schema.setProperty( - org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.FILE_INPUT_FORMAT, - inputFormat); - String outputFormat = sd.getOutputFormat(); - if (outputFormat == null || outputFormat.length() == 0) { - outputFormat = org.apache.hadoop.mapred.SequenceFileOutputFormat.class - .getName(); - } - schema.setProperty( - org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.FILE_OUTPUT_FORMAT, - outputFormat); - - schema.setProperty( - org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_NAME, - databaseName + "." + tableName); - - if (sd.getLocation() != null) { - schema.setProperty( - org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_LOCATION, - sd.getLocation()); - } - schema.setProperty( - org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.BUCKET_COUNT, Integer - .toString(sd.getNumBuckets())); - if (sd.getBucketCols() != null && sd.getBucketCols().size() > 0) { - schema.setProperty( - org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.BUCKET_FIELD_NAME, sd - .getBucketCols().get(0)); - } - if (sd.getSerdeInfo() != null) { - for (Map.Entry param : sd.getSerdeInfo().getParameters().entrySet()) { - schema.put(param.getKey(), (param.getValue() != null) ? 
param.getValue() : StringUtils.EMPTY); - } - - if (sd.getSerdeInfo().getSerializationLib() != null) { - schema.setProperty( - org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_LIB, sd - .getSerdeInfo().getSerializationLib()); - } - } - - if (sd.getCols() != null) { - schema.setProperty( - org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_DDL, - getDDLFromFieldSchema(tableName, sd.getCols())); - } - - String partString = StringUtils.EMPTY; - String partStringSep = StringUtils.EMPTY; - String partTypesString = StringUtils.EMPTY; - String partTypesStringSep = StringUtils.EMPTY; - for (FieldSchema partKey : partitionKeys) { - partString = partString.concat(partStringSep); - partString = partString.concat(partKey.getName()); - partTypesString = partTypesString.concat(partTypesStringSep); - partTypesString = partTypesString.concat(partKey.getType()); - if (partStringSep.length() == 0) { - partStringSep = "/"; - partTypesStringSep = ":"; - } - } - if (partString.length() > 0) { - schema - .setProperty( - org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS, - partString); - schema - .setProperty( - org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_PARTITION_COLUMN_TYPES, - partTypesString); - } - - if (parameters != null) { - for (Entry e : parameters.entrySet()) { - // add non-null parameters to the schema - if ( e.getValue() != null) { - schema.setProperty(e.getKey(), e.getValue()); - } - } - } - - return schema; - } - - public static Properties getSchema( - org.apache.hadoop.hive.metastore.api.StorageDescriptor sd, - org.apache.hadoop.hive.metastore.api.StorageDescriptor tblsd, - Map parameters, String databaseName, String tableName, - List partitionKeys) { - - return addCols(getSchemaWithoutCols(sd, tblsd, parameters, databaseName, tableName, partitionKeys), tblsd.getCols()); - } - - public static String getColumnNameDelimiter(List fieldSchemas) { - // we first take a look if any fieldSchemas contain COMMA - for (int i = 0; i < fieldSchemas.size(); i++) { - if (fieldSchemas.get(i).getName().contains(",")) { - return String.valueOf(SerDeUtils.COLUMN_COMMENTS_DELIMITER); - } - } - return String.valueOf(SerDeUtils.COMMA); - } - - /** - * Convert FieldSchemas to columnNames. - */ - public static String getColumnNamesFromFieldSchema(List fieldSchemas) { - String delimiter = getColumnNameDelimiter(fieldSchemas); - StringBuilder sb = new StringBuilder(); - for (int i = 0; i < fieldSchemas.size(); i++) { - if (i > 0) { - sb.append(delimiter); - } - sb.append(fieldSchemas.get(i).getName()); - } - return sb.toString(); - } - - /** - * Convert FieldSchemas to columnTypes. 
- */ - public static String getColumnTypesFromFieldSchema( - List fieldSchemas) { - StringBuilder sb = new StringBuilder(); - for (int i = 0; i < fieldSchemas.size(); i++) { - if (i > 0) { - sb.append(","); - } - sb.append(fieldSchemas.get(i).getType()); - } - return sb.toString(); - } - - public static String getColumnCommentsFromFieldSchema(List fieldSchemas) { - StringBuilder sb = new StringBuilder(); - for (int i = 0; i < fieldSchemas.size(); i++) { - if (i > 0) { - sb.append(SerDeUtils.COLUMN_COMMENTS_DELIMITER); - } - sb.append(fieldSchemas.get(i).getComment()); - } - return sb.toString(); - } - - public static int startMetaStore() throws Exception { - return startMetaStore(HadoopThriftAuthBridge.getBridge(), null); - } - - public static int startMetaStore(final HadoopThriftAuthBridge bridge, HiveConf conf) throws Exception { - int port = findFreePort(); - startMetaStore(port, bridge, conf); - return port; - } - - public static int startMetaStore(HiveConf conf) throws Exception { - return startMetaStore(HadoopThriftAuthBridge.getBridge(), conf); - } - - public static void startMetaStore(final int port, final HadoopThriftAuthBridge bridge) throws Exception { - startMetaStore(port, bridge, null); - } - - public static void startMetaStore(final int port, - final HadoopThriftAuthBridge bridge, HiveConf hiveConf) - throws Exception{ - if (hiveConf == null) { - hiveConf = new HiveConf(HMSHandler.class); - } - final HiveConf finalHiveConf = hiveConf; - Thread thread = new Thread(new Runnable() { - @Override - public void run() { - try { - HiveMetaStore.startMetaStore(port, bridge, finalHiveConf); - } catch (Throwable e) { - LOG.error("Metastore Thrift Server threw an exception...",e); - } - } - }); - thread.setDaemon(true); - thread.start(); - loopUntilHMSReady(port); - } - - /** - * A simple connect test to make sure that the metastore is up - * @throws Exception - */ - private static void loopUntilHMSReady(int port) throws Exception { - int retries = 0; - Exception exc = null; - while (true) { - try { - Socket socket = new Socket(); - socket.connect(new InetSocketAddress(port), 5000); - socket.close(); - return; - } catch (Exception e) { - if (retries++ > 60) { //give up - exc = e; - break; - } - Thread.sleep(1000); - } - } - // something is preventing metastore from starting - // print the stack from all threads for debugging purposes - LOG.error("Unable to connect to metastore server: " + exc.getMessage()); - LOG.info("Printing all thread stack traces for debugging before throwing exception."); - LOG.info(getAllThreadStacksAsString()); - throw exc; - } - - private static String getAllThreadStacksAsString() { - Map threadStacks = Thread.getAllStackTraces(); - StringBuilder sb = new StringBuilder(); - for (Map.Entry entry : threadStacks.entrySet()) { - Thread t = entry.getKey(); - sb.append(System.lineSeparator()); - sb.append("Name: ").append(t.getName()).append(" State: ").append(t.getState()); - addStackString(entry.getValue(), sb); - } - return sb.toString(); - } - - private static void addStackString(StackTraceElement[] stackElems, StringBuilder sb) { - sb.append(System.lineSeparator()); - for (StackTraceElement stackElem : stackElems) { - sb.append(stackElem).append(System.lineSeparator()); - } - } - - /** - * Finds a free port on the machine. 
- * - * @return - * @throws IOException - */ - public static int findFreePort() throws IOException { - ServerSocket socket= new ServerSocket(0); - int port = socket.getLocalPort(); - socket.close(); - return port; - } - - /** - * Finds a free port on the machine, but allow the - * ability to specify a port number to not use, no matter what. - */ - public static int findFreePortExcepting(int portToExclude) throws IOException { - ServerSocket socket1 = null; - ServerSocket socket2 = null; - try { - socket1 = new ServerSocket(0); - socket2 = new ServerSocket(0); - if (socket1.getLocalPort() != portToExclude) { - return socket1.getLocalPort(); - } - // If we're here, then socket1.getLocalPort was the port to exclude - // Since both sockets were open together at a point in time, we're - // guaranteed that socket2.getLocalPort() is not the same. - return socket2.getLocalPort(); - } finally { - if (socket1 != null){ - socket1.close(); - } - if (socket2 != null){ - socket2.close(); - } - } - } - - /** - * Catches exceptions that can't be handled and bundles them to MetaException - * - * @param e - * @throws MetaException - */ - static void logAndThrowMetaException(Exception e) throws MetaException { - String exInfo = "Got exception: " + e.getClass().getName() + " " - + e.getMessage(); - LOG.error(exInfo, e); - LOG.error("Converting exception to MetaException"); - throw new MetaException(exInfo); - } - - /** - * @param tableName - * @param deserializer - * @return the list of fields - * @throws SerDeException - * @throws MetaException - */ - public static List getFieldsFromDeserializer(String tableName, - Deserializer deserializer) throws SerDeException, MetaException { - ObjectInspector oi = deserializer.getObjectInspector(); - String[] names = tableName.split("\\."); - String last_name = names[names.length - 1]; - for (int i = 1; i < names.length; i++) { - - if (oi instanceof StructObjectInspector) { - StructObjectInspector soi = (StructObjectInspector) oi; - StructField sf = soi.getStructFieldRef(names[i]); - if (sf == null) { - throw new MetaException("Invalid Field " + names[i]); - } else { - oi = sf.getFieldObjectInspector(); - } - } else if (oi instanceof ListObjectInspector - && names[i].equalsIgnoreCase("$elem$")) { - ListObjectInspector loi = (ListObjectInspector) oi; - oi = loi.getListElementObjectInspector(); - } else if (oi instanceof MapObjectInspector - && names[i].equalsIgnoreCase("$key$")) { - MapObjectInspector moi = (MapObjectInspector) oi; - oi = moi.getMapKeyObjectInspector(); - } else if (oi instanceof MapObjectInspector - && names[i].equalsIgnoreCase("$value$")) { - MapObjectInspector moi = (MapObjectInspector) oi; - oi = moi.getMapValueObjectInspector(); - } else { - throw new MetaException("Unknown type for " + names[i]); - } - } - - ArrayList str_fields = new ArrayList(); - // rules on how to recurse the ObjectInspector based on its type - if (oi.getCategory() != Category.STRUCT) { - str_fields.add(new FieldSchema(last_name, oi.getTypeName(), - FROM_SERIALIZER)); - } else { - List fields = ((StructObjectInspector) oi) - .getAllStructFieldRefs(); - for (int i = 0; i < fields.size(); i++) { - StructField structField = fields.get(i); - String fieldName = structField.getFieldName(); - String fieldTypeName = structField.getFieldObjectInspector().getTypeName(); - String fieldComment = determineFieldComment(structField.getFieldComment()); - - str_fields.add(new FieldSchema(fieldName, fieldTypeName, fieldComment)); - } - } - return str_fields; - } - - private static final String 
FROM_SERIALIZER = "from deserializer"; - private static String determineFieldComment(String comment) { - return (comment == null) ? FROM_SERIALIZER : comment; - } - - /** - * Convert TypeInfo to FieldSchema. - */ - public static FieldSchema getFieldSchemaFromTypeInfo(String fieldName, - TypeInfo typeInfo) { - return new FieldSchema(fieldName, typeInfo.getTypeName(), - "generated by TypeInfoUtils.getFieldSchemaFromTypeInfo"); - } - - /** - * Determines whether a table is an external table. - * - * @param table table of interest - * - * @return true if external - */ - public static boolean isExternalTable(Table table) { - if (table == null) { - return false; - } - Map params = table.getParameters(); - if (params == null) { - return false; - } - - return "TRUE".equalsIgnoreCase(params.get("EXTERNAL")); - } - - public static boolean isArchived( - org.apache.hadoop.hive.metastore.api.Partition part) { - Map params = part.getParameters(); - return "TRUE".equalsIgnoreCase(params.get(hive_metastoreConstants.IS_ARCHIVED)); - } - - /** - * Filter that filters out hidden files - */ - private static final PathFilter hiddenFileFilter = new PathFilter() { - @Override - public boolean accept(Path p) { - String name = p.getName(); - return !name.startsWith("_") && !name.startsWith("."); - } - }; - - /** - * Utility method that determines if a specified directory already has - * contents (non-hidden files) or not - useful to determine if an - * immutable table already has contents, for example. - * - * @param path - * @throws IOException - */ - public static boolean isDirEmpty(FileSystem fs, Path path) throws IOException { - - if (fs.exists(path)) { - FileStatus[] status = fs.globStatus(new Path(path, "*"), hiddenFileFilter); - if (status.length > 0) { - return false; - } - } - return true; - } - - public static String getIndexTableName(String dbName, String baseTblName, String indexName) { - return dbName + "__" + baseTblName + "_" + indexName + "__"; - } - - public static boolean isIndexTable(Table table) { - if (table == null) { - return false; - } - return TableType.INDEX_TABLE.toString().equals(table.getTableType()); - } - - public static boolean isMaterializedViewTable(Table table) { - if (table == null) { - return false; - } - return TableType.MATERIALIZED_VIEW.toString().equals(table.getTableType()); - } - - public static boolean isView(Table table) { - if (table == null) { - return false; - } - return TableType.VIRTUAL_VIEW.toString().equals(table.getTableType()); - } - - @SuppressWarnings("unchecked") - public static Class getClass(String rawStoreClassName) - throws MetaException { - try { - return (Class) - Class.forName(rawStoreClassName, true, JavaUtils.getClassLoader()); - } catch (ClassNotFoundException e) { - throw new MetaException(rawStoreClassName + " class not found"); - } - } - - /** - * Create an object of the given class. - * @param theClass - * @param parameterTypes - * an array of parameterTypes for the constructor - * @param initargs - * the list of arguments for the constructor - */ - public static T newInstance(Class theClass, Class[] parameterTypes, - Object[] initargs) { - // Perform some sanity checks on the arguments. 
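A minimal usage sketch for the newInstance helper above (the listener class and conf variable are hypothetical, not taken from this patch): it looks up the constructor matching the supplied parameter types, checks each argument against its declared type, and invokes it reflectively.

  // Hypothetical caller: MyEventListener is assumed to expose a (Configuration) constructor.
  MyEventListener listener = MetaStoreUtils.newInstance(
      MyEventListener.class, new Class[] { Configuration.class }, new Object[] { conf });
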
- if (parameterTypes.length != initargs.length) { - throw new IllegalArgumentException( - "Number of constructor parameter types doesn't match number of arguments"); - } - for (int i = 0; i < parameterTypes.length; i++) { - Class clazz = parameterTypes[i]; - if (initargs[i] != null && !(clazz.isInstance(initargs[i]))) { - throw new IllegalArgumentException("Object : " + initargs[i] - + " is not an instance of " + clazz); - } - } - - try { - Constructor meth = theClass.getDeclaredConstructor(parameterTypes); - meth.setAccessible(true); - return meth.newInstance(initargs); - } catch (Exception e) { - throw new RuntimeException("Unable to instantiate " + theClass.getName(), e); - } - } - - /** - * @param schema1: The first schema to be compared - * @param schema2: The second schema to be compared - * @return true if the two schemas are the same else false - * for comparing a field we ignore the comment it has - */ - public static boolean compareFieldColumns(List schema1, List schema2) { - if (schema1.size() != schema2.size()) { - return false; - } - Iterator its1 = schema1.iterator(); - Iterator its2 = schema2.iterator(); - while (its1.hasNext()) { - FieldSchema f1 = its1.next(); - FieldSchema f2 = its2.next(); - // The default equals provided by thrift compares the comments too for - // equality, thus we need to compare the relevant fields here. - if (!StringUtils.equals(f1.getName(), f2.getName()) || - !StringUtils.equals(f1.getType(), f2.getType())) { - return false; - } - } - return true; - } - - /** - * Read and return the meta store Sasl configuration. Currently it uses the default - * Hadoop SASL configuration and can be configured using "hadoop.rpc.protection" - * HADOOP-10211, made a backward incompatible change due to which this call doesn't - * work with Hadoop 2.4.0 and later. 
- * @param conf - * @return The SASL configuration - */ - public static Map getMetaStoreSaslProperties(HiveConf conf, boolean useSSL) { - // As of now Hive Meta Store uses the same configuration as Hadoop SASL configuration - - // If SSL is enabled, override the given value of "hadoop.rpc.protection" and set it to "authentication" - // This disables any encryption provided by SASL, since SSL already provides it - String hadoopRpcProtectionVal = conf.get(CommonConfigurationKeysPublic.HADOOP_RPC_PROTECTION); - String hadoopRpcProtectionAuth = SaslRpcServer.QualityOfProtection.AUTHENTICATION.toString(); - - if (useSSL && hadoopRpcProtectionVal != null && !hadoopRpcProtectionVal.equals(hadoopRpcProtectionAuth)) { - LOG.warn("Overriding value of " + CommonConfigurationKeysPublic.HADOOP_RPC_PROTECTION + " setting it from " - + hadoopRpcProtectionVal + " to " + hadoopRpcProtectionAuth + " because SSL is enabled"); - conf.set(CommonConfigurationKeysPublic.HADOOP_RPC_PROTECTION, hadoopRpcProtectionAuth); - } - return HadoopThriftAuthBridge.getBridge().getHadoopSaslProperties(conf); - } - - - public static String ARCHIVING_LEVEL = "archiving_level"; - public static int getArchivingLevel(Partition part) throws MetaException { - if (!isArchived(part)) { - throw new MetaException("Getting level of unarchived partition"); - } - - String lv = part.getParameters().get(ARCHIVING_LEVEL); - if (lv != null) { - return Integer.parseInt(lv); - } - // partitions archived before introducing multiple archiving - return part.getValues().size(); - } - - public static String[] getQualifiedName(String defaultDbName, String tableName) { - String[] names = tableName.split("\\."); - if (names.length == 1) { - return new String[] { defaultDbName, tableName}; - } - return names; - } - - public static List getColumnNames(List schema) { - List cols = new ArrayList<>(schema.size()); - for (FieldSchema fs : schema) { - cols.add(fs.getName()); - } - return cols; - } -} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/SerDeStorageSchemaReader.java metastore/src/java/org/apache/hadoop/hive/metastore/SerDeStorageSchemaReader.java index 80fae281cc..59bcd5ca34 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/SerDeStorageSchemaReader.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/SerDeStorageSchemaReader.java @@ -44,8 +44,8 @@ } } - Deserializer s = MetaStoreUtils.getDeserializer(conf, tbl, false); - return MetaStoreUtils.getFieldsFromDeserializer(tbl.getTableName(), s); + Deserializer s = HiveMetaStoreUtils.getDeserializer(conf, tbl, false); + return HiveMetaStoreUtils.getFieldsFromDeserializer(tbl.getTableName(), s); } catch (Exception e) { StringUtils.stringifyException(e); throw new MetaException(e.getMessage()); diff --git ql/pom.xml ql/pom.xml index 7ed8f27320..f35a4c87a0 100644 --- ql/pom.xml +++ ql/pom.xml @@ -470,7 +470,7 @@ org.apache.hive - hive-metastore + hive-standalone-metastore ${project.version} test-jar test diff --git ql/src/java/org/apache/hadoop/hive/ql/Driver.java ql/src/java/org/apache/hadoop/hive/ql/Driver.java index 4d52d748f1..d3df015288 100644 --- ql/src/java/org/apache/hadoop/hive/ql/Driver.java +++ ql/src/java/org/apache/hadoop/hive/ql/Driver.java @@ -51,7 +51,8 @@ import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.conf.HiveVariableSource; import org.apache.hadoop.hive.conf.VariableSubstitution; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; +import org.apache.hadoop.hive.metastore.ColumnType; +import 
org.apache.hadoop.hive.metastore.HiveMetaStoreUtils; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Schema; import org.apache.hadoop.hive.ql.exec.ConditionalTask; @@ -309,7 +310,7 @@ private static Schema getSchema(BaseSemanticAnalyzer sem, HiveConf conf) { String tableName = "result"; List lst = null; try { - lst = MetaStoreUtils.getFieldsFromDeserializer(tableName, td.getDeserializer(conf)); + lst = HiveMetaStoreUtils.getFieldsFromDeserializer(tableName, td.getDeserializer(conf)); } catch (Exception e) { LOG.warn("Error getting schema: " + org.apache.hadoop.util.StringUtils.stringifyException(e)); @@ -338,7 +339,7 @@ public Schema getThriftSchema() throws Exception { // Go over the schema and convert type to thrift type if (lst != null) { for (FieldSchema f : lst) { - f.setType(MetaStoreUtils.typeToThriftType(f.getType())); + f.setType(ColumnType.typeToThriftType(f.getType())); } } } diff --git ql/src/java/org/apache/hadoop/hive/ql/QueryState.java ql/src/java/org/apache/hadoop/hive/ql/QueryState.java index 7d5aa8b179..f3a46dbcaf 100644 --- ql/src/java/org/apache/hadoop/hive/ql/QueryState.java +++ ql/src/java/org/apache/hadoop/hive/ql/QueryState.java @@ -138,8 +138,8 @@ public Builder withGenerateNewQueryId(boolean generateNewQueryId) { /** * The source HiveConf object used to create the QueryState. If runAsync is false, and the - * confOverLay is empty then we will reuse the hiveConf object as a backing datastore for the - * QueryState. We will create a clone of the hiveConf object otherwise. + * confOverLay is empty then we will reuse the conf object as a backing datastore for the + * QueryState. We will create a clone of the conf object otherwise. * @param hiveConf The source HiveConf * @return The builder */ @@ -153,7 +153,7 @@ public Builder withHiveConf(HiveConf hiveConf) { * - runAsync false * - confOverlay null * - generateNewQueryId false - * - hiveConf null + * - conf null * @return The generated QueryState object */ public QueryState build() { diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/ArchiveUtils.java ql/src/java/org/apache/hadoop/hive/ql/exec/ArchiveUtils.java index f7fad94bfe..a5912eb914 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/ArchiveUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/ArchiveUtils.java @@ -30,10 +30,10 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Partition; diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java index d6bf7461a8..55ef8de9a5 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java @@ -72,7 +72,7 @@ import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.DefaultHiveMetaHook; import org.apache.hadoop.hive.metastore.HiveMetaHook; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; +import org.apache.hadoop.hive.metastore.HiveMetaStoreUtils; import org.apache.hadoop.hive.metastore.PartitionDropOptions; import 
org.apache.hadoop.hive.metastore.StatObjectConverter; import org.apache.hadoop.hive.metastore.TableType; @@ -111,6 +111,7 @@ import org.apache.hadoop.hive.metastore.api.WMResourcePlan; import org.apache.hadoop.hive.metastore.api.WMResourcePlanStatus; import org.apache.hadoop.hive.metastore.txn.TxnStore; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.CompilationOpContext; import org.apache.hadoop.hive.ql.Context; import org.apache.hadoop.hive.ql.DriverContext; @@ -4219,7 +4220,7 @@ private static StorageDescriptor retrieveStorageDescriptor(Table tbl, Partition // the fields so that new SerDe could operate. Note that this may fail if some fields // from old SerDe are too long to be stored in metastore, but there's nothing we can do. try { - Deserializer oldSerde = MetaStoreUtils.getDeserializer( + Deserializer oldSerde = HiveMetaStoreUtils.getDeserializer( conf, tbl.getTTable(), false, oldSerdeName); tbl.setFields(Hive.getFieldsFromDeserializer(tbl.getTableName(), oldSerde)); } catch (MetaException ex) { diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java index b85a243310..fb78bd8a1b 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java @@ -39,7 +39,6 @@ import org.apache.hadoop.hive.common.ValidReadTxnList; import org.apache.hadoop.hive.common.ValidTxnList; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.ql.exec.mr.ExecMapperContext; import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hadoop.hive.ql.io.HiveContextAwareRecordReader; diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java index 6d1377370c..f5a5e713bb 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java @@ -24,10 +24,10 @@ import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.common.HiveStatsUtils; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.InvalidOperationException; import org.apache.hadoop.hive.metastore.api.Order; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.Context; import org.apache.hadoop.hive.ql.DriverContext; import org.apache.hadoop.hive.ql.ErrorMsg; diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java index 567126edf8..c1dbd24018 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java @@ -28,6 +28,8 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.ql.CompilationOpContext; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.hive.ql.DriverContext; import org.apache.hadoop.hive.ql.QueryPlan; import org.apache.hadoop.hive.ql.QueryState; @@ -43,8 +45,6 @@ import org.apache.hadoop.hive.ql.stats.BasicStatsTask; import org.apache.hadoop.hive.ql.stats.ColStatsProcessor; import org.apache.hadoop.hive.ql.stats.IStatsProcessor; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; 
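Taken together, the Driver.java and DDLTask.java hunks above show where the old monolithic helper's pieces now live: the Thrift type mapping moves to ColumnType, the deserializer-dependent helpers to HiveMetaStoreUtils, and the serde-free utilities to org.apache.hadoop.hive.metastore.utils.MetaStoreUtils. A minimal before/after sketch of a caller, with illustrative variable names:

  // Before this patch: everything on org.apache.hadoop.hive.metastore.MetaStoreUtils.
  List<FieldSchema> cols = MetaStoreUtils.getFieldsFromDeserializer(tableName, deserializer);
  for (FieldSchema f : cols) {
    f.setType(MetaStoreUtils.typeToThriftType(f.getType()));
  }

  // After this patch: helpers split by dependency.
  List<FieldSchema> cols = HiveMetaStoreUtils.getFieldsFromDeserializer(tableName, deserializer);
  for (FieldSchema f : cols) {
    f.setType(ColumnType.typeToThriftType(f.getType()));
  }
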
import com.google.common.util.concurrent.ThreadFactoryBuilder; diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java index f7850fd1ba..d68d646eb4 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java @@ -104,11 +104,11 @@ import org.apache.hadoop.hive.common.ValidTxnList; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Order; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.Context; import org.apache.hadoop.hive.ql.Driver.LockedDriverState; import org.apache.hadoop.hive.ql.ErrorMsg; diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java index 5c338b89c9..6c1afa6555 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java @@ -1184,10 +1184,10 @@ public boolean checkOrWaitForTheFile(FileSystem srcFs, Path src, Path dest, Conf /** * Creates and initializes a JobConf object that can be used to execute * the DAG. The configuration object will contain configurations from mapred-site - * overlaid with key/value pairs from the hiveConf object. Finally it will also + * overlaid with key/value pairs from the conf object. Finally it will also * contain some hive specific configurations that do not change from DAG to DAG. 
* - * @param hiveConf Current hiveConf for the execution + * @param hiveConf Current conf for the execution * @return JobConf base configuration for job execution * @throws IOException */ diff --git ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java index 1beb839b1f..b35df69885 100755 --- ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java @@ -37,7 +37,6 @@ import org.apache.hadoop.hive.common.StringInternUtils; import org.apache.hadoop.hive.common.ValidReadTxnList; import org.apache.hadoop.hive.common.ValidTxnList; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.ql.exec.SerializationUtilities; import org.apache.hive.common.util.Ref; import org.slf4j.Logger; diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index fbcd579220..50bdce89a4 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -81,8 +81,8 @@ import org.apache.hadoop.hive.metastore.HiveMetaHookLoader; import org.apache.hadoop.hive.metastore.HiveMetaStore; import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; +import org.apache.hadoop.hive.metastore.HiveMetaStoreUtils; import org.apache.hadoop.hive.metastore.IMetaStoreClient; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.PartitionDropOptions; import org.apache.hadoop.hive.metastore.RawStore; import org.apache.hadoop.hive.metastore.RetryingMetaStoreClient; @@ -137,6 +137,7 @@ import org.apache.hadoop.hive.metastore.api.WMMapping; import org.apache.hadoop.hive.metastore.api.WMPool; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.exec.AbstractFileMergeOperator; import org.apache.hadoop.hive.ql.exec.FunctionRegistry; @@ -876,7 +877,7 @@ public void createTable(Table tbl, boolean ifNotExists, tbl.setDbName(SessionState.get().getCurrentDatabase()); } if (tbl.getCols().size() == 0 || tbl.getSd().getColsSize() == 0) { - tbl.setFields(MetaStoreUtils.getFieldsFromDeserializer(tbl.getTableName(), + tbl.setFields(HiveMetaStoreUtils.getFieldsFromDeserializer(tbl.getTableName(), tbl.getDeserializer())); } tbl.checkValidity(conf); @@ -918,7 +919,7 @@ public void createTable(Table tbl, boolean ifNotExists) throws HiveException { public static List getFieldsFromDeserializerForMsStorage( Table tbl, Deserializer deserializer) throws SerDeException, MetaException { - List schema = MetaStoreUtils.getFieldsFromDeserializer( + List schema = HiveMetaStoreUtils.getFieldsFromDeserializer( tbl.getTableName(), deserializer); for (FieldSchema field : schema) { field.setType(MetaStoreUtils.TYPE_FROM_DESERIALIZER); @@ -2762,10 +2763,10 @@ public boolean dropPartition(String dbName, String tableName, List partV List partSpecs, PartitionDropOptions dropOptions) throws HiveException { try { Table tbl = getTable(dbName, tblName); - List> partExprs = - new ArrayList>(partSpecs.size()); + List> partExprs = + new ArrayList<>(partSpecs.size()); for (DropTableDesc.PartSpec partSpec : partSpecs) { - partExprs.add(new ObjectPair(partSpec.getPrefixLength(), + partExprs.add(new org.apache.hadoop.hive.metastore.utils.ObjectPair<>(partSpec.getPrefixLength(), 
SerializationUtilities.serializeExpressionToKryo(partSpec.getPartSpec()))); } List tParts = getMSC().dropPartitions( @@ -4189,7 +4190,7 @@ private String getUserName() { public static List getFieldsFromDeserializer(String name, Deserializer serde) throws HiveException { try { - return MetaStoreUtils.getFieldsFromDeserializer(name, serde); + return HiveMetaStoreUtils.getFieldsFromDeserializer(name, serde); } catch (SerDeException e) { throw new HiveException("Error in getting fields from serde. " + e.getMessage(), e); diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java index 44026fa33e..6d10c106c5 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java @@ -28,13 +28,14 @@ import java.util.Properties; import org.apache.hadoop.hive.common.StringInternUtils; +import org.apache.hadoop.hive.metastore.HiveMetaStoreUtils; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.FileUtils; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.MetaException; @@ -228,7 +229,7 @@ public Path getDataLocation() { final public Deserializer getDeserializer() { if (deserializer == null) { try { - deserializer = MetaStoreUtils.getDeserializer(SessionState.getSessionConf(), + deserializer = HiveMetaStoreUtils.getDeserializer(SessionState.getSessionConf(), tPartition, table.getTTable()); } catch (MetaException e) { throw new RuntimeException(e); @@ -246,8 +247,8 @@ public Properties getMetadataFromPartitionSchema() { } public Properties getSchemaFromTableSchema(Properties tblSchema) { - return MetaStoreUtils.getPartSchemaFromTableSchema(tPartition.getSd(), table.getTTable().getSd(), - tPartition.getParameters(), table.getDbName(), table.getTableName(), table.getPartitionKeys(), + return MetaStoreUtils.getPartSchemaFromTableSchema(tPartition.getSd(), + tPartition.getParameters(), tblSchema); } @@ -493,7 +494,7 @@ public void setTPartition( SessionState.getSessionConf(), serializationLib, table.getParameters())) { return Hive.getFieldsFromDeserializerForMsStorage(table, getDeserializer()); } - return MetaStoreUtils.getFieldsFromDeserializer(table.getTableName(), getDeserializer()); + return HiveMetaStoreUtils.getFieldsFromDeserializer(table.getTableName(), getDeserializer()); } catch (Exception e) { LOG.error("Unable to get cols from serde: " + tPartition.getSd().getSerdeInfo().getSerializationLib(), e); diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java index 1c262000c8..80c7804dc1 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java @@ -30,17 +30,16 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.FileUtils; import 
org.apache.hadoop.hive.common.StatsSetupConst; -import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.io.HdfsUtils; import org.apache.hadoop.hive.metastore.HiveMetaHookLoader; import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; import org.apache.hadoop.hive.metastore.IMetaStoreClient; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; @@ -61,6 +60,8 @@ import org.apache.hadoop.hive.metastore.api.UnknownDBException; import org.apache.hadoop.hive.metastore.api.UnknownTableException; import org.apache.hadoop.hive.ql.parse.SemanticAnalyzer; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; +import org.apache.hadoop.hive.metastore.utils.SecurityUtils; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.shims.HadoopShims; import org.apache.hadoop.hive.shims.ShimLoader; @@ -69,12 +70,12 @@ public class SessionHiveMetaStoreClient extends HiveMetaStoreClient implements IMetaStoreClient { - SessionHiveMetaStoreClient(HiveConf conf, Boolean allowEmbedded) throws MetaException { + SessionHiveMetaStoreClient(Configuration conf, Boolean allowEmbedded) throws MetaException { super(conf, null, allowEmbedded); } SessionHiveMetaStoreClient( - HiveConf conf, HiveMetaHookLoader hookLoader, Boolean allowEmbedded) throws MetaException { + Configuration conf, HiveMetaHookLoader hookLoader, Boolean allowEmbedded) throws MetaException { super(conf, hookLoader, allowEmbedded); } @@ -618,7 +619,7 @@ private void dropTempTable(org.apache.hadoop.hive.metastore.api.Table table, boo tablePath = new Path(table.getSd().getLocation()); if (!getWh().isWritable(tablePath.getParent())) { throw new MetaException("Table metadata not deleted since " + tablePath.getParent() + - " is not writable by " + conf.getUser()); + " is not writable by " + SecurityUtils.getUser()); } } catch (IOException err) { MetaException metaException = diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java index a1cad9ee2a..0debff669a 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java @@ -39,7 +39,7 @@ import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; +import org.apache.hadoop.hive.metastore.HiveMetaStoreUtils; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.FieldSchema; @@ -49,6 +49,7 @@ import org.apache.hadoop.hive.metastore.api.SkewedInfo; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils; import org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat; @@ -268,7 +269,7 @@ final public Deserializer getDeserializer() { } final public Class getDeserializerClass() throws Exception { - return MetaStoreUtils.getDeserializerClass(SessionState.getSessionConf(), tTable); + return HiveMetaStoreUtils.getDeserializerClass(SessionState.getSessionConf(), 
tTable); } final public Deserializer getDeserializer(boolean skipConfError) { @@ -280,7 +281,7 @@ final public Deserializer getDeserializer(boolean skipConfError) { final public Deserializer getDeserializerFromMetaStore(boolean skipConfError) { try { - return MetaStoreUtils.getDeserializer(SessionState.getSessionConf(), tTable, skipConfError); + return HiveMetaStoreUtils.getDeserializer(SessionState.getSessionConf(), tTable, skipConfError); } catch (MetaException e) { throw new RuntimeException(e); } @@ -640,7 +641,7 @@ private boolean isField(String col) { SessionState.getSessionConf(), serializationLib, tTable.getParameters())) { return Hive.getFieldsFromDeserializerForMsStorage(this, getDeserializer()); } else { - return MetaStoreUtils.getFieldsFromDeserializer(getTableName(), getDeserializer()); + return HiveMetaStoreUtils.getFieldsFromDeserializer(getTableName(), getDeserializer()); } } catch (Exception e) { LOG.error("Unable to get field from serde: " + serializationLib, e); diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java index 1b7b425df9..f3d878d3f7 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java @@ -27,6 +27,7 @@ import java.util.Map; import java.util.Set; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hive.common.util.HiveStringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -37,7 +38,6 @@ import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.WMResourcePlan; diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractBucketJoinProc.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractBucketJoinProc.java index c62d98f9d1..a7fc3e9e7f 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractBucketJoinProc.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractBucketJoinProc.java @@ -34,7 +34,6 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.FileUtils; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.exec.MapJoinOperator; import org.apache.hadoop.hive.ql.exec.Operator; diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java index fc1d4f972c..31d2b2342b 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java @@ -26,7 +26,6 @@ import java.util.Stack; import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Order; import org.apache.hadoop.hive.ql.exec.FileSinkOperator; diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/SamplePruner.java 
ql/src/java/org/apache/hadoop/hive/ql/optimizer/SamplePruner.java index b5891ab701..9100c92045 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/SamplePruner.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/SamplePruner.java @@ -32,7 +32,6 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.ql.exec.FilterOperator; import org.apache.hadoop.hive.ql.exec.TableScanOperator; import org.apache.hadoop.hive.ql.io.AcidUtils; diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java index a3a35ff7fd..85f198b6cd 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java @@ -31,11 +31,11 @@ import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.common.type.HiveDecimal; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData; import org.apache.hadoop.hive.metastore.api.LongColumnStatsData; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.exec.ColumnInfo; import org.apache.hadoop.hive.ql.exec.FetchTask; import org.apache.hadoop.hive.ql.exec.FileSinkOperator; diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SamplingOptimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SamplingOptimizer.java index b28315afe8..6dd08d204c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SamplingOptimizer.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SamplingOptimizer.java @@ -20,7 +20,6 @@ package org.apache.hadoop.hive.ql.optimizer.physical; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.ql.exec.GroupByOperator; import org.apache.hadoop.hive.ql.exec.Operator; import org.apache.hadoop.hive.ql.exec.OperatorUtils; diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java index a06a672331..a09b7961c2 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java @@ -24,6 +24,7 @@ import org.antlr.runtime.tree.CommonTree; import org.antlr.runtime.tree.Tree; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.io.AcidUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -33,7 +34,6 @@ import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.Database; diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/ExportSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/ExportSemanticAnalyzer.java index 2400f9ce51..b2532361a6 100644 --- 
ql/src/java/org/apache/hadoop/hive/ql/parse/ExportSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/ExportSemanticAnalyzer.java @@ -19,46 +19,13 @@ Licensed to the Apache Software Foundation (ASF) under one package org.apache.hadoop.hive.ql.parse; -import org.apache.hadoop.hive.ql.metadata.HiveException; - -import java.io.FileNotFoundException; -import java.io.IOException; -import java.io.Serializable; -import java.net.URI; -import java.util.HashSet; -import java.util.List; - -import org.antlr.runtime.tree.Tree; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.common.FileUtils; -import org.apache.hadoop.hive.common.ValidReadTxnList; -import org.apache.hadoop.hive.common.ValidTxnList; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; -import org.apache.hadoop.hive.ql.Context; -import org.apache.hadoop.hive.ql.ErrorMsg; -import org.apache.hadoop.hive.ql.QueryState; -import org.apache.hadoop.hive.ql.exec.ReplCopyTask; -import org.apache.hadoop.hive.ql.exec.Task; -import org.apache.hadoop.hive.ql.exec.TaskFactory; -import org.apache.hadoop.hive.ql.exec.Utilities; -import org.apache.hadoop.hive.ql.hooks.ReadEntity; -import org.apache.hadoop.hive.ql.hooks.WriteEntity; -import org.apache.hadoop.hive.ql.metadata.Hive; import org.antlr.runtime.tree.Tree; import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.QueryState; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; import org.apache.hadoop.hive.ql.metadata.InvalidTableException; -import org.apache.hadoop.hive.ql.metadata.Partition; -import org.apache.hadoop.hive.ql.metadata.PartitionIterable; import org.apache.hadoop.hive.ql.metadata.Table; -import org.apache.hadoop.hive.ql.plan.CopyWork; -import org.slf4j.Logger; import org.apache.hadoop.hive.ql.parse.repl.dump.TableExport; import org.apache.hadoop.hive.ql.plan.ExportWork; diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java index a1b6cda3e8..83d53bc157 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java @@ -26,7 +26,6 @@ import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.Database; diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index 07742e0485..28e3621d32 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -65,7 +65,6 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.conf.HiveConf.StrictChecks; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.TransactionalValidationListener; import org.apache.hadoop.hive.metastore.Warehouse; @@ -6816,6 +6815,24 @@ 
protected Operator genFileSinkPlan(String dest, QB qb, Operator input) checkImmutableTable(qb, dest_tab, dest_path, false); + // check for partition + List parts = dest_tab.getPartitionKeys(); + if (parts != null && parts.size() > 0) { // table is partitioned + if (partSpec == null || partSpec.size() == 0) { // user did NOT specify partition + throw new SemanticException(generateErrorMessage( + qb.getParseInfo().getDestForClause(dest), + ErrorMsg.NEED_PARTITION_ERROR.getMsg())); + } + dpCtx = qbm.getDPCtx(dest); + if (dpCtx == null) { + dest_tab.validatePartColumnNames(partSpec, false); + dpCtx = new DynamicPartitionCtx(dest_tab, partSpec, + conf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME), + conf.getIntVar(HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTSPERNODE)); + qbm.setDPCtx(dest, dpCtx); + } + } + // Check for dynamic partitions. dpCtx = checkDynPart(qb, qbm, dest_tab, partSpec, dest); if (dpCtx != null && dpCtx.getSPPath() != null) { @@ -7402,7 +7419,7 @@ private void checkImmutableTable(QB qb, Table dest_tab, Path dest_path, boolean } try { FileSystem fs = dest_path.getFileSystem(conf); - if (! MetaStoreUtils.isDirEmpty(fs,dest_path)){ + if (! org.apache.hadoop.hive.metastore.utils.FileUtils.isDirEmpty(fs,dest_path)){ LOG.warn("Attempted write into an immutable table : " + dest_tab.getTableName() + " : " + dest_path); throw new SemanticException( diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java index 9e130dc6d1..f6fb42c9b6 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java @@ -28,7 +28,6 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Order; @@ -36,6 +35,7 @@ import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.exec.DDLTask; import org.apache.hadoop.hive.ql.exec.Utilities; diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java index 92c97681dd..7f536feda2 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java @@ -23,7 +23,6 @@ import java.util.Objects; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.plan.Explain.Level; diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java index 4683c9c7a9..66a4aa11be 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java @@ -19,7 +19,6 @@ package org.apache.hadoop.hive.ql.plan; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.ql.exec.Utilities; import 
org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hadoop.hive.ql.plan.Explain.Level; diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/PartitionDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/PartitionDesc.java index d3a43dfe84..915bddc928 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/PartitionDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/PartitionDesc.java @@ -31,8 +31,8 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.CopyOnFirstWriteProperties; import org.apache.hadoop.hive.common.StringInternUtils; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils; import org.apache.hadoop.hive.ql.io.HiveOutputFormat; @@ -87,7 +87,7 @@ public PartitionDesc(final Partition part, final TableDesc tableDesc) throws Hiv if (Utilities.isInputFileFormatSelfDescribing(this)) { // if IF is self describing no need to send column info per partition, since its not used anyway. Table tbl = part.getTable(); - setProperties(MetaStoreUtils.getSchemaWithoutCols(part.getTPartition().getSd(), part.getTPartition().getSd(), + setProperties(MetaStoreUtils.getSchemaWithoutCols(part.getTPartition().getSd(), part.getParameters(), tbl.getDbName(), tbl.getTableName(), tbl.getPartitionKeys())); } else { setProperties(part.getMetadataFromPartitionSchema()); diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java index 81cc27918d..226118a14c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java @@ -34,9 +34,10 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.llap.LlapOutputFormat; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; +import org.apache.hadoop.hive.metastore.HiveMetaStoreUtils; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.exec.ColumnInfo; import org.apache.hadoop.hive.ql.exec.RowSchema; import org.apache.hadoop.hive.ql.exec.TableScanOperator; @@ -591,7 +592,7 @@ public static TableDesc getReduceValueTableDesc(List fieldSchemas) // last one for union column. 
List schemas = new ArrayList(length + 1); for (int i = 0; i < length; i++) { - schemas.add(MetaStoreUtils.getFieldSchemaFromTypeInfo( + schemas.add(HiveMetaStoreUtils.getFieldSchemaFromTypeInfo( fieldPrefix + outputColumnNames.get(i), cols.get(i).getTypeInfo())); } @@ -608,7 +609,7 @@ public static TableDesc getReduceValueTableDesc(List fieldSchemas) unionTypes.add(TypeInfoFactory.getStructTypeInfo(names, types)); } if (outputColumnNames.size() - length > 0) { - schemas.add(MetaStoreUtils.getFieldSchemaFromTypeInfo( + schemas.add(HiveMetaStoreUtils.getFieldSchemaFromTypeInfo( fieldPrefix + outputColumnNames.get(length), TypeInfoFactory.getUnionTypeInfo(unionTypes))); } @@ -624,7 +625,7 @@ public static TableDesc getReduceValueTableDesc(List fieldSchemas) String fieldPrefix) { List schemas = new ArrayList(cols.size()); for (int i = 0; i < cols.size(); i++) { - schemas.add(MetaStoreUtils.getFieldSchemaFromTypeInfo(fieldPrefix + schemas.add(HiveMetaStoreUtils.getFieldSchemaFromTypeInfo(fieldPrefix + outputColumnNames.get(i + start), cols.get(i).getTypeInfo())); } return schemas; @@ -637,7 +638,7 @@ public static TableDesc getReduceValueTableDesc(List fieldSchemas) List cols, String fieldPrefix) { List schemas = new ArrayList(cols.size()); for (int i = 0; i < cols.size(); i++) { - schemas.add(MetaStoreUtils.getFieldSchemaFromTypeInfo(fieldPrefix + i, + schemas.add(HiveMetaStoreUtils.getFieldSchemaFromTypeInfo(fieldPrefix + i, cols.get(i).getTypeInfo())); } return schemas; @@ -667,7 +668,7 @@ public static TableDesc getReduceValueTableDesc(List fieldSchemas) if (name.equals(String.valueOf(i))) { name = fieldPrefix + name; } - schemas.add(MetaStoreUtils.getFieldSchemaFromTypeInfo(name, cols.get(i) + schemas.add(HiveMetaStoreUtils.getFieldSchemaFromTypeInfo(name, cols.get(i) .getType())); } return schemas; diff --git ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationPreEventListener.java ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationPreEventListener.java index 520257101a..8569ffb102 100644 --- ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationPreEventListener.java +++ ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationPreEventListener.java @@ -23,6 +23,7 @@ import com.google.common.base.Function; import com.google.common.collect.Iterators; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -30,7 +31,6 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.MetaStorePreEventListener; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.InvalidOperationException; diff --git ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java index ecf3b9d7ee..69b076a08a 100644 --- ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java +++ ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java @@ -34,10 +34,10 @@ import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.Warehouse; import 
org.apache.hadoop.hive.metastore.api.EnvironmentContext; import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.CompilationOpContext; import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.exec.Task; @@ -201,7 +201,7 @@ private String getAggregationPrefix(Table table, Partition partition) throws Met private String getAggregationPrefix0(Table table, Partition partition) throws MetaException { // prefix is of the form dbName.tblName - String prefix = table.getDbName() + "." + org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.encodeTableName(table.getTableName()); + String prefix = table.getDbName() + "." + MetaStoreUtils.encodeTableName(table.getTableName()); // FIXME: this is a secret contract; reusein getAggrKey() creates a more closer relation to the StatsGatherer // prefix = work.getAggKey(); prefix = prefix.toLowerCase(); diff --git ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java index 8daa5c05f6..ab3cfc88fb 100644 --- ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java +++ ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java @@ -23,7 +23,6 @@ import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.common.ValidTxnList; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.api.CompactionRequest; import org.apache.hadoop.hive.metastore.api.CompactionResponse; import org.apache.hadoop.hive.metastore.api.CompactionType; diff --git ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java index 319e0eedb0..2a1545f1da 100644 --- ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java +++ ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java @@ -95,7 +95,7 @@ private void dumpBucketData(Table table, long txnId, int stmtId, int bucketNum) Path bucket = AcidUtils.createBucketFile(new Path(new Path(getWarehouseDir(), table.toString().toLowerCase()), AcidUtils.deltaSubdir(txnId, txnId, stmtId)), bucketNum); FileOutputStream delta = new FileOutputStream(testName.getMethodName() + "_" + bucket.getParent().getName() + "_" + bucket.getName()); // try { -// FileDump.printJsonData(hiveConf, bucket.toString(), delta); +// FileDump.printJsonData(conf, bucket.toString(), delta); // } // catch(FileNotFoundException ex) { ;//this happens if you change BUCKET_COUNT @@ -350,7 +350,6 @@ public void testTimeOutReaper() throws Exception { hiveConf.setTimeVar(HiveConf.ConfVars.HIVE_TXN_TIMEOUT, 1, TimeUnit.SECONDS); // Have to reset the conf when we change it so that the change takes affect houseKeeperService.setConf(hiveConf); - //hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILHEARTBEATER, true); runStatementOnDriver("start transaction"); runStatementOnDriver("select count(*) from " + Table.ACIDTBL + " where a = 17"); pause(750); diff --git service/pom.xml service/pom.xml index 6e5ce8e39a..9ad755537d 100644 --- service/pom.xml +++ service/pom.xml @@ -252,7 +252,7 @@ org.apache.hive - hive-metastore + hive-standalone-metastore ${project.version} test-jar test diff --git service/src/java/org/apache/hive/service/cli/thrift/EmbeddedThriftBinaryCLIService.java service/src/java/org/apache/hive/service/cli/thrift/EmbeddedThriftBinaryCLIService.java index e9a5830f18..fbfd57a2ef 100644 --- 
service/src/java/org/apache/hive/service/cli/thrift/EmbeddedThriftBinaryCLIService.java +++ service/src/java/org/apache/hive/service/cli/thrift/EmbeddedThriftBinaryCLIService.java @@ -38,7 +38,7 @@ public EmbeddedThriftBinaryCLIService() { @Override public synchronized void init(HiveConf hiveConf) { // Null HiveConf is passed in jdbc driver side code since driver side is supposed to be - // independent of hiveConf object. Create new HiveConf object here in this case. + // independent of conf object. Create new HiveConf object here in this case. if (hiveConf == null) { hiveConf = new HiveConf(); } diff --git service/src/java/org/apache/hive/service/server/HiveServer2.java service/src/java/org/apache/hive/service/server/HiveServer2.java index 89f7356c0d..223be6aa32 100644 --- service/src/java/org/apache/hive/service/server/HiveServer2.java +++ service/src/java/org/apache/hive/service/server/HiveServer2.java @@ -158,7 +158,7 @@ public void run() { } addService(thriftCLIService); super.init(hiveConf); - // Set host name in hiveConf + // Set host name in conf try { hiveConf.set(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_BIND_HOST.varname, getServerHost()); } catch (Throwable t) { diff --git service/src/test/org/apache/hive/service/cli/thrift/ThriftCliServiceTestWithCookie.java service/src/test/org/apache/hive/service/cli/thrift/ThriftCliServiceTestWithCookie.java index 1dac783321..1ac5da8787 100644 --- service/src/test/org/apache/hive/service/cli/thrift/ThriftCliServiceTestWithCookie.java +++ service/src/test/org/apache/hive/service/cli/thrift/ThriftCliServiceTestWithCookie.java @@ -32,7 +32,6 @@ import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; import org.apache.hive.service.Service; import org.apache.hive.service.auth.HiveAuthConstants; -import org.apache.hive.service.auth.HiveAuthConstants.AuthTypes; import org.apache.hive.service.cli.OperationHandle; import org.apache.hive.service.cli.OperationState; import org.apache.hive.service.cli.OperationStatus; diff --git standalone-metastore/pom.xml standalone-metastore/pom.xml index 1701182dcf..cce3282165 100644 --- standalone-metastore/pom.xml +++ standalone-metastore/pom.xml @@ -208,6 +208,22 @@ + + org.apache.hadoop + hadoop-mapreduce-client-core + ${hadoop.version} + true + + + org.slf4j + slf4j-log4j12 + + + commmons-logging + commons-logging + + + org.apache.hive @@ -502,7 +518,7 @@ jdbc:derby:memory:${test.tmp.dir}/junit_metastore_db;create=true false ${test.tmp.dir} - ${test.warehouse.scheme}${test.warehouse.dir} + ${test.warehouse.scheme}${test.warehouse.dir} ${log4j.conf.dir} @@ -532,6 +548,17 @@ + org.apache.maven.plugins + maven-jar-plugin + + + + test-jar + + + + + org.codehaus.mojo build-helper-maven-plugin 3.0.0 diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ColumnType.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ColumnType.java index a41b5ee462..d5dea4dc3c 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ColumnType.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ColumnType.java @@ -86,8 +86,6 @@ public static final String COLUMN_NAME_DELIMITER = "column.name.delimiter"; - public static final String SERIALIZATION_FORMAT = "serialization.format"; - public static final Set PrimitiveTypes = StringUtils.asSet( VOID_TYPE_NAME, BOOLEAN_TYPE_NAME, @@ -239,4 +237,65 @@ public static boolean areColTypesCompatible(String from, String to) { } return false; } + + // These aren't column types, they are info for how things 
are stored in thrift. + // It didn't seem useful to create another Constants class just for these though. + public static final String SERIALIZATION_FORMAT = "serialization.format"; + + public static final String SERIALIZATION_LIB = "serialization.lib"; + + public static final String SERIALIZATION_DDL = "serialization.ddl"; + + public static final char COLUMN_COMMENTS_DELIMITER = '\0'; + + private static HashMap typeToThriftTypeMap; + static { + typeToThriftTypeMap = new HashMap<>(); + typeToThriftTypeMap.put(BOOLEAN_TYPE_NAME, "bool"); + typeToThriftTypeMap.put(TINYINT_TYPE_NAME, "byte"); + typeToThriftTypeMap.put(SMALLINT_TYPE_NAME, "i16"); + typeToThriftTypeMap.put(INT_TYPE_NAME, "i32"); + typeToThriftTypeMap.put(BIGINT_TYPE_NAME, "i64"); + typeToThriftTypeMap.put(DOUBLE_TYPE_NAME, "double"); + typeToThriftTypeMap.put(FLOAT_TYPE_NAME, "float"); + typeToThriftTypeMap.put(LIST_TYPE_NAME, "list"); + typeToThriftTypeMap.put(MAP_TYPE_NAME, "map"); + typeToThriftTypeMap.put(STRING_TYPE_NAME, "string"); + typeToThriftTypeMap.put(BINARY_TYPE_NAME, "binary"); + // These 4 types are not supported yet. + // We should define a complex type date in thrift that contains a single int + // member, and DynamicSerDe + // should convert it to date type at runtime. + typeToThriftTypeMap.put(DATE_TYPE_NAME, "date"); + typeToThriftTypeMap.put(DATETIME_TYPE_NAME, "datetime"); + typeToThriftTypeMap.put(TIMESTAMP_TYPE_NAME, "timestamp"); + typeToThriftTypeMap.put(DECIMAL_TYPE_NAME, "decimal"); + typeToThriftTypeMap.put(INTERVAL_YEAR_MONTH_TYPE_NAME, INTERVAL_YEAR_MONTH_TYPE_NAME); + typeToThriftTypeMap.put(INTERVAL_DAY_TIME_TYPE_NAME, INTERVAL_DAY_TIME_TYPE_NAME); + } + + /** + * Convert type to ThriftType. We do that by tokenizing the type and convert + * each token. + */ + public static String typeToThriftType(String type) { + StringBuilder thriftType = new StringBuilder(); + int last = 0; + boolean lastAlphaDigit = Character.isLetterOrDigit(type.charAt(last)); + for (int i = 1; i <= type.length(); i++) { + if (i == type.length() + || Character.isLetterOrDigit(type.charAt(i)) != lastAlphaDigit) { + String token = type.substring(last, i); + last = i; + String thriftToken = typeToThriftTypeMap.get(token); + thriftType.append(thriftToken == null ? token : thriftToken); + lastAlphaDigit = !lastAlphaDigit; + } + } + return thriftType.toString(); + } + + public static String getListType(String t) { + return "array<" + t + ">"; + } } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java index 13967d5186..89354a2d34 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java @@ -67,7 +67,7 @@ */ public class HiveAlterHandler implements AlterHandler { - protected Configuration hiveConf; + protected Configuration conf; private static final Logger LOG = LoggerFactory.getLogger(HiveAlterHandler.class .getName()); @@ -76,13 +76,13 @@ // is not in the scope of the fix for HIVE-17942. 
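[Editor's aside, not part of the patch] The ColumnType additions above move the serialization constants and the typeToThriftType/getListType helpers into the standalone metastore. typeToThriftType walks the type string, splits it at letter-or-digit boundaries, and maps each alphanumeric token through typeToThriftTypeMap while passing punctuation through unchanged. A minimal sketch of how the relocated methods might be exercised; the expected outputs follow from the mapping table shown in the hunk:

    // Illustrative sketch only, not part of HIVE-17980.
    import org.apache.hadoop.hive.metastore.ColumnType;

    public class ColumnTypeThriftExample {
      public static void main(String[] args) {
        // Alphanumeric tokens are looked up in typeToThriftTypeMap; "<", ">" and "," pass through.
        System.out.println(ColumnType.typeToThriftType("int"));             // i32
        System.out.println(ColumnType.typeToThriftType("map<string,int>")); // map<string,i32>
        // Tokens with no mapping are emitted unchanged.
        System.out.println(ColumnType.typeToThriftType("varchar(10)"));     // varchar(10)
        // getListType simply wraps the element type.
        System.out.println(ColumnType.getListType("string"));               // array<string>
      }
    }
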
@Override public Configuration getConf() { - return hiveConf; + return conf; } @Override @SuppressWarnings("nls") public void setConf(Configuration conf) { - hiveConf = conf; + this.conf = conf; } @Override @@ -266,7 +266,7 @@ public void alterTable(RawStore msdb, Warehouse wh, String dbname, if (dataWasMoved) { int partsToProcess = parts.size(); - int partitionBatchSize = MetastoreConf.getIntVar(hiveConf, + int partitionBatchSize = MetastoreConf.getIntVar(handler.getConf(), MetastoreConf.ConfVars.BATCH_RETRIEVE_MAX); int batchStart = 0; while (partsToProcess > 0) { @@ -293,7 +293,7 @@ public void alterTable(RawStore msdb, Warehouse wh, String dbname, } } else { // operations other than table rename - if (MetaStoreUtils.requireCalStats(handler.getConf(), null, null, newt, environmentContext) && + if (MetaStoreUtils.requireCalStats(null, null, newt, environmentContext) && !isPartitionedTable) { Database db = msdb.getDatabase(newDbName); // Update table stats. For partitioned table, we update stats in alterPartition() @@ -447,7 +447,7 @@ public Partition alterPartition(final RawStore msdb, Warehouse wh, final String try { msdb.openTransaction(); oldPart = msdb.getPartition(dbname, name, new_part.getValues()); - if (MetaStoreUtils.requireCalStats(handler.getConf(), oldPart, new_part, tbl, environmentContext)) { + if (MetaStoreUtils.requireCalStats(oldPart, new_part, tbl, environmentContext)) { // if stats are same, no need to update if (MetaStoreUtils.isFastStatsSame(oldPart, new_part)) { MetaStoreUtils.updateBasicState(environmentContext, new_part.getParameters()); @@ -580,7 +580,7 @@ public Partition alterPartition(final RawStore msdb, Warehouse wh, final String new_part.getSd().setLocation(oldPart.getSd().getLocation()); } - if (MetaStoreUtils.requireCalStats(handler.getConf(), oldPart, new_part, tbl, environmentContext)) { + if (MetaStoreUtils.requireCalStats(oldPart, new_part, tbl, environmentContext)) { MetaStoreUtils.updatePartitionStatsFast(new_part, wh, false, true, environmentContext); } @@ -672,7 +672,7 @@ public Partition alterPartition(final RawStore msdb, Warehouse wh, final String oldParts.add(oldTmpPart); partValsList.add(tmpPart.getValues()); - if (MetaStoreUtils.requireCalStats(handler.getConf(), oldTmpPart, tmpPart, tbl, environmentContext)) { + if (MetaStoreUtils.requireCalStats(oldTmpPart, tmpPart, tbl, environmentContext)) { // Check if stats are same, no need to update if (MetaStoreUtils.isFastStatsSame(oldTmpPart, tmpPart)) { MetaStoreUtils.updateBasicState(environmentContext, tmpPart.getParameters()); diff --git metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java similarity index 93% rename from metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java index 4afc03bb36..fc254c6f53 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ package org.apache.hadoop.hive.metastore; import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; -import static org.apache.hadoop.hive.metastore.MetaStoreUtils.isIndexTable; import java.io.IOException; import java.lang.reflect.Constructor; @@ -49,25 +48,22 @@ import javax.security.auth.login.LoginException; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hive.common.ObjectPair; import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.common.ValidTxnList; -import org.apache.hadoop.hive.common.auth.HiveAuthUtils; -import org.apache.hadoop.hive.common.classification.InterfaceAudience; -import org.apache.hadoop.hive.common.classification.InterfaceAudience.Public; -import org.apache.hadoop.hive.common.classification.InterfaceStability.Unstable; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.conf.HiveConf.ConfVars; -import org.apache.hadoop.hive.conf.HiveConfUtil; import org.apache.hadoop.hive.metastore.api.*; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; import org.apache.hadoop.hive.metastore.txn.TxnUtils; import org.apache.hadoop.hive.metastore.utils.SecurityUtils; -import org.apache.hadoop.hive.shims.Utils; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; +import org.apache.hadoop.hive.metastore.utils.ObjectPair; +import org.apache.hadoop.hive.metastore.utils.SecurityUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.StringUtils; import org.apache.thrift.TApplicationException; @@ -92,8 +88,8 @@ * For users who require retry mechanism when the connection between metastore and client is * broken, RetryingMetaStoreClient class should be used. */ -@Public -@Unstable +@InterfaceAudience.Public +@InterfaceStability.Evolving public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable { /** * Capabilities of the current client. If this client talks to a MetaStore server in a manner @@ -112,7 +108,7 @@ private boolean isConnected = false; private URI metastoreUris[]; private final HiveMetaHookLoader hookLoader; - protected final HiveConf conf; // Keep a copy of HiveConf so if Session conf changes, we may need to get a new HMS client. + protected final Configuration conf; // Keep a copy of HiveConf so if Session conf changes, we may need to get a new HMS client. 
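[Editor's aside, not part of the patch] The HiveMetaStoreClient hunks around this point replace the HiveConf field and constructors with a plain Hadoop Configuration resolved through MetastoreConf (the constructor changes follow just below). A rough sketch, under the new signatures, of how a caller might build a client; the thrift URI is a made-up placeholder, and getAllDatabases is only used as a convenient smoke test:

    // Illustrative sketch only; thrift://metastore-host:9083 is a hypothetical endpoint.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
    import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;

    public class StandaloneClientExample {
      public static void main(String[] args) throws Exception {
        // newMetastoreConf() replaces "new HiveConf(HiveMetaStoreClient.class)" as the default.
        Configuration conf = MetastoreConf.newMetastoreConf();
        // THRIFT_URIS replaces HiveConf.ConfVars.METASTOREURIS; leave it unset for an embedded metastore.
        MetastoreConf.setVar(conf, ConfVars.THRIFT_URIS, "thrift://metastore-host:9083");
        IMetaStoreClient client = new HiveMetaStoreClient(conf);
        try {
          System.out.println(client.getAllDatabases());
        } finally {
          client.close();
        }
      }
    }
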
protected boolean fastpath = false; private String tokenStrForm; private final boolean localMetaStore; @@ -130,63 +126,53 @@ static final protected Logger LOG = LoggerFactory.getLogger(HiveMetaStoreClient.class); - public HiveMetaStoreClient(HiveConf conf) throws MetaException { + public HiveMetaStoreClient(Configuration conf) throws MetaException { this(conf, null, true); } - public HiveMetaStoreClient(HiveConf conf, HiveMetaHookLoader hookLoader) throws MetaException { + public HiveMetaStoreClient(Configuration conf, HiveMetaHookLoader hookLoader) throws MetaException { this(conf, hookLoader, true); } - public HiveMetaStoreClient(HiveConf conf, HiveMetaHookLoader hookLoader, Boolean allowEmbedded) + public HiveMetaStoreClient(Configuration conf, HiveMetaHookLoader hookLoader, Boolean allowEmbedded) throws MetaException { this.hookLoader = hookLoader; if (conf == null) { - conf = new HiveConf(HiveMetaStoreClient.class); + conf = MetastoreConf.newMetastoreConf(); this.conf = conf; } else { - this.conf = new HiveConf(conf); + this.conf = new Configuration(conf); } - version = HiveConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST) ? TEST_VERSION : VERSION; + version = MetastoreConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST) ? TEST_VERSION : VERSION; filterHook = loadFilterHooks(); - fileMetadataBatchSize = HiveConf.getIntVar( - conf, HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_OBJECTS_MAX); + fileMetadataBatchSize = MetastoreConf.getIntVar( + conf, ConfVars.BATCH_RETRIEVE_OBJECTS_MAX); - String msUri = conf.getVar(ConfVars.METASTOREURIS); - localMetaStore = HiveConfUtil.isEmbeddedMetaStore(msUri); + String msUri = MetastoreConf.getVar(conf, ConfVars.THRIFT_URIS); + localMetaStore = MetastoreConf.isEmbeddedMetaStore(msUri); if (localMetaStore) { if (!allowEmbedded) { throw new MetaException("Embedded metastore is not allowed here. Please configure " - + ConfVars.METASTOREURIS.varname + "; it is currently set to [" + msUri + "]"); + + ConfVars.THRIFT_URIS.toString() + "; it is currently set to [" + msUri + "]"); } // instantiate the metastore server handler directly instead of connecting // through the network - if (conf.getBoolVar(ConfVars.METASTORE_FASTPATH)) { - client = new HiveMetaStore.HMSHandler("hive client", this.conf, true); - fastpath = true; - } else { - client = HiveMetaStore.newRetryingHMSHandler("hive client", this.conf, true); - } + client = HiveMetaStore.newRetryingHMSHandler("hive client", this.conf, true); isConnected = true; snapshotActiveConf(); return; - } else { - if (conf.getBoolVar(ConfVars.METASTORE_FASTPATH)) { - throw new RuntimeException("You can't set hive.metastore.fastpath to true when you're " + - "talking to the thrift metastore service. 
You must run the metastore locally."); - } } // get the number retries - retries = HiveConf.getIntVar(conf, HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES); - retryDelaySeconds = conf.getTimeVar( - ConfVars.METASTORE_CLIENT_CONNECT_RETRY_DELAY, TimeUnit.SECONDS); + retries = MetastoreConf.getIntVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES); + retryDelaySeconds = MetastoreConf.getTimeVar(conf, + ConfVars.CLIENT_CONNECT_RETRY_DELAY, TimeUnit.SECONDS); // user wants file store based configuration - if (conf.getVar(HiveConf.ConfVars.METASTOREURIS) != null) { - String metastoreUrisString[] = conf.getVar( - HiveConf.ConfVars.METASTOREURIS).split(","); + if (MetastoreConf.getVar(conf, ConfVars.THRIFT_URIS) != null) { + String metastoreUrisString[] = MetastoreConf.getVar(conf, + ConfVars.THRIFT_URIS).split(","); metastoreUris = new URI[metastoreUrisString.length]; try { int i = 0; @@ -245,7 +231,7 @@ public Void run() throws Exception { String delegationTokenStr = getDelegationToken(proxyUser, proxyUser); SecurityUtils.setTokenStr(UserGroupInformation.getCurrentUser(), delegationTokenStr, delegationTokenPropString); - this.conf.setVar(ConfVars.METASTORE_TOKEN_SIGNATURE, delegationTokenPropString); + MetastoreConf.setVar(this.conf, ConfVars.TOKEN_SIGNATURE, delegationTokenPropString); close(); } catch (Exception e) { LOG.error("Error while setting delegation token for " + proxyUser, e); @@ -261,26 +247,15 @@ public Void run() throws Exception { } private MetaStoreFilterHook loadFilterHooks() throws IllegalStateException { - Class authProviderClass = conf. - getClass(HiveConf.ConfVars.METASTORE_FILTER_HOOK.varname, - DefaultMetaStoreFilterHookImpl.class, + Class authProviderClass = MetastoreConf. + getClass(conf, ConfVars.FILTER_HOOK, DefaultMetaStoreFilterHookImpl.class, MetaStoreFilterHook.class); String msg = "Unable to create instance of " + authProviderClass.getName() + ": "; try { Constructor constructor = authProviderClass.getConstructor(Configuration.class); return constructor.newInstance(conf); - } catch (NoSuchMethodException e) { - throw new IllegalStateException(msg + e.getMessage(), e); - } catch (SecurityException e) { - throw new IllegalStateException(msg + e.getMessage(), e); - } catch (InstantiationException e) { - throw new IllegalStateException(msg + e.getMessage(), e); - } catch (IllegalAccessException e) { - throw new IllegalStateException(msg + e.getMessage(), e); - } catch (IllegalArgumentException e) { - throw new IllegalStateException(msg + e.getMessage(), e); - } catch (InvocationTargetException e) { + } catch (NoSuchMethodException | SecurityException | IllegalAccessException | InstantiationException | IllegalArgumentException | InvocationTargetException e) { throw new IllegalStateException(msg + e.getMessage(), e); } } @@ -311,7 +286,7 @@ public boolean isLocalMetaStore() { } @Override - public boolean isCompatibleWith(HiveConf conf) { + public boolean isCompatibleWith(Configuration conf) { // Make a copy of currentMetaVars, there is a race condition that // currentMetaVars might be changed during the execution of the method Map currentMetaVarsCopy = currentMetaVars; @@ -319,13 +294,13 @@ public boolean isCompatibleWith(HiveConf conf) { return false; // recreate } boolean compatible = true; - for (ConfVars oneVar : HiveConf.metaVars) { + for (ConfVars oneVar : MetastoreConf.metaVars) { // Since metaVars are all of different types, use string for comparison - String oldVar = currentMetaVarsCopy.get(oneVar.varname); - String newVar = conf.get(oneVar.varname, ""); + 
String oldVar = currentMetaVarsCopy.get(oneVar.getVarname()); + String newVar = MetastoreConf.getAsString(conf, oneVar); if (oldVar == null || (oneVar.isCaseSensitive() ? !oldVar.equals(newVar) : !oldVar.equalsIgnoreCase(newVar))) { - LOG.info("Mestastore configuration " + oneVar.varname + + LOG.info("Mestastore configuration " + oneVar.toString() + " changed from " + oldVar + " to " + newVar); compatible = false; } @@ -335,7 +310,7 @@ public boolean isCompatibleWith(HiveConf conf) { @Override public void setHiveAddedJars(String addedJars) { - HiveConf.setVar(conf, ConfVars.HIVEADDEDJARS, addedJars); + MetastoreConf.setVar(conf, ConfVars.ADDED_JARS, addedJars); } @Override @@ -408,12 +383,12 @@ public void renamePartition(final String dbname, final String name, final List(HiveConf.metaVars.length); - for (ConfVars oneVar : HiveConf.metaVars) { - currentMetaVars.put(oneVar.varname, conf.get(oneVar.varname, "")); + currentMetaVars = new HashMap<>(MetastoreConf.metaVars.length); + for (ConfVars oneVar : MetastoreConf.metaVars) { + currentMetaVars.put(oneVar.getVarname(), MetastoreConf.getAsString(conf, oneVar)); } } @@ -599,15 +575,12 @@ public String getMetaConf(String key) throws TException { * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#add_partition(org.apache.hadoop.hive.metastore.api.Partition) */ @Override - public Partition add_partition(Partition new_part) - throws InvalidObjectException, AlreadyExistsException, MetaException, - TException { + public Partition add_partition(Partition new_part) throws TException { return add_partition(new_part, null); } public Partition add_partition(Partition new_part, EnvironmentContext envContext) - throws InvalidObjectException, AlreadyExistsException, MetaException, - TException { + throws TException { Partition p = client.add_partition_with_environment_context(new_part, envContext); return fastpath ? p : deepCopy(p); } @@ -621,18 +594,15 @@ public Partition add_partition(Partition new_part, EnvironmentContext envContext * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#add_partitions(List) */ @Override - public int add_partitions(List new_parts) - throws InvalidObjectException, AlreadyExistsException, MetaException, - TException { + public int add_partitions(List new_parts) throws TException { return client.add_partitions(new_parts); } @Override public List add_partitions( - List parts, boolean ifNotExists, boolean needResults) - throws InvalidObjectException, AlreadyExistsException, MetaException, TException { + List parts, boolean ifNotExists, boolean needResults) throws TException { if (parts.isEmpty()) { - return needResults ? new ArrayList() : null; + return needResults ? 
new ArrayList<>() : null; } Partition part = parts.get(0); AddPartitionsRequest req = new AddPartitionsRequest( @@ -661,14 +631,12 @@ public int add_partitions_pspec(PartitionSpecProxy partitionSpec) throws TExcept */ @Override public Partition appendPartition(String db_name, String table_name, - List part_vals) throws InvalidObjectException, - AlreadyExistsException, MetaException, TException { + List part_vals) throws TException { return appendPartition(db_name, table_name, part_vals, null); } public Partition appendPartition(String db_name, String table_name, List part_vals, - EnvironmentContext envContext) throws InvalidObjectException, AlreadyExistsException, - MetaException, TException { + EnvironmentContext envContext) throws TException { Partition p = client.append_partition_with_environment_context(db_name, table_name, part_vals, envContext); return fastpath ? p : deepCopy(p); @@ -676,13 +644,12 @@ public Partition appendPartition(String db_name, String table_name, List @Override public Partition appendPartition(String dbName, String tableName, String partName) - throws InvalidObjectException, AlreadyExistsException, MetaException, TException { + throws TException { return appendPartition(dbName, tableName, partName, null); } public Partition appendPartition(String dbName, String tableName, String partName, - EnvironmentContext envContext) throws InvalidObjectException, AlreadyExistsException, - MetaException, TException { + EnvironmentContext envContext) throws TException { Partition p = client.append_partition_by_name_with_environment_context(dbName, tableName, partName, envContext); return fastpath ? p : deepCopy(p); @@ -926,7 +893,7 @@ public boolean dropPartition(String dbName, String tableName, String partName, b } private static EnvironmentContext getEnvironmentContextWithIfPurgeSet() { - Map warehouseOptions = new HashMap(); + Map warehouseOptions = new HashMap<>(); warehouseOptions.put("ifPurge", "TRUE"); return new EnvironmentContext(warehouseOptions); } @@ -985,7 +952,7 @@ public boolean dropPartition(String db_name, String tbl_name, List part_ List> partExprs, PartitionDropOptions options) throws TException { RequestPartsSpec rps = new RequestPartsSpec(); - List exprs = new ArrayList(partExprs.size()); + List exprs = new ArrayList<>(partExprs.size()); for (ObjectPair partExpr : partExprs) { DropPartitionsExpr dpe = new DropPartitionsExpr(); dpe.setExpr(partExpr.getSecond()); @@ -1052,8 +1019,8 @@ public void dropTable(String dbname, String name, boolean deleteData, //build new environmentContext with ifPurge; EnvironmentContext envContext = null; if(ifPurge){ - Map warehouseOptions = null; - warehouseOptions = new HashMap(); + Map warehouseOptions; + warehouseOptions = new HashMap<>(); warehouseOptions.put("ifPurge", "TRUE"); envContext = new EnvironmentContext(warehouseOptions); } @@ -1112,7 +1079,7 @@ public void dropTable(String dbname, String name, boolean deleteData, } return; } - if (isIndexTable(tbl)) { + if (MetaStoreUtils.isIndexTable(tbl)) { throw new UnsupportedOperationException("Cannot drop index tables"); } HiveMetaHook hook = getHook(tbl); @@ -1189,7 +1156,7 @@ public boolean dropType(String type) throws NoSuchObjectException, MetaException Map result = null; Map fromClient = client.get_type_all(name); if (fromClient != null) { - result = new LinkedHashMap(); + result = new LinkedHashMap<>(); for (String key : fromClient.keySet()) { result.put(key, deepCopy(fromClient.get(key))); } @@ -1312,7 +1279,7 @@ public boolean listPartitionsByExpr(String db_name, 
String tbl_name, byte[] expr if (max_parts >= 0) { req.setMaxParts(max_parts); } - PartitionsByExprResult r = null; + PartitionsByExprResult r; try { r = client.get_partitions_by_expr(req); } catch (TApplicationException te) { @@ -1489,7 +1456,7 @@ public Type getType(String name) throws NoSuchObjectException, MetaException, TE sources.put(meta.getDbName() + "." + meta.getTableName(), meta); List tables = dbTables.get(meta.getDbName()); if (tables == null) { - dbTables.put(meta.getDbName(), tables = new ArrayList()); + dbTables.put(meta.getDbName(), tables = new ArrayList<>()); } tables.add(meta.getTableName()); } @@ -1809,7 +1776,7 @@ public boolean deleteTableColumnStatistics(String dbName, String tableName, Stri throws MetaException, TException, UnknownTableException, UnknownDBException { EnvironmentContext envCxt = null; - String addedJars = conf.getVar(ConfVars.HIVEADDEDJARS); + String addedJars = MetastoreConf.getVar(conf, ConfVars.ADDED_JARS); if(org.apache.commons.lang.StringUtils.isNotBlank(addedJars)) { Map props = new HashMap(); props.put("hive.added.jars.path", addedJars); @@ -2104,7 +2071,7 @@ public String getDelegationToken(String renewerKerberosPrincipalName) throws MetaException, TException, IOException { //a convenience method that makes the intended owner for the delegation //token request the current user - String owner = conf.getUser(); + String owner = SecurityUtils.getUser(); return getDelegationToken(owner, renewerKerberosPrincipalName); } @@ -2493,7 +2460,7 @@ public AggrStats getAggrColStatsFor(String dbName, String tblName, List colNames, List partNames) throws NoSuchObjectException, MetaException, TException { if (colNames.isEmpty() || partNames.isEmpty()) { LOG.debug("Columns is empty or partNames is empty : Short-circuiting stats eval on client side."); - return new AggrStats(new ArrayList(),0); // Nothing to aggregate + return new AggrStats(new ArrayList<>(),0); // Nothing to aggregate } PartitionsStatsRequest req = new PartitionsStatsRequest(dbName, tblName, colNames, partNames); return client.get_aggr_stats_for(req); @@ -2612,7 +2579,7 @@ public void putFileMetadata(List fileIds, List metadata) throw } @Override - public boolean isSameConfObj(HiveConf c) { + public boolean isSameConfObj(Configuration c) { return conf == c; } diff --git metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java similarity index 99% rename from metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java index 40e79306a0..573ac0173d 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -27,13 +27,11 @@ import java.util.Map; import java.util.Map.Entry; -import org.apache.hadoop.hive.common.ObjectPair; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.common.ValidTxnList; -import org.apache.hadoop.hive.common.classification.InterfaceAudience; -import org.apache.hadoop.hive.common.classification.InterfaceAudience.Public; -import org.apache.hadoop.hive.common.classification.InterfaceStability.Evolving; import org.apache.hadoop.hive.common.classification.RetrySemantics; -import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.annotation.NoReconnect; import org.apache.hadoop.hive.metastore.api.AggrStats; import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; @@ -110,20 +108,21 @@ import org.apache.hadoop.hive.metastore.api.WMMapping; import org.apache.hadoop.hive.metastore.api.WMPool; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; +import org.apache.hadoop.hive.metastore.utils.ObjectPair; import org.apache.thrift.TException; /** * Wrapper around hive metastore thrift api */ -@Public -@Evolving +@InterfaceAudience.Public +@InterfaceStability.Evolving public interface IMetaStoreClient { /** * Returns whether current client is compatible with conf argument or not * @return */ - boolean isCompatibleWith(HiveConf conf); + boolean isCompatibleWith(Configuration conf); /** * Set added jars path info to MetaStoreClient. @@ -807,8 +806,8 @@ boolean dropPartition(String db_name, String tbl_name, List part_vals, PartitionDropOptions options) throws TException; List dropPartitions(String dbName, String tblName, - List> partExprs, boolean deleteData, - boolean ifExists) throws NoSuchObjectException, MetaException, TException; + List> partExprs, boolean deleteData, + boolean ifExists) throws NoSuchObjectException, MetaException, TException; List dropPartitions(String dbName, String tblName, List> partExprs, boolean deleteData, @@ -1725,7 +1724,7 @@ public AggrStats getAggrColStatsFor(String dbName, String tblName, */ void putFileMetadata(List fileIds, List metadata) throws TException; - boolean isSameConfObj(HiveConf c); + boolean isSameConfObj(Configuration c); boolean cacheFileMetadata(String dbName, String tableName, String partName, boolean allParts) throws TException; diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStoreProxy.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStoreProxy.java index 2fd22683ef..0add00369a 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStoreProxy.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStoreProxy.java @@ -84,7 +84,7 @@ public static RawStore getProxy(Configuration hiveConf, Configuration conf, Stri private void init() throws MetaException { // Using the hook on startup ensures that the hook always has priority // over settings in *.xml. The thread local conf needs to be used because at this point - // it has already been initialized using hiveConf. + // it has already been initialized using conf. 
MetaStoreInit.updateConnectionURL(hiveConf, getConf(), null, metaStoreInitData); } diff --git metastore/src/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java similarity index 84% rename from metastore/src/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java index 9a6d808856..f97f638ba6 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,15 +27,17 @@ import java.lang.reflect.UndeclaredThrowableException; import java.security.PrivilegedActionException; import java.security.PrivilegedExceptionAction; -import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.TimeUnit; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.common.classification.RetrySemantics; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; +import org.apache.hadoop.hive.metastore.utils.JavaUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hive.common.classification.InterfaceAudience.Public; -import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.annotation.NoReconnect; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.security.UserGroupInformation; @@ -53,7 +55,7 @@ * each call. 
* */ -@Public +@InterfaceAudience.Public public class RetryingMetaStoreClient implements InvocationHandler { private static final Logger LOG = LoggerFactory.getLogger(RetryingMetaStoreClient.class.getName()); @@ -68,9 +70,9 @@ private boolean localMetaStore; - protected RetryingMetaStoreClient(HiveConf hiveConf, Class[] constructorArgTypes, - Object[] constructorArgs, ConcurrentHashMap metaCallTimeMap, - Class msClientClass) throws MetaException { + protected RetryingMetaStoreClient(Configuration conf, Class[] constructorArgTypes, + Object[] constructorArgs, ConcurrentHashMap metaCallTimeMap, + Class msClientClass) throws MetaException { this.ugi = getUGI(); @@ -78,20 +80,19 @@ protected RetryingMetaStoreClient(HiveConf hiveConf, Class[] constructorArgTy LOG.warn("RetryingMetaStoreClient unable to determine current user UGI."); } - this.retryLimit = hiveConf.getIntVar(HiveConf.ConfVars.METASTORETHRIFTFAILURERETRIES); - this.retryDelaySeconds = hiveConf.getTimeVar( - HiveConf.ConfVars.METASTORE_CLIENT_CONNECT_RETRY_DELAY, TimeUnit.SECONDS); + this.retryLimit = MetastoreConf.getIntVar(conf, ConfVars.THRIFT_FAILURE_RETRIES); + this.retryDelaySeconds = MetastoreConf.getTimeVar(conf, + ConfVars.CLIENT_CONNECT_RETRY_DELAY, TimeUnit.SECONDS); this.metaCallTimeMap = metaCallTimeMap; - this.connectionLifeTimeInMillis = hiveConf.getTimeVar( - HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_LIFETIME, TimeUnit.MILLISECONDS); + this.connectionLifeTimeInMillis = MetastoreConf.getTimeVar(conf, + ConfVars.CLIENT_SOCKET_LIFETIME, TimeUnit.MILLISECONDS); this.lastConnectionTime = System.currentTimeMillis(); - String msUri = hiveConf.getVar(HiveConf.ConfVars.METASTOREURIS); + String msUri = MetastoreConf.getVar(conf, ConfVars.THRIFT_URIS); localMetaStore = (msUri == null) || msUri.trim().isEmpty(); reloginExpiringKeytabUser(); - this.base = (IMetaStoreClient) MetaStoreUtils.newInstance( - msClientClass, constructorArgTypes, constructorArgs); + this.base = JavaUtils.newInstance(msClientClass, constructorArgTypes, constructorArgs); LOG.info("RetryingMetaStoreClient proxy=" + msClientClass + " ugi=" + this.ugi + " retries=" + this.retryLimit + " delay=" + this.retryDelaySeconds @@ -99,24 +100,24 @@ protected RetryingMetaStoreClient(HiveConf hiveConf, Class[] constructorArgTy } public static IMetaStoreClient getProxy( - HiveConf hiveConf, boolean allowEmbedded) throws MetaException { - return getProxy(hiveConf, new Class[]{HiveConf.class, HiveMetaHookLoader.class, Boolean.class}, + Configuration hiveConf, boolean allowEmbedded) throws MetaException { + return getProxy(hiveConf, new Class[]{Configuration.class, HiveMetaHookLoader.class, Boolean.class}, new Object[]{hiveConf, null, allowEmbedded}, null, HiveMetaStoreClient.class.getName() ); } @VisibleForTesting - public static IMetaStoreClient getProxy(HiveConf hiveConf, HiveMetaHookLoader hookLoader, + public static IMetaStoreClient getProxy(Configuration hiveConf, HiveMetaHookLoader hookLoader, String mscClassName) throws MetaException { return getProxy(hiveConf, hookLoader, null, mscClassName, true); } - public static IMetaStoreClient getProxy(HiveConf hiveConf, HiveMetaHookLoader hookLoader, + public static IMetaStoreClient getProxy(Configuration hiveConf, HiveMetaHookLoader hookLoader, ConcurrentHashMap metaCallTimeMap, String mscClassName, boolean allowEmbedded) throws MetaException { return getProxy(hiveConf, - new Class[] {HiveConf.class, HiveMetaHookLoader.class, Boolean.class}, + new Class[] {Configuration.class, HiveMetaHookLoader.class, Boolean.class}, new 
Object[] {hiveConf, hookLoader, allowEmbedded}, metaCallTimeMap, mscClassName @@ -125,24 +126,24 @@ public static IMetaStoreClient getProxy(HiveConf hiveConf, HiveMetaHookLoader ho /** * This constructor is meant for Hive internal use only. - * Please use getProxy(HiveConf hiveConf, HiveMetaHookLoader hookLoader) for external purpose. + * Please use getProxy(HiveConf conf, HiveMetaHookLoader hookLoader) for external purpose. */ - public static IMetaStoreClient getProxy(HiveConf hiveConf, Class[] constructorArgTypes, + public static IMetaStoreClient getProxy(Configuration hiveConf, Class[] constructorArgTypes, Object[] constructorArgs, String mscClassName) throws MetaException { return getProxy(hiveConf, constructorArgTypes, constructorArgs, null, mscClassName); } /** * This constructor is meant for Hive internal use only. - * Please use getProxy(HiveConf hiveConf, HiveMetaHookLoader hookLoader) for external purpose. + * Please use getProxy(HiveConf conf, HiveMetaHookLoader hookLoader) for external purpose. */ - public static IMetaStoreClient getProxy(HiveConf hiveConf, Class[] constructorArgTypes, + public static IMetaStoreClient getProxy(Configuration hiveConf, Class[] constructorArgTypes, Object[] constructorArgs, ConcurrentHashMap metaCallTimeMap, String mscClassName) throws MetaException { @SuppressWarnings("unchecked") Class baseClass = - (Class)MetaStoreUtils.getClass(mscClassName); + JavaUtils.getClass(mscClassName, IMetaStoreClient.class); RetryingMetaStoreClient handler = new RetryingMetaStoreClient(hiveConf, constructorArgTypes, constructorArgs, @@ -153,9 +154,9 @@ public static IMetaStoreClient getProxy(HiveConf hiveConf, Class[] constructo @Override public Object invoke(Object proxy, Method method, Object[] args) throws Throwable { - Object ret = null; + Object ret; int retriesMade = 0; - TException caughtException = null; + TException caughtException; boolean allowReconnect = ! method.isAnnotationPresent(NoReconnect.class); boolean allowRetry = true; diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java index d18ddc89cb..b46cc38a22 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java @@ -463,6 +463,9 @@ public static ConfVars getMetaConf(String name) { "hive.metastore.event.message.factory", "org.apache.hadoop.hive.metastore.messaging.json.JSONMessageFactory", "Factory class for making encoding and decoding messages in the events generated."), + EVENT_DB_LISTENER_TTL("metastore.event.db.listener.timetolive", + "hive.metastore.event.db.listener.timetolive", 86400, TimeUnit.SECONDS, + "time after which events will be removed from the database listener queue"), EVENT_DB_NOTIFICATION_API_AUTH("metastore.metastore.event.db.notification.api.auth", "hive.metastore.event.db.notification.api.auth", true, "Should metastore do authorization against database notification related APIs such as get_next_notification.\n" + @@ -799,6 +802,19 @@ public static ConfVars getMetaConf(String name) { "internal use only, true when in testing tez"), // We need to track this as some listeners pass it through our config and we need to honor // the system properties. 
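[Editor's aside, not part of the patch] The MetastoreConf additions just above declare each new ConfVars entry with both a metastore.* key and the legacy hive.* key, for example EVENT_DB_LISTENER_TTL with a default of 86400 seconds. A hedged sketch of reading that value through the getTimeVar accessor already used elsewhere in this patch:

    // Illustrative sketch only, not part of HIVE-17980.
    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
    import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;

    public class EventTtlExample {
      public static void main(String[] args) {
        Configuration conf = MetastoreConf.newMetastoreConf();
        // EVENT_DB_LISTENER_TTL is keyed as metastore.event.db.listener.timetolive,
        // with hive.metastore.event.db.listener.timetolive declared as its legacy alias;
        // the default declared in the hunk above is 86400 seconds.
        long ttlSeconds = MetastoreConf.getTimeVar(conf, ConfVars.EVENT_DB_LISTENER_TTL, TimeUnit.SECONDS);
        System.out.println("db listener TTL = " + ttlSeconds + "s");
      }
    }
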
+ HIVE_AUTHORIZATION_MANAGER("hive.security.authorization.manager", + "hive.security.authorization.manager", + "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory", + "The Hive client authorization manager class name. The user defined authorization class should implement \n" + + "interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider."), + HIVE_METASTORE_AUTHENTICATOR_MANAGER("hive.security.metastore.authenticator.manager", + "hive.security.metastore.authenticator.manager", + "org.apache.hadoop.hive.ql.security.HadoopDefaultMetastoreAuthenticator", + "authenticator manager class name to be used in the metastore for authentication. \n" + + "The user defined authenticator should implement interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider."), + HIVE_METASTORE_AUTHORIZATION_AUTH_READS("hive.security.metastore.authorization.auth.reads", + "hive.security.metastore.authorization.auth.reads", true, + "If this is true, metastore authorizer authorizes read actions on database, table"), HIVE_METASTORE_AUTHORIZATION_MANAGER(NO_SUCH_KEY, "hive.security.metastore.authorization.manager", "org.apache.hadoop.hive.ql.security.authorization.DefaultHiveMetastoreAuthorizationProvider", diff --git metastore/src/java/org/apache/hadoop/hive/metastore/messaging/EventUtils.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/EventUtils.java similarity index 97% rename from metastore/src/java/org/apache/hadoop/hive/metastore/messaging/EventUtils.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/EventUtils.java index 8f90c7ac10..7d8c1d49d9 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/messaging/EventUtils.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/EventUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,10 +18,10 @@ */ package org.apache.hadoop.hive.metastore.messaging; -import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.IMetaStoreClient; import org.apache.hadoop.hive.metastore.api.NotificationEvent; import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.messaging.event.filters.DatabaseAndTableFilter; import org.apache.thrift.TException; @@ -55,7 +55,7 @@ public int getBatchSize() throws IOException { if (batchSize == null){ try { batchSize = Integer.parseInt( - msc.getConfigValue(HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_MAX.varname, "50")); + msc.getConfigValue(MetastoreConf.ConfVars.BATCH_RETRIEVE_MAX.toString(), "50")); // TODO: we're asking the metastore what its configuration for this var is - we may // want to revisit to pull from client side instead. The reason I have it this way // is because the metastore is more likely to have a reasonable config for this than @@ -152,7 +152,7 @@ private void fetchNextBatch() throws IOException { } if (batch == null){ - batch = new ArrayList(); + batch = new ArrayList<>(); // instantiate empty list so that we don't error out on iterator fetching. 
// If we're here, then the next check of pos will show our caller that // that we've exhausted our event supply diff --git metastore/src/java/org/apache/hadoop/hive/metastore/messaging/event/filters/AndFilter.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/event/filters/AndFilter.java similarity index 99% rename from metastore/src/java/org/apache/hadoop/hive/metastore/messaging/event/filters/AndFilter.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/event/filters/AndFilter.java index d6429f6040..454d2cc4b2 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/messaging/event/filters/AndFilter.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/event/filters/AndFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git metastore/src/java/org/apache/hadoop/hive/metastore/messaging/event/filters/BasicFilter.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/event/filters/BasicFilter.java similarity index 99% rename from metastore/src/java/org/apache/hadoop/hive/metastore/messaging/event/filters/BasicFilter.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/event/filters/BasicFilter.java index 5294063895..84302d62bf 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/messaging/event/filters/BasicFilter.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/event/filters/BasicFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git metastore/src/java/org/apache/hadoop/hive/metastore/messaging/event/filters/DatabaseAndTableFilter.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/event/filters/DatabaseAndTableFilter.java similarity index 99% rename from metastore/src/java/org/apache/hadoop/hive/metastore/messaging/event/filters/DatabaseAndTableFilter.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/event/filters/DatabaseAndTableFilter.java index 490d3b44d8..0852abd84c 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/messaging/event/filters/DatabaseAndTableFilter.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/event/filters/DatabaseAndTableFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git metastore/src/java/org/apache/hadoop/hive/metastore/messaging/event/filters/EventBoundaryFilter.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/event/filters/EventBoundaryFilter.java similarity index 99% rename from metastore/src/java/org/apache/hadoop/hive/metastore/messaging/event/filters/EventBoundaryFilter.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/event/filters/EventBoundaryFilter.java index 137b4ced2b..988874d712 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/messaging/event/filters/EventBoundaryFilter.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/event/filters/EventBoundaryFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git metastore/src/java/org/apache/hadoop/hive/metastore/messaging/event/filters/MessageFormatFilter.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/event/filters/MessageFormatFilter.java similarity index 99% rename from metastore/src/java/org/apache/hadoop/hive/metastore/messaging/event/filters/MessageFormatFilter.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/event/filters/MessageFormatFilter.java index 4e91ee6aae..b0ce3b9a64 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/messaging/event/filters/MessageFormatFilter.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/event/filters/MessageFormatFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git metastore/src/java/org/apache/hadoop/hive/metastore/tools/HiveMetaTool.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/tools/HiveMetaTool.java similarity index 95% rename from metastore/src/java/org/apache/hadoop/hive/metastore/tools/HiveMetaTool.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/tools/HiveMetaTool.java index 22e246f1c9..f4eacd5fb9 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/tools/HiveMetaTool.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/tools/HiveMetaTool.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -33,10 +33,11 @@ import org.apache.commons.cli.OptionBuilder; import org.apache.commons.cli.Options; import org.apache.commons.cli.ParseException; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.ObjectStore; /** @@ -113,10 +114,10 @@ private void init() { cmdLineOptions.addOption(tablePropKey); } - private void initObjectStore(HiveConf hiveConf) { + private void initObjectStore(Configuration conf) { if (!isObjStoreInitialized) { objStore = new ObjectStore(); - objStore.setConf(hiveConf); + objStore.setConf(conf); isObjStoreInitialized = true; } } @@ -129,8 +130,8 @@ private void shutdownObjectStore() { } private void listFSRoot() { - HiveConf hiveConf = new HiveConf(HiveMetaTool.class); - initObjectStore(hiveConf); + Configuration conf = MetastoreConf.newMetastoreConf(); + initObjectStore(conf); Set hdfsRoots = objStore.listFSRoots(); if (hdfsRoots != null) { @@ -145,12 +146,11 @@ private void listFSRoot() { } private void executeJDOQLSelect(String query) { - HiveConf hiveConf = new HiveConf(HiveMetaTool.class); - initObjectStore(hiveConf); + Configuration conf = MetastoreConf.newMetastoreConf(); + initObjectStore(conf); System.out.println("Executing query: " + query); - ObjectStore.QueryWrapper queryWrapper = new ObjectStore.QueryWrapper(); - try { + try (ObjectStore.QueryWrapper queryWrapper = new ObjectStore.QueryWrapper()) { Collection result = objStore.executeJDOQLSelect(query, queryWrapper); if (result != null) { Iterator iter = result.iterator(); @@ -160,16 +160,14 @@ private void executeJDOQLSelect(String query) { } } else { System.err.println("Encountered error during executeJDOQLSelect -" + - "commit of JDO transaction failed."); + "commit of JDO transaction failed."); } - } finally { - queryWrapper.close(); } } - private long executeJDOQLUpdate(String query) { - HiveConf hiveConf = new HiveConf(HiveMetaTool.class); - initObjectStore(hiveConf); + private void executeJDOQLUpdate(String query) { + Configuration conf = MetastoreConf.newMetastoreConf(); + initObjectStore(conf); System.out.println("Executing query: " + query); long numUpdated = objStore.executeJDOQLUpdate(query); @@ -179,7 +177,6 @@ private long executeJDOQLUpdate(String query) { System.err.println("Encountered error during executeJDOQL -" + "commit of JDO transaction failed."); } - return numUpdated; } private int printUpdateLocations(Map updateLocations) { @@ -194,8 +191,8 @@ private int printUpdateLocations(Map updateLocations) { private void printTblURIUpdateSummary(ObjectStore.UpdateMStorageDescriptorTblURIRetVal retVal, boolean isDryRun) { - String tblName = new String("SDS"); - String fieldName = new String("LOCATION"); + String tblName = "SDS"; + String fieldName = "LOCATION"; if (retVal == null) { System.err.println("Encountered error while executing updateMStorageDescriptorTblURI - " + @@ -232,8 +229,8 @@ private void printTblURIUpdateSummary(ObjectStore.UpdateMStorageDescriptorTblURI private void printDatabaseURIUpdateSummary(ObjectStore.UpdateMDatabaseURIRetVal retVal, boolean isDryRun) { - String tblName = new String("DBS"); - String fieldName = new String("LOCATION_URI"); + String tblName = "DBS"; + String fieldName = "LOCATION_URI"; if (retVal == null) { System.err.println("Encountered error 
while executing updateMDatabaseURI - " + @@ -295,7 +292,7 @@ private void printPropURIUpdateSummary(ObjectStore.UpdatePropURIRetVal retVal, S private void printSerdePropURIUpdateSummary(ObjectStore.UpdateSerdeURIRetVal retVal, String serdePropKey, boolean isDryRun) { - String tblName = new String("SERDE_PARAMS"); + String tblName = "SERDE_PARAMS"; if (retVal == null) { System.err.println("Encountered error while executing updateSerdeURI - " + @@ -327,8 +324,8 @@ private void printSerdePropURIUpdateSummary(ObjectStore.UpdateSerdeURIRetVal ret public void updateFSRootLocation(URI oldURI, URI newURI, String serdePropKey, String tablePropKey, boolean isDryRun) { - HiveConf hiveConf = new HiveConf(HiveMetaTool.class); - initObjectStore(hiveConf); + Configuration conf = MetastoreConf.newMetastoreConf(); + initObjectStore(conf); System.out.println("Looking for LOCATION_URI field in DBS table to update.."); ObjectStore.UpdateMDatabaseURIRetVal updateMDBURIRetVal = objStore.updateMDatabaseURI(oldURI, diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/FileUtils.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/FileUtils.java index 5dcedcda85..b44ff8ce47 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/FileUtils.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/FileUtils.java @@ -53,6 +53,16 @@ public boolean accept(Path p) { return !name.startsWith("_") && !name.startsWith("."); } }; + /** + * Filter that filters out hidden files + */ + private static final PathFilter hiddenFileFilter = new PathFilter() { + @Override + public boolean accept(Path p) { + String name = p.getName(); + return !name.startsWith("_") && !name.startsWith("."); + } + }; /** * Move a particular file or directory to the trash. @@ -424,4 +434,23 @@ public static void makeDir(Path path, Configuration conf) throws MetaException { throw new MetaException("Unable to : " + path); } } + + /** + * Utility method that determines if a specified directory already has + * contents (non-hidden files) or not - useful to determine if an + * immutable table already has contents, for example. 
+ * + * @param path + * @throws IOException + */ + public static boolean isDirEmpty(FileSystem fs, Path path) throws IOException { + + if (fs.exists(path)) { + FileStatus[] status = fs.globStatus(new Path(path, "*"), hiddenFileFilter); + if (status.length > 0) { + return false; + } + } + return true; + } } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java index beee86f572..cde34bcf42 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java @@ -30,6 +30,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.metastore.ColumnType; +import org.apache.hadoop.hive.metastore.HiveMetaStore; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; @@ -52,7 +53,6 @@ import org.apache.hadoop.hive.metastore.columnstats.merge.ColumnStatsMerger; import org.apache.hadoop.hive.metastore.columnstats.merge.ColumnStatsMergerFactory; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; -import org.apache.hadoop.hive.metastore.events.EventCleanerTask; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; import org.apache.hadoop.security.SaslRpcServer; @@ -64,9 +64,13 @@ import javax.annotation.Nullable; import java.io.File; +import java.io.IOException; import java.lang.reflect.InvocationTargetException; import java.math.BigDecimal; import java.math.BigInteger; +import java.net.InetSocketAddress; +import java.net.ServerSocket; +import java.net.Socket; import java.net.URL; import java.net.URLClassLoader; import java.nio.charset.Charset; @@ -81,6 +85,7 @@ import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Properties; import java.util.SortedMap; import java.util.SortedSet; import java.util.TreeMap; @@ -298,8 +303,8 @@ public static void validatePartitionNameCharacters(List partVals, } } - public static String getPartitionValWithInvalidCharacter(List partVals, - Pattern partitionValidationPattern) { + private static String getPartitionValWithInvalidCharacter(List partVals, + Pattern partitionValidationPattern) { if (partitionValidationPattern == null) { return null; } @@ -499,8 +504,9 @@ public static boolean isExternalTable(Table table) { } // check if stats need to be (re)calculated - public static boolean requireCalStats(Configuration hiveConf, Partition oldPart, - Partition newPart, Table tbl, EnvironmentContext environmentContext) { + public static boolean requireCalStats(Partition oldPart, + Partition newPart, Table tbl, + EnvironmentContext environmentContext) { if (environmentContext != null && environmentContext.isSetProperties() @@ -792,9 +798,11 @@ public static boolean isInsertOnlyTableParam(Map params) { Configuration.class).newInstance(conf); listeners.add(listener); } catch (InvocationTargetException ie) { + LOG.error("Got InvocationTargetException", ie); throw new MetaException("Failed to instantiate listener named: "+ listenerImpl + ", reason: " + ie.getCause()); } catch (Exception e) { + LOG.error("Got Exception", e); throw new MetaException("Failed to instantiate listener named: "+ listenerImpl + 
", reason: " + e); } @@ -961,13 +969,20 @@ public static void mergeColStats(ColumnStatistics csNew, ColumnStatistics csOld) ColumnStatisticsObj statsObjNew = csNew.getStatsObj().get(index); ColumnStatisticsObj statsObjOld = map.get(statsObjNew.getColName()); if (statsObjOld != null) { + // because we already confirm that the stats is accurate + // it is impossible that the column types have been changed while the + // column stats is still accurate. + assert (statsObjNew.getStatsData().getSetField() == statsObjOld.getStatsData() + .getSetField()); // If statsObjOld is found, we can merge. ColumnStatsMerger merger = ColumnStatsMergerFactory.getColumnStatsMerger(statsObjNew, statsObjOld); merger.merge(statsObjNew, statsObjOld); } + // If statsObjOld is not found, we just use statsObjNew as it is accurate. list.add(statsObjNew); } + // in all the other cases, we can not merge csNew.setStatsObj(list); } @@ -1064,15 +1079,485 @@ public static boolean checkUserHasHostProxyPrivileges(String user, Configuration return machineList.includes(ipAddress); } - // TODO This should be moved to MetaStoreTestUtils once it is moved into standalone-metastore. /** - * Setup a configuration file for standalone mode. There are a few config variables that have - * defaults that require parts of Hive that aren't present in standalone mode. This method - * sets them to something that will work without the rest of Hive. - * @param conf Configuration object + * Convert FieldSchemas to Thrift DDL. */ - public static void setConfForStandloneMode(Configuration conf) { - MetastoreConf.setVar(conf, MetastoreConf.ConfVars.TASK_THREADS_ALWAYS, - EventCleanerTask.class.getName()); + public static String getDDLFromFieldSchema(String structName, + List fieldSchemas) { + StringBuilder ddl = new StringBuilder(); + ddl.append("struct "); + ddl.append(structName); + ddl.append(" { "); + boolean first = true; + for (FieldSchema col : fieldSchemas) { + if (first) { + first = false; + } else { + ddl.append(", "); + } + ddl.append(ColumnType.typeToThriftType(col.getType())); + ddl.append(' '); + ddl.append(col.getName()); + } + ddl.append("}"); + + LOG.trace("DDL: {}", ddl); + return ddl.toString(); + } + + public static Properties getTableMetadata( + org.apache.hadoop.hive.metastore.api.Table table) { + return MetaStoreUtils.getSchema(table.getSd(), table.getSd(), table + .getParameters(), table.getDbName(), table.getTableName(), table.getPartitionKeys()); + } + + public static Properties getPartitionMetadata( + org.apache.hadoop.hive.metastore.api.Partition partition, + org.apache.hadoop.hive.metastore.api.Table table) { + return MetaStoreUtils + .getSchema(partition.getSd(), partition.getSd(), partition + .getParameters(), table.getDbName(), table.getTableName(), + table.getPartitionKeys()); + } + + public static Properties getSchema( + org.apache.hadoop.hive.metastore.api.Partition part, + org.apache.hadoop.hive.metastore.api.Table table) { + return MetaStoreUtils.getSchema(part.getSd(), table.getSd(), table + .getParameters(), table.getDbName(), table.getTableName(), table.getPartitionKeys()); + } + + /** + * Get partition level schema from table level schema. + * This function will use the same column names, column types and partition keys for + * each partition Properties. Their values are copied from the table Properties. This + * is mainly to save CPU and memory. CPU is saved because the first time the + * StorageDescriptor column names are accessed, JDO needs to execute a SQL query to + * retrieve the data. 
If we know the data will be the same as the table level schema + * and they are immutable, we should just reuse the table level schema objects. + * + * @param sd The Partition level Storage Descriptor. + * @param parameters partition level parameters + * @param tblSchema The table level schema from which this partition should be copied. + * @return the properties + */ + public static Properties getPartSchemaFromTableSchema( + StorageDescriptor sd, + Map parameters, + Properties tblSchema) { + + // Inherent most properties from table level schema and overwrite some properties + // in the following code. + // This is mainly for saving CPU and memory to reuse the column names, types and + // partition columns in the table level schema. + Properties schema = (Properties) tblSchema.clone(); + + // InputFormat + String inputFormat = sd.getInputFormat(); + if (inputFormat == null || inputFormat.length() == 0) { + String tblInput = + schema.getProperty(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.FILE_INPUT_FORMAT); + if (tblInput == null) { + inputFormat = org.apache.hadoop.mapred.SequenceFileInputFormat.class.getName(); + } else { + inputFormat = tblInput; + } + } + schema.setProperty(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.FILE_INPUT_FORMAT, + inputFormat); + + // OutputFormat + String outputFormat = sd.getOutputFormat(); + if (outputFormat == null || outputFormat.length() == 0) { + String tblOutput = + schema.getProperty(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.FILE_OUTPUT_FORMAT); + if (tblOutput == null) { + outputFormat = org.apache.hadoop.mapred.SequenceFileOutputFormat.class.getName(); + } else { + outputFormat = tblOutput; + } + } + schema.setProperty(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.FILE_OUTPUT_FORMAT, + outputFormat); + + // Location + if (sd.getLocation() != null) { + schema.setProperty(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_LOCATION, + sd.getLocation()); + } + + // Bucket count + schema.setProperty(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.BUCKET_COUNT, + Integer.toString(sd.getNumBuckets())); + + if (sd.getBucketCols() != null && sd.getBucketCols().size() > 0) { + schema.setProperty(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.BUCKET_FIELD_NAME, + sd.getBucketCols().get(0)); + } + + // SerdeInfo + if (sd.getSerdeInfo() != null) { + + // We should not update the following 3 values if SerDeInfo contains these. + // This is to keep backward compatible with getSchema(), where these 3 keys + // are updated after SerDeInfo properties got copied. + String cols = org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_COLUMNS; + String colTypes = org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_COLUMN_TYPES; + String parts = org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS; + + for (Map.Entry param : sd.getSerdeInfo().getParameters().entrySet()) { + String key = param.getKey(); + if (schema.get(key) != null && + (key.equals(cols) || key.equals(colTypes) || key.equals(parts))) { + continue; + } + schema.put(key, (param.getValue() != null) ? 
param.getValue() : StringUtils.EMPTY); + } + + if (sd.getSerdeInfo().getSerializationLib() != null) { + schema.setProperty(ColumnType.SERIALIZATION_LIB, sd.getSerdeInfo().getSerializationLib()); + } + } + + // skipping columns since partition level field schemas are the same as table level's + // skipping partition keys since it is the same as table level partition keys + + if (parameters != null) { + for (Map.Entry e : parameters.entrySet()) { + schema.setProperty(e.getKey(), e.getValue()); + } + } + + return schema; + } + + private static Properties addCols(Properties schema, List cols) { + + StringBuilder colNameBuf = new StringBuilder(); + StringBuilder colTypeBuf = new StringBuilder(); + StringBuilder colComment = new StringBuilder(); + + boolean first = true; + String columnNameDelimiter = getColumnNameDelimiter(cols); + for (FieldSchema col : cols) { + if (!first) { + colNameBuf.append(columnNameDelimiter); + colTypeBuf.append(":"); + colComment.append('\0'); + } + colNameBuf.append(col.getName()); + colTypeBuf.append(col.getType()); + colComment.append((null != col.getComment()) ? col.getComment() : StringUtils.EMPTY); + first = false; + } + schema.setProperty( + org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_COLUMNS, + colNameBuf.toString()); + schema.setProperty(ColumnType.COLUMN_NAME_DELIMITER, columnNameDelimiter); + String colTypes = colTypeBuf.toString(); + schema.setProperty( + org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_COLUMN_TYPES, + colTypes); + schema.setProperty("columns.comments", colComment.toString()); + + return schema; + + } + + public static Properties getSchemaWithoutCols(StorageDescriptor sd, + Map parameters, String databaseName, String tableName, + List partitionKeys) { + Properties schema = new Properties(); + String inputFormat = sd.getInputFormat(); + if (inputFormat == null || inputFormat.length() == 0) { + inputFormat = org.apache.hadoop.mapred.SequenceFileInputFormat.class + .getName(); + } + schema.setProperty( + org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.FILE_INPUT_FORMAT, + inputFormat); + String outputFormat = sd.getOutputFormat(); + if (outputFormat == null || outputFormat.length() == 0) { + outputFormat = org.apache.hadoop.mapred.SequenceFileOutputFormat.class + .getName(); + } + schema.setProperty( + org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.FILE_OUTPUT_FORMAT, + outputFormat); + + schema.setProperty( + org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_NAME, + databaseName + "." + tableName); + + if (sd.getLocation() != null) { + schema.setProperty( + org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_LOCATION, + sd.getLocation()); + } + schema.setProperty( + org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.BUCKET_COUNT, Integer + .toString(sd.getNumBuckets())); + if (sd.getBucketCols() != null && sd.getBucketCols().size() > 0) { + schema.setProperty( + org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.BUCKET_FIELD_NAME, sd + .getBucketCols().get(0)); + } + if (sd.getSerdeInfo() != null) { + for (Map.Entry param : sd.getSerdeInfo().getParameters().entrySet()) { + schema.put(param.getKey(), (param.getValue() != null) ? 
param.getValue() : StringUtils.EMPTY); + } + + if (sd.getSerdeInfo().getSerializationLib() != null) { + schema.setProperty(ColumnType.SERIALIZATION_LIB, sd .getSerdeInfo().getSerializationLib()); + } + } + + if (sd.getCols() != null) { + schema.setProperty(ColumnType.SERIALIZATION_DDL, getDDLFromFieldSchema(tableName, sd.getCols())); + } + + String partString = StringUtils.EMPTY; + String partStringSep = StringUtils.EMPTY; + String partTypesString = StringUtils.EMPTY; + String partTypesStringSep = StringUtils.EMPTY; + for (FieldSchema partKey : partitionKeys) { + partString = partString.concat(partStringSep); + partString = partString.concat(partKey.getName()); + partTypesString = partTypesString.concat(partTypesStringSep); + partTypesString = partTypesString.concat(partKey.getType()); + if (partStringSep.length() == 0) { + partStringSep = "/"; + partTypesStringSep = ":"; + } + } + if (partString.length() > 0) { + schema + .setProperty( + org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS, + partString); + schema + .setProperty( + org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_PARTITION_COLUMN_TYPES, + partTypesString); + } + + if (parameters != null) { + for (Map.Entry e : parameters.entrySet()) { + // add non-null parameters to the schema + if ( e.getValue() != null) { + schema.setProperty(e.getKey(), e.getValue()); + } + } + } + + return schema; + } + + public static Properties getSchema( + org.apache.hadoop.hive.metastore.api.StorageDescriptor sd, + org.apache.hadoop.hive.metastore.api.StorageDescriptor tblsd, + Map parameters, String databaseName, String tableName, + List partitionKeys) { + + return addCols(getSchemaWithoutCols(sd, parameters, databaseName, tableName, partitionKeys), tblsd.getCols()); + } + + public static String getColumnNameDelimiter(List fieldSchemas) { + // we first take a look if any fieldSchemas contain COMMA + for (int i = 0; i < fieldSchemas.size(); i++) { + if (fieldSchemas.get(i).getName().contains(",")) { + return String.valueOf(ColumnType.COLUMN_COMMENTS_DELIMITER); + } + } + return String.valueOf(','); + } + + /** + * Convert FieldSchemas to columnNames. + */ + public static String getColumnNamesFromFieldSchema(List fieldSchemas) { + String delimiter = getColumnNameDelimiter(fieldSchemas); + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < fieldSchemas.size(); i++) { + if (i > 0) { + sb.append(delimiter); + } + sb.append(fieldSchemas.get(i).getName()); + } + return sb.toString(); + } + + /** + * Convert FieldSchemas to columnTypes. 
+ */ + public static String getColumnTypesFromFieldSchema( + List fieldSchemas) { + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < fieldSchemas.size(); i++) { + if (i > 0) { + sb.append(","); + } + sb.append(fieldSchemas.get(i).getType()); + } + return sb.toString(); + } + + public static String getColumnCommentsFromFieldSchema(List fieldSchemas) { + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < fieldSchemas.size(); i++) { + if (i > 0) { + sb.append(ColumnType.COLUMN_COMMENTS_DELIMITER); + } + sb.append(fieldSchemas.get(i).getComment()); + } + return sb.toString(); + } + + public static int startMetaStore() throws Exception { + return startMetaStore(HadoopThriftAuthBridge.getBridge(), null); + } + + public static int startMetaStore(final HadoopThriftAuthBridge bridge, Configuration conf) throws + Exception { + int port = findFreePort(); + startMetaStore(port, bridge, conf); + return port; + } + + public static int startMetaStore(Configuration conf) throws Exception { + return startMetaStore(HadoopThriftAuthBridge.getBridge(), conf); + } + + public static void startMetaStore(final int port, final HadoopThriftAuthBridge bridge) throws Exception { + startMetaStore(port, bridge, null); + } + + public static void startMetaStore(final int port, + final HadoopThriftAuthBridge bridge, Configuration hiveConf) + throws Exception{ + if (hiveConf == null) { + hiveConf = MetastoreConf.newMetastoreConf(); + } + final Configuration finalHiveConf = hiveConf; + Thread thread = new Thread(new Runnable() { + @Override + public void run() { + try { + HiveMetaStore.startMetaStore(port, bridge, finalHiveConf); + } catch (Throwable e) { + LOG.error("Metastore Thrift Server threw an exception...",e); + } + } + }); + thread.setDaemon(true); + thread.start(); + loopUntilHMSReady(port); + } + + /** + * A simple connect test to make sure that the metastore is up + * @throws Exception + */ + private static void loopUntilHMSReady(int port) throws Exception { + int retries = 0; + Exception exc; + while (true) { + try { + Socket socket = new Socket(); + socket.connect(new InetSocketAddress(port), 5000); + socket.close(); + return; + } catch (Exception e) { + if (retries++ > 60) { //give up + exc = e; + break; + } + Thread.sleep(1000); + } + } + // something is preventing metastore from starting + // print the stack from all threads for debugging purposes + LOG.error("Unable to connect to metastore server: " + exc.getMessage()); + LOG.info("Printing all thread stack traces for debugging before throwing exception."); + LOG.info(getAllThreadStacksAsString()); + throw exc; + } + + private static String getAllThreadStacksAsString() { + Map threadStacks = Thread.getAllStackTraces(); + StringBuilder sb = new StringBuilder(); + for (Map.Entry entry : threadStacks.entrySet()) { + Thread t = entry.getKey(); + sb.append(System.lineSeparator()); + sb.append("Name: ").append(t.getName()).append(" State: ").append(t.getState()); + addStackString(entry.getValue(), sb); + } + return sb.toString(); + } + + private static void addStackString(StackTraceElement[] stackElems, StringBuilder sb) { + sb.append(System.lineSeparator()); + for (StackTraceElement stackElem : stackElems) { + sb.append(stackElem).append(System.lineSeparator()); + } + } + + /** + * Finds a free port on the machine. 
+ * + * @return + * @throws IOException + */ + public static int findFreePort() throws IOException { + ServerSocket socket= new ServerSocket(0); + int port = socket.getLocalPort(); + socket.close(); + return port; + } + + /** + * Finds a free port on the machine, but allow the + * ability to specify a port number to not use, no matter what. + */ + public static int findFreePortExcepting(int portToExclude) throws IOException { + ServerSocket socket1 = null; + ServerSocket socket2 = null; + try { + socket1 = new ServerSocket(0); + socket2 = new ServerSocket(0); + if (socket1.getLocalPort() != portToExclude) { + return socket1.getLocalPort(); + } + // If we're here, then socket1.getLocalPort was the port to exclude + // Since both sockets were open together at a point in time, we're + // guaranteed that socket2.getLocalPort() is not the same. + return socket2.getLocalPort(); + } finally { + if (socket1 != null){ + socket1.close(); + } + if (socket2 != null){ + socket2.close(); + } + } + } + + public static String getIndexTableName(String dbName, String baseTblName, String indexName) { + return dbName + "__" + baseTblName + "_" + indexName + "__"; + } + + public static boolean isMaterializedViewTable(Table table) { + if (table == null) { + return false; + } + return TableType.MATERIALIZED_VIEW.toString().equals(table.getTableType()); + } + + public static List getColumnNames(List schema) { + List cols = new ArrayList<>(schema.size()); + for (FieldSchema fs : schema) { + cols.add(fs.getName()); + } + return cols; } } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/SecurityUtils.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/SecurityUtils.java index 41a18cb19c..0b0cfbda80 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/SecurityUtils.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/SecurityUtils.java @@ -36,11 +36,15 @@ import javax.security.auth.login.AppConfigurationEntry; import org.apache.thrift.transport.TSSLTransportFactory; import org.apache.thrift.transport.TServerSocket; +import org.apache.thrift.transport.TSocket; +import org.apache.thrift.transport.TTransport; import org.apache.thrift.transport.TTransportException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import javax.net.ssl.SSLParameters; import javax.net.ssl.SSLServerSocket; +import javax.net.ssl.SSLSocket; import javax.security.auth.login.LoginException; import javax.security.auth.login.AppConfigurationEntry.LoginModuleControlFlag; @@ -284,4 +288,26 @@ public static TServerSocket getServerSSLSocket(String hiveHost, int portNum, Str } return thriftServerSocket; } + + public static TTransport getSSLSocket(String host, int port, int loginTimeout, + String trustStorePath, String trustStorePassWord) throws TTransportException { + TSSLTransportFactory.TSSLTransportParameters params = + new TSSLTransportFactory.TSSLTransportParameters(); + params.setTrustStore(trustStorePath, trustStorePassWord); + params.requireClientAuth(true); + // The underlying SSLSocket object is bound to host:port with the given SO_TIMEOUT and + // SSLContext created with the given params + TSocket tSSLSocket = TSSLTransportFactory.getClientSocket(host, port, loginTimeout, params); + return getSSLSocketWithHttps(tSSLSocket); + } + + // Using endpoint identification algorithm as HTTPS enables us to do + // CNAMEs/subjectAltName verification + private static TSocket getSSLSocketWithHttps(TSocket tSSLSocket) 
throws TTransportException { + SSLSocket sslSocket = (SSLSocket) tSSLSocket.getSocket(); + SSLParameters sslParams = sslSocket.getSSLParameters(); + sslParams.setEndpointIdentificationAlgorithm("HTTPS"); + sslSocket.setSSLParameters(sslParams); + return new TSocket(sslSocket); + } } diff --git metastore/src/test/org/apache/hadoop/hive/metastore/AlternateFailurePreListener.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/AlternateFailurePreListener.java similarity index 99% rename from metastore/src/test/org/apache/hadoop/hive/metastore/AlternateFailurePreListener.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/AlternateFailurePreListener.java index 22146ba4cd..bde29a10af 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/AlternateFailurePreListener.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/AlternateFailurePreListener.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git metastore/src/test/org/apache/hadoop/hive/metastore/DummyEndFunctionListener.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyEndFunctionListener.java similarity index 95% rename from metastore/src/test/org/apache/hadoop/hive/metastore/DummyEndFunctionListener.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyEndFunctionListener.java index ac62cd55b3..ca1f10b56f 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/DummyEndFunctionListener.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyEndFunctionListener.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,9 +30,9 @@ */ public class DummyEndFunctionListener extends MetaStoreEndFunctionListener{ - public static final List funcNameList = new ArrayList(); + public static final List funcNameList = new ArrayList<>(); public static final List contextList = - new ArrayList(); + new ArrayList<>(); public DummyEndFunctionListener(Configuration config) { super(config); diff --git metastore/src/test/org/apache/hadoop/hive/metastore/DummyListener.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyListener.java similarity index 99% rename from metastore/src/test/org/apache/hadoop/hive/metastore/DummyListener.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyListener.java index 182e724f10..baecd12520 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/DummyListener.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyListener.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -46,7 +46,7 @@ */ public class DummyListener extends MetaStoreEventListener{ - public static final List notifyList = new ArrayList(); + public static final List notifyList = new ArrayList<>(); /** * @return The last event received, or null if no event was received. 
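The SecurityUtils additions above (getSSLSocket / getSSLSocketWithHttps) enable hostname verification by setting the endpoint identification algorithm to "HTTPS" on the SSL socket that backs the Thrift client transport. The following is a minimal standalone sketch of the same JDK mechanism, using only javax.net.ssl and no Thrift classes; the host and port are placeholders, not values from this patch.

    import javax.net.ssl.SSLParameters;
    import javax.net.ssl.SSLSocket;
    import javax.net.ssl.SSLSocketFactory;

    public class HttpsEndpointIdCheckExample {
      // Open a TLS connection and turn on HTTPS endpoint identification so the
      // server certificate's CN/subjectAltName must match the host we dialed,
      // mirroring what getSSLSocketWithHttps does to the Thrift client socket.
      public static SSLSocket connectWithHostnameVerification(String host, int port) throws Exception {
        SSLSocketFactory factory = (SSLSocketFactory) SSLSocketFactory.getDefault();
        SSLSocket socket = (SSLSocket) factory.createSocket(host, port);
        SSLParameters params = socket.getSSLParameters();
        params.setEndpointIdentificationAlgorithm("HTTPS");
        socket.setSSLParameters(params);
        socket.startHandshake(); // handshake fails if the certificate does not match the host
        return socket;
      }
    }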
diff --git metastore/src/test/org/apache/hadoop/hive/metastore/DummyMetaStoreInitListener.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyMetaStoreInitListener.java similarity index 85% rename from metastore/src/test/org/apache/hadoop/hive/metastore/DummyMetaStoreInitListener.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyMetaStoreInitListener.java index 2eb8354b16..0f2a3c76d9 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/DummyMetaStoreInitListener.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyMetaStoreInitListener.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,12 +18,7 @@ package org.apache.hadoop.hive.metastore; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.MetaStoreInitContext; -import org.apache.hadoop.hive.metastore.MetaStoreInitListener; import org.apache.hadoop.hive.metastore.api.MetaException; /* diff --git metastore/src/test/org/apache/hadoop/hive/metastore/DummyPreListener.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyPreListener.java similarity index 98% rename from metastore/src/test/org/apache/hadoop/hive/metastore/DummyPreListener.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyPreListener.java index 7ff6f92952..0a68bac7a2 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/DummyPreListener.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyPreListener.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,7 +34,7 @@ */ public class DummyPreListener extends MetaStorePreEventListener { - public static final List notifyList = new ArrayList(); + public static final List notifyList = new ArrayList<>(); public DummyPreListener(Configuration config) { super(config); diff --git metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java similarity index 99% rename from metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java index 4f7b9c850c..24c59f2f1b 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -677,7 +677,7 @@ public String getToken(String tokenIdentifier) { @Override public List getAllTokenIdentifiers() { - return new ArrayList(); + return new ArrayList<>(); } @Override diff --git metastore/src/test/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java similarity index 99% rename from metastore/src/test/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java index d89c54c307..d7a40b608f 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -49,7 +49,7 @@ public void assertInjectionsPerformed( assertEquals(expectedInjectionCalled, injectionPathCalled); assertEquals(expectedNonInjectedPathCalled, nonInjectedPathCalled); } - }; + } private static com.google.common.base.Function getTableModifier = com.google.common.base.Functions.identity(); diff --git metastore/src/test/org/apache/hadoop/hive/metastore/IpAddressListener.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/IpAddressListener.java similarity index 98% rename from metastore/src/test/org/apache/hadoop/hive/metastore/IpAddressListener.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/IpAddressListener.java index e40edcabd3..e7a0d2d241 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/IpAddressListener.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/IpAddressListener.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,8 +21,6 @@ import java.net.InetAddress; import java.net.UnknownHostException; -import junit.framework.Assert; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler; import org.apache.hadoop.hive.metastore.api.MetaException; @@ -35,6 +33,7 @@ import org.apache.hadoop.hive.metastore.events.DropPartitionEvent; import org.apache.hadoop.hive.metastore.events.DropTableEvent; import org.apache.hadoop.hive.metastore.events.LoadPartitionDoneEvent; +import org.junit.Assert; /** An implementation for MetaStoreEventListener which checks that the IP Address stored in * HMSHandler matches that of local host, for testing purposes. 
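The test listeners moved in the hunks above (DummyListener, IpAddressListener, and friends) all follow the same pattern: subclass a metastore listener base class, accept a Hadoop Configuration in the constructor, and record the events the metastore hands them. A rough sketch of that pattern is below; it is a hypothetical listener for illustration only, and the event-callback signatures are assumed from this Hive version, so they should be checked against the actual MetaStoreEventListener in use. Such listeners are typically wired in through the hive.metastore.event.listeners property.

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.MetaStoreEventListener;
    import org.apache.hadoop.hive.metastore.api.MetaException;
    import org.apache.hadoop.hive.metastore.events.CreateTableEvent;
    import org.apache.hadoop.hive.metastore.events.DropTableEvent;

    // Hypothetical listener, analogous to the Dummy* test listeners in this patch.
    public class RecordingListener extends MetaStoreEventListener {
      public static final List<String> events = new ArrayList<>();

      public RecordingListener(Configuration config) {
        super(config);
      }

      @Override
      public void onCreateTable(CreateTableEvent tableEvent) throws MetaException {
        events.add("CREATE_TABLE:" + tableEvent.getTable().getTableName());
      }

      @Override
      public void onDropTable(DropTableEvent tableEvent) throws MetaException {
        events.add("DROP_TABLE:" + tableEvent.getTable().getTableName());
      }
    }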
diff --git metastore/src/test/org/apache/hadoop/hive/metastore/MetaStoreTestUtils.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/MetaStoreTestUtils.java similarity index 85% rename from metastore/src/test/org/apache/hadoop/hive/metastore/MetaStoreTestUtils.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/MetaStoreTestUtils.java index 7df7ac55b8..380f3a1fd0 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/MetaStoreTestUtils.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/MetaStoreTestUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,8 +25,6 @@ import java.util.Map; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.events.EventCleanerTask; import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; @@ -35,20 +33,21 @@ public class MetaStoreTestUtils { - private static final Logger LOG = LoggerFactory.getLogger("hive.log"); + private static final Logger LOG = LoggerFactory.getLogger(MetaStoreTestUtils.class); public static final int RETRY_COUNT = 10; public static int startMetaStore() throws Exception { return MetaStoreTestUtils.startMetaStore(HadoopThriftAuthBridge.getBridge(), null); } - public static int startMetaStore(final HadoopThriftAuthBridge bridge, HiveConf conf) throws Exception { + public static int startMetaStore(final HadoopThriftAuthBridge bridge, Configuration conf) + throws Exception { int port = MetaStoreTestUtils.findFreePort(); MetaStoreTestUtils.startMetaStore(port, bridge, conf); return port; } - public static int startMetaStore(HiveConf conf) throws Exception { + public static int startMetaStore(Configuration conf) throws Exception { return startMetaStore(HadoopThriftAuthBridge.getBridge(), conf); } @@ -57,17 +56,17 @@ public static void startMetaStore(final int port, final HadoopThriftAuthBridge b } public static void startMetaStore(final int port, - final HadoopThriftAuthBridge bridge, HiveConf hiveConf) + final HadoopThriftAuthBridge bridge, Configuration conf) throws Exception{ - if (hiveConf == null) { - hiveConf = new HiveConf(HMSHandler.class); + if (conf == null) { + conf = MetastoreConf.newMetastoreConf(); } - final HiveConf finalHiveConf = hiveConf; + final Configuration finalConf = conf; Thread thread = new Thread(new Runnable() { @Override public void run() { try { - HiveMetaStore.startMetaStore(port, bridge, finalHiveConf); + HiveMetaStore.startMetaStore(port, bridge, finalConf); } catch (Throwable e) { LOG.error("Metastore Thrift Server threw an exception...", e); } @@ -82,7 +81,7 @@ public static int startMetaStoreWithRetry(final HadoopThriftAuthBridge bridge) t return MetaStoreTestUtils.startMetaStoreWithRetry(bridge, null); } - public static int startMetaStoreWithRetry(HiveConf conf) throws Exception { + public static int startMetaStoreWithRetry(Configuration conf) throws Exception { return MetaStoreTestUtils.startMetaStoreWithRetry(HadoopThriftAuthBridge.getBridge(), conf); } @@ -90,7 +89,7 @@ public static int startMetaStoreWithRetry() throws Exception { return MetaStoreTestUtils.startMetaStoreWithRetry(HadoopThriftAuthBridge.getBridge(), null); } - public static 
int startMetaStoreWithRetry(final HadoopThriftAuthBridge bridge, HiveConf conf) + public static int startMetaStoreWithRetry(final HadoopThriftAuthBridge bridge, Configuration conf) throws Exception { Exception metaStoreException = null; int metaStorePort = 0; @@ -196,4 +195,14 @@ public static int findFreePortExcepting(int portToExclude) throws IOException { } } + /** + * Setup a configuration file for standalone mode. There are a few config variables that have + * defaults that require parts of Hive that aren't present in standalone mode. This method + * sets them to something that will work without the rest of Hive. + * @param conf Configuration object + */ + public static void setConfForStandloneMode(Configuration conf) { + MetastoreConf.setVar(conf, MetastoreConf.ConfVars.TASK_THREADS_ALWAYS, + EventCleanerTask.class.getName()); + } } diff --git metastore/src/test/org/apache/hadoop/hive/metastore/MockPartitionExpressionForMetastore.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/MockPartitionExpressionForMetastore.java similarity index 96% rename from metastore/src/test/org/apache/hadoop/hive/metastore/MockPartitionExpressionForMetastore.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/MockPartitionExpressionForMetastore.java index 12a862d6bd..346fd98d86 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/MockPartitionExpressionForMetastore.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/MockPartitionExpressionForMetastore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,6 @@ import org.apache.hadoop.hive.metastore.api.FileMetadataExprType; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.ql.io.sarg.SearchArgument; -import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; import java.util.List; diff --git metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStoreGetMetaConf.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreGetMetaConf.java similarity index 66% rename from metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStoreGetMetaConf.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreGetMetaConf.java index f71911eb46..ba86e052ed 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStoreGetMetaConf.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreGetMetaConf.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,8 +19,12 @@ package org.apache.hadoop.hive.metastore; import java.security.Permission; -import org.apache.hadoop.hive.conf.HiveConf; + +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.thrift.TException; import org.junit.After; import org.junit.AfterClass; @@ -40,7 +44,7 @@ public ExpectedException thrown = ExpectedException.none(); private static final Logger LOG = LoggerFactory.getLogger(TestHiveMetaStoreGetMetaConf.class); - private static HiveConf hiveConf; + private static Configuration conf; private static SecurityManager securityManager; private HiveMetaStoreClient hmsc; @@ -75,26 +79,21 @@ public static void startMetaStoreServer() throws Exception { securityManager = System.getSecurityManager(); System.setSecurityManager(new NoExitSecurityManager()); - HiveConf metastoreConf = new HiveConf(); - metastoreConf.setClass(HiveConf.ConfVars.METASTORE_EXPRESSION_PROXY_CLASS.varname, + Configuration metastoreConf = MetastoreConf.newMetastoreConf(); + MetastoreConf.setClass(metastoreConf, ConfVars.EXPRESSION_PROXY_CLASS, MockPartitionExpressionForMetastore.class, PartitionExpressionProxy.class); - metastoreConf.setBoolVar(HiveConf.ConfVars.METASTORE_TRY_DIRECT_SQL_DDL, false); - int msPort = MetaStoreTestUtils.startMetaStore(metastoreConf); - hiveConf = new HiveConf(TestHiveMetaStoreGetMetaConf.class); - hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" - + msPort); - hiveConf.setVar(HiveConf.ConfVars.PREEXECHOOKS, ""); - hiveConf.setVar(HiveConf.ConfVars.POSTEXECHOOKS, ""); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false); - hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 10); - - System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " "); - System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " "); + MetastoreConf.setBoolVar(metastoreConf, ConfVars.TRY_DIRECT_SQL_DDL, false); + MetaStoreTestUtils.setConfForStandloneMode(metastoreConf); + int msPort = MetaStoreUtils.startMetaStore(metastoreConf); + conf = MetastoreConf.newMetastoreConf(); + MetastoreConf.setVar(conf, ConfVars.THRIFT_URIS, "thrift://localhost:" + msPort); + MetastoreConf.setBoolVar(conf, ConfVars.HIVE_SUPPORT_CONCURRENCY, false); + MetastoreConf.setLongVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES, 10); } @Before public void setup() throws MetaException { - hmsc = new HiveMetaStoreClient(hiveConf); + hmsc = new HiveMetaStoreClient(conf); } @After @@ -105,31 +104,31 @@ public void closeClient() { } @Test - public void testGetMetaConfDefault() throws MetaException, TException { - HiveConf.ConfVars metaConfVar = HiveConf.ConfVars.METASTORE_TRY_DIRECT_SQL; - String expected = metaConfVar.getDefaultValue(); + public void testGetMetaConfDefault() throws TException { + ConfVars metaConfVar = ConfVars.TRY_DIRECT_SQL; + String expected = metaConfVar.getDefaultVal().toString(); String actual = hmsc.getMetaConf(metaConfVar.toString()); assertEquals(expected, actual); } @Test - public void testGetMetaConfDefaultEmptyString() throws MetaException, TException { - HiveConf.ConfVars metaConfVar = HiveConf.ConfVars.METASTORE_PARTITION_NAME_WHITELIST_PATTERN; + public void testGetMetaConfDefaultEmptyString() throws 
TException { + ConfVars metaConfVar = ConfVars.PARTITION_NAME_WHITELIST_PATTERN; String expected = ""; String actual = hmsc.getMetaConf(metaConfVar.toString()); assertEquals(expected, actual); } @Test - public void testGetMetaConfOverridden() throws MetaException, TException { - HiveConf.ConfVars metaConfVar = HiveConf.ConfVars.METASTORE_TRY_DIRECT_SQL_DDL; + public void testGetMetaConfOverridden() throws TException { + ConfVars metaConfVar = ConfVars.TRY_DIRECT_SQL_DDL; String expected = "false"; String actual = hmsc.getMetaConf(metaConfVar.toString()); assertEquals(expected, actual); } @Test - public void testGetMetaConfUnknownPreperty() throws MetaException, TException { + public void testGetMetaConfUnknownPreperty() throws TException { String unknownPropertyName = "hive.meta.foo.bar"; thrown.expect(MetaException.class); thrown.expectMessage("Invalid configuration key " + unknownPropertyName); diff --git metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java similarity index 90% rename from metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java index 86462ff7b1..57e5a4126e 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,7 @@ package org.apache.hadoop.hive.metastore; -import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Partition; @@ -26,9 +26,10 @@ import org.apache.hadoop.hive.metastore.api.SerDeInfo; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.apache.hadoop.hive.metastore.partition.spec.CompositePartitionSpecProxy; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; -import org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe; import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; @@ -50,7 +51,7 @@ private static final Logger LOG = LoggerFactory.getLogger(TestHiveMetaStorePartitionSpecs.class); private static int msPort; - private static HiveConf hiveConf; + private static Configuration conf; private static SecurityManager securityManager; public static class NoExitSecurityManager extends SecurityManager { @@ -79,30 +80,26 @@ public static void tearDown() throws Exception { LOG.info("Shutting down metastore."); System.setSecurityManager(securityManager); - HiveMetaStoreClient hmsc = new HiveMetaStoreClient(hiveConf); + HiveMetaStoreClient hmsc = new HiveMetaStoreClient(conf); hmsc.dropDatabase(dbName, true, true, true); } @BeforeClass public static void startMetaStoreServer() throws Exception { - HiveConf metastoreConf = new HiveConf(); - 
metastoreConf.setClass(HiveConf.ConfVars.METASTORE_EXPRESSION_PROXY_CLASS.varname, + Configuration metastoreConf = MetastoreConf.newMetastoreConf(); + MetastoreConf.setClass(metastoreConf, ConfVars.EXPRESSION_PROXY_CLASS, MockPartitionExpressionForMetastore.class, PartitionExpressionProxy.class); + MetaStoreTestUtils.setConfForStandloneMode(metastoreConf); msPort = MetaStoreTestUtils.startMetaStore(metastoreConf); securityManager = System.getSecurityManager(); System.setSecurityManager(new NoExitSecurityManager()); - hiveConf = new HiveConf(TestHiveMetaStorePartitionSpecs.class); - hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" - + msPort); - hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); - hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, - "false"); - hiveConf.set(HiveConf.ConfVars.METASTORE_EXPRESSION_PROXY_CLASS.name(), MockPartitionExpressionForMetastore.class.getCanonicalName()); - System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " "); - System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " "); + conf = MetastoreConf.newMetastoreConf(); + MetastoreConf.setVar(conf, ConfVars.THRIFT_URIS, "thrift://localhost:" + msPort); + MetastoreConf.setLongVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES, 3); + MetastoreConf.setBoolVar(conf, ConfVars.HIVE_SUPPORT_CONCURRENCY, false); + MetastoreConf.setClass(conf, ConfVars.EXPRESSION_PROXY_CLASS, + MockPartitionExpressionForMetastore.class, PartitionExpressionProxy.class); } private static String dbName = "testpartitionspecs_db"; @@ -113,15 +110,16 @@ public static void startMetaStoreServer() throws Exception { private static void createTable(HiveMetaStoreClient hmsc, boolean enablePartitionGrouping) throws Exception { - List columns = new ArrayList(); + List columns = new ArrayList<>(); columns.add(new FieldSchema("foo", "string", "")); columns.add(new FieldSchema("bar", "string", "")); - List partColumns = new ArrayList(); + List partColumns = new ArrayList<>(); partColumns.add(new FieldSchema("dt", "string", "")); partColumns.add(new FieldSchema("blurb", "string", "")); - SerDeInfo serdeInfo = new SerDeInfo("LBCSerDe", LazyBinaryColumnarSerDe.class.getCanonicalName(), new HashMap()); + SerDeInfo serdeInfo = new SerDeInfo("LBCSerDe", + "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe", new HashMap<>()); StorageDescriptor storageDescriptor = new StorageDescriptor(columns, null, @@ -129,7 +127,7 @@ private static void createTable(HiveMetaStoreClient hmsc, boolean enablePartitio "org.apache.hadoop.hive.ql.io.RCFileOutputFormat", false, 0, serdeInfo, null, null, null); - Map tableParameters = new HashMap(); + Map tableParameters = new HashMap<>(); tableParameters.put("hive.hcatalog.partition.spec.grouping.enabled", enablePartitionGrouping? 
"true":"false"); Table table = new Table(tableName, dbName, "", 0, 0, 0, storageDescriptor, partColumns, tableParameters, "", "", ""); @@ -177,7 +175,7 @@ private static void populatePartitions(HiveMetaStoreClient hmsc, Table table, Li private void testGetPartitionSpecs(boolean enablePartitionGrouping) { try { - HiveMetaStoreClient hmsc = new HiveMetaStoreClient(hiveConf); + HiveMetaStoreClient hmsc = new HiveMetaStoreClient(conf); clearAndRecreateDB(hmsc); createTable(hmsc, enablePartitionGrouping); Table table = hmsc.getTable(dbName, tableName); @@ -186,9 +184,9 @@ private void testGetPartitionSpecs(boolean enablePartitionGrouping) { PartitionSpecProxy partitionSpecProxy = hmsc.listPartitionSpecs(dbName, tableName, -1); Assert.assertEquals( "Unexpected number of partitions.", nDates * 2, partitionSpecProxy.size()); - Map> locationToDateMap = new HashMap>(); - locationToDateMap.put("isLocatedInTablePath", new ArrayList()); - locationToDateMap.put("isLocatedOutsideTablePath", new ArrayList()); + Map> locationToDateMap = new HashMap<>(); + locationToDateMap.put("isLocatedInTablePath", new ArrayList<>()); + locationToDateMap.put("isLocatedOutsideTablePath", new ArrayList<>()); PartitionSpecProxy.PartitionIterator iterator = partitionSpecProxy.getPartitionIterator(); while (iterator.hasNext()) { @@ -196,7 +194,7 @@ private void testGetPartitionSpecs(boolean enablePartitionGrouping) { locationToDateMap.get(partition.getValues().get(1)).add(partition.getValues().get(0)); } - List expectedDates = new ArrayList(nDates); + List expectedDates = new ArrayList<>(nDates); for (int i=0; i> blurbToPartitionList = new HashMap>(3); + Map> blurbToPartitionList = new HashMap<>(3); while (iterator.hasNext()) { Partition partition = iterator.next(); String blurb = partition.getValues().get(1); if (!blurbToPartitionList.containsKey(blurb)) { - blurbToPartitionList.put(blurb, new ArrayList(nDates)); + blurbToPartitionList.put(blurb, new ArrayList<>(nDates)); } blurbToPartitionList.get(blurb).add(partition); diff --git metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTimeout.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTimeout.java similarity index 77% rename from metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTimeout.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTimeout.java index 2166c2080a..1489975749 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTimeout.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTimeout.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,9 +20,11 @@ import java.util.concurrent.TimeUnit; -import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.apache.hadoop.util.StringUtils; import org.junit.AfterClass; import org.junit.Assert; @@ -35,25 +37,20 @@ */ public class TestHiveMetaStoreTimeout { protected static HiveMetaStoreClient client; - protected static HiveConf hiveConf; + protected static Configuration conf; protected static Warehouse warehouse; @BeforeClass public static void setUp() throws Exception { HiveMetaStore.TEST_TIMEOUT_ENABLED = true; - hiveConf = new HiveConf(TestHiveMetaStoreTimeout.class); - hiveConf.set(HiveConf.ConfVars.METASTORE_EXPRESSION_PROXY_CLASS.varname, - MockPartitionExpressionForMetastore.class.getCanonicalName()); - hiveConf.setTimeVar(HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT, 10 * 1000, + conf = MetastoreConf.newMetastoreConf(); + MetastoreConf.setClass(conf, ConfVars.EXPRESSION_PROXY_CLASS, + MockPartitionExpressionForMetastore.class, PartitionExpressionProxy.class); + MetastoreConf.setTimeVar(conf, ConfVars.CLIENT_SOCKET_TIMEOUT, 1000, TimeUnit.MILLISECONDS); - warehouse = new Warehouse(hiveConf); - try { - client = new HiveMetaStoreClient(hiveConf); - } catch (Throwable e) { - System.err.println("Unable to open the metastore"); - System.err.println(StringUtils.stringifyException(e)); - throw e; - } + MetaStoreTestUtils.setConfForStandloneMode(conf); + warehouse = new Warehouse(conf); + client = new HiveMetaStoreClient(conf); } @AfterClass @@ -70,7 +67,7 @@ public static void tearDown() throws Exception { @Test public void testNoTimeout() throws Exception { - HiveMetaStore.TEST_TIMEOUT_VALUE = 5 * 1000; + HiveMetaStore.TEST_TIMEOUT_VALUE = 250; String dbName = "db"; client.dropDatabase(dbName, true, true); @@ -88,7 +85,7 @@ public void testNoTimeout() throws Exception { @Test public void testTimeout() throws Exception { - HiveMetaStore.TEST_TIMEOUT_VALUE = 15 * 1000; + HiveMetaStore.TEST_TIMEOUT_VALUE = 2 * 1000; String dbName = "db"; client.dropDatabase(dbName, true, true); @@ -104,12 +101,12 @@ public void testTimeout() throws Exception { } // restore - HiveMetaStore.TEST_TIMEOUT_VALUE = 5 * 1000; + HiveMetaStore.TEST_TIMEOUT_VALUE = 1; } @Test public void testResetTimeout() throws Exception { - HiveMetaStore.TEST_TIMEOUT_VALUE = 5 * 1000; + HiveMetaStore.TEST_TIMEOUT_VALUE = 250; String dbName = "db"; // no timeout before reset @@ -124,7 +121,8 @@ public void testResetTimeout() throws Exception { client.dropDatabase(dbName, true, true); // reset - client.setMetaConf(HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT.varname, "3s"); + HiveMetaStore.TEST_TIMEOUT_VALUE = 2000; + client.setMetaConf(ConfVars.CLIENT_SOCKET_TIMEOUT.getVarname(), "1s"); // timeout after reset try { @@ -137,6 +135,6 @@ public void testResetTimeout() throws Exception { // restore client.dropDatabase(dbName, true, true); - client.setMetaConf(HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT.varname, "10s"); + client.setMetaConf(ConfVars.CLIENT_SOCKET_TIMEOUT.getVarname(), "10s"); } } \ No newline at end of file diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreConnectionUrlHook.java 
standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreConnectionUrlHook.java index 7c54354a16..19279a5537 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreConnectionUrlHook.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreConnectionUrlHook.java @@ -21,7 +21,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; -import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.junit.Test; /** @@ -37,7 +36,7 @@ public void testUrlHook() throws Exception { MetastoreConf.setVar(conf, ConfVars.CONNECTURLHOOK, DummyJdoConnectionUrlHook.class.getName()); MetastoreConf.setVar(conf, ConfVars.CONNECTURLKEY, DummyJdoConnectionUrlHook.initialUrl); MetastoreConf.setVar(conf, ConfVars.RAW_STORE_IMPL, DummyRawStoreForJdoConnection.class.getName()); - MetaStoreUtils.setConfForStandloneMode(conf); + MetaStoreTestUtils.setConfForStandloneMode(conf); // Instantiating the HMSHandler with hive.metastore.checkForDefaultDb will cause it to // initialize an instance of the DummyRawStoreForJdoConnection diff --git metastore/src/test/org/apache/hadoop/hive/metastore/TestOldSchema.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java similarity index 91% rename from metastore/src/test/org/apache/hadoop/hive/metastore/TestOldSchema.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java index 8409d9bacc..bf8556d3b0 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/TestOldSchema.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
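Pieced together from the fragments above, the configuration the URL-hook test now builds looks roughly like this sketch; it assumes the same imports and test doubles as TestMetaStoreConnectionUrlHook itself and uses nothing beyond calls visible in the hunk:

    // Hedged sketch of the standalone conf used to exercise a JDO connection URL hook.
    Configuration conf = MetastoreConf.newMetastoreConf();
    // Route JDO connection URL lookups through the dummy hook rather than a fixed URL.
    MetastoreConf.setVar(conf, ConfVars.CONNECTURLHOOK, DummyJdoConnectionUrlHook.class.getName());
    MetastoreConf.setVar(conf, ConfVars.CONNECTURLKEY, DummyJdoConnectionUrlHook.initialUrl);
    // Use the test RawStore stand-in instead of a real ObjectStore/JDO backend.
    MetastoreConf.setVar(conf, ConfVars.RAW_STORE_IMPL, DummyRawStoreForJdoConnection.class.getName());
    // Same standalone-mode defaults the other migrated tests set.
    MetaStoreTestUtils.setConfForStandloneMode(conf);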
See the NOTICE file * distributed with this work for additional information @@ -22,8 +22,8 @@ import java.util.Collections; import java.util.List; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.common.ndv.hll.HyperLogLog; -import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.AggrStats; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; @@ -41,8 +41,8 @@ import org.apache.hadoop.hive.metastore.api.SerDeInfo; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.ql.io.sarg.SearchArgument; -import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -84,14 +84,14 @@ public FileFormatProxy getFileFormatProxy(FileMetadataExprType type) { } } - byte bitVectors[][] = new byte[2][]; + private byte bitVectors[][] = new byte[2][]; @Before public void setUp() throws Exception { - HiveConf conf = new HiveConf(); - conf.setVar(HiveConf.ConfVars.METASTORE_EXPRESSION_PROXY_CLASS, - MockPartitionExpressionProxy.class.getName()); - conf.setBoolVar(HiveConf.ConfVars.HIVE_STATS_FETCH_BITVECTOR, false); + Configuration conf = MetastoreConf.newMetastoreConf(); + MetastoreConf.setClass(conf, MetastoreConf.ConfVars.EXPRESSION_PROXY_CLASS, + MockPartitionExpressionProxy.class, PartitionExpressionProxy.class); + MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.STATS_FETCH_BITVECTOR, false); store = new ObjectStore(); store.setConf(conf); @@ -114,8 +114,6 @@ public void tearDown() { /** * Tests partition operations - * - * @throws Exception */ @Test public void testPartitionOps() throws Exception { @@ -128,11 +126,11 @@ public void testPartitionOps() throws Exception { cols.add(new FieldSchema("col1", "long", "nocomment")); SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, - serde, null, null, Collections. emptyMap()); + serde, null, null, Collections.emptyMap()); List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); + Collections.emptyMap(), null, null, null); store.createTable(table); Deadline.startTimer("getPartition"); @@ -142,7 +140,7 @@ public void testPartitionOps() throws Exception { StorageDescriptor psd = new StorageDescriptor(sd); psd.setLocation("file:/tmp/default/hit/ds=" + partVal); Partition part = new Partition(partVal, dbName, tableName, (int) now, (int) now, psd, - Collections. 
emptyMap()); + Collections.emptyMap()); store.addPartition(part); ColumnStatistics cs = new ColumnStatistics(); ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, dbName, tableName); @@ -191,11 +189,11 @@ public void checkStats(AggrStats aggrStats) throws Exception { } - private static interface Checker { + private interface Checker { void checkStats(AggrStats aggrStats) throws Exception; } - public static void dropAllStoreObjects(RawStore store) throws MetaException, + private static void dropAllStoreObjects(RawStore store) throws MetaException, InvalidObjectException, InvalidInputException { try { Deadline.registerIfNot(100000); diff --git metastore/src/test/org/apache/hadoop/hive/metastore/VerifyingObjectStore.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/VerifyingObjectStore.java similarity index 99% rename from metastore/src/test/org/apache/hadoop/hive/metastore/VerifyingObjectStore.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/VerifyingObjectStore.java index a8c7ac3bfc..150b6ca919 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/VerifyingObjectStore.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/VerifyingObjectStore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -74,7 +74,7 @@ public VerifyingObjectStore() { @Override public boolean getPartitionsByExpr(String dbName, String tblName, byte[] expr, String defaultPartitionName, short maxParts, List result) throws TException { - List ormParts = new LinkedList(); + List ormParts = new LinkedList<>(); boolean sqlResult = getPartitionsByExprInternal( dbName, tblName, expr, defaultPartitionName, maxParts, result, true, false); boolean ormResult = getPartitionsByExprInternal(