From 083b17a6d81d5b4591fae0183a59403b03a0be7c Mon Sep 17 00:00:00 2001
From: Alexander Kolbasov
Date: Tue, 14 Aug 2018 13:38:05 -0700
Subject: [PATCH 1/1] HIVE-20387: Move non-server related methods from Warehouse to MetaStoreUtils
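
Move the non-server helper constants and methods (DEFAULT_DATABASE_NAME,
DEFAULT_CATALOG_NAME, getQualifiedName, makePartName, makePartPath,
makeSpecFromName, makeSpecFromValues) from the server-side Warehouse class
to org.apache.hadoop.hive.metastore.utils.MetaStoreUtils, and update all
call sites. The change to callers is mechanical; an illustrative sketch
(variable names are examples only, see the diff for the real call sites):

    // before: helper methods lived on the server-side Warehouse class
    String qualifiedName = Warehouse.getQualifiedName(table);
    String partName = Warehouse.makePartName(partKeys, partVals);

    // after: the same helpers, now on the client-side MetaStoreUtils class
    String qualifiedName = MetaStoreUtils.getQualifiedName(table);
    String partName = MetaStoreUtils.makePartName(partKeys, partVals);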
---
 .../hadoop/hive/cli/TestCliSessionState.java | 4 +-
 .../hadoop/hive/druid/DruidStorageHandler.java | 3 +-
 .../org/apache/hive/hcatalog/cli/HCatDriver.java | 3 +-
 .../org/apache/hive/hcatalog/common/HCatUtil.java | 4 +-
 .../mapreduce/FileOutputCommitterContainer.java | 4 +-
 .../hive/hcatalog/mapreduce/HCatTableInfo.java | 4 +-
 .../hive/hcatalog/mapreduce/InputJobInfo.java | 4 +-
 .../hive/hcatalog/mapreduce/OutputJobInfo.java | 4 +-
 .../org/apache/hive/hcatalog/cli/TestPermsGrp.java | 5 +-
 .../hive/hcatalog/cli/TestSemanticAnalysis.java | 24 +-
 .../hive/hcatalog/mapreduce/HCatMapReduceTest.java | 8 +-
 .../mapreduce/TestHCatPartitionPublish.java | 5 +-
 .../hcatalog/mapreduce/TestPassProperties.java | 4 +-
 .../org/apache/hive/hcatalog/pig/PigHCatUtil.java | 4 +-
 .../hcatalog/pig/TestHCatLoaderEncryption.java | 4 +-
 .../hcatalog/listener/DbNotificationListener.java | 5 +-
 .../hive/hcatalog/streaming/HiveEndPoint.java | 12 +-
 .../mutate/worker/MetaStorePartitionHelper.java | 4 +-
 .../streaming/mutate/StreamingTestUtils.java | 4 +-
 .../hive/hcatalog/api/HCatClientHMSImpl.java | 4 +-
 .../apache/hive/hcatalog/api/HCatPartition.java | 6 +-
 .../org/apache/hive/hcatalog/api/HCatTable.java | 6 +-
 .../apache/hive/hcatalog/api/TestHCatClient.java | 6 +-
 .../mapreduce/TestSequenceFileReadWrite.java | 8 +-
 .../metastore/tools/TestSchemaToolCatalogOps.java | 4 +-
 .../hadoop/hive/ql/history/TestHiveHistory.java | 4 +-
 .../metadata/TestSemanticAnalyzerHookLoading.java | 4 +-
 .../hive/ql/parse/TestReplicationScenarios.java | 3 +-
 .../hadoop/hive/ql/parse/WarehouseInstance.java | 6 +-
 .../hadoop/hive/ql/MetaStoreDumpUtility.java | 4 +-
 .../java/org/apache/hadoop/hive/ql/QTestUtil.java | 6 +-
 ql/src/java/org/apache/hadoop/hive/ql/Driver.java | 7 +-
 .../apache/hadoop/hive/ql/exec/ArchiveUtils.java | 7 +-
 .../org/apache/hadoop/hive/ql/exec/DDLTask.java | 10 +-
 .../hadoop/hive/ql/exec/MaterializedViewTask.java | 2 -
 .../org/apache/hadoop/hive/ql/exec/Utilities.java | 4 +-
 .../bootstrap/events/filesystem/FSTableEvent.java | 6 +-
 .../repl/bootstrap/load/table/LoadPartitions.java | 3 +-
 .../hive/ql/hooks/EnforceReadOnlyTables.java | 2 +-
 .../apache/hadoop/hive/ql/hooks/LineageInfo.java | 6 +-
 .../apache/hadoop/hive/ql/hooks/LineageLogger.java | 4 +-
 .../org/apache/hadoop/hive/ql/metadata/Hive.java | 7 +-
 .../hive/ql/metadata/HiveMetaStoreChecker.java | 6 +-
 .../apache/hadoop/hive/ql/metadata/Partition.java | 7 +-
 .../ql/metadata/SessionHiveMetaStoreClient.java | 30 +-
 .../org/apache/hadoop/hive/ql/metadata/Table.java | 5 +-
 .../hive/ql/optimizer/lineage/OpProcFactory.java | 4 +-
 .../hive/ql/optimizer/ppr/PartitionPruner.java | 4 +-
 .../hadoop/hive/ql/parse/CalcitePlanner.java | 7 +-
 .../hadoop/hive/ql/parse/DDLSemanticAnalyzer.java | 5 +-
 .../hive/ql/parse/ImportSemanticAnalyzer.java | 11 +-
 .../hive/ql/parse/MacroSemanticAnalyzer.java | 4 +-
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java | 2 +-
 .../ql/parse/UpdateDeleteSemanticAnalyzer.java | 10 +-
 .../ql/plan/ConditionalResolverMergeFiles.java | 4 +-
 .../hadoop/hive/ql/plan/DynamicPartitionCtx.java | 4 +-
 .../hadoop/hive/ql/plan/TruncateTableDesc.java | 4 +-
 .../authorization/plugin/HiveV1Authorizer.java | 4 +-
 .../plugin/sqlstd/SQLAuthorizationUtils.java | 4 +-
 .../hadoop/hive/ql/session/SessionState.java | 5 +-
 .../hadoop/hive/ql/stats/BasicStatsTask.java | 2 +-
 .../hadoop/hive/ql/stats/ColStatsProcessor.java | 4 +-
 .../hadoop/hive/ql/stats/StatsUpdaterThread.java | 6 +-
 .../hadoop/hive/ql/txn/compactor/Cleaner.java | 3 +-
 .../hive/ql/txn/compactor/CompactorThread.java | 4 +-
 .../hadoop/hive/ql/txn/compactor/Worker.java | 4 +-
 .../hive/ql/util/HiveStrictManagedMigration.java | 3 +-
 .../apache/hadoop/hive/ql/util/UpgradeTool.java | 45 ++-
 .../apache/hadoop/hive/ql/exec/TestExecDriver.java | 22 +-
 .../apache/hadoop/hive/ql/metadata/TestHive.java | 27 +-
 .../hadoop/hive/ql/session/TestSessionState.java | 6 +-
 .../hadoop/hive/metastore/AcidEventListener.java | 5 +-
 .../hadoop/hive/metastore/HiveAlterHandler.java | 8 +-
 .../hadoop/hive/metastore/HiveMetaStore.java | 40 +--
 .../hadoop/hive/metastore/HiveMetaStoreClient.java | 2 +-
 .../hadoop/hive/metastore/IMetaStoreClient.java | 13 +-
 .../MaterializationsRebuildLockHandler.java | 9 +-
 .../hadoop/hive/metastore/MetaStoreDirectSql.java | 2 +-
 .../apache/hadoop/hive/metastore/ObjectStore.java | 32 +--
 .../hadoop/hive/metastore/StatObjectConverter.java | 3 +-
 .../metastore/TransactionalValidationListener.java | 33 ++-
 .../apache/hadoop/hive/metastore/Warehouse.java | 314 +--------------------
 .../hadoop/hive/metastore/cache/CachedStore.java | 17 +-
 .../hadoop/hive/metastore/cache/SharedCache.java | 6 +-
 .../client/builder/ConstraintBuilder.java | 4 +-
 .../metastore/client/builder/FunctionBuilder.java | 3 +-
 .../metastore/client/builder/ISchemaBuilder.java | 6 +-
 .../metastore/client/builder/PartitionBuilder.java | 3 +-
 .../client/builder/SQLForeignKeyBuilder.java | 4 +-
 .../client/builder/SchemaVersionBuilder.java | 4 +-
 .../metastore/client/builder/TableBuilder.java | 3 +-
 .../hive/metastore/parser/ExpressionTree.java | 3 +-
 .../spec/CompositePartitionSpecProxy.java | 2 +-
 .../hadoop/hive/metastore/txn/TxnHandler.java | 14 +-
 .../hive/metastore/utils/MetaStoreUtils.java | 304 +++++++++++++++++++-
 .../metastore/HiveMetaStoreClientPreCatalog.java | 2 +-
 .../hadoop/hive/metastore/MetaStoreTestUtils.java | 2 +-
 .../hive/metastore/NonCatCallsWithCatalog.java | 2 +-
 .../hive/metastore/TestAggregateStatsCache.java | 2 +-
 .../hive/metastore/TestCatalogOldClient.java | 2 +-
 .../hadoop/hive/metastore/TestFilterHooks.java | 5 +-
 .../hadoop/hive/metastore/TestHiveMetaStore.java | 5 +-
 .../metastore/TestHiveMetaStoreSchemaMethods.java | 4 +-
 .../TestHiveMetaStoreWithEnvironmentContext.java | 2 +-
 .../TestMetaStoreEndFunctionListener.java | 4 +-
 .../TestMetaStoreEventListenerOnlyOnCommit.java | 4 +-
 .../hadoop/hive/metastore/TestObjectStore.java | 5 +-
 .../metastore/TestObjectStoreSchemaMethods.java | 4 +-
 .../hadoop/hive/metastore/TestOldSchema.java | 3 +-
 .../apache/hadoop/hive/metastore/TestStats.java | 8 +-
 .../hive/metastore/VerifyingObjectStore.java | 1 -
 .../hive/metastore/cache/TestCachedStore.java | 5 +-
 .../hive/metastore/cache/TestCatalogCaching.java | 5 +-
 .../hive/metastore/client/TestAlterPartitions.java | 4 +-
 .../hadoop/hive/metastore/client/TestCatalogs.java | 12 +-
 .../hive/metastore/client/TestCheckConstraint.java | 4 +-
 .../hive/metastore/client/TestDatabases.java | 16 +-
 .../metastore/client/TestDefaultConstraint.java | 4 +-
 .../hive/metastore/client/TestDropPartitions.java | 2 -
 .../metastore/client/TestExchangePartitions.java | 12 +-
 .../hive/metastore/client/TestForeignKey.java | 2 +-
 .../hive/metastore/client/TestFunctions.java | 2 +-
 .../hive/metastore/client/TestGetPartitions.java | 1 -
 .../metastore/client/TestNotNullConstraint.java | 4 +-
 .../hive/metastore/client/TestPrimaryKey.java | 2 +-
 .../client/TestTablesCreateDropAlterTruncate.java | 5 +-
 .../hive/metastore/client/TestTablesGetExists.java | 2 +-
 .../hive/metastore/client/TestTablesList.java | 2 +-
 .../metastore/client/TestUniqueConstraint.java | 4 +-
 .../hive/streaming/AbstractRecordWriter.java | 4 +-
 .../hive/streaming/HiveStreamingConnection.java | 10 +-
 131 files changed, 711 insertions(+), 754 deletions(-)

diff --git a/cli/src/test/org/apache/hadoop/hive/cli/TestCliSessionState.java b/cli/src/test/org/apache/hadoop/hive/cli/TestCliSessionState.java
index c159142954..00f07c935e 100644
--- a/cli/src/test/org/apache/hadoop/hive/cli/TestCliSessionState.java
+++ b/cli/src/test/org/apache/hadoop/hive/cli/TestCliSessionState.java
@@ -20,7 +20,7 @@
 import static org.junit.Assert.assertEquals;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.junit.Test;
@@ -35,7 +35,7 @@
 @Test
 public void testgetDbName() throws Exception {
 SessionState.start(new HiveConf());
- assertEquals(Warehouse.DEFAULT_DATABASE_NAME,
+ assertEquals(MetaStoreUtils.DEFAULT_DATABASE_NAME,
 SessionState.get().getCurrentDatabase());
 }
 }
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandler.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandler.java
index 9f34b7b6fe..92456fb8fe 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandler.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandler.java
@@ -76,7 +76,6 @@
 import org.apache.hadoop.hive.druid.serde.DruidSerDe;
 import org.apache.hadoop.hive.metastore.DefaultHiveMetaHook;
 import org.apache.hadoop.hive.metastore.HiveMetaHook;
-import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.LockType;
@@ -230,7 +229,7 @@ public void preCreateTable(Table table) throws MetaException {
 }
 // create dataSourceName based on Hive Table name
- dataSourceName = Warehouse.getQualifiedName(table);
+ dataSourceName = MetaStoreUtils.getQualifiedName(table);
 try {
 // NOTE: This just created druid_segments table in Druid metastore.
 // This is needed for the case when hive is started before any of druid services
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/HCatDriver.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/HCatDriver.java
index 6a7b9390de..338dc9d302 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/HCatDriver.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/HCatDriver.java
@@ -26,6 +26,7 @@
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.DriverFactory;
 import org.apache.hadoop.hive.ql.IDriver;
 import org.apache.hadoop.hive.ql.metadata.Hive;
@@ -115,7 +116,7 @@ private int setFSPermsNGrp(SessionState ss, HiveConf conf) {
 }
 } else {
 // looks like a db operation
- if (dbName.isEmpty() || dbName.equals(Warehouse.DEFAULT_DATABASE_NAME)) {
+ if (dbName.isEmpty() || dbName.equals(MetaStoreUtils.DEFAULT_DATABASE_NAME)) {
 // We dont set perms or groups for default dir.
 return 0;
 } else {
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatUtil.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatUtil.java
index 8e72a1275a..d0fc96c2c7 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatUtil.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatUtil.java
@@ -45,11 +45,11 @@
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
 import org.apache.hadoop.hive.metastore.IMetaStoreClient;
-import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.metastore.security.DelegationTokenIdentifier;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat;
 import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler;
@@ -437,7 +437,7 @@ public static HiveStorageHandler getStorageHandler(Configuration conf,
 public static Pair getDbAndTableName(String tableName) throws IOException {
 String[] dbTableNametokens = tableName.split("\\.");
 if (dbTableNametokens.length == 1) {
- return new Pair(Warehouse.DEFAULT_DATABASE_NAME, tableName);
+ return new Pair(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName);
 } else if (dbTableNametokens.length == 2) {
 return new Pair(dbTableNametokens[0], dbTableNametokens[1]);
 } else {
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java
index 4a76010904..902364a923 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java
@@ -38,13 +38,13 @@
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.io.HdfsUtils;
 import org.apache.hadoop.hive.metastore.IMetaStoreClient;
-import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.shims.ShimLoader;
@@ -710,7 +710,7 @@ private void discoverPartitions(JobContext context) throws IOException {
 for (FileStatus st : status) {
 LinkedHashMap fullPartSpec = new LinkedHashMap();
 if (!customDynamicLocationUsed) {
- Warehouse.makeSpecFromName(fullPartSpec, st.getPath(), null);
+ MetaStoreUtils.makeSpecFromName(fullPartSpec, st.getPath(), null);
 } else {
 HCatFileUtil.getPartKeyValuesForCustomLocation(fullPartSpec, jobInfo,
 st.getPath().toString());
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatTableInfo.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatTableInfo.java
index 0ba23e7500..125ebdd5d1 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatTableInfo.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatTableInfo.java
@@ -24,8 +24,8 @@
 import java.util.List;
 import com.google.common.collect.Lists;
-import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hive.hcatalog.common.HCatUtil;
 import org.apache.hive.hcatalog.data.schema.HCatFieldSchema;
 import org.apache.hive.hcatalog.data.schema.HCatSchema;
@@ -76,7 +76,7 @@
 HCatSchema partitionColumns,
 StorerInfo storerInfo,
 Table table) {
- this.databaseName = (databaseName == null) ? Warehouse.DEFAULT_DATABASE_NAME : databaseName;
+ this.databaseName = (databaseName == null) ? MetaStoreUtils.DEFAULT_DATABASE_NAME : databaseName;
 this.tableName = tableName;
 this.dataColumns = dataColumns;
 this.table = table;
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/InputJobInfo.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/InputJobInfo.java
index ac1dd54be8..b27b3e4fd3 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/InputJobInfo.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/InputJobInfo.java
@@ -20,7 +20,7 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import java.io.IOException;
 import java.io.ObjectInputStream;
@@ -84,7 +84,7 @@ private InputJobInfo(String databaseName,
 String filter,
 Properties properties) {
 this.databaseName = (databaseName == null) ?
- Warehouse.DEFAULT_DATABASE_NAME : databaseName;
+ MetaStoreUtils.DEFAULT_DATABASE_NAME : databaseName;
 this.tableName = tableName;
 this.filter = filter;
 this.properties = properties == null ? new Properties() : properties;
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/OutputJobInfo.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/OutputJobInfo.java
index d05debf05f..423889550e 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/OutputJobInfo.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/OutputJobInfo.java
@@ -28,7 +28,7 @@
 import org.apache.hadoop.hive.common.classification.InterfaceAudience;
 import org.apache.hadoop.hive.common.classification.InterfaceStability;
-import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hive.hcatalog.data.schema.HCatSchema;
 /** The class used to serialize and store the output related information */
@@ -95,7 +95,7 @@ public static OutputJobInfo create(String databaseName,
 private OutputJobInfo(String databaseName,
 String tableName,
 Map partitionValues) {
- this.databaseName = (databaseName == null) ? Warehouse.DEFAULT_DATABASE_NAME : databaseName;
+ this.databaseName = (databaseName == null) ? MetaStoreUtils.DEFAULT_DATABASE_NAME : databaseName;
 this.tableName = tableName;
 this.partitionValues = partitionValues;
 this.properties = new Properties();
diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java
index 3cf172b5ea..52e66e0c59 100644
--- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java
+++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java
@@ -46,6 +46,7 @@
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.api.Type;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.io.HiveInputFormat;
 import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
 import org.apache.hadoop.hive.ql.metadata.Hive;
@@ -106,7 +107,7 @@ protected void setUp() throws Exception {
 public void testCustomPerms() throws Exception {
- String dbName = Warehouse.DEFAULT_DATABASE_NAME;
+ String dbName = MetaStoreUtils.DEFAULT_DATABASE_NAME;
 String tblName = "simptbl";
 String typeName = "Person";
@@ -145,7 +146,7 @@ public void testCustomPerms() throws Exception {
 // And no metadata gets created.
 try {
- msc.getTable(Warehouse.DEFAULT_DATABASE_NAME, tblName);
+ msc.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tblName);
 fail();
 } catch (Exception e) {
 assertTrue(e instanceof NoSuchObjectException);
diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestSemanticAnalysis.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestSemanticAnalysis.java
index d6386ab67e..4ddfa4e085 100644
--- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestSemanticAnalysis.java
+++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestSemanticAnalysis.java
@@ -20,10 +20,10 @@
 import org.apache.hadoop.hive.cli.CliSessionState;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.DriverFactory;
 import org.apache.hadoop.hive.ql.IDriver;
 import org.apache.hadoop.hive.ql.ErrorMsg;
@@ -92,7 +92,7 @@ public void testCreateTblWithLowerCasePartNames() throws Exception {
 CommandProcessorResponse resp = driver.run("create table junit_sem_analysis (a int) partitioned by (B string) stored as TEXTFILE");
 assertEquals(resp.getResponseCode(), 0);
 assertEquals(null, resp.getErrorMessage());
- Table tbl = client.getTable(Warehouse.DEFAULT_DATABASE_NAME, TBL_NAME);
+ Table tbl = client.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, TBL_NAME);
 assertEquals("Partition key name case problem", "b", tbl.getPartitionKeys().get(0).getName());
 driver.run("drop table junit_sem_analysis");
 }
@@ -105,13 +105,13 @@ public void testAlterTblFFpart() throws Exception {
 driver.run("alter table junit_sem_analysis add partition (b='2010-10-10')");
 hcatDriver.run("alter table junit_sem_analysis partition (b='2010-10-10') set fileformat RCFILE");
- Table tbl = client.getTable(Warehouse.DEFAULT_DATABASE_NAME, TBL_NAME);
+ Table tbl = client.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, TBL_NAME);
 assertEquals(TextInputFormat.class.getName(), tbl.getSd().getInputFormat());
 assertEquals(HiveIgnoreKeyTextOutputFormat.class.getName(), tbl.getSd().getOutputFormat());
 List partVals = new ArrayList(1);
 partVals.add("2010-10-10");
- Partition part = client.getPartition(Warehouse.DEFAULT_DATABASE_NAME, TBL_NAME, partVals);
+ Partition part = client.getPartition(MetaStoreUtils.DEFAULT_DATABASE_NAME, TBL_NAME, partVals);
 assertEquals(RCFileInputFormat.class.getName(), part.getSd().getInputFormat());
 assertEquals(RCFileOutputFormat.class.getName(), part.getSd().getOutputFormat());
@@ -158,7 +158,7 @@ public void testCreateTableIfNotExists() throws Exception {
 hcatDriver.run("drop table " + TBL_NAME);
 hcatDriver.run("create table " + TBL_NAME + " (a int) stored as RCFILE");
- Table tbl = client.getTable(Warehouse.DEFAULT_DATABASE_NAME, TBL_NAME);
+ Table tbl = client.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, TBL_NAME);
 List cols = tbl.getSd().getCols();
 assertEquals(1, cols.size());
 assertTrue(cols.get(0).equals(new FieldSchema("a", "int", null)));
@@ -168,7 +168,7 @@
 CommandProcessorResponse resp = hcatDriver.run("create table if not exists junit_sem_analysis (a int) stored as RCFILE");
 assertEquals(0, resp.getResponseCode());
 assertNull(resp.getErrorMessage());
- tbl = client.getTable(Warehouse.DEFAULT_DATABASE_NAME, TBL_NAME);
+ tbl = client.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, TBL_NAME);
 cols = tbl.getSd().getCols();
 assertEquals(1, cols.size());
 assertTrue(cols.get(0).equals(new FieldSchema("a", "int", null)));
@@ -221,7 +221,7 @@ public void testAddReplaceCols() throws Exception {
 response = hcatDriver.run("describe extended junit_sem_analysis");
 assertEquals(0, response.getResponseCode());
- Table tbl = client.getTable(Warehouse.DEFAULT_DATABASE_NAME, TBL_NAME);
+ Table tbl = client.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, TBL_NAME);
 List cols = tbl.getSd().getCols();
 assertEquals(2, cols.size());
 assertTrue(cols.get(0).equals(new FieldSchema("a1", "tinyint", null)));
@@ -244,11 +244,11 @@ public void testAlterTableRename() throws Exception {
 hcatDriver.run("drop table oldname");
 hcatDriver.run("drop table newname");
 hcatDriver.run("create table oldname (a int)");
- Table tbl = client.getTable(Warehouse.DEFAULT_DATABASE_NAME, "oldname");
+ Table tbl = client.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, "oldname");
 assertTrue("The old table location is: " + tbl.getSd().getLocation(), tbl.getSd().getLocation().contains("oldname"));
 hcatDriver.run("alter table oldname rename to newNAME");
- tbl = client.getTable(Warehouse.DEFAULT_DATABASE_NAME, "newname");
+ tbl = client.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, "newname");
 // since the oldname table is not under its database (See HIVE-15059), the renamed oldname table will keep
 // its location after HIVE-14909. I changed to check the existence of the newname table and its name instead
 // of verifying its location
@@ -265,7 +265,7 @@ public void testAlterTableSetFF() throws Exception {
 hcatDriver.run("drop table junit_sem_analysis");
 hcatDriver.run("create table junit_sem_analysis (a int) partitioned by (b string) stored as RCFILE");
- Table tbl = client.getTable(Warehouse.DEFAULT_DATABASE_NAME, TBL_NAME);
+ Table tbl = client.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, TBL_NAME);
 assertEquals(RCFileInputFormat.class.getName(), tbl.getSd().getInputFormat());
 assertEquals(RCFileOutputFormat.class.getName(), tbl.getSd().getOutputFormat());
@@ -273,7 +273,7 @@
 "'org.apache.hadoop.hive.ql.io.RCFileOutputFormat' inputdriver 'mydriver' outputdriver 'yourdriver'");
 hcatDriver.run("desc extended junit_sem_analysis");
- tbl = client.getTable(Warehouse.DEFAULT_DATABASE_NAME, TBL_NAME);
+ tbl = client.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, TBL_NAME);
 assertEquals(RCFileInputFormat.class.getName(), tbl.getSd().getInputFormat());
 assertEquals(RCFileOutputFormat.class.getName(), tbl.getSd().getOutputFormat());
@@ -329,7 +329,7 @@ public void testAddDriverInfo() throws Exception {
 "'org.apache.hadoop.hive.ql.io.RCFileOutputFormat' inputdriver 'mydriver' outputdriver 'yourdriver' ";
 assertEquals(0, hcatDriver.run(query).getResponseCode());
- Table tbl = client.getTable(Warehouse.DEFAULT_DATABASE_NAME, TBL_NAME);
+ Table tbl = client.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, TBL_NAME);
 assertEquals(RCFileInputFormat.class.getName(), tbl.getSd().getInputFormat());
 assertEquals(RCFileOutputFormat.class.getName(), tbl.getSd().getOutputFormat());
diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/HCatMapReduceTest.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/HCatMapReduceTest.java
index e16674d99f..bb10494952 100644
--- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/HCatMapReduceTest.java
+++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/HCatMapReduceTest.java
@@ -37,12 +37,12 @@
 import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.TableType;
-import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.SerDeInfo;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.io.StorageFormats;
 import org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe;
 import org.apache.hadoop.hive.serde.serdeConstants;
@@ -88,7 +88,7 @@ public abstract class HCatMapReduceTest extends HCatBaseTest {
 private static final Logger LOG = LoggerFactory.getLogger(HCatMapReduceTest.class);
- protected static String dbName = Warehouse.DEFAULT_DATABASE_NAME;
+ protected static String dbName = MetaStoreUtils.DEFAULT_DATABASE_NAME;
 protected static final String TABLE_NAME = "testHCatMapReduceTable";
 private static List writeRecords = new ArrayList();
@@ -155,7 +155,7 @@ public static void setUpOneTime() throws Exception {
 @After
 public void deleteTable() throws Exception {
 try {
- String databaseName = (dbName == null) ? Warehouse.DEFAULT_DATABASE_NAME : dbName;
+ String databaseName = (dbName == null) ? MetaStoreUtils.DEFAULT_DATABASE_NAME : dbName;
 client.dropTable(databaseName, tableName);
 // in case of external table, drop the table contents as well
@@ -176,7 +176,7 @@ public void createTable() throws Exception {
 // SerDe is in the disabled serdes list.
 Assume.assumeTrue(!DISABLED_SERDES.contains(serdeClass));
- String databaseName = (dbName == null) ? Warehouse.DEFAULT_DATABASE_NAME : dbName;
+ String databaseName = (dbName == null) ? MetaStoreUtils.DEFAULT_DATABASE_NAME : dbName;
 try {
 client.dropTable(databaseName, tableName);
 } catch (Exception e) {
diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java
index 271709387b..f9afab019d 100644
--- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java
+++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java
@@ -38,13 +38,12 @@
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
 import org.apache.hadoop.hive.metastore.MetaStoreTestUtils;
-import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.SerDeInfo;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
-import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.io.RCFileInputFormat;
 import org.apache.hadoop.hive.ql.io.RCFileOutputFormat;
 import org.apache.hadoop.hive.serde.serdeConstants;
@@ -234,7 +233,7 @@ public void map(LongWritable key, Text value, Context context)
 }
 private void createTable(String dbName, String tableName) throws Exception {
- String databaseName = (dbName == null) ? Warehouse.DEFAULT_DATABASE_NAME
+ String databaseName = (dbName == null) ? MetaStoreUtils.DEFAULT_DATABASE_NAME
 : dbName;
 try {
 msc.dropTable(databaseName, tableName);
diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestPassProperties.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestPassProperties.java
index 332c240e52..c7ca253866 100644
--- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestPassProperties.java
+++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestPassProperties.java
@@ -29,7 +29,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.cli.CliSessionState;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.DriverFactory;
 import org.apache.hadoop.hive.ql.IDriver;
 import org.apache.hadoop.hive.ql.session.SessionState;
@@ -102,7 +102,7 @@ public void testSequenceTableWriteReadMR() throws Exception {
 TextInputFormat.setInputPaths(job, INPUT_FILE_NAME);
 HCatOutputFormat.setOutput(job, OutputJobInfo.create(
- Warehouse.DEFAULT_DATABASE_NAME, "bad_props_table", null));
+ MetaStoreUtils.DEFAULT_DATABASE_NAME, "bad_props_table", null));
 job.setOutputFormatClass(HCatOutputFormat.class);
 HCatOutputFormat.setSchema(job, getSchema());
 job.setNumReduceTasks(0);
diff --git a/hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/PigHCatUtil.java b/hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/PigHCatUtil.java
index afe6e92163..c5117d9855 100644
--- a/hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/PigHCatUtil.java
+++ b/hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/PigHCatUtil.java
@@ -36,8 +36,8 @@
 import org.apache.hadoop.hive.common.type.Timestamp;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.IMetaStoreClient;
-import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hive.hcatalog.common.HCatConstants;
@@ -71,7 +71,7 @@
 private static final Logger LOG = LoggerFactory.getLogger(PigHCatUtil.class);
 static final int PIG_EXCEPTION_CODE = 1115; // http://wiki.apache.org/pig/PigErrorHandlingFunctionalSpecification#Error_codes
- private static final String DEFAULT_DB = Warehouse.DEFAULT_DATABASE_NAME;
+ private static final String DEFAULT_DB = MetaStoreUtils.DEFAULT_DATABASE_NAME;
 private final Map, Table> hcatTableCache = new HashMap, Table>();
diff --git a/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderEncryption.java b/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderEncryption.java
index 872d7afd23..3b9d4e83f5 100644
--- a/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderEncryption.java
+++ b/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderEncryption.java
@@ -44,7 +44,7 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.cli.CliSessionState;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.DriverFactory;
 import org.apache.hadoop.hive.ql.IDriver;
 import org.apache.hadoop.hive.ql.io.StorageFormats;
@@ -333,7 +333,7 @@ public void testReadDataFromEncryptedHiveTableByHCatMR() throws Exception {
 job.setInputFormatClass(HCatInputFormat.class);
 job.setOutputFormatClass(TextOutputFormat.class);
- HCatInputFormat.setInput(job, Warehouse.DEFAULT_DATABASE_NAME, ENCRYPTED_TABLE, null);
+ HCatInputFormat.setInput(job, MetaStoreUtils.DEFAULT_DATABASE_NAME, ENCRYPTED_TABLE, null);
 job.setMapOutputKeyClass(BytesWritable.class);
 job.setMapOutputValueClass(Text.class);
diff --git a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
index 2ab59d7a02..f427cd916e 100644
--- a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
+++ b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
@@ -39,7 +39,6 @@
 import org.apache.hadoop.hive.metastore.RawStoreProxy;
 import org.apache.hadoop.hive.metastore.ReplChangeManager;
 import org.apache.hadoop.hive.metastore.TransactionalMetaStoreEventListener;
-import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.Function;
 import org.apache.hadoop.hive.metastore.api.MetaException;
@@ -89,7 +88,7 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import com.google.common.collect.Lists;
-import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DEFAULT_CATALOG_NAME;
 import static org.apache.hadoop.hive.metastore.DatabaseProduct.MYSQL;
 /**
@@ -285,7 +284,7 @@ public PartitionFiles next() {
 fileIterator = Collections.emptyIterator();
 }
 PartitionFiles partitionFiles =
- new PartitionFiles(Warehouse.makePartName(t.getPartitionKeys(), p.getValues()), fileIterator);
+ new PartitionFiles(MetaStoreUtils.makePartName(t.getPartitionKeys(), p.getValues()), fileIterator);
 return partitionFiles;
 } catch (MetaException e) {
 throw new RuntimeException(e);
diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
index 3604630c43..38589f5ec8 100644
--- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
+++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
@@ -24,32 +24,28 @@
 import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
 import org.apache.hadoop.hive.metastore.api.DataOperationType;
 import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.io.AcidUtils;
 import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.plan.AddPartitionDesc;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.apache.hadoop.hive.cli.CliSessionState;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.IMetaStoreClient;
 import org.apache.hadoop.hive.metastore.LockComponentBuilder;
 import org.apache.hadoop.hive.metastore.LockRequestBuilder;
-import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeResponse;
 import org.apache.hadoop.hive.metastore.api.LockRequest;
 import org.apache.hadoop.hive.metastore.api.LockResponse;
 import org.apache.hadoop.hive.metastore.api.LockState;
 import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.metastore.api.NoSuchTxnException;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.api.TxnAbortedException;
 import org.apache.hadoop.hive.metastore.api.TxnToWriteId;
-import org.apache.hadoop.hive.ql.DriverFactory;
 import org.apache.hadoop.hive.ql.IDriver;
-import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hive.hcatalog.common.HCatUtil;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -465,11 +461,11 @@ private static void createPartitionIfNotExists(HiveEndPoint ep,
 org.apache.hadoop.hive.ql.metadata.Table tableObject =
 new org.apache.hadoop.hive.ql.metadata.Table(msClient.getTable(ep.database, ep.table));
 Map partSpec =
- Warehouse.makeSpecFromValues(tableObject.getPartitionKeys(), ep.partitionVals);
+ MetaStoreUtils.makeSpecFromValues(tableObject.getPartitionKeys(), ep.partitionVals);
 AddPartitionDesc addPartitionDesc = new AddPartitionDesc(ep.database, ep.table, true);
 String partLocation = new Path(tableObject.getDataLocation(),
- Warehouse.makePartPath(partSpec)).toString();
+ MetaStoreUtils.makePartPath(partSpec)).toString();
 addPartitionDesc.addPartition(partSpec, partLocation);
 Partition partition = Hive.convertAddSpecToMetaPartition(tableObject,
 addPartitionDesc.getPartition(0), conf);
@@ -583,7 +579,7 @@ private TransactionBatchImpl(final String user, UserGroupInformation ugi, HiveEn
 if ( endPt.partitionVals!=null && !endPt.partitionVals.isEmpty() ) {
 Table tableObj = msClient.getTable(endPt.database, endPt.table);
 List partKeys = tableObj.getPartitionKeys();
- partNameForLock = Warehouse.makePartName(partKeys, endPt.partitionVals);
+ partNameForLock = MetaStoreUtils.makePartName(partKeys, endPt.partitionVals);
 } else {
 partNameForLock = null;
 }
diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/MetaStorePartitionHelper.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/MetaStorePartitionHelper.java
index fb88f2d081..97e7d91dc0 100644
--- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/MetaStorePartitionHelper.java
+++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/MetaStorePartitionHelper.java
@@ -22,12 +22,12 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.metastore.IMetaStoreClient;
-import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.thrift.TException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -94,7 +94,7 @@ public void createPartitionIfNotExists(List newPartitionValues) throws W
 partition.setTableName(table.getTableName());
 StorageDescriptor partitionSd = new StorageDescriptor(table.getSd());
 partitionSd.setLocation(table.getSd().getLocation() + Path.SEPARATOR
- + Warehouse.makePartName(table.getPartitionKeys(), newPartitionValues));
+ + MetaStoreUtils.makePartName(table.getPartitionKeys(), newPartitionValues));
 partition.setSd(partitionSd);
 partition.setValues(newPartitionValues);
diff --git a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/StreamingTestUtils.java b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/StreamingTestUtils.java
index 63690f9a24..148357c857 100644
--- a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/StreamingTestUtils.java
+++ b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/StreamingTestUtils.java
@@ -37,7 +37,6 @@
 import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
 import org.apache.hadoop.hive.metastore.IMetaStoreClient;
 import org.apache.hadoop.hive.metastore.TableType;
-import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.Partition;
@@ -45,6 +44,7 @@
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.txn.TxnDbUtil;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat;
 import org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat;
 import org.apache.hadoop.hive.ql.io.orc.OrcSerde;
@@ -268,7 +268,7 @@ private Table internalCreate(IMetaStoreClient metaStoreClient) throws Exception
 partition.setTableName(table.getTableName());
 StorageDescriptor partitionSd = new StorageDescriptor(table.getSd());
 partitionSd.setLocation(table.getSd().getLocation() + Path.SEPARATOR
- + Warehouse.makePartName(table.getPartitionKeys(), partitionValues));
+ + MetaStoreUtils.makePartName(table.getPartitionKeys(), partitionValues));
 partition.setSd(partitionSd);
 partition.setValues(partitionValues);
diff --git a/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java b/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java
index a06191d87b..6410553232 100644
--- a/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java
+++ b/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java
@@ -34,7 +34,6 @@
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.IMetaStoreClient;
 import org.apache.hadoop.hive.metastore.TableType;
-import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
 import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
 import org.apache.hadoop.hive.metastore.api.Database;
@@ -51,6 +50,7 @@
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.api.UnknownDBException;
 import org.apache.hadoop.hive.metastore.api.UnknownTableException;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.utils.ObjectPair;
 import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
 import org.apache.hadoop.hive.ql.exec.SerializationUtilities;
@@ -890,7 +890,7 @@ public void close() throws HCatException {
 private String checkDB(String name) {
 if (StringUtils.isEmpty(name)) {
- return Warehouse.DEFAULT_DATABASE_NAME;
+ return MetaStoreUtils.DEFAULT_DATABASE_NAME;
 } else {
 return name;
 }
diff --git a/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatPartition.java b/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatPartition.java
index 796ebc5948..f305f2f1bd 100644
--- a/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatPartition.java
+++ b/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatPartition.java
@@ -27,12 +27,12 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.classification.InterfaceAudience;
 import org.apache.hadoop.hive.common.classification.InterfaceStability;
-import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.Order;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hive.hcatalog.common.HCatException;
 import org.apache.hive.hcatalog.data.schema.HCatFieldSchema;
 import org.apache.hive.hcatalog.data.schema.HCatSchemaUtils;
@@ -50,7 +50,7 @@
 private HCatTable hcatTable;
 private String tableName;
- private String dbName = Warehouse.DEFAULT_DATABASE_NAME;
+ private String dbName = MetaStoreUtils.DEFAULT_DATABASE_NAME;
 private List values;
 private int createTime;
 private int lastAccessTime;
@@ -138,7 +138,7 @@ Partition toHivePartition() throws HCatException {
 if (sd.getLocation() == null) {
 LOG.warn("Partition location is not set! Attempting to construct default partition location.");
 try {
- String partName = Warehouse.makePartName(HCatSchemaUtils.getFieldSchemas(hcatTable.getPartCols()), values);
+ String partName = MetaStoreUtils.makePartName(HCatSchemaUtils.getFieldSchemas(hcatTable.getPartCols()), values);
 sd.setLocation(new Path(hcatTable.getSd().getLocation(), partName).toString());
 } catch(MetaException exception) {
diff --git a/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatTable.java b/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatTable.java
index ed2aef4758..b3b8f692e6 100644
--- a/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatTable.java
+++ b/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatTable.java
@@ -31,12 +31,12 @@
 import org.apache.hadoop.hive.common.classification.InterfaceStability;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.TableType;
-import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.Order;
 import org.apache.hadoop.hive.metastore.api.SerDeInfo;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat;
 import org.apache.hadoop.hive.ql.io.RCFileInputFormat;
 import org.apache.hadoop.hive.ql.io.RCFileOutputFormat;
@@ -108,7 +108,7 @@
 public static final String DEFAULT_INPUT_FORMAT_CLASS = org.apache.hadoop.mapred.TextInputFormat.class.getName();
 public static final String DEFAULT_OUTPUT_FORMAT_CLASS = org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat.class.getName();
- private String dbName = Warehouse.DEFAULT_DATABASE_NAME;
+ private String dbName = MetaStoreUtils.DEFAULT_DATABASE_NAME;
 private String tableName;
 private HiveConf conf;
 private String tableType;
@@ -122,7 +122,7 @@
 private String owner;
 public HCatTable(String dbName, String tableName) {
- this.dbName = StringUtils.isBlank(dbName)? Warehouse.DEFAULT_DATABASE_NAME : dbName;
+ this.dbName = StringUtils.isBlank(dbName)? MetaStoreUtils.DEFAULT_DATABASE_NAME : dbName;
 this.tableName = tableName;
 this.sd = new StorageDescriptor();
 this.sd.setInputFormat(DEFAULT_INPUT_FORMAT_CLASS);
diff --git a/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java b/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java
index da08d2f8a3..21263acbf0 100644
--- a/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java
+++ b/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java
@@ -37,10 +37,10 @@
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.IMetaStoreClient;
 import org.apache.hadoop.hive.metastore.MetaStoreTestUtils;
-import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.NotificationEvent;
 import org.apache.hadoop.hive.metastore.api.PartitionEventType;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat;
 import org.apache.hadoop.hive.ql.io.RCFileInputFormat;
 import org.apache.hadoop.hive.ql.io.RCFileOutputFormat;
@@ -80,8 +80,6 @@
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.assertArrayEquals;
-import org.apache.hadoop.util.Shell;
-
 import javax.annotation.Nullable;
 public class TestHCatClient {
@@ -151,7 +149,7 @@ public static String fixPath(String path) {
 }
 public static String makePartLocation(HCatTable table, Map partitionSpec) throws MetaException {
- return (new Path(table.getSd().getLocation(), Warehouse.makePartPath(partitionSpec))).toUri().toString();
+ return (new Path(table.getSd().getLocation(), MetaStoreUtils.makePartPath(partitionSpec))).toUri().toString();
 }
 @Test
diff --git a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/mapreduce/TestSequenceFileReadWrite.java b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/mapreduce/TestSequenceFileReadWrite.java
index ab25ffed8e..396d0a5803 100644
--- a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/mapreduce/TestSequenceFileReadWrite.java
+++ b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/mapreduce/TestSequenceFileReadWrite.java
@@ -25,6 +25,8 @@
 import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
+
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.junit.Ignore;
 import java.util.Iterator;
@@ -32,7 +34,6 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.cli.CliSessionState;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.ql.DriverFactory;
 import org.apache.hadoop.hive.ql.IDriver;
 import org.apache.hadoop.hive.ql.session.SessionState;
@@ -49,7 +50,6 @@
 import org.apache.hive.hcatalog.data.DefaultHCatRecord;
 import org.apache.hive.hcatalog.data.schema.HCatFieldSchema;
 import org.apache.hive.hcatalog.data.schema.HCatSchema;
-import org.apache.pig.ExecType;
 import org.apache.pig.PigServer;
 import org.apache.pig.data.Tuple;
 import org.junit.After;
@@ -182,7 +182,7 @@ public void testSequenceTableWriteReadMR() throws Exception {
 TextInputFormat.setInputPaths(job, inputFileName);
 HCatOutputFormat.setOutput(job, OutputJobInfo.create(
- Warehouse.DEFAULT_DATABASE_NAME, "demo_table_2", null));
+ MetaStoreUtils.DEFAULT_DATABASE_NAME, "demo_table_2", null));
 job.setOutputFormatClass(HCatOutputFormat.class);
 HCatOutputFormat.setSchema(job, getSchema());
 job.setNumReduceTasks(0);
@@ -230,7 +230,7 @@ public void testTextTableWriteReadMR() throws Exception {
 TextInputFormat.setInputPaths(job, inputFileName);
 HCatOutputFormat.setOutput(job, OutputJobInfo.create(
- Warehouse.DEFAULT_DATABASE_NAME, "demo_table_3", null));
+ MetaStoreUtils.DEFAULT_DATABASE_NAME, "demo_table_3", null));
 job.setOutputFormatClass(HCatOutputFormat.class);
 HCatOutputFormat.setSchema(job, getSchema());
 assertTrue(job.waitForCompletion(true));
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/tools/TestSchemaToolCatalogOps.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/tools/TestSchemaToolCatalogOps.java
index 42eb979bcc..57bea69a90 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/tools/TestSchemaToolCatalogOps.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/tools/TestSchemaToolCatalogOps.java
@@ -48,8 +48,8 @@
 import java.util.HashSet;
 import java.util.Set;
-import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
-import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DEFAULT_CATALOG_NAME;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DEFAULT_DATABASE_NAME;
 public class TestSchemaToolCatalogOps {
 private static MetastoreSchemaTool schemaTool;
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java
index 9b50fd4f30..139c42dca0 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java
@@ -34,7 +34,7 @@
 import org.apache.hadoop.hive.common.LogUtils.LogInitializationException;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.DriverFactory;
 import org.apache.hadoop.hive.ql.IDriver;
 import org.apache.hadoop.hive.ql.history.HiveHistory.Keys;
@@ -103,7 +103,7 @@ protected void setUp() {
 cols.add("key");
 cols.add("value");
 for (String src : srctables) {
- db.dropTable(Warehouse.DEFAULT_DATABASE_NAME, src, true, true);
+ db.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, src, true, true);
 db.createTable(src, cols, null, TextInputFormat.class,
 IgnoreKeyTextOutputFormat.class);
 db.loadTable(hadoopDataFile[i], src,
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/metadata/TestSemanticAnalyzerHookLoading.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/metadata/TestSemanticAnalyzerHookLoading.java
index 58ac4aca81..5782c6f0a9 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/metadata/TestSemanticAnalyzerHookLoading.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/metadata/TestSemanticAnalyzerHookLoading.java
@@ -24,7 +24,7 @@
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.DriverFactory;
 import org.apache.hadoop.hive.ql.IDriver;
 import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
@@ -48,7 +48,7 @@ public void testHookLoading() throws Exception{
 assertEquals(0, resp.getResponseCode());
 assertNull(resp.getErrorMessage());
- Map params = Hive.get(conf).getTable(Warehouse.DEFAULT_DATABASE_NAME, "testDL").getParameters();
+ Map params = Hive.get(conf).getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, "testDL").getParameters();
 assertEquals(DummyCreateTableHook.class.getName(),params.get("createdBy"));
 assertEquals("Open Source rocks!!", params.get("Message"));
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
index 3d509f3532..9337806b22 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
@@ -33,7 +33,6 @@
 import org.apache.hadoop.hive.metastore.ObjectStore;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.ForeignKeysRequest;
-import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.metastore.api.NotNullConstraintsRequest;
 import org.apache.hadoop.hive.metastore.api.NotificationEvent;
@@ -84,7 +83,7 @@
 import java.util.Arrays;
 import java.util.List;
-import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DEFAULT_CATALOG_NAME;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.assertEquals;
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java
index 1e3478d718..18f9ea6c88 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java
@@ -30,7 +30,6 @@
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
 import org.apache.hadoop.hive.metastore.MetaStoreTestUtils;
-import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.ForeignKeysRequest;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
@@ -44,6 +43,7 @@
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.api.UniqueConstraintsRequest;
 import org.apache.hadoop.hive.metastore.txn.TxnDbUtil;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.DriverFactory;
 import org.apache.hadoop.hive.ql.IDriver;
 import org.apache.hadoop.hive.ql.exec.repl.ReplDumpWork;
@@ -404,12 +404,12 @@ public Partition getPartition(String dbName, String tableName, List part
 }
 public List getUniqueConstraintList(String dbName, String tblName) throws Exception {
- return client.getUniqueConstraints(new UniqueConstraintsRequest(Warehouse.DEFAULT_CATALOG_NAME, dbName, tblName));
client.getUniqueConstraints(new UniqueConstraintsRequest(Warehouse.DEFAULT_CATALOG_NAME, dbName, tblName)); + return client.getUniqueConstraints(new UniqueConstraintsRequest(MetaStoreUtils.DEFAULT_CATALOG_NAME, dbName, tblName)); } public List getNotNullConstraintList(String dbName, String tblName) throws Exception { return client.getNotNullConstraints( - new NotNullConstraintsRequest(Warehouse.DEFAULT_CATALOG_NAME, dbName, tblName)); + new NotNullConstraintsRequest(MetaStoreUtils.DEFAULT_CATALOG_NAME, dbName, tblName)); } ReplicationV1CompatRule getReplivationV1CompatRule(List testsToSkip) { diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/MetaStoreDumpUtility.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/MetaStoreDumpUtility.java index 2389c3bc68..a3e0ab56f5 100644 --- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/MetaStoreDumpUtility.java +++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/MetaStoreDumpUtility.java @@ -41,7 +41,7 @@ import org.apache.commons.compress.compressors.bzip2.BZip2CompressorInputStream; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.Warehouse; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hive.testutils.HiveTestEnvSetup; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -217,7 +217,7 @@ public int compare(String str1, String str2) { } s.execute("ALTER TABLE APP.TAB_COL_STATS ADD COLUMN CAT_NAME VARCHAR(256)"); - s.execute("update APP.TAB_COL_STATS set CAT_NAME = '" + Warehouse.DEFAULT_CATALOG_NAME + "'"); + s.execute("update APP.TAB_COL_STATS set CAT_NAME = '" + MetaStoreUtils.DEFAULT_CATALOG_NAME + "'"); s.close(); diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java index 5adbb63693..e30ee149a6 100644 --- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java +++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hive.ql; -import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DEFAULT_DATABASE_NAME; import java.io.BufferedInputStream; import java.io.BufferedOutputStream; @@ -75,8 +75,8 @@ import org.apache.hadoop.hive.llap.LlapItUtils; import org.apache.hadoop.hive.llap.daemon.MiniLlapCluster; import org.apache.hadoop.hive.llap.io.api.LlapProxy; -import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.cache.results.QueryResultsCache; import org.apache.hadoop.hive.ql.dataset.Dataset; import org.apache.hadoop.hive.ql.dataset.DatasetCollection; @@ -1486,7 +1486,7 @@ public void convertSequenceFileToTextFile() throws Exception { .run("FROM dest4_sequencefile INSERT OVERWRITE TABLE dest4 SELECT dest4_sequencefile.*"); // Drop dest4_sequencefile - db.dropTable(Warehouse.DEFAULT_DATABASE_NAME, "dest4_sequencefile", + db.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, "dest4_sequencefile", true, true); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java index 039f991f9d..b0dec865ed 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java @@ -59,7 +59,6 @@ import 
org.apache.hadoop.hive.conf.VariableSubstitution; import org.apache.hadoop.hive.metastore.ColumnType; import org.apache.hadoop.hive.metastore.HiveMetaStoreUtils; -import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.LockComponent; @@ -828,7 +827,7 @@ private boolean isValidTxnListState() throws LockException { lckCmp.getType() == LockType.SHARED_WRITE) && lckCmp.getTablename() != null) { nonSharedLocks.add( - Warehouse.getQualifiedName( + MetaStoreUtils.getQualifiedName( lckCmp.getDbname(), lckCmp.getTablename())); } } @@ -839,7 +838,7 @@ private boolean isValidTxnListState() throws LockException { lock.getHiveLockMode() == HiveLockMode.SEMI_SHARED) && lock.getHiveLockObject().getPaths().length == 2) { nonSharedLocks.add( - Warehouse.getQualifiedName( + MetaStoreUtils.getQualifiedName( lock.getHiveLockObject().getPaths()[0], lock.getHiveLockObject().getPaths()[1])); } } @@ -860,7 +859,7 @@ private boolean isValidTxnListState() throws LockException { .collect(Collectors.toList()), currentTxnString); for (Pair tableInfo : writtenTables) { - String fullQNameForLock = Warehouse.getQualifiedName( + String fullQNameForLock = MetaStoreUtils.getQualifiedName( tableInfo.getRight().getDbName(), MetaStoreUtils.encodeTableName(tableInfo.getRight().getTableName())); if (nonSharedLocks.contains(fullQNameForLock)) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ArchiveUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ArchiveUtils.java index 6ad0556b55..1338f2f190 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ArchiveUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ArchiveUtils.java @@ -31,7 +31,6 @@ import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; @@ -100,7 +99,7 @@ static public PartSpecInfo create(Table tbl, Map partSpec) public Path createPath(Table tbl) throws HiveException { String prefixSubdir; try { - prefixSubdir = Warehouse.makePartName(fields, values); + prefixSubdir = MetaStoreUtils.makePartName(fields, values); } catch (MetaException e) { throw new HiveException("Unable to get partitions directories prefix", e); } @@ -115,7 +114,7 @@ public Path createPath(Table tbl) throws HiveException { */ public String getName() throws HiveException { try { - return Warehouse.makePartName(fields, values); + return MetaStoreUtils.makePartName(fields, values); } catch (MetaException e) { throw new HiveException("Unable to create partial name", e); } @@ -225,7 +224,7 @@ public static String getPartialName(Partition p, int level) throws HiveException List fields = p.getTable().getPartCols().subList(0, level); List values = p.getValues().subList(0, level); try { - return Warehouse.makePartName(fields, values); + return MetaStoreUtils.makePartName(fields, values); } catch (MetaException e) { throw new HiveException("Wasn't able to generate name" + " for partial specification"); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java index 467f7280ef..4adbf0f54c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java +++ 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java @@ -1569,7 +1569,7 @@ boolean partitionInCustomLocation(Table tbl, Partition p) throws HiveException { String subdir = null; try { - subdir = Warehouse.makePartName(tbl.getPartCols(), p.getValues()); + subdir = MetaStoreUtils.makePartName(tbl.getPartCols(), p.getValues()); } catch (MetaException e) { throw new HiveException("Unable to get partition's directory", e); } @@ -2169,7 +2169,7 @@ private int msck(Hive db, MsckDesc msckDesc) { while (iter.hasNext()) { CheckResult.PartitionResult part = iter.next(); try { - vals = Warehouse.makeValsFromName(part.getPartitionName(), vals); + vals = MetaStoreUtils.makeValsFromName(part.getPartitionName(), vals); } catch (MetaException ex) { throw new HiveException(ex); } @@ -2288,7 +2288,7 @@ public Void execute(int size) throws Exception { if (currentBatchSize == 0) { break; } - apd.addPartition(Warehouse.makeSpecFromName(part.getPartitionName()), null); + apd.addPartition(MetaStoreUtils.makeSpecFromName(part.getPartitionName()), null); lastBatch.add(part); addMsgs.add(String.format(addMsgFormat, part.getPartitionName())); currentBatchSize--; @@ -2652,7 +2652,7 @@ else if (sortCol.getOrder() == BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_DESC) { if (tbl.getStorageHandler() == null) { // If serialization.format property has the default value, it will not be included in // SERDE properties - if (Warehouse.DEFAULT_SERIALIZATION_FORMAT.equals(serdeParams.get( + if (MetaStoreUtils.DEFAULT_SERIALIZATION_FORMAT.equals(serdeParams.get( serdeConstants.SERIALIZATION_FORMAT))){ serdeParams.remove(serdeConstants.SERIALIZATION_FORMAT); } @@ -3971,7 +3971,7 @@ private int alterTable(Hive db, AlterTableDesc alterTbl) throws HiveException { // Note: this is necessary for UPDATE_STATISTICS command, that operates via ADDPROPS (why?). // For any other updates, we don't want to do txn check on partitions when altering table. boolean isTxn = alterTbl.getPartSpec() != null && alterTbl.getOp() == AlterTableTypes.ADDPROPS; - db.alterPartitions(Warehouse.getQualifiedName(tbl.getTTable()), allPartitions, environmentContext, isTxn); + db.alterPartitions(MetaStoreUtils.getQualifiedName(tbl.getTTable()), allPartitions, environmentContext, isTxn); } // Add constraints if necessary addConstraints(db, alterTbl); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MaterializedViewTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MaterializedViewTask.java index 87828b14ee..d3ec10900c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MaterializedViewTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MaterializedViewTask.java @@ -32,8 +32,6 @@ import java.io.Serializable; -import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; - /** * This task does some work related to materialized views.
In particular, it adds * or removes the materialized view from the registry if needed, or registers new diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java index b677d467d6..a1cc8cd2fe 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java @@ -105,7 +105,6 @@ import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; -import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Order; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; @@ -138,7 +137,6 @@ import org.apache.hadoop.hive.ql.io.RCFile; import org.apache.hadoop.hive.ql.io.ReworkMapredInputFormat; import org.apache.hadoop.hive.ql.io.SelfDescribingInputFormatInterface; -import org.apache.hadoop.hive.ql.io.AcidUtils.ParsedDelta; import org.apache.hadoop.hive.ql.io.merge.MergeFileMapper; import org.apache.hadoop.hive.ql.io.merge.MergeFileWork; import org.apache.hadoop.hive.ql.io.rcfile.truncate.ColumnTruncateMapper; @@ -2703,7 +2701,7 @@ public boolean skipProcessing(Task task) { // generate a full partition specification LinkedHashMap fullPartSpec = new LinkedHashMap(partSpec); - if (!Warehouse.makeSpecFromName(fullPartSpec, partPath, new HashSet(partSpec.keySet()))) { + if (!MetaStoreUtils.makeSpecFromName(fullPartSpec, partPath, new HashSet(partSpec.keySet()))) { Utilities.FILE_OP_LOGGER.warn("Ignoring invalid DP directory {}", partPath); continue; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSTableEvent.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSTableEvent.java index d203ae4aa9..3f023dfc59 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSTableEvent.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSTableEvent.java @@ -21,9 +21,9 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.exec.repl.bootstrap.events.TableEvent; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.parse.EximUtil; @@ -95,7 +95,7 @@ public ImportTableDesc tableDesc(String dbName) throws SemanticException { List partitions = new ArrayList<>(); try { for (Partition partition : metadata.getPartitions()) { - String partName = Warehouse.makePartName(tblDesc.getPartCols(), partition.getValues()); + String partName = MetaStoreUtils.makePartName(tblDesc.getPartCols(), partition.getValues()); partitions.add(partName); } } catch (MetaException e) { @@ -121,7 +121,7 @@ private AddPartitionDesc partitionDesc(Path fromPath, partDesc.setBucketCols(partition.getSd().getBucketCols()); partDesc.setSortCols(partition.getSd().getSortCols()); partDesc.setLocation(new Path(fromPath, - Warehouse.makePartName(tblDesc.getPartCols(), partition.getValues())).toString()); + MetaStoreUtils.makePartName(tblDesc.getPartCols(), partition.getValues())).toString()); partsDesc.setReplicationSpec(replicationSpec()); 
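// Illustrative note, not part of the patch: makePartName renders a partition spec as a
// path fragment, e.g. partition columns (ds, hr) with values (2018-08-14, 10) yield
// "ds=2018-08-14/hr=10", so the location set above resolves to <fromPath>/ds=2018-08-14/hr=10.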
return partsDesc; } catch (Exception e) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java index c0cfc439d2..3963d1dc18 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hive.ql.exec.repl.bootstrap.load.table; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.InvalidOperationException; import org.apache.hadoop.hive.metastore.api.MetaException; @@ -279,7 +278,7 @@ private void addPartition(boolean hasMorePartitions, AddPartitionDesc addPartiti private Path locationOnReplicaWarehouse(Table table, AddPartitionDesc.OnePartitionDesc partSpec) throws MetaException, HiveException, IOException { - String child = Warehouse.makePartPath(partSpec.getPartSpec()); + String child = MetaStoreUtils.makePartPath(partSpec.getPartSpec()); if (tableDesc.getLocation() == null) { if (table.getDataLocation() == null) { Database parentDb = context.hiveDb.getDatabase(tableDesc.getDatabaseName()); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/EnforceReadOnlyTables.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/EnforceReadOnlyTables.java index 47fff8b33e..bbf6316fad 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/EnforceReadOnlyTables.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/EnforceReadOnlyTables.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hive.ql.hooks; -import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DEFAULT_DATABASE_NAME; import java.util.Arrays; import java.util.List; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageInfo.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageInfo.java index 532c7d0b60..40ca52ebfe 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageInfo.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageInfo.java @@ -28,10 +28,10 @@ import org.apache.commons.collections.SetUtils; import org.apache.commons.lang.StringUtils; import org.apache.hadoop.hive.common.StringInternUtils; -import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.stats.StatsUtils; /** @@ -124,7 +124,7 @@ public String toString() { return isPartition() ? 
StatsUtils.getFullyQualifiedTableName(part.getDbName(), part.getTableName()) + "@" + part.getValues() - : Warehouse.getQualifiedName(tab); + : MetaStoreUtils.getQualifiedName(tab); } } @@ -334,7 +334,7 @@ public void setTable(Table table) { @Override public String toString() { - return Warehouse.getQualifiedName(table) + "(" + alias + ")"; + return MetaStoreUtils.getQualifiedName(table) + "(" + alias + ")"; } @Override diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java index a9d845aa5f..3c41d3e041 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java @@ -28,9 +28,9 @@ import org.apache.commons.lang.StringUtils; import org.apache.hadoop.hive.common.ObjectPair; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.QueryPlan; import org.apache.hadoop.hive.ql.exec.ColumnInfo; import org.apache.hadoop.hive.ql.exec.SelectOperator; @@ -364,7 +364,7 @@ private static void addEdge(Map vertexCache, List edges, continue; } Vertex.Type type = Vertex.Type.TABLE; - String tableName = Warehouse.getQualifiedName(table); + String tableName = MetaStoreUtils.getQualifiedName(table); FieldSchema fieldSchema = col.getColumn(); String label = tableName; if (fieldSchema != null) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index f846e93ce5..eb41d7c0cc 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -101,7 +101,6 @@ import org.apache.hadoop.hive.metastore.RetryingMetaStoreClient; import org.apache.hadoop.hive.metastore.SynchronizedMetaStoreClient; import org.apache.hadoop.hive.metastore.TableType; -import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.ReplChangeManager; import org.apache.hadoop.hive.metastore.api.*; import org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils; @@ -2020,7 +2019,7 @@ public Partition loadPartition(Path loadPath, Table tbl, Map par private static Path genPartPathFromTable(Table tbl, Map partSpec, Path tblDataLocationPath) throws MetaException { - Path partPath = new Path(tbl.getDataLocation(), Warehouse.makePartPath(partSpec)); + Path partPath = new Path(tbl.getDataLocation(), MetaStoreUtils.makePartPath(partSpec)); return new Path(tblDataLocationPath.toUri().getScheme(), tblDataLocationPath.toUri().getAuthority(), partPath.toUri().getPath()); } @@ -2332,7 +2331,7 @@ private void constructOneLBLocationMap(FileStatus fSta, for(final Path partPath : validPartitions) { // generate a full partition specification final LinkedHashMap fullPartSpec = Maps.newLinkedHashMap(partSpec); - if (!Warehouse.makeSpecFromName( + if (!MetaStoreUtils.makeSpecFromName( fullPartSpec, partPath, new HashSet(partSpec.keySet()))) { Utilities.FILE_OP_LOGGER.warn("Ignoring invalid DP directory " + partPath); continue; @@ -2614,7 +2613,7 @@ public Partition createPartition(Table tbl, Map partSpec) throws List partsToAlter = new ArrayList<>(); List part_names = new ArrayList<>(); for (org.apache.hadoop.hive.metastore.api.Partition p: in){ - 
part_names.add(Warehouse.makePartName(tbl.getPartitionKeys(), p.getValues())); + part_names.add(MetaStoreUtils.makePartName(tbl.getPartitionKeys(), p.getValues())); try { org.apache.hadoop.hive.metastore.api.Partition ptn = getMSC().getPartition(addPartitionDesc.getDbName(), addPartitionDesc.getTableName(), p.getValues()); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java index 598bb2ee8b..005bfc1163 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java @@ -38,6 +38,7 @@ import com.google.common.collect.Sets; import org.apache.hadoop.hive.common.StringInternUtils; import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.log.PerfLogger; import org.apache.hadoop.hive.ql.session.SessionState; import org.slf4j.Logger; @@ -48,7 +49,6 @@ import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; -import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import org.apache.hadoop.hive.ql.metadata.CheckResult.PartitionResult; @@ -100,7 +100,7 @@ public void checkMetastore(String dbName, String tableName, throws HiveException, IOException { if (dbName == null || "".equalsIgnoreCase(dbName)) { - dbName = Warehouse.DEFAULT_DATABASE_NAME; + dbName = MetaStoreUtils.DEFAULT_DATABASE_NAME; } try { @@ -237,7 +237,7 @@ void checkTable(String dbName, String tableName, if (part == null) { PartitionResult pr = new PartitionResult(); pr.setTableName(tableName); - pr.setPartitionName(Warehouse.makePartPath(map)); + pr.setPartitionName(MetaStoreUtils.makePartPath(map)); result.getPartitionsNotInMs().add(pr); } else { loadedPartitions.add(part); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java index 136709c6dc..29b52a6b34 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java @@ -36,7 +36,6 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.FileUtils; -import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.Order; @@ -170,7 +169,7 @@ protected void initialize(Table table, // set default if location is not set and this is a physical // table partition (not a view partition) if (table.getDataLocation() != null) { - Path partPath = new Path(table.getDataLocation(), Warehouse.makePartName(table.getPartCols(), tPartition.getValues())); + Path partPath = new Path(table.getDataLocation(), MetaStoreUtils.makePartName(table.getPartCols(), tPartition.getValues())); tPartition.getSd().setLocation(partPath.toString()); } } @@ -197,7 +196,7 @@ protected void initialize(Table table, public String getName() { try { - return Warehouse.makePartName(table.getPartCols(), tPartition.getValues()); + return MetaStoreUtils.makePartName(table.getPartCols(), tPartition.getValues()); } catch (MetaException e) { throw new 
RuntimeException(e); } @@ -447,7 +446,7 @@ public Path getBucketPath(int bucketNum) { public String toString() { String pn = "Invalid Partition"; try { - pn = Warehouse.makePartName(getSpec(), false); + pn = MetaStoreUtils.makePartName(getSpec(), false); } catch (MetaException e) { // ignore as we are most probably in an exception path already otherwise this // error wouldn't occur diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java index a2b57fb646..8bbc59a2ab 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java @@ -73,7 +73,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DEFAULT_CATALOG_NAME; /** * todo: This needs review re: thread safety. Various places (see callers of @@ -508,7 +508,7 @@ private void createTempTable(org.apache.hadoop.hive.metastore.api.Table tbl, SessionState ss = SessionState.get(); if (ss == null) { throw new MetaException("No current SessionState, cannot create temporary table: " - + Warehouse.getQualifiedName(tbl)); + + MetaStoreUtils.getQualifiedName(tbl)); } // We may not own the table object, create a copy @@ -775,7 +775,7 @@ private void dropTempTable(org.apache.hadoop.hive.metastore.api.Table table, boo */ public static Map getTempTablesForDatabase(String dbName, String tblName) { - return getTempTables(Warehouse.getQualifiedName(dbName, tblName)). + return getTempTables(MetaStoreUtils.getQualifiedName(dbName, tblName)). get(dbName); } @@ -793,7 +793,7 @@ private void dropTempTable(org.apache.hadoop.hive.metastore.api.Table table, boo SessionState ss = SessionState.get(); if (ss == null) { LOG.debug("No current SessionState, skipping temp tables for " + - Warehouse.getQualifiedName(dbName, tableName)); + MetaStoreUtils.getQualifiedName(dbName, tableName)); return null; } String lookupName = StatsUtils.getFullyQualifiedTableName(dbName.toLowerCase(), @@ -911,7 +911,7 @@ private Partition getPartition(String partName) throws MetaException { } private void assertPartitioned() throws MetaException { if(tTable.getPartitionKeysSize() <= 0) { - throw new MetaException(Warehouse.getQualifiedName(tTable) + " is not partitioned"); + throw new MetaException(MetaStoreUtils.getQualifiedName(tTable) + " is not partitioned"); } } @@ -927,13 +927,13 @@ private PartitionTree(org.apache.hadoop.hive.metastore.api.Table t) { this.tTable = t; } private void addPartition(Partition p) throws AlreadyExistsException, MetaException { - String partName = Warehouse.makePartName(tTable.getPartitionKeys(), p.getValues()); + String partName = MetaStoreUtils.makePartName(tTable.getPartitionKeys(), p.getValues()); if(parts.putIfAbsent(partName, p) != null) { throw new AlreadyExistsException("Partition " + partName + " already exists"); } } /** - * @param partName - "p=1/q=2" full partition name {@link Warehouse#makePartName(List, List)} + * @param partName - "p=1/q=2" full partition name {@link MetaStoreUtils#makePartName(List, List)} * @return null if doesn't exist */ private Partition getPartition(String partName) { @@ -980,7 +980,7 @@ private Partition getPartition(String partName) { TempTable tt = getTempTable(table); if(tt == null) { throw new IllegalStateException("TempTable not found for
" + - Warehouse.getQualifiedName(table)); + MetaStoreUtils.getQualifiedName(table)); } tt.addPartition(deepCopy(partition)); return partition; @@ -1003,7 +1003,7 @@ private Partition getPartition(String partName) { TempTable tt = getTempTable(table); if(tt == null) { throw new IllegalStateException("TempTable not found for " + - Warehouse.getQualifiedName(table)); + MetaStoreUtils.getQualifiedName(table)); } List parts = tt.getPartitions(partialPvals); List matchedParts = new ArrayList<>(); @@ -1028,7 +1028,7 @@ private Partition getPartition(String partName) { TempTable tt = getTempTable(table); if(tt == null) { throw new IllegalStateException("TempTable not found for " + - Warehouse.getQualifiedName(table)); + MetaStoreUtils.getQualifiedName(table)); } List partVals = new ArrayList<>(); partVals.add(""); //to get all partitions @@ -1036,7 +1036,7 @@ private Partition getPartition(String partName) { List matchedParts = new ArrayList<>(); for(int i = 0; i < (maxParts <= 0 ? parts.size() : maxParts); i++) { matchedParts.add( - Warehouse.makePartName(tt.tTable.getPartitionKeys(), parts.get(i).getValues())); + MetaStoreUtils.makePartName(tt.tTable.getPartitionKeys(), parts.get(i).getValues())); } return matchedParts; } @@ -1066,7 +1066,7 @@ private Partition getPartition(String partName) { } private static TempTable getTempTable(org.apache.hadoop.hive.metastore.api.Table t) { - String qualifiedTableName = Warehouse. + String qualifiedTableName = MetaStoreUtils. getQualifiedName(t.getDbName().toLowerCase(), t.getTableName().toLowerCase()); SessionState ss = SessionState.get(); if (ss == null) { @@ -1076,21 +1076,21 @@ private static TempTable getTempTable(org.apache.hadoop.hive.metastore.api.Table return ss.getTempPartitions().get(qualifiedTableName); } private static void removeTempTable(org.apache.hadoop.hive.metastore.api.Table t) { - String qualifiedTableName = Warehouse. + String qualifiedTableName = MetaStoreUtils. getQualifiedName(t.getDbName().toLowerCase(), t.getTableName().toLowerCase()); SessionState ss = SessionState.get(); if (ss == null) { LOG.warn("No current SessionState, skipping temp partitions for " + qualifiedTableName); return; } - ss.getTempPartitions().remove(Warehouse.getQualifiedName(t)); + ss.getTempPartitions().remove(MetaStoreUtils.getQualifiedName(t)); } private static void createTempTable(org.apache.hadoop.hive.metastore.api.Table t) { if(t.getPartitionKeysSize() <= 0) { //do nothing as it's not a partitioned table return; } - String qualifiedTableName = Warehouse. + String qualifiedTableName = MetaStoreUtils. 
getQualifiedName(t.getDbName().toLowerCase(), t.getTableName().toLowerCase()); SessionState ss = SessionState.get(); if (ss == null) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java index 26f21cf8c4..3d62109605 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java @@ -40,7 +40,6 @@ import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.HiveMetaStoreUtils; import org.apache.hadoop.hive.metastore.TableType; -import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.CreationMetadata; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.MetaException; @@ -376,7 +375,7 @@ public ValidationFailureSemanticException(String s) { final public void validatePartColumnNames( Map spec, boolean shouldBeFull) throws SemanticException { List partCols = tTable.getPartitionKeys(); - final String tableName = Warehouse.getQualifiedName(tTable); + final String tableName = MetaStoreUtils.getQualifiedName(tTable); if (partCols == null || (partCols.size() == 0)) { if (spec != null) { throw new ValidationFailureSemanticException(tableName + @@ -963,7 +962,7 @@ public boolean isNonNative() { } public String getFullyQualifiedName() { - return Warehouse.getQualifiedName(tTable); + return MetaStoreUtils.getQualifiedName(tTable); } /** diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/OpProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/OpProcFactory.java index 236003251a..6e0f23fb63 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/OpProcFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/OpProcFactory.java @@ -27,9 +27,9 @@ import java.util.Set; import java.util.Stack; -import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.exec.ColumnInfo; import org.apache.hadoop.hive.ql.exec.FileSinkOperator; import org.apache.hadoop.hive.ql.exec.FilterOperator; @@ -456,7 +456,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, BaseColumnInfo col = expr_dep.getBaseCols().iterator().next(); Table t = col.getTabAlias().getTable(); if (t != null) { - sb.append(Warehouse.getQualifiedName(t)).append("."); + sb.append(MetaStoreUtils.getQualifiedName(t)).append("."); } sb.append(col.getColumn().getName()); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java index 7645564f3d..ba0db247de 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java @@ -31,9 +31,9 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.StrictChecks; import org.apache.hadoop.hive.metastore.IMetaStoreClient; -import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.exec.ExprNodeEvaluator; import 
org.apache.hadoop.hive.ql.exec.FunctionRegistry; import org.apache.hadoop.hive.ql.exec.TableScanOperator; @@ -565,7 +565,7 @@ public static boolean prunePartitionNames(List partColumnNames, Iterator partIter = partNamesSeq.iterator(); while (partIter.hasNext()) { String partName = partIter.next(); - Warehouse.makeValsFromName(partName, values); + MetaStoreUtils.makeValsFromName(partName, values); ArrayList convertedValues = new ArrayList(values.size()); for(int i=0; i fields = new ArrayList(); fields.add(new FieldSchema("val", "int", null)); table.setFields(fields); - table.setDataLocation(Warehouse.getDnsPath(new Path(SessionState.get().getTempTableSpace(), + table.setDataLocation(MetaStoreUtils.getDnsPath(new Path(SessionState.get().getTempTableSpace(), tableName), conf)); table.getTTable().setTemporary(true); table.setStoredAsSubDirectories(false); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java index 80f77b9f0c..0f0a97d93b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java @@ -31,7 +31,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.HiveStatsUtils; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.Warehouse; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.Utilities; @@ -334,7 +334,7 @@ private PartitionDesc generateDPFullPartSpec(DynamicPartitionCtx dpCtx, FileStat TableDesc tblDesc, int i) { LinkedHashMap fullPartSpec = new LinkedHashMap<>( dpCtx.getPartSpec()); // Require all the directories to be present with some values. 
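// Illustrative note, not part of the patch: makeSpecFromName is assumed to parse trailing
// "k=v" components of status[i].getPath() into fullPartSpec, e.g. ".../ds=2018-08-14/hr=10"
// becomes {ds=2018-08-14, hr=10}; when a key required by dpCtx.getPartSpec() cannot be
// recovered it returns false, which is why such directories fall through to the null return.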
- if (!Warehouse.makeSpecFromName(fullPartSpec, status[i].getPath(), + if (!MetaStoreUtils.makeSpecFromName(fullPartSpec, status[i].getPath(), new HashSet<>(dpCtx.getPartSpec().keySet()))) { return null; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/DynamicPartitionCtx.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/DynamicPartitionCtx.java index c1aeb8f136..a702e82bb2 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/DynamicPartitionCtx.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/DynamicPartitionCtx.java @@ -26,7 +26,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; -import org.apache.hadoop.hive.metastore.Warehouse; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.SemanticException; @@ -103,7 +103,7 @@ public DynamicPartitionCtx(Map partSpec, String defaultPartName, this.numDPCols = dpNames.size(); this.numSPCols = spNames.size(); if (this.numSPCols > 0) { - this.spPath = Warehouse.makeDynamicPartName(partSpec); + this.spPath = MetaStoreUtils.makeDynamicPartName(partSpec); } else { this.spPath = null; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/TruncateTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/TruncateTableDesc.java index 9e83576e6b..317a56a2aa 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/TruncateTableDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/TruncateTableDesc.java @@ -22,7 +22,7 @@ import java.util.Map; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.metastore.Warehouse; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.parse.ReplicationSpec; @@ -65,7 +65,7 @@ public TruncateTableDesc(String tableName, Map partSpec, this.partSpec = partSpec; this.replicationSpec = replicationSpec; this.isTransactional = AcidUtils.isTransactionalTable(table); - this.fullTableName = table == null ? tableName : Warehouse.getQualifiedName(table.getTTable()); + this.fullTableName = table == null ? 
tableName : MetaStoreUtils.getQualifiedName(table.getTTable()); } @Explain(displayName = "TableName", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveV1Authorizer.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveV1Authorizer.java index c889321082..6a2cef5cec 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveV1Authorizer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveV1Authorizer.java @@ -23,7 +23,6 @@ import java.util.Map; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege; import org.apache.hadoop.hive.metastore.api.HiveObjectRef; @@ -34,6 +33,7 @@ import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo; import org.apache.hadoop.hive.metastore.api.Role; import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Table; @@ -165,7 +165,7 @@ private PrivilegeBag toPrivilegeBag(List privileges, if (privObject.getPartKeys() != null) { Map partSpec = - Warehouse.makeSpecFromValues(tableObj.getPartitionKeys(), privObject.getPartKeys()); + MetaStoreUtils.makeSpecFromValues(tableObj.getPartitionKeys(), privObject.getPartKeys()); Partition partObj = hive.getPartition(tableObj, partSpec, false).getTPartition(); partValues = partObj.getValues(); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLAuthorizationUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLAuthorizationUtils.java index e78753812b..96a53334e8 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLAuthorizationUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLAuthorizationUtils.java @@ -29,7 +29,7 @@ import java.util.Map; import java.util.Set; -import org.apache.hadoop.hive.metastore.Warehouse; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.fs.FileStatus; @@ -274,7 +274,7 @@ private static boolean isOwner(IMetaStoreClient metastoreClient, String userName return userName.equals(thriftTableObj.getOwner()); } case DATABASE: { - if (Warehouse.DEFAULT_DATABASE_NAME.equalsIgnoreCase(hivePrivObject.getDbname())) { + if (MetaStoreUtils.DEFAULT_DATABASE_NAME.equalsIgnoreCase(hivePrivObject.getDbname())) { return true; } Database db = null; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java index 71e130b608..fa9c28613c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hive.ql.session; -import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DEFAULT_DATABASE_NAME; import java.io.Closeable; import java.io.File; @@ -60,7 +60,6 @@ import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import 
org.apache.hadoop.hive.conf.HiveConfUtil; import org.apache.hadoop.hive.metastore.ObjectStore; -import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.cache.CachedStore; @@ -849,7 +848,7 @@ public Path getTempTableSpace() { public static String generateTempTableLocation(Configuration conf) throws MetaException { Path path = new Path(SessionState.getTempTableSpace(conf), UUID.randomUUID().toString()); - path = Warehouse.getDnsPath(path, conf); + path = MetaStoreUtils.getDnsPath(path, conf); return path.toString(); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java index 6eb1ca2645..ec744ff39e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java @@ -198,7 +198,7 @@ private String getAggregationPrefix0(Table table, Partition partition) throws Me // prefix = work.getAggKey(); prefix = prefix.toLowerCase(); if (partition != null) { - return Utilities.join(prefix, Warehouse.makePartPath(partition.getSpec())); + return Utilities.join(prefix, MetaStoreUtils.makePartPath(partition.getSpec())); } return prefix; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java index 2e25ecef65..16f43efe14 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java @@ -25,13 +25,13 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; -import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.CompilationOpContext; import org.apache.hadoop.hive.ql.exec.FetchOperator; import org.apache.hadoop.hive.ql.io.AcidUtils; @@ -138,7 +138,7 @@ public int process(Hive db, Table tbl) throws Exception { partVals.add(partVal == null ? 
// could be null for default partition this.conf.getVar(ConfVars.DEFAULTPARTITIONNAME) : partVal.toString()); } - partName = Warehouse.makePartName(partColSchema, partVals); + partName = MetaStoreUtils.makePartName(partColSchema, partVals); } ColumnStatisticsDesc statsDesc = buildColumnStatsDesc(tbl, partName, isTblLevel); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUpdaterThread.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUpdaterThread.java index a50ec18b8a..72296b34da 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUpdaterThread.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUpdaterThread.java @@ -39,7 +39,6 @@ import org.apache.hadoop.hive.metastore.ObjectStore; import org.apache.hadoop.hive.metastore.RawStore; import org.apache.hadoop.hive.metastore.RawStoreProxy; -import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; import org.apache.hadoop.hive.metastore.api.FieldSchema; @@ -54,6 +53,7 @@ import org.apache.hadoop.hive.metastore.conf.MetastoreConf.StatsUpdateMode; import org.apache.hadoop.hive.metastore.txn.TxnStore; import org.apache.hadoop.hive.metastore.txn.TxnUtils; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.DriverUtils; import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hadoop.hive.ql.session.SessionState; @@ -331,7 +331,7 @@ private void stopWorkers() { } int currentIxInBatch = nextIxInBatch++; Partition part = currentBatch.get(currentIxInBatch); - String partName = Warehouse.makePartName(t.getPartitionKeys(), part.getValues()); + String partName = MetaStoreUtils.makePartName(t.getPartitionKeys(), part.getValues()); LOG.debug("Processing partition ({} in batch), {}", currentIxInBatch, partName); // Skip the partitions in progress, and the ones for which stats update is disabled. @@ -416,7 +416,7 @@ private void addPreviousPartitions(Table t, List allPartNames, } // Current match may be out of order w.r.t. the global name list, so add specific parts. for (int i = 0; i < currentIxInBatch; ++i) { - String name = Warehouse.makePartName(t.getPartitionKeys(), currentBatch.get(i).getValues()); + String name = MetaStoreUtils.makePartName(t.getPartitionKeys(), currentBatch.get(i).getValues()); LOG.trace("Adding previous {}, {}", name, cols); partsToAnalyze.put(name, cols); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java index 3565616171..d419b584b3 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java @@ -20,6 +20,7 @@ import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.metastore.ReplChangeManager; import org.apache.hadoop.hive.metastore.txn.TxnStore; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.slf4j.Logger; @@ -219,7 +220,7 @@ public void run() { * Hive QL is not case sensitive wrt db/table/column names * Partition names get * normalized (as far as I can tell) by lower casing column name but not partition value. 
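* Illustrative example (editor's addition, relying on the normalization just described):
* a partition column declared as "Ds" with value "Today" is expected to round-trip through
* makePartName as "ds=Today", key lower-cased, value untouched, hence partition names are
* compared with their original case below.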
- * {@link org.apache.hadoop.hive.metastore.Warehouse#makePartName(List, List, String)} + * {@link MetaStoreUtils#makePartName(List, List, String)} * {@link org.apache.hadoop.hive.ql.parse.DDLSemanticAnalyzer#getPartSpec(ASTNode)} * Since user input may start out in any case, compare here case-insensitive for db/table * but leave partition name as is. diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java index dd0929f2b9..9bf6126d7d 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java @@ -25,7 +25,6 @@ import org.apache.hadoop.hive.metastore.MetaStoreThread; import org.apache.hadoop.hive.metastore.RawStore; import org.apache.hadoop.hive.metastore.RawStoreProxy; -import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; @@ -34,6 +33,7 @@ import org.apache.hadoop.hive.metastore.txn.CompactionInfo; import org.apache.hadoop.hive.metastore.txn.TxnStore; import org.apache.hadoop.hive.metastore.txn.TxnUtils; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; import org.slf4j.Logger; @@ -217,6 +217,6 @@ protected boolean runJobAsSelf(String owner) { } protected String tableName(Table t) { - return Warehouse.getQualifiedName(t); + return MetaStoreUtils.getQualifiedName(t); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java index e77358b0e4..b2206376ac 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java @@ -23,12 +23,12 @@ import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.mapred.JobConf; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.txn.CompactionInfo; import org.apache.hadoop.hive.metastore.txn.TxnUtils; import org.apache.hadoop.hive.ql.Driver; @@ -284,7 +284,7 @@ void gatherStats() { .append(StatsUtils.getFullyQualifiedTableName(ci.dbname, ci.tableName)); if (ci.partName != null) { sb.append(" partition("); - Map partitionColumnValues = Warehouse.makeEscSpecFromName(ci.partName); + Map partitionColumnValues = MetaStoreUtils.makeEscSpecFromName(ci.partName); for (Map.Entry ent : partitionColumnValues.entrySet()) { sb.append(ent.getKey()).append("='").append(ent.getValue()).append("',"); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/util/HiveStrictManagedMigration.java b/ql/src/java/org/apache/hadoop/hive/ql/util/HiveStrictManagedMigration.java index 0f0dc2251d..1af44bd3e7 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/util/HiveStrictManagedMigration.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/util/HiveStrictManagedMigration.java @@ -53,7 +53,6 @@ import org.apache.hadoop.hive.ql.IDriver; 
import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.parse.HiveParser.switchDatabaseStatement_return; import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; import org.apache.hadoop.hive.ql.session.SessionState; @@ -503,7 +502,7 @@ void moveTableData(Database dbObj, Table tableObj, Path newTablePath) throws Hiv for (String partName : partNames) { Partition partObj = hms.getPartition(dbName, tableName, partName); Map partSpec = - Warehouse.makeSpecFromValues(tableObj.getPartitionKeys(), partObj.getValues()); + MetaStoreUtils.makeSpecFromValues(tableObj.getPartitionKeys(), partObj.getValues()); if (shouldModifyPartitionLocation(dbObj, tableObj, partObj, partSpec)) { // Table directory (which includes the partition directory) has already been moved, // just update the partition location in the metastore. diff --git a/ql/src/java/org/apache/hadoop/hive/ql/util/UpgradeTool.java b/ql/src/java/org/apache/hadoop/hive/ql/util/UpgradeTool.java index c523a76659..41141b2c7a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/util/UpgradeTool.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/util/UpgradeTool.java @@ -31,14 +31,11 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; import org.apache.hadoop.hive.metastore.IMetaStoreClient; import org.apache.hadoop.hive.metastore.RetryingMetaStoreClient; import org.apache.hadoop.hive.metastore.TableType; -import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.EnvironmentContext; import org.apache.hadoop.hive.metastore.api.InvalidOperationException; import org.apache.hadoop.hive.metastore.api.MetaException; @@ -47,14 +44,12 @@ import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; -import org.apache.hadoop.hive.metastore.utils.FileUtils; import org.apache.hadoop.hive.metastore.utils.FileUtils.RemoteIteratorWithFilter; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hadoop.hive.ql.io.BucketCodec; -import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hive.common.util.HiveVersionInfo; import org.apache.thrift.TException; @@ -209,7 +204,7 @@ private void performUpgradeInternal(String scriptLocation, boolean execute) LOG.debug("found " + tables.size() + " tables in " + dbName); for(String tableName : tables) { Table t = hms.getTable(dbName, tableName); - LOG.debug("processing table " + Warehouse.getQualifiedName(t)); + LOG.debug("processing table " + MetaStoreUtils.getQualifiedName(t)); if(isAcidEnabled) { //if acid is off post upgrade, you can't make any tables acid - will throw processConversion(t, convertToAcid, convertToMM, hms, db, execute, pw); @@ -236,7 +231,7 @@ private static void alterTable(Table t, Hive db, boolean isMM) EnvironmentContext ec = new EnvironmentContext(); /*we are not modifying any data so 
stats should be exactly the same*/ ec.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE); - db.alterTable(Warehouse.getQualifiedName(t), metaTable, false, ec, false); + db.alterTable(MetaStoreUtils.getQualifiedName(t), metaTable, false, ec, false); } /** @@ -269,7 +264,7 @@ static void handleRenameFiles(Table t, Path p, boolean execute, Configuration co while (iter.hasNext()) { LocatedFileStatus lfs = iter.next(); if (lfs.isDirectory()) { - String msg = Warehouse.getQualifiedName(t) + " is bucketed and has a subdirectory: " + + String msg = MetaStoreUtils.getQualifiedName(t) + " is bucketed and has a subdirectory: " + lfs.getPath(); LOG.error(msg); throw new IllegalStateException(msg); @@ -278,13 +273,13 @@ static void handleRenameFiles(Table t, Path p, boolean execute, Configuration co if (bmd.bucketId < 0) { //non-standard file name - don't know what bucket the rows belong to and we can't //rename the file so that it may end up treated like a different bucket id - String msg = "Bucketed table " + Warehouse.getQualifiedName(t) + " contains file " + + String msg = "Bucketed table " + MetaStoreUtils.getQualifiedName(t) + " contains file " + lfs.getPath() + " with non-standard name"; LOG.error(msg); throw new IllegalArgumentException(msg); } else { if (bmd.bucketId > BucketCodec.MAX_BUCKET_ID) { - String msg = "Bucketed table " + Warehouse.getQualifiedName(t) + " contains file " + + String msg = "Bucketed table " + MetaStoreUtils.getQualifiedName(t) + " contains file " + lfs.getPath() + " with bucketId=" + bmd.bucketId + " that is out of range"; LOG.error(msg); throw new IllegalArgumentException(msg); @@ -295,7 +290,7 @@ static void handleRenameFiles(Table t, Path p, boolean execute, Configuration co } } if(!deltaToFileMap.isEmpty()) { - println(pw, "#Begin file renames for bucketed table " + Warehouse.getQualifiedName(t)); + println(pw, "#Begin file renames for bucketed table " + MetaStoreUtils.getQualifiedName(t)); } for (Map.Entry> ent : deltaToFileMap.entrySet()) { /* create delta and move each file to it.
HIVE-19750 ensures we have reserved @@ -315,13 +310,13 @@ static void handleRenameFiles(Table t, Path p, boolean execute, Configuration co Path newFile = new Path(deltaDir, stripCopySuffix(file.getName())); LOG.debug("need to rename: " + file + " to " + newFile); if (fs.exists(newFile)) { - String msg = Warehouse.getQualifiedName(t) + ": " + newFile + " already exists?!"; + String msg = MetaStoreUtils.getQualifiedName(t) + ": " + newFile + " already exists?!"; LOG.error(msg); throw new IllegalStateException(msg); } if (execute) { if (!fs.rename(file, newFile)) { - String msg = Warehouse.getQualifiedName(t) + ": " + newFile + ": failed to rename"; + String msg = MetaStoreUtils.getQualifiedName(t) + ": " + newFile + ": failed to rename"; LOG.error(msg); throw new IllegalStateException(msg); } @@ -331,7 +326,7 @@ static void handleRenameFiles(Table t, Path p, boolean execute, Configuration co } } if(!deltaToFileMap.isEmpty()) { - println(pw, "#End file renames for bucketed table " + Warehouse.getQualifiedName(t)); + println(pw, "#End file renames for bucketed table " + MetaStoreUtils.getQualifiedName(t)); } return; } @@ -393,19 +388,19 @@ static void handleRenameFiles(Table t, Path p, boolean execute, Configuration co return; } if(!renames.isEmpty()) { - println(pw, "#Begin file renames for unbucketed table " + Warehouse.getQualifiedName(t)); + println(pw, "#Begin file renames for unbucketed table " + MetaStoreUtils.getQualifiedName(t)); } for(RenamePair renamePair : renames) { LOG.debug("need to rename: " + renamePair.getOldPath() + " to " + renamePair.getNewPath()); if (fs.exists(renamePair.getNewPath())) { - String msg = Warehouse.getQualifiedName(t) + ": " + renamePair.getNewPath() + + String msg = MetaStoreUtils.getQualifiedName(t) + ": " + renamePair.getNewPath() + " already exists?!"; LOG.error(msg); throw new IllegalStateException(msg); } if (execute) { if (!fs.rename(renamePair.getOldPath(), renamePair.getNewPath())) { - String msg = Warehouse.getQualifiedName(t) + ": " + renamePair.getNewPath() + + String msg = MetaStoreUtils.getQualifiedName(t) + ": " + renamePair.getNewPath() + ": failed to rename"; LOG.error(msg); throw new IllegalStateException(msg); @@ -415,7 +410,7 @@ static void handleRenameFiles(Table t, Path p, boolean execute, Configuration co makeRenameCommand(renamePair.getOldPath(), renamePair.getNewPath(), pw); } if(!renames.isEmpty()) { - println(pw, "#End file renames for unbucketed table " + Warehouse.getQualifiedName(t)); + println(pw, "#End file renames for unbucketed table " + MetaStoreUtils.getQualifiedName(t)); } } private static void makeRenameCommand(Path file, Path newFile, PrintWriter pw) { @@ -520,7 +515,7 @@ private static void processConversion(Table t, List convertToAcid, return; } //todo: are HBase, Druid tables managed in 2.x? 3.0? - String fullTableName = Warehouse.getQualifiedName(t); + String fullTableName = MetaStoreUtils.getQualifiedName(t); /* * ORC uses table props for settings so things like bucketing, I/O Format, etc should * be the same for each partition.
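Illustrative sketch, not part of the patch: every hunk in this file routes table-name formatting through the moved helper. A minimal reconstruction of that pattern, assuming getQualifiedName keeps its Warehouse signature and returns the dot-joined "db.table" name:

    import org.apache.hadoop.hive.metastore.api.Table;
    import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;

    final class QualifiedNameSketch {
      // Mirrors how processConversion() assembles its ALTER TABLE statements.
      static String acidAlterFor(Table t) {
        return "ALTER TABLE " + MetaStoreUtils.getQualifiedName(t)
            + " SET TBLPROPERTIES ('transactional'='true')";
      }
    }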
@@ -528,7 +523,7 @@ private static void processConversion(Table t, List convertToAcid, boolean canBeMadeAcid = canBeMadeAcid(fullTableName, t.getSd()); if(t.getPartitionKeysSize() <= 0) { if(canBeMadeAcid) { - convertToAcid.add("ALTER TABLE " + Warehouse.getQualifiedName(t) + " SET TBLPROPERTIES (" + + convertToAcid.add("ALTER TABLE " + MetaStoreUtils.getQualifiedName(t) + " SET TBLPROPERTIES (" + "'transactional'='true')"); //do this before alterTable in case files need to be renamed, else // TransactionalMetastoreListener will squawk @@ -539,7 +534,7 @@ private static void processConversion(Table t, List convertToAcid, } } else { - convertToMM.add("ALTER TABLE " + Warehouse.getQualifiedName(t) + " SET TBLPROPERTIES (" + + convertToMM.add("ALTER TABLE " + MetaStoreUtils.getQualifiedName(t) + " SET TBLPROPERTIES (" + "'transactional'='true', 'transactional_properties'='insert_only')"); if(execute) { alterTable(t, db, true); @@ -548,7 +543,7 @@ private static void processConversion(Table t, List convertToAcid, } else { if(!canBeMadeAcid) { - convertToMM.add("ALTER TABLE " + Warehouse.getQualifiedName(t) + " SET TBLPROPERTIES (" + + convertToMM.add("ALTER TABLE " + MetaStoreUtils.getQualifiedName(t) + " SET TBLPROPERTIES (" + "'transactional'='true', 'transactional_properties'='insert_only')"); if(execute) { alterTable(t, db, true); @@ -578,7 +573,7 @@ private static void processConversion(Table t, List convertToAcid, } } //if here, handled all parts and they are now Acid compatible - make it acid - convertToAcid.add("ALTER TABLE " + Warehouse.getQualifiedName(t) + " SET TBLPROPERTIES (" + + convertToAcid.add("ALTER TABLE " + MetaStoreUtils.getQualifiedName(t) + " SET TBLPROPERTIES (" + "'transactional'='true')"); if(execute) { alterTable(t, db, false); @@ -657,7 +652,7 @@ private static boolean isFullAcidTable(Table t) { String transacationalValue = t.getParameters() .get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL); if (transacationalValue != null && "true".equalsIgnoreCase(transacationalValue)) { - System.out.println("Found Acid table: " + Warehouse.getQualifiedName(t)); + System.out.println("Found Acid table: " + MetaStoreUtils.getQualifiedName(t)); return true; } return false; diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java index e108684660..031e1e326f 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java @@ -26,7 +26,7 @@ import junit.framework.TestCase; -import org.apache.hadoop.hive.metastore.Warehouse; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.fs.FSDataInputStream; @@ -138,7 +138,7 @@ cols.add("key"); cols.add("value"); for (String src : srctables) { - db.dropTable(Warehouse.DEFAULT_DATABASE_NAME, src, true, true); + db.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, src, true, true); db.createTable(src, cols, null, TextInputFormat.class, HiveIgnoreKeyTextOutputFormat.class); db.loadTable(hadoopDataFile[i], src, LoadFileType.KEEP_EXISTING, @@ -494,7 +494,7 @@ private void executePlan() throws Exception { public void testMapPlan1() throws Exception { LOG.info("Beginning testMapPlan1"); - populateMapPlan1(db.getTable(Warehouse.DEFAULT_DATABASE_NAME, "src")); + populateMapPlan1(db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, "src")); executePlan(); fileDiff("lt100.txt.deflate", "mapplan1.out"); } @@ 
-502,7 +502,7 @@ public void testMapPlan1() throws Exception { public void testMapPlan2() throws Exception { LOG.info("Beginning testMapPlan2"); - populateMapPlan2(db.getTable(Warehouse.DEFAULT_DATABASE_NAME, "src")); + populateMapPlan2(db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, "src")); executePlan(); fileDiff("lt100.txt", "mapplan2.out"); } @@ -510,7 +510,7 @@ public void testMapPlan2() throws Exception { public void testMapRedPlan1() throws Exception { LOG.info("Beginning testMapRedPlan1"); - populateMapRedPlan1(db.getTable(Warehouse.DEFAULT_DATABASE_NAME, + populateMapRedPlan1(db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, "src")); executePlan(); fileDiff("kv1.val.sorted.txt", "mapredplan1.out"); @@ -519,7 +519,7 @@ public void testMapRedPlan1() throws Exception { public void testMapRedPlan2() throws Exception { LOG.info("Beginning testMapPlan2"); - populateMapRedPlan2(db.getTable(Warehouse.DEFAULT_DATABASE_NAME, + populateMapRedPlan2(db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, "src")); executePlan(); fileDiff("lt100.sorted.txt", "mapredplan2.out"); @@ -528,8 +528,8 @@ public void testMapRedPlan2() throws Exception { public void testMapRedPlan3() throws Exception { LOG.info("Beginning testMapPlan3"); - populateMapRedPlan3(db.getTable(Warehouse.DEFAULT_DATABASE_NAME, - "src"), db.getTable(Warehouse.DEFAULT_DATABASE_NAME, "src2")); + populateMapRedPlan3(db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, + "src"), db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, "src2")); executePlan(); fileDiff("kv1kv2.cogroup.txt", "mapredplan3.out"); } @@ -537,7 +537,7 @@ public void testMapRedPlan3() throws Exception { public void testMapRedPlan4() throws Exception { LOG.info("Beginning testMapPlan4"); - populateMapRedPlan4(db.getTable(Warehouse.DEFAULT_DATABASE_NAME, + populateMapRedPlan4(db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, "src")); executePlan(); fileDiff("kv1.string-sorted.txt", "mapredplan4.out"); @@ -546,7 +546,7 @@ public void testMapRedPlan4() throws Exception { public void testMapRedPlan5() throws Exception { LOG.info("Beginning testMapPlan5"); - populateMapRedPlan5(db.getTable(Warehouse.DEFAULT_DATABASE_NAME, + populateMapRedPlan5(db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, "src")); executePlan(); fileDiff("kv1.string-sorted.txt", "mapredplan5.out"); @@ -555,7 +555,7 @@ public void testMapRedPlan5() throws Exception { public void testMapRedPlan6() throws Exception { LOG.info("Beginning testMapPlan6"); - populateMapRedPlan6(db.getTable(Warehouse.DEFAULT_DATABASE_NAME, + populateMapRedPlan6(db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, "src")); executePlan(); fileDiff("lt100.sorted.txt", "mapredplan6.out"); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java index ca4d36f30d..461b56473f 100755 --- a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hive.ql.metadata; -import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DEFAULT_DATABASE_NAME; import java.util.ArrayList; import java.util.Arrays; @@ -39,6 +39,7 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; 
import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.ql.stats.StatsUtils; @@ -116,13 +117,13 @@ public void testTable() throws Throwable { // create a simple table and test create, drop, get String tableName = "table_for_testtable"; try { - hm.dropTable(Warehouse.DEFAULT_DATABASE_NAME, tableName); + hm.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName); } catch (HiveException e1) { e1.printStackTrace(); assertTrue("Unable to drop table", false); } - Table tbl = new Table(Warehouse.DEFAULT_DATABASE_NAME, tableName); + Table tbl = new Table(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName); List fields = tbl.getCols(); fields.add(new FieldSchema("col1", serdeConstants.INT_TYPE_NAME, "int -- first column")); @@ -182,9 +183,9 @@ public void testTable() throws Throwable { validateTable(tbl, tableName); try { - hm.dropTable(Warehouse.DEFAULT_DATABASE_NAME, tableName, true, + hm.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName, true, false); - Table ft2 = hm.getTable(Warehouse.DEFAULT_DATABASE_NAME, + Table ft2 = hm.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName, false); assertNull("Unable to drop table ", ft2); } catch (HiveException e) { @@ -214,12 +215,12 @@ public void testThriftTable() throws Throwable { String tableName = "table_for_test_thrifttable"; try { try { - hm.dropTable(Warehouse.DEFAULT_DATABASE_NAME, tableName); + hm.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName); } catch (HiveException e1) { System.err.println(StringUtils.stringifyException(e1)); assertTrue("Unable to drop table", false); } - Table tbl = new Table(Warehouse.DEFAULT_DATABASE_NAME, tableName); + Table tbl = new Table(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName); tbl.setInputFormatClass(SequenceFileInputFormat.class.getName()); tbl.setOutputFormatClass(SequenceFileOutputFormat.class.getName()); tbl.setSerializationLib(ThriftDeserializer.class.getName()); @@ -306,7 +307,7 @@ private void validateTable(Table tbl, String tableName) throws MetaException { // (create table sets it to empty (non null) structures) tbl.getTTable().setPrivilegesIsSet(false); - ft = hm.getTable(Warehouse.DEFAULT_DATABASE_NAME, tableName); + ft = hm.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName); assertNotNull("Unable to fetch table", ft); ft.checkValidity(hiveConf); assertEquals("Table names didn't match for table: " + tableName, tbl @@ -530,7 +531,7 @@ private void cleanUpTableQuietly(String dbName, String tableName) { * @throws Exception on failure. 
*/ public void testDropPartitionsWithPurge() throws Exception { - String dbName = Warehouse.DEFAULT_DATABASE_NAME; + String dbName = MetaStoreUtils.DEFAULT_DATABASE_NAME; String tableName = "table_for_testDropPartitionsWithPurge"; try { @@ -593,7 +594,7 @@ public void testDropPartitionsWithPurge() throws Exception { */ public void testAutoPurgeTablesAndPartitions() throws Throwable { - String dbName = Warehouse.DEFAULT_DATABASE_NAME; + String dbName = MetaStoreUtils.DEFAULT_DATABASE_NAME; String tableName = "table_for_testAutoPurgeTablesAndPartitions"; try { @@ -647,7 +648,7 @@ public void testPartition() throws Throwable { try { String tableName = "table_for_testpartition"; try { - hm.dropTable(Warehouse.DEFAULT_DATABASE_NAME, tableName); + hm.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName); } catch (HiveException e) { System.err.println(StringUtils.stringifyException(e)); assertTrue("Unable to drop table: " + tableName, false); @@ -668,7 +669,7 @@ public void testPartition() throws Throwable { } Table tbl = null; try { - tbl = hm.getTable(Warehouse.DEFAULT_DATABASE_NAME, tableName); + tbl = hm.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName); } catch (HiveException e) { System.err.println(StringUtils.stringifyException(e)); assertTrue("Unable to fetch table: " + tableName, false); @@ -683,7 +684,7 @@ public void testPartition() throws Throwable { System.err.println(StringUtils.stringifyException(e)); assertTrue("Unable to create partition for table: " + tableName, false); } - hm.dropTable(Warehouse.DEFAULT_DATABASE_NAME, tableName); + hm.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName); } catch (Throwable e) { System.err.println(StringUtils.stringifyException(e)); System.err.println("testPartition() failed"); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/session/TestSessionState.java b/ql/src/test/org/apache/hadoop/hive/ql/session/TestSessionState.java index 0fa1c81f1a..4cb1cef552 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/session/TestSessionState.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/session/TestSessionState.java @@ -33,7 +33,7 @@ import org.apache.hadoop.fs.LocalFileSystem; import org.apache.hadoop.fs.ParentNotDirectoryException; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.metastore.Warehouse; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.hive.conf.HiveConf; @@ -115,7 +115,7 @@ public void tearDown(){ @Test public void testgetDbName() throws Exception { //check that we start with default db - assertEquals(Warehouse.DEFAULT_DATABASE_NAME, + assertEquals(MetaStoreUtils.DEFAULT_DATABASE_NAME, SessionState.get().getCurrentDatabase()); final String newdb = "DB_2"; @@ -126,7 +126,7 @@ public void testgetDbName() throws Exception { //verify that a new sessionstate has default db SessionState.start(new HiveConf()); - assertEquals(Warehouse.DEFAULT_DATABASE_NAME, + assertEquals(MetaStoreUtils.DEFAULT_DATABASE_NAME, SessionState.get().getCurrentDatabase()); } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/AcidEventListener.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/AcidEventListener.java index 52792471f6..fb7a544607 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/AcidEventListener.java +++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/AcidEventListener.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hive.metastore.events.DropTableEvent; import org.apache.hadoop.hive.metastore.txn.TxnStore; import org.apache.hadoop.hive.metastore.txn.TxnUtils; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; /** @@ -99,8 +100,8 @@ public void onAlterPartition(AlterPartitionEvent partitionEvent) throws MetaExc Partition oldPart = partitionEvent.getOldPartition(); Partition newPart = partitionEvent.getNewPartition(); Table t = partitionEvent.getTable(); - String oldPartName = Warehouse.makePartName(t.getPartitionKeys(), oldPart.getValues()); - String newPartName = Warehouse.makePartName(t.getPartitionKeys(), newPart.getValues()); + String oldPartName = MetaStoreUtils.makePartName(t.getPartitionKeys(), oldPart.getValues()); + String newPartName = MetaStoreUtils.makePartName(t.getPartitionKeys(), newPart.getValues()); if(!oldPartName.equals(newPartName)) { txnHandler = getTxnHandler(); txnHandler.onRename(t.getCatName(), t.getDbName(), t.getTableName(), oldPartName, diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java index 0441a33cd7..d450ab3856 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java @@ -61,7 +61,7 @@ import java.util.Map; import java.util.Map.Entry; -import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DEFAULT_CATALOG_NAME; import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog; import static org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier; @@ -615,7 +615,7 @@ public Partition alterPartition(RawStore msdb, Warehouse wh, String catName, Str new_part, tbl, wh, false, true, environmentContext, false); } - String newPartName = Warehouse.makePartName(tbl.getPartitionKeys(), new_part.getValues()); + String newPartName = MetaStoreUtils.makePartName(tbl.getPartitionKeys(), new_part.getValues()); ColumnStatistics cs = updateOrGetPartitionColumnStats(msdb, catName, dbname, name, oldPart.getValues(), oldPart.getSd().getCols(), tbl, new_part, null); msdb.alterPartition(catName, dbname, name, part_vals, new_part, validWriteIds); @@ -881,8 +881,8 @@ private ColumnStatistics updateOrGetPartitionColumnStats( if (newCols == null) { newCols = part.getSd() == null ? 
new ArrayList<>() : part.getSd().getCols(); } - String oldPartName = Warehouse.makePartName(table.getPartitionKeys(), partVals); - String newPartName = Warehouse.makePartName(table.getPartitionKeys(), part.getValues()); + String oldPartName = MetaStoreUtils.makePartName(table.getPartitionKeys(), partVals); + String newPartName = MetaStoreUtils.makePartName(table.getPartitionKeys(), part.getValues()); boolean rename = !part.getDbName().equals(dbname) || !part.getTableName().equals(tblname) || !oldPartName.equals(newPartName); diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java index 95b08eb6dd..52166aaf02 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java @@ -18,10 +18,10 @@ package org.apache.hadoop.hive.metastore; import static org.apache.commons.lang.StringUtils.join; -import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_COMMENT; -import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; -import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; -import static org.apache.hadoop.hive.metastore.Warehouse.getCatalogQualifiedTableName; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DEFAULT_DATABASE_COMMENT; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DEFAULT_DATABASE_NAME; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DEFAULT_CATALOG_NAME; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getCatalogQualifiedTableName; import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog; import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.parseDbName; import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.CAT_NAME; @@ -749,7 +749,7 @@ public static void createDefaultCatalog(RawStore ms, Warehouse wh) throws MetaEx } catch (NoSuchObjectException e) { Catalog cat = new Catalog(DEFAULT_CATALOG_NAME, wh.getWhRoot().toString()); - cat.setDescription(Warehouse.DEFAULT_CATALOG_COMMENT); + cat.setDescription(MetaStoreUtils.DEFAULT_CATALOG_COMMENT); ms.createCatalog(cat); } } @@ -3145,7 +3145,7 @@ private Partition append_partition_common(RawStore ms, String catName, String db firePreEvent(new PreAddPartitionEvent(tbl, part, this)); part.setSd(tbl.getSd().deepCopy()); - partLocation = new Path(tbl.getSd().getLocation(), Warehouse + partLocation = new Path(tbl.getSd().getLocation(), MetaStoreUtils .makePartName(tbl.getPartitionKeys(), part_vals)); part.getSd().setLocation(partLocation.toString()); @@ -3763,7 +3763,7 @@ private boolean createLocationForAddedPartition( // set default location if not specified and this is // a physical table partition (not a view) if (tbl.getSd().getLocation() != null) { - partLocation = new Path(tbl.getSd().getLocation(), Warehouse + partLocation = new Path(tbl.getSd().getLocation(), MetaStoreUtils .makePartName(tbl.getPartitionKeys(), part.getValues())); } } else { @@ -4016,9 +4016,9 @@ public Partition exchange_partition(Map partitionSpecs, " Their partitions cannot be exchanged."); } Path sourcePath = new Path(sourceTable.getSd().getLocation(), - Warehouse.makePartName(partitionKeysPresent, partValsPresent)); + 
MetaStoreUtils.makePartName(partitionKeysPresent, partValsPresent)); Path destPath = new Path(destinationTable.getSd().getLocation(), - Warehouse.makePartName(partitionKeysPresent, partValsPresent)); + MetaStoreUtils.makePartName(partitionKeysPresent, partValsPresent)); List destPartitions = new ArrayList<>(); Map transactionalListenerResponsesForAddPartition = Collections.emptyMap(); @@ -4031,7 +4031,7 @@ public Partition exchange_partition(Map partitionSpecs, if (destPartitionNames != null && !destPartitionNames.isEmpty()) { for (Partition partition : partitionsToExchange) { String partToExchangeName = - Warehouse.makePartName(destinationTable.getPartitionKeys(), partition.getValues()); + MetaStoreUtils.makePartName(destinationTable.getPartitionKeys(), partition.getValues()); if (destPartitionNames.contains(partToExchangeName)) { throw new MetaException("The partition " + partToExchangeName + " already exists in the table " + destTableName); @@ -4052,7 +4052,7 @@ public Partition exchange_partition(Map partitionSpecs, destPartition.setDbName(parsedDestDbName[DB_NAME]); destPartition.setTableName(destinationTable.getTableName()); Path destPartitionPath = new Path(destinationTable.getSd().getLocation(), - Warehouse.makePartName(destinationTable.getPartitionKeys(), partition.getValues())); + MetaStoreUtils.makePartName(destinationTable.getPartitionKeys(), partition.getValues())); destPartition.getSd().setLocation(destPartitionPath.toString()); ms.addPartition(destPartition); destPartitions.add(destPartition); @@ -5414,7 +5414,7 @@ public String get_config_value(String name, String defaultValue) throws MetaException, InvalidObjectException { Preconditions.checkArgument(t != null, "Table can not be null"); // Unescape the partition name - LinkedHashMap hm = Warehouse.makeSpecFromName(partName); + LinkedHashMap hm = MetaStoreUtils.makeSpecFromName(partName); List partVals = new ArrayList<>(); for (FieldSchema field : t.getPartitionKeys()) { @@ -5639,7 +5639,7 @@ public boolean drop_partition_by_name_with_environment_context(final String db_n if (part_name.length() == 0) { return new ArrayList<>(); } - LinkedHashMap map = Warehouse.makeSpecFromName(part_name); + LinkedHashMap map = MetaStoreUtils.makeSpecFromName(part_name); List part_vals = new ArrayList<>(); part_vals.addAll(map.values()); return part_vals; @@ -5650,13 +5650,13 @@ public boolean drop_partition_by_name_with_environment_context(final String db_n if (part_name.length() == 0) { return new HashMap<>(); } - return Warehouse.makeSpecFromName(part_name); + return MetaStoreUtils.makeSpecFromName(part_name); } private String lowerCaseConvertPartName(String partName) throws MetaException { if (partName == null) return partName; boolean isFirst = true; - Map partSpec = Warehouse.makeEscSpecFromName(partName); + Map partSpec = MetaStoreUtils.makeEscSpecFromName(partName); String convertedPartName = new String(); for (Map.Entry entry : partSpec.entrySet()) { @@ -6179,7 +6179,7 @@ private String getPartName(HiveObjectRef hiveObject) throws MetaException { getDefaultCatalog(conf); Table table = get_table_core(catName, hiveObject.getDbName(), hiveObject .getObjectName()); - partName = Warehouse + partName = MetaStoreUtils .makePartName(table.getPartitionKeys(), partValue); } catch (NoSuchObjectException e) { throw new MetaException(e.getMessage()); @@ -6617,7 +6617,7 @@ private PrincipalPrivilegeSet get_user_privilege_set(final String userName, return getMS().listPrincipalPartitionColumnGrantsAll(principalName, principalType); } Table tbl = 
get_table_core(catName, dbName, tableName); - String partName = Warehouse.makePartName(tbl.getPartitionKeys(), partValues); + String partName = MetaStoreUtils.makePartName(tbl.getPartitionKeys(), partValues); if (principalName == null) { return getMS().listPartitionColumnGrantsAll(catName, dbName, tableName, partName, columnName); } @@ -6662,7 +6662,7 @@ private PrincipalPrivilegeSet get_user_privilege_set(final String userName, return getMS().listPrincipalPartitionGrantsAll(principalName, principalType); } Table tbl = get_table_core(catName, dbName, tableName); - String partName = Warehouse.makePartName(tbl.getPartitionKeys(), partValues); + String partName = MetaStoreUtils.makePartName(tbl.getPartitionKeys(), partValues); if (principalName == null) { return getMS().listPartitionGrantsAll(catName, dbName, tableName, partName); } @@ -7345,7 +7345,7 @@ public void commit_txn(CommitTxnRequest rqst) throws TException { Table tbl = getTblObject(writeEventInfo.getDatabase(), writeEventInfo.getTable()); if (writeEventInfo.getPartition() != null && !writeEventInfo.getPartition().isEmpty()) { - partitionValue = Warehouse.getPartValuesFromPartName(writeEventInfo.getPartition()); + partitionValue = MetaStoreUtils.getPartValuesFromPartName(writeEventInfo.getPartition()); ptnObj = getPartitionObj(writeEventInfo.getDatabase(), writeEventInfo.getTable(), partitionValue, tbl); root = ptnObj.getSd().getLocation(); } else { @@ -7412,7 +7412,7 @@ private void addTxnWriteNotificationLog(Table tableObj, Partition ptnObj, WriteN throws MetaException { String partition = ""; //Empty string is an invalid partition name. Can be used for non partitioned table. if (ptnObj != null) { - partition = Warehouse.makePartName(tableObj.getPartitionKeys(), rqst.getPartitionVals()); + partition = MetaStoreUtils.makePartName(tableObj.getPartitionKeys(), rqst.getPartitionVals()); } AcidWriteEvent event = new AcidWriteEvent(partition, tableObj, ptnObj, rqst); getTxnHandler().addWriteNotificationLog(event); diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java index 5ae00af564..15be2e5f75 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hive.metastore; -import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DEFAULT_DATABASE_NAME; import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog; import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.prependCatalogToDbName; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java index 91405b9a33..186abb0fd2 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java @@ -128,6 +128,7 @@ import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse; import 
org.apache.hadoop.hive.metastore.api.WriteNotificationLogRequest; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.metastore.utils.ObjectPair; import org.apache.thrift.TException; @@ -689,7 +690,7 @@ Database getDatabase(String databaseName) /** * Get a database. * @param catalogName catalog name. Can be null, in which case - * {@link Warehouse#DEFAULT_CATALOG_NAME} will be assumed. + * {@link MetaStoreUtils#DEFAULT_CATALOG_NAME} will be assumed. * @param databaseName database name * @return the database object * @throws NoSuchObjectException No database with this name exists in the specified catalog @@ -1651,7 +1652,7 @@ void alter_table(String catName, String databaseName, String tblName, Table tabl /** * Create a new database. * @param db database object. If the catalog name is null it will be assumed to be - * {@link Warehouse#DEFAULT_CATALOG_NAME}. + * {@link MetaStoreUtils#DEFAULT_CATALOG_NAME}. * @throws InvalidObjectException There is something wrong with the database object. * @throws AlreadyExistsException There is already a database of this name in the specified * catalog. @@ -1707,7 +1708,7 @@ void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb, bool /** * Drop a database. * @param catName Catalog name. This can be null, in which case - * {@link Warehouse#DEFAULT_CATALOG_NAME} will be assumed. + * {@link MetaStoreUtils#DEFAULT_CATALOG_NAME} will be assumed. * @param dbName database name. * @param deleteData whether to drop the underlying HDFS directory. * @param ignoreUnknownDb whether to ignore an attempt to drop a non-existent database @@ -1727,7 +1728,7 @@ void dropDatabase(String catName, String dbName, boolean deleteData, boolean ign * Drop a database. Equivalent to * {@link #dropDatabase(String, String, boolean, boolean, boolean)} with cascade = false. * @param catName Catalog name. This can be null, in which case - * {@link Warehouse#DEFAULT_CATALOG_NAME} will be assumed. + * {@link MetaStoreUtils#DEFAULT_CATALOG_NAME} will be assumed. * @param dbName database name. * @param deleteData whether to drop the underlying HDFS directory. * @param ignoreUnknownDb whether to ignore an attempt to drop a non-existent database @@ -1748,7 +1749,7 @@ default void dropDatabase(String catName, String dbName, boolean deleteData, * {@link #dropDatabase(String, String, boolean, boolean, boolean)} with deleteData = * true, ignoreUnknownDb = false, cascade = false. * @param catName Catalog name. This can be null, in which case - * {@link Warehouse#DEFAULT_CATALOG_NAME} will be assumed. + * {@link MetaStoreUtils#DEFAULT_CATALOG_NAME} will be assumed. * @param dbName database name. * @throws NoSuchObjectException No database of this name exists in the specified catalog and * ignoreUnknownDb is false. @@ -1776,7 +1777,7 @@ void alterDatabase(String name, Database db) /** * Alter a database. * @param catName Catalog name. This can be null, in which case - * {@link Warehouse#DEFAULT_CATALOG_NAME} will be assumed. + * {@link MetaStoreUtils#DEFAULT_CATALOG_NAME} will be assumed. * @param dbName database name. * @param newDb new database object. * @throws NoSuchObjectException No database of this name exists in the specified catalog. 
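Note: the javadoc edits above all describe the same convention: a null catalog name falls back to the default catalog constant, which this patch relocates from Warehouse to MetaStoreUtils. A hypothetical illustration (resolveCatalog is invented for the example; the patched client code itself goes through MetaStoreUtils.getDefaultCatalog(conf), per the imports above):

    // Hypothetical helper -- only illustrates the null-catalog fallback.
    import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;

    public class CatalogFallbackSketch {
      static String resolveCatalog(String catName) {
        // DEFAULT_CATALOG_NAME is "hive", per the constant removed from Warehouse below.
        return catName != null ? catName : MetaStoreUtils.DEFAULT_CATALOG_NAME;
      }
      public static void main(String[] args) {
        System.out.println(resolveCatalog(null));    // hive
        System.out.println(resolveCatalog("spark")); // spark
      }
    }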
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MaterializationsRebuildLockHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MaterializationsRebuildLockHandler.java index dd31226dca..91469b6ccf 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MaterializationsRebuildLockHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MaterializationsRebuildLockHandler.java @@ -20,6 +20,7 @@ import org.apache.hadoop.hive.metastore.api.LockResponse; import org.apache.hadoop.hive.metastore.api.LockState; import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import java.util.Iterator; import java.util.Map; @@ -77,7 +78,7 @@ public static MaterializationsRebuildLockHandler get() { */ public LockResponse lockResource(String dbName, String tableName, long txnId) { final ResourceLock prevResourceLock = locks.putIfAbsent( - Warehouse.getQualifiedName(dbName, tableName), + MetaStoreUtils.getQualifiedName(dbName, tableName), new ResourceLock(txnId, System.nanoTime(), State.ACQUIRED)); if (prevResourceLock != null) { return new LockResponse(txnId, LockState.NOT_ACQUIRED); @@ -94,7 +95,7 @@ public LockResponse lockResource(String dbName, String tableName, long txnId) { * to COMMIT_READY state, false otherwise */ public boolean readyToCommitResource(String dbName, String tableName, long txnId) { - final ResourceLock prevResourceLock = locks.get(Warehouse.getQualifiedName(dbName, tableName)); + final ResourceLock prevResourceLock = locks.get(MetaStoreUtils.getQualifiedName(dbName, tableName)); if (prevResourceLock == null || prevResourceLock.txnId != txnId) { // Lock was outdated and it was removed (then maybe another transaction picked it up) return false; @@ -110,7 +111,7 @@ public boolean readyToCommitResource(String dbName, String tableName, long txnId * @throws MetaException */ public boolean refreshLockResource(String dbName, String tableName, long txnId) { - final ResourceLock prevResourceLock = locks.get(Warehouse.getQualifiedName(dbName, tableName)); + final ResourceLock prevResourceLock = locks.get(MetaStoreUtils.getQualifiedName(dbName, tableName)); if (prevResourceLock == null || prevResourceLock.txnId != txnId || prevResourceLock.state.get() != State.ACQUIRED) { // Lock was outdated and it was removed (then maybe another transaction picked it up) @@ -130,7 +131,7 @@ public boolean refreshLockResource(String dbName, String tableName, long txnId) * @throws MetaException */ public boolean unlockResource(String dbName, String tableName, long txnId) { - final String fullyQualifiedName = Warehouse.getQualifiedName(dbName, tableName); + final String fullyQualifiedName = MetaStoreUtils.getQualifiedName(dbName, tableName); final ResourceLock prevResourceLock = locks.get(fullyQualifiedName); if (prevResourceLock == null || prevResourceLock.txnId != txnId) { return false; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java index 571c789edd..0ea991cfae 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java +++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java @@ -21,7 +21,7 @@ import static org.apache.commons.lang.StringUtils.join; import static org.apache.commons.lang.StringUtils.normalizeSpace; import static org.apache.commons.lang.StringUtils.repeat; -import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DEFAULT_CATALOG_NAME; import java.sql.Blob; import java.sql.Clob; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java index 8e2f94eb69..5bec8b9e0c 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java @@ -2226,7 +2226,7 @@ private CreationMetadata convertToCreationMetadata( Set tablesUsed = new HashSet<>(); for (MTable mtbl : s.getTables()) { tablesUsed.add( - Warehouse.getQualifiedName( + MetaStoreUtils.getQualifiedName( mtbl.getDatabase().getName(), mtbl.getTableName())); } CreationMetadata r = new CreationMetadata(s.getCatalogName(), @@ -2490,7 +2490,7 @@ private MPartition getMPartition(String catName, String dbName, String tableName // Change the query to use part_vals instead of the name which is // redundant TODO: callers of this often get part_vals out of name for no reason... String name = - Warehouse.makePartName(convertToFieldSchemas(mtbl.getPartitionKeys()), part_vals); + MetaStoreUtils.makePartName(convertToFieldSchemas(mtbl.getPartitionKeys()), part_vals); result = getMPartition(catName, dbName, tableName, name); committed = commitTransaction(); } finally { @@ -2592,7 +2592,7 @@ private MPartition convertToMPart(Partition part, MTable mt, boolean useTableCD) msd = convertToMStorageDescriptor(part.getSd()); } - MPartition mpart = new MPartition(Warehouse.makePartName(convertToFieldSchemas(mt + MPartition mpart = new MPartition(MetaStoreUtils.makePartName(convertToFieldSchemas(mt .getPartitionKeys()), part.getValues()), mt, part.getValues(), part .getCreateTime(), part.getLastAccessTime(), msd, part.getParameters()); @@ -2846,7 +2846,7 @@ private boolean dropPartitionCommon(MPartition part) throws NoSuchObjectExceptio parts.add(part); if ("TRUE".equalsIgnoreCase(mtbl.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) { - String partName = Warehouse.makePartName(this.convertToFieldSchemas(mtbl + String partName = MetaStoreUtils.makePartName(this.convertToFieldSchemas(mtbl .getPartitionKeys()), part.getValues()); PrincipalPrivilegeSet partAuth = this.getPartitionPrivilegeSet(catName, dbName, tblName, partName, userName, groupNames); @@ -2878,7 +2878,7 @@ public Partition getPartitionWithAuth(String catName, String dbName, String tblN MTable mtbl = mpart.getTable(); part = convertToPart(mpart); if ("TRUE".equalsIgnoreCase(mtbl.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) { - String partName = Warehouse.makePartName(this.convertToFieldSchemas(mtbl + String partName = MetaStoreUtils.makePartName(this.convertToFieldSchemas(mtbl .getPartitionKeys()), partVals); PrincipalPrivilegeSet partAuth = this.getPartitionPrivilegeSet(catName, dbName, tblName, partName, user_name, group_names); @@ -3038,7 +3038,7 @@ private PartitionValuesResponse extractPartitionNamesByFilter( for (Partition partition : 
partitions) { // Check for NULL's just to be safe if (tbl.getPartitionKeys() != null && partition.getValues() != null) { - partitionNames.add(Warehouse.makePartName(tbl.getPartitionKeys(), partition.getValues())); + partitionNames.add(MetaStoreUtils.makePartName(tbl.getPartitionKeys(), partition.getValues())); } } } @@ -3059,7 +3059,7 @@ private PartitionValuesResponse extractPartitionNamesByFilter( for (String partName : partitionNames) { ArrayList vals = new ArrayList(Collections.nCopies(tbl.getPartitionKeys().size(), null)); PartitionValuesRow row = new PartitionValuesRow(); - Warehouse.makeValsFromName(partName, vals); + MetaStoreUtils.makeValsFromName(partName, vals); for (String value : vals) { row.addToRow(value); } @@ -3280,7 +3280,7 @@ private Collection getPartitionPsQueryResults(String catName, String dbName, Str //set auth privileges if (null != userName && null != groupNames && "TRUE".equalsIgnoreCase(mtbl.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) { - String partName = Warehouse.makePartName(this.convertToFieldSchemas(mtbl + String partName = MetaStoreUtils.makePartName(this.convertToFieldSchemas(mtbl .getPartitionKeys()), part.getValues()); PrincipalPrivilegeSet partAuth = getPartitionPrivilegeSet(catName, db_name, tbl_name, partName, userName, groupNames); @@ -6229,7 +6229,7 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) hiveObject.getObjectName(), null); String partName = null; if (hiveObject.getPartValues() != null) { - partName = Warehouse.makePartName(tabObj.getPartitionKeys(), hiveObject.getPartValues()); + partName = MetaStoreUtils.makePartName(tabObj.getPartitionKeys(), hiveObject.getPartValues()); } List partitionGrants = this .listPrincipalMPartitionGrants(userName, principalType, @@ -6263,7 +6263,7 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) .getObjectName(), null); String partName = null; if (hiveObject.getPartValues() != null) { - partName = Warehouse.makePartName(tabObj.getPartitionKeys(), + partName = MetaStoreUtils.makePartName(tabObj.getPartitionKeys(), hiveObject.getPartValues()); } @@ -8332,7 +8332,7 @@ private void writeMTableColumnStatistics(Table table, MTableColumnStatistics mSt try { LOG.info("Updating table level column statistics for table={}" + - " colName={}", Warehouse.getCatalogQualifiedTableName(table), colName); + " colName={}", MetaStoreUtils.getCatalogQualifiedTableName(table), colName); validateTableCols(table, Lists.newArrayList(colName)); if (oldStats != null) { @@ -8758,7 +8758,7 @@ protected ColumnStatistics getJdoResult( // TODO: this could be improved to get partitions in bulk for (ColumnStatistics cs : allStats) { MPartition mpart = getMPartition(catName, dbName, tableName, - Warehouse.getPartValuesFromPartName(cs.getStatsDesc().getPartName())); + MetaStoreUtils.getPartValuesFromPartName(cs.getStatsDesc().getPartName())); if (mpart == null || !isCurrentStatsValidForTheQuery(mpart, writeIdList, false)) { if (mpart != null) { @@ -8845,7 +8845,7 @@ public AggrStats get_aggr_stats_for(String catName, String dbName, String tblNam // checking isolation-level-compliance of each partition column stats. 
for (String partName : partNames) { MPartition mpart = getMPartition( - catName, dbName, tblName, Warehouse.getPartValuesFromPartName(partName)); + catName, dbName, tblName, MetaStoreUtils.getPartValuesFromPartName(partName)); if (!isCurrentStatsValidForTheQuery(mpart, writeIdList, false)) { LOG.debug("The current metastore transactional partition column statistics " + "for " + dbName + "." + tblName + "." + mpart.getPartitionName() + " is not valid " + @@ -8997,7 +8997,7 @@ public boolean deletePartitionColumnStatistics(String catName, String dbName, St boolean ret = false; Query query = null; dbName = org.apache.commons.lang.StringUtils.defaultString(dbName, - Warehouse.DEFAULT_DATABASE_NAME); + MetaStoreUtils.DEFAULT_DATABASE_NAME); catName = normalizeIdentifier(catName); if (tableName == null) { throw new InvalidInputException("Table name is null."); @@ -9081,7 +9081,7 @@ public boolean deleteTableColumnStatistics(String catName, String dbName, String boolean ret = false; Query query = null; dbName = org.apache.commons.lang.StringUtils.defaultString(dbName, - Warehouse.DEFAULT_DATABASE_NAME); + MetaStoreUtils.DEFAULT_DATABASE_NAME); if (tableName == null) { throw new InvalidInputException("Table name is null."); } @@ -9517,7 +9517,7 @@ public void setMetaStoreSchemaVersion(String schemaVersion, String comment) thro public boolean doesPartitionExist(String catName, String dbName, String tableName, List partKeys, List partVals) throws MetaException { - String name = Warehouse.makePartName(partKeys, partVals); + String name = MetaStoreUtils.makePartName(partKeys, partVals); return this.getMPartition(catName, dbName, tableName, name) != null; } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java index 7a0b21b258..4949ace2ce 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java @@ -19,7 +19,6 @@ package org.apache.hadoop.hive.metastore; import java.math.BigDecimal; -import java.math.BigInteger; import java.nio.ByteBuffer; import org.apache.hadoop.hive.metastore.api.BinaryColumnStatsData; @@ -49,7 +48,7 @@ import org.apache.hadoop.hive.metastore.model.MTable; import org.apache.hadoop.hive.metastore.model.MTableColumnStatistics; -import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DEFAULT_CATALOG_NAME; /** * This class contains conversion logic that creates Thrift stat objects from diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java index 004acf8f12..015c6850fa 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java @@ -28,7 +28,6 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.RemoteIterator; import 
org.apache.hadoop.hive.metastore.api.InitializeTableWriteIdsRequest; import org.apache.hadoop.hive.metastore.api.InvalidOperationException; import org.apache.hadoop.hive.metastore.api.MetaException; @@ -161,11 +160,11 @@ private void handleAlterTableTransactionalProp(PreAlterTableEvent context) throw || !"insert_only".equalsIgnoreCase(transactionalPropertiesValue); if (isFullAcid && !conformToAcid(newTable)) { throw new MetaException("The table must be stored using an ACID compliant " - + "format (such as ORC): " + Warehouse.getQualifiedName(newTable)); + + "format (such as ORC): " + MetaStoreUtils.getQualifiedName(newTable)); } if (newTable.getTableType().equals(TableType.EXTERNAL_TABLE.toString())) { - throw new MetaException(Warehouse.getQualifiedName(newTable) + + throw new MetaException(MetaStoreUtils.getQualifiedName(newTable) + " cannot be declared transactional because it's an external table"); } if (isFullAcid) { @@ -186,7 +185,7 @@ private void handleAlterTableTransactionalProp(PreAlterTableEvent context) throw // if here, there is attempt to set transactional to something other than 'true' // and NOT the same value it was before throw new MetaException("TBLPROPERTIES with 'transactional'='true' cannot be unset: " - + Warehouse.getQualifiedName(newTable)); + + MetaStoreUtils.getQualifiedName(newTable)); } if (isTransactionalPropertiesPresent) { @@ -226,7 +225,7 @@ private void checkSorted(Table newTable) throws MetaException { } StorageDescriptor sd = newTable.getSd(); if (sd.getSortCols() != null && sd.getSortCols().size() > 0) { - throw new MetaException("Table " + Warehouse.getQualifiedName(newTable) + throw new MetaException("Table " + MetaStoreUtils.getQualifiedName(newTable) + " cannot support full ACID functionality since it is sorted."); } } @@ -242,7 +241,7 @@ private void checkSorted(Table newTable) throws MetaException { private void makeAcid(Table newTable) throws MetaException { if(newTable.getParameters() != null && newTable.getParameters().containsKey(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL)) { - LOG.info("Could not make " + Warehouse.getQualifiedName(newTable) + " acid: already has " + + LOG.info("Could not make " + MetaStoreUtils.getQualifiedName(newTable) + " acid: already has " + hive_metastoreConstants.TABLE_IS_TRANSACTIONAL + "=" + newTable.getParameters().get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL)); return; @@ -260,17 +259,17 @@ private void makeAcid(Table newTable) throws MetaException { if(makeAcid) { if(!conformToAcid(newTable)) { - LOG.info("Could not make " + Warehouse.getQualifiedName(newTable) + " acid: wrong IO format"); + LOG.info("Could not make " + MetaStoreUtils.getQualifiedName(newTable) + " acid: wrong IO format"); return; } if(!TableType.MANAGED_TABLE.toString().equalsIgnoreCase(newTable.getTableType())) { //todo should this check be in conformToAcid()? 
- LOG.info("Could not make " + Warehouse.getQualifiedName(newTable) + " acid: it's " + + LOG.info("Could not make " + MetaStoreUtils.getQualifiedName(newTable) + " acid: it's " + newTable.getTableType()); return; } if(newTable.getSd().getSortColsSize() > 0) { - LOG.info("Could not make " + Warehouse.getQualifiedName(newTable) + " acid: it's sorted"); + LOG.info("Could not make " + MetaStoreUtils.getQualifiedName(newTable) + " acid: it's sorted"); return; } //check if orc and not sorted @@ -280,7 +279,7 @@ private void makeAcid(Table newTable) throws MetaException { } parameters.put(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, "true"); newTable.setParameters(parameters); - LOG.info("Automatically chose to make " + Warehouse.getQualifiedName(newTable) + " acid."); + LOG.info("Automatically chose to make " + MetaStoreUtils.getQualifiedName(newTable) + " acid."); } } /** @@ -327,12 +326,12 @@ private void handleCreateTableTransactionalProp(PreCreateTableEvent context) thr // INSERT_ONLY tables don't have to conform to ACID requirement like ORC or bucketing if (transactionalProperties == null || !"insert_only".equalsIgnoreCase(transactionalProperties)) { throw new MetaException("The table must be stored using an ACID compliant format (such as ORC): " - + Warehouse.getQualifiedName(newTable)); + + MetaStoreUtils.getQualifiedName(newTable)); } } if (MetaStoreUtils.isExternalTable(newTable)) { - throw new MetaException(Warehouse.getQualifiedName(newTable) + + throw new MetaException(MetaStoreUtils.getQualifiedName(newTable) + " cannot be declared transactional because it's an external table"); } @@ -347,7 +346,7 @@ private void handleCreateTableTransactionalProp(PreCreateTableEvent context) thr } // transactional is found, but the value is not in expected range throw new MetaException("'transactional' property of TBLPROPERTIES may only have value 'true': " - + Warehouse.getQualifiedName(newTable)); + + MetaStoreUtils.getQualifiedName(newTable)); } /** @@ -379,7 +378,7 @@ public static boolean conformToAcid(Table table) throws MetaException { } } catch (ClassNotFoundException e) { LOG.warn("Could not verify InputFormat=" + sd.getInputFormat() + " or OutputFormat=" + - sd.getOutputFormat() + " for " + Warehouse.getQualifiedName(table)); + sd.getOutputFormat() + " for " + MetaStoreUtils.getQualifiedName(table)); return false; } @@ -406,7 +405,7 @@ private void initializeTransactionalProperties(Table table) throws MetaException String validationError = validateTransactionalProperties(tableTransactionalProperties); if (validationError != null) { throw new MetaException("Invalid transactional properties specified for " - + Warehouse.getQualifiedName(table) + " with the error " + validationError); + + MetaStoreUtils.getQualifiedName(table) + " with the error " + validationError); } break; } @@ -475,12 +474,12 @@ private void validateTableStructure(IHMSHandler hmsHandler, Table table) ); if (!validFile) { throw new IllegalStateException("Unexpected data file name format. Cannot convert " + - Warehouse.getQualifiedName(table) + " to transactional table. File: " + MetaStoreUtils.getQualifiedName(table) + " to transactional table. 
File: " + fileStatus.getPath()); } } } catch (IOException|NoSuchObjectException e) { - String msg = "Unable to list files for " + Warehouse.getQualifiedName(table); + String msg = "Unable to list files for " + MetaStoreUtils.getQualifiedName(table); LOG.error(msg, e); MetaException e1 = new MetaException(msg); e1.initCause(e); diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java index da5a71cc64..2e58af62b7 100755 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java @@ -20,18 +20,14 @@ import java.io.FileNotFoundException; import java.io.IOException; -import java.util.AbstractList; import java.util.ArrayList; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Map.Entry; -import java.util.Set; import java.util.regex.Matcher; -import java.util.regex.Pattern; import org.apache.commons.lang.StringUtils; -import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.metastore.api.Catalog; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; @@ -51,7 +47,6 @@ import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.util.ReflectionUtils; @@ -60,12 +55,6 @@ * This class represents a warehouse where data of Hive tables is stored */ public class Warehouse { - public static final String DEFAULT_CATALOG_NAME = "hive"; - public static final String DEFAULT_CATALOG_COMMENT = "Default catalog, for Hive"; - public static final String DEFAULT_DATABASE_NAME = "default"; - public static final String DEFAULT_DATABASE_COMMENT = "Default Hive database"; - public static final String DEFAULT_SERIALIZATION_FORMAT = "1"; - public static final String DATABASE_WAREHOUSE_SUFFIX = ".db"; private static final String CAT_DB_TABLE_SEPARATOR = "."; private Path whRoot; @@ -108,44 +97,13 @@ private MetaStoreFS getMetaStoreFsHandler(Configuration conf) } - /** - * Helper functions to convert IOException to MetaException - */ - public static FileSystem getFs(Path f, Configuration conf) throws MetaException { - try { - return f.getFileSystem(conf); - } catch (IOException e) { - MetaStoreUtils.logAndThrowMetaException(e); - } - return null; - } - public FileSystem getFs(Path f) throws MetaException { - return getFs(f, conf); + return MetaStoreUtils.getFs(f, conf); } - /** - * Hadoop File System reverse lookups paths with raw ip addresses The File - * System URI always contains the canonical DNS name of the Namenode. - * Subsequently, operations on paths with raw ip addresses cause an exception - * since they don't match the file system URI. - * - * This routine solves this problem by replacing the scheme and authority of a - * path with the scheme and authority of the FileSystem that it maps to. 
- * - * @param path - * Path to be canonicalized - * @return Path with canonical scheme and authority - */ - public static Path getDnsPath(Path path, Configuration conf) throws MetaException { - FileSystem fs = getFs(path, conf); - return (new Path(fs.getUri().getScheme(), fs.getUri().getAuthority(), path - .toUri().getPath())); - } - public Path getDnsPath(Path path) throws MetaException { - return getDnsPath(path, conf); + return MetaStoreUtils.getDnsPath(path, conf); } /** @@ -190,8 +148,8 @@ public Path determineDatabasePath(Catalog cat, Database db) throws MetaException if (db.isSetLocationUri()) { return getDnsPath(new Path(db.getLocationUri())); } - if (cat == null || cat.getName().equalsIgnoreCase(DEFAULT_CATALOG_NAME)) { - if (db.getName().equalsIgnoreCase(DEFAULT_DATABASE_NAME)) { + if (cat == null || cat.getName().equalsIgnoreCase(MetaStoreUtils.DEFAULT_CATALOG_NAME)) { + if (db.getName().equalsIgnoreCase(MetaStoreUtils.DEFAULT_DATABASE_NAME)) { return getWhRoot(); } else { return new Path(getWhRoot(), dbDirFromDbName(db)); @@ -202,7 +160,7 @@ public Path determineDatabasePath(Catalog cat, Database db) throws MetaException } private String dbDirFromDbName(Database db) throws MetaException { - return db.getName().toLowerCase() + DATABASE_WAREHOUSE_SUFFIX; + return db.getName().toLowerCase() + MetaStoreUtils.DATABASE_WAREHOUSE_SUFFIX; } /** @@ -214,8 +172,8 @@ private String dbDirFromDbName(Database db) throws MetaException { * file system. */ public Path getDatabasePath(Database db) throws MetaException { - if (db.getCatalogName().equalsIgnoreCase(DEFAULT_CATALOG_NAME) && - db.getName().equalsIgnoreCase(DEFAULT_DATABASE_NAME)) { + if (db.getCatalogName().equalsIgnoreCase(MetaStoreUtils.DEFAULT_CATALOG_NAME) && + db.getName().equalsIgnoreCase(MetaStoreUtils.DEFAULT_DATABASE_NAME)) { return getWhRoot(); } return new Path(db.getLocationUri()); @@ -227,17 +185,17 @@ public Path getDefaultDatabasePath(String dbName) throws MetaException { // new database is being created. Once I have confirmation of this change calls of this to // getDatabasePath(), since it does the right thing. Also, merge this with // determineDatabasePath() as it duplicates much of the logic. 
- if (dbName.equalsIgnoreCase(DEFAULT_DATABASE_NAME)) { + if (dbName.equalsIgnoreCase(MetaStoreUtils.DEFAULT_DATABASE_NAME)) { return getWhRoot(); } - return new Path(getWhRoot(), dbName.toLowerCase() + DATABASE_WAREHOUSE_SUFFIX); + return new Path(getWhRoot(), dbName.toLowerCase() + MetaStoreUtils.DATABASE_WAREHOUSE_SUFFIX); } public Path getDefaultExternalDatabasePath(String dbName) throws MetaException { - if (dbName.equalsIgnoreCase(DEFAULT_DATABASE_NAME)) { + if (dbName.equalsIgnoreCase(MetaStoreUtils.DEFAULT_DATABASE_NAME)) { return getWhRootExternal(); } - return new Path(getWhRootExternal(), dbName.toLowerCase() + DATABASE_WAREHOUSE_SUFFIX); + return new Path(getWhRootExternal(), dbName.toLowerCase() + MetaStoreUtils.DATABASE_WAREHOUSE_SUFFIX); } private boolean hasExternalWarehouseRoot() { @@ -284,29 +242,6 @@ public Path getDefaultTablePath(Database db, Table table) throws MetaException { return getDefaultTablePath(db, table.getTableName(), MetaStoreUtils.isExternalTable(table)); } - @Deprecated // Use TableName - public static String getQualifiedName(Table table) { - return TableName.getDbTable(table.getDbName(), table.getTableName()); - } - - @Deprecated // Use TableName - public static String getQualifiedName(String dbName, String tableName) { - return TableName.getDbTable(dbName, tableName); - } - - public static String getQualifiedName(Partition partition) { - return partition.getDbName() + "." + partition.getTableName() + partition.getValues(); - } - - /** - * Get table name in cat.db.table format. - * @param table table object - * @return fully qualified name. - */ - public static String getCatalogQualifiedTableName(Table table) { - return TableName.getQualified(table.getCatName(), table.getDbName(), table.getTableName()); - } - public boolean mkdirs(Path f) throws MetaException { FileSystem fs; try { @@ -406,181 +341,6 @@ public boolean isWritable(Path path) throws IOException { } } - private static String escapePathName(String path) { - return FileUtils.escapePathName(path); - } - - private static String unescapePathName(String path) { - return FileUtils.unescapePathName(path); - } - - /** - * Given a partition specification, return the path corresponding to the - * partition spec. By default, the specification does not include dynamic partitions. - * @param spec - * @return string representation of the partition specification. - * @throws MetaException - */ - public static String makePartPath(Map spec) - throws MetaException { - return makePartName(spec, true); - } - - /** - * Makes a partition name from a specification - * @param spec - * @param addTrailingSeperator if true, adds a trailing separator e.g. 'ds=1/' - * @return partition name - * @throws MetaException - */ - public static String makePartName(Map spec, - boolean addTrailingSeperator) - throws MetaException { - StringBuilder suffixBuf = new StringBuilder(); - int i = 0; - for (Entry e : spec.entrySet()) { - if (e.getValue() == null || e.getValue().length() == 0) { - throw new MetaException("Partition spec is incorrect. " + spec); - } - if (i>0) { - suffixBuf.append(Path.SEPARATOR); - } - suffixBuf.append(escapePathName(e.getKey())); - suffixBuf.append('='); - suffixBuf.append(escapePathName(e.getValue())); - i++; - } - if (addTrailingSeperator) { - suffixBuf.append(Path.SEPARATOR); - } - return suffixBuf.toString(); - } - /** - * Given a dynamic partition specification, return the path corresponding to the - * static part of partition specification. 
This is basically a copy of makePartName - * but we get rid of MetaException since it is not serializable. - * @param spec - * @return string representation of the static part of the partition specification. - */ - public static String makeDynamicPartName(Map spec) { - StringBuilder suffixBuf = new StringBuilder(); - for (Entry e : spec.entrySet()) { - if (e.getValue() != null && e.getValue().length() > 0) { - suffixBuf.append(escapePathName(e.getKey())); - suffixBuf.append('='); - suffixBuf.append(escapePathName(e.getValue())); - suffixBuf.append(Path.SEPARATOR); - } else { // stop once we see a dynamic partition - break; - } - } - return suffixBuf.toString(); - } - - static final Pattern pat = Pattern.compile("([^/]+)=([^/]+)"); - - private static final Pattern slash = Pattern.compile("/"); - - /** - * Extracts values from partition name without the column names. - * @param name Partition name. - * @param result The result. Must be pre-sized to the expected number of columns. - */ - public static AbstractList makeValsFromName( - String name, AbstractList result) throws MetaException { - assert name != null; - String[] parts = slash.split(name, 0); - if (result == null) { - result = new ArrayList<>(parts.length); - for (int i = 0; i < parts.length; ++i) { - result.add(null); - } - } else if (parts.length != result.size()) { - throw new MetaException( - "Expected " + result.size() + " components, got " + parts.length + " (" + name + ")"); - } - for (int i = 0; i < parts.length; ++i) { - int eq = parts[i].indexOf('='); - if (eq <= 0) { - throw new MetaException("Unexpected component " + parts[i]); - } - result.set(i, unescapePathName(parts[i].substring(eq + 1))); - } - return result; - } - - public static LinkedHashMap makeSpecFromName(String name) - throws MetaException { - if (name == null || name.isEmpty()) { - throw new MetaException("Partition name is invalid. " + name); - } - LinkedHashMap partSpec = new LinkedHashMap<>(); - makeSpecFromName(partSpec, new Path(name), null); - return partSpec; - } - - public static boolean makeSpecFromName(Map partSpec, Path currPath, - Set requiredKeys) { - List kvs = new ArrayList<>(); - do { - String component = currPath.getName(); - Matcher m = pat.matcher(component); - if (m.matches()) { - String k = unescapePathName(m.group(1)); - String v = unescapePathName(m.group(2)); - String[] kv = new String[2]; - kv[0] = k; - kv[1] = v; - kvs.add(kv); - } - currPath = currPath.getParent(); - } while (currPath != null && !currPath.getName().isEmpty()); - - // reverse the list since we checked the part from leaf dir to table's base dir - for (int i = kvs.size(); i > 0; i--) { - String key = kvs.get(i - 1)[0]; - if (requiredKeys != null) { - requiredKeys.remove(key); - } - partSpec.put(key, kvs.get(i - 1)[1]); - } - if (requiredKeys == null || requiredKeys.isEmpty()) return true; - LOG.warn("Cannot create partition spec from " + currPath + "; missing keys " + requiredKeys); - return false; - } - - public static Map makeEscSpecFromName(String name) throws MetaException { - - if (name == null || name.isEmpty()) { - throw new MetaException("Partition name is invalid. 
" + name); - } - LinkedHashMap partSpec = new LinkedHashMap<>(); - - Path currPath = new Path(name); - - List kvs = new ArrayList<>(); - do { - String component = currPath.getName(); - Matcher m = pat.matcher(component); - if (m.matches()) { - String k = m.group(1); - String v = m.group(2); - String[] kv = new String[2]; - kv[0] = k; - kv[1] = v; - kvs.add(kv); - } - currPath = currPath.getParent(); - } while (currPath != null && !currPath.getName().isEmpty()); - - // reverse the list since we checked the part from leaf dir to table's base dir - for (int i = kvs.size(); i > 0; i--) { - partSpec.put(kvs.get(i - 1)[0], kvs.get(i - 1)[1]); - } - - return partSpec; - } - /** * Returns the default partition path of a table within a given database and partition key value * pairs. It uses the database location and appends it the table name and the partition key,value @@ -607,7 +367,7 @@ public Path getDefaultPartitionPath(Database db, Table table, */ public Path getPartitionPath(Path tblPath, Map pm) throws MetaException { - return new Path(tblPath, makePartPath(pm)); + return new Path(tblPath, MetaStoreUtils.makePartPath(pm)); } /** @@ -658,11 +418,6 @@ public boolean isDir(Path f) throws MetaException { return true; } - public static String makePartName(List partCols, - List vals) throws MetaException { - return makePartName(partCols, vals, null); - } - /** * @param desc * @return array of FileStatus objects corresponding to the files @@ -708,49 +463,4 @@ public static String makePartName(List partCols, return null; } - /** - * Makes a valid partition name. - * @param partCols The partition columns - * @param vals The partition values - * @param defaultStr - * The default name given to a partition value if the respective value is empty or null. - * @return An escaped, valid partition name. 
- * @throws MetaException - */ - public static String makePartName(List partCols, - List vals, String defaultStr) throws MetaException { - if ((partCols.size() != vals.size()) || (partCols.size() == 0)) { - String errorStr = "Invalid partition key & values; keys ["; - for (FieldSchema fs : partCols) { - errorStr += (fs.getName() + ", "); - } - errorStr += "], values ["; - for (String val : vals) { - errorStr += (val + ", "); - } - throw new MetaException(errorStr + "]"); - } - List colNames = new ArrayList<>(); - for (FieldSchema col: partCols) { - colNames.add(col.getName()); - } - return FileUtils.makePartName(colNames, vals, defaultStr); - } - - public static List getPartValuesFromPartName(String partName) - throws MetaException { - LinkedHashMap partSpec = Warehouse.makeSpecFromName(partName); - List values = new ArrayList<>(); - values.addAll(partSpec.values()); - return values; - } - - public static Map makeSpecFromValues(List partCols, - List values) { - Map spec = new LinkedHashMap<>(); - for (int i = 0; i < values.size(); i++) { - spec.put(partCols.get(i).getName(), values.get(i)); - } - return spec; - } } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java index 0445cbf909..45f658eb8b 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java @@ -48,7 +48,6 @@ import org.apache.hadoop.hive.metastore.PartitionExpressionProxy; import org.apache.hadoop.hive.metastore.RawStore; import org.apache.hadoop.hive.metastore.TableType; -import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.*; import org.apache.hadoop.hive.metastore.cache.SharedCache.StatsType; import org.apache.hadoop.hive.metastore.columnstats.aggr.ColumnStatsAggregator; @@ -69,7 +68,7 @@ import com.google.common.annotations.VisibleForTesting; -import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DEFAULT_CATALOG_NAME; import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog; import static org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier; @@ -245,7 +244,7 @@ static void prewarm(RawStore rawStore) { Deadline.stopTimer(); List partNames = new ArrayList<>(partitions.size()); for (Partition p : partitions) { - partNames.add(Warehouse.makePartName(table.getPartitionKeys(), p.getValues())); + partNames.add(MetaStoreUtils.makePartName(table.getPartitionKeys(), p.getValues())); } if (!partNames.isEmpty()) { // Get partition column stats for this table @@ -1183,7 +1182,7 @@ public void updateCreationMetadata(String catName, String dbname, String tablena int count = 0; for (Partition part : sharedCache.listCachedPartitions(catName, dbName, tblName, max_parts)) { if (max_parts == -1 || count < max_parts) { - partitionNames.add(Warehouse.makePartName(tbl.getPartitionKeys(), part.getValues())); + partitionNames.add(MetaStoreUtils.makePartName(tbl.getPartitionKeys(), part.getValues())); } } return partitionNames; @@ -1236,7 +1235,7 @@ private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr, StringUtils.normalizeIdentifier(table.getDbName()), 
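// --- A minimal sketch, not part of the patch: round-tripping a partition spec
// through the helpers this change relocates from Warehouse to MetaStoreUtils.
// Assumes the post-HIVE-20387 standalone-metastore jar is on the classpath;
// the key names and values ("ds", "hr", dates) are illustrative only.
import java.util.LinkedHashMap;
import java.util.Map;

import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;

public class PartSpecRoundTrip {
  public static void main(String[] args) throws MetaException {
    Map<String, String> spec = new LinkedHashMap<>();
    spec.put("ds", "2018-08-14");
    spec.put("hr", "13");

    // Formerly Warehouse.makePartPath(spec): escapes keys and values and adds
    // a trailing separator, yielding "ds=2018-08-14/hr=13/".
    String partPath = MetaStoreUtils.makePartPath(spec);

    // Formerly Warehouse.makeSpecFromName(name): parses a partition name back
    // into an ordered key -> value map.
    Map<String, String> parsed = MetaStoreUtils.makeSpecFromName("ds=2018-08-14/hr=13");

    System.out.println(partPath + " -> " + parsed);
  }
}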
StringUtils.normalizeIdentifier(table.getTableName()), maxParts); for (Partition part : parts) { - result.add(Warehouse.makePartName(table.getPartitionKeys(), part.getValues())); + result.add(MetaStoreUtils.makePartName(table.getPartitionKeys(), part.getValues())); } if (defaultPartName == null || defaultPartName.isEmpty()) { defaultPartName = MetastoreConf.getVar(getConf(), ConfVars.DEFAULTPARTITIONNAME); @@ -1519,7 +1518,7 @@ public Partition getPartitionWithAuth(String catName, String dbName, String tblN } Partition p = sharedCache.getPartitionFromCache(catName, dbName, tblName, partVals); if (p != null) { - String partName = Warehouse.makePartName(table.getPartitionKeys(), partVals); + String partName = MetaStoreUtils.makePartName(table.getPartitionKeys(), partVals); PrincipalPrivilegeSet privs = getPartitionPrivilegeSet(catName, dbName, tblName, partName, userName, groupNames); p.setPrivileges(privs); @@ -1546,7 +1545,7 @@ public Partition getPartitionWithAuth(String catName, String dbName, String tblN int count = 0; for (Partition part : sharedCache.listCachedPartitions(catName, dbName, tblName, maxParts)) { if (maxParts == -1 || count < maxParts) { - String partName = Warehouse.makePartName(table.getPartitionKeys(), part.getValues()); + String partName = MetaStoreUtils.makePartName(table.getPartitionKeys(), part.getValues()); PrincipalPrivilegeSet privs = getPartitionPrivilegeSet(catName, dbName, tblName, partName, userName, groupNames); part.setPrivileges(privs); @@ -1588,7 +1587,7 @@ public Partition getPartitionWithAuth(String catName, String dbName, String tblN continue; } if (maxParts == -1 || count < maxParts) { - partNames.add(Warehouse.makePartName(table.getPartitionKeys(), part.getValues())); + partNames.add(MetaStoreUtils.makePartName(table.getPartitionKeys(), part.getValues())); count++; } } @@ -1628,7 +1627,7 @@ public Partition getPartitionWithAuth(String catName, String dbName, String tblN continue; } if (maxParts == -1 || count < maxParts) { - String partName = Warehouse.makePartName(table.getPartitionKeys(), part.getValues()); + String partName = MetaStoreUtils.makePartName(table.getPartitionKeys(), part.getValues()); PrincipalPrivilegeSet privs = getPartitionPrivilegeSet(catName, dbName, tblName, partName, userName, groupNames); part.setPrivileges(privs); diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java index 37c300e882..c8bae72ba4 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java @@ -35,7 +35,6 @@ import org.apache.hadoop.hive.metastore.StatObjectConverter; import org.apache.hadoop.hive.metastore.TableType; -import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.AggrStats; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import org.apache.hadoop.hive.metastore.api.Catalog; @@ -48,6 +47,7 @@ import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.api.TableMeta; import org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.metastore.utils.StringUtils; import org.apache.hadoop.hive.ql.util.IncrementalObjectSizeEstimator; 
import org.apache.hadoop.hive.ql.util.IncrementalObjectSizeEstimator.ObjectEstimator; @@ -577,7 +577,7 @@ public void refreshPartitionColStats(List partitionColStats) { } List partVal; try { - partVal = Warehouse.makeValsFromName(cs.getStatsDesc().getPartName(), null); + partVal = MetaStoreUtils.makeValsFromName(cs.getStatsDesc().getPartName(), null); List colStatsObjs = cs.getStatsObj(); for (ColumnStatisticsObj colStatObj : colStatsObjs) { if (isPartitionColStatsCacheDirty.compareAndSet(true, false)) { @@ -1068,7 +1068,7 @@ public boolean populateTableInCache(Table table, ColumnStatistics tableColStats, for (ColumnStatistics cs : partitionColStats) { List partVal; try { - partVal = Warehouse.makeValsFromName(cs.getStatsDesc().getPartName(), null); + partVal = MetaStoreUtils.makeValsFromName(cs.getStatsDesc().getPartName(), null); List colStats = cs.getStatsObj(); if (!tblWrapper.updatePartitionColStats(partVal, colStats)) { return false; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/ConstraintBuilder.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/ConstraintBuilder.java index 2e32cbf3c4..b48f46a07c 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/ConstraintBuilder.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/ConstraintBuilder.java @@ -17,9 +17,7 @@ */ package org.apache.hadoop.hive.metastore.client.builder; -import org.apache.commons.lang.StringUtils; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; @@ -43,7 +41,7 @@ protected ConstraintBuilder() { nextSeq = 1; enable = true; validate = rely = false; - dbName = Warehouse.DEFAULT_DATABASE_NAME; + dbName = MetaStoreUtils.DEFAULT_DATABASE_NAME; columns = new ArrayList<>(); } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/FunctionBuilder.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/FunctionBuilder.java index c4c09dcd4f..5664afc1f7 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/FunctionBuilder.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/FunctionBuilder.java @@ -20,7 +20,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.metastore.IMetaStoreClient; -import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.Function; import org.apache.hadoop.hive.metastore.api.FunctionType; @@ -54,7 +53,7 @@ public FunctionBuilder() { createTime = (int) (System.currentTimeMillis() / 1000); funcType = FunctionType.JAVA; resourceUris = new ArrayList<>(); - dbName = Warehouse.DEFAULT_DATABASE_NAME; + dbName = MetaStoreUtils.DEFAULT_DATABASE_NAME; } public FunctionBuilder setCatName(String catName) { diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/ISchemaBuilder.java 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/ISchemaBuilder.java index f61a62c2e3..1ee9b34f6b 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/ISchemaBuilder.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/ISchemaBuilder.java @@ -17,13 +17,13 @@ */ package org.apache.hadoop.hive.metastore.client.builder; -import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.ISchema; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.SchemaCompatibility; import org.apache.hadoop.hive.metastore.api.SchemaType; import org.apache.hadoop.hive.metastore.api.SchemaValidation; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; public class ISchemaBuilder { private SchemaType schemaType; // required @@ -39,8 +39,8 @@ public ISchemaBuilder() { compatibility = SchemaCompatibility.BACKWARD; validationLevel = SchemaValidation.ALL; canEvolve = true; - dbName = Warehouse.DEFAULT_DATABASE_NAME; - catName = Warehouse.DEFAULT_CATALOG_NAME; + dbName = MetaStoreUtils.DEFAULT_DATABASE_NAME; + catName = MetaStoreUtils.DEFAULT_CATALOG_NAME; } public ISchemaBuilder setSchemaType(SchemaType schemaType) { diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/PartitionBuilder.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/PartitionBuilder.java index d6ee6739f8..187ba59b80 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/PartitionBuilder.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/PartitionBuilder.java @@ -19,7 +19,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.metastore.IMetaStoreClient; -import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.Table; @@ -45,7 +44,7 @@ public PartitionBuilder() { // Set some reasonable defaults partParams = new HashMap<>(); createTime = lastAccessTime = (int)(System.currentTimeMillis() / 1000); - dbName = Warehouse.DEFAULT_DATABASE_NAME; + dbName = MetaStoreUtils.DEFAULT_DATABASE_NAME; super.setChild(this); } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLForeignKeyBuilder.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLForeignKeyBuilder.java index f5adda1ecd..e7abc4dfb8 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLForeignKeyBuilder.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLForeignKeyBuilder.java @@ -18,10 +18,10 @@ package org.apache.hadoop.hive.metastore.client.builder; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.SQLForeignKey; import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; +import 
org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import java.util.ArrayList; import java.util.List; @@ -40,7 +40,7 @@ public SQLForeignKeyBuilder() { super.setChild(this); updateRule = deleteRule = 0; pkColumns = new ArrayList<>(); - pkDb = Warehouse.DEFAULT_DATABASE_NAME; + pkDb = MetaStoreUtils.DEFAULT_DATABASE_NAME; } public SQLForeignKeyBuilder setPkDb(String pkDb) { diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SchemaVersionBuilder.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SchemaVersionBuilder.java index 521be3e383..fcbc02e649 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SchemaVersionBuilder.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SchemaVersionBuilder.java @@ -23,8 +23,8 @@ import org.apache.hadoop.hive.metastore.api.SchemaVersion; import org.apache.hadoop.hive.metastore.api.SchemaVersionState; -import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; -import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DEFAULT_CATALOG_NAME; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DEFAULT_DATABASE_NAME; public class SchemaVersionBuilder extends SerdeAndColsBuilder { private String schemaName, dbName, catName; // required diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/TableBuilder.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/TableBuilder.java index fed3dda809..86f6592dc2 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/TableBuilder.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/TableBuilder.java @@ -21,7 +21,6 @@ import org.apache.hadoop.hive.common.ValidTxnList; import org.apache.hadoop.hive.metastore.IMetaStoreClient; import org.apache.hadoop.hive.metastore.TableType; -import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.CreationMetadata; import org.apache.hadoop.hive.metastore.api.Database; @@ -58,7 +57,7 @@ public TableBuilder() { // Set some reasonable defaults - dbName = Warehouse.DEFAULT_DATABASE_NAME; + dbName = MetaStoreUtils.DEFAULT_DATABASE_NAME; tableParams = new HashMap<>(); createTime = lastAccessTime = (int)(System.currentTimeMillis() / 1000); retention = 0; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java index 32267b44eb..914ef64fd6 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java @@ -27,7 +27,6 @@ import org.antlr.runtime.CharStream; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.metastore.ColumnType; -import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.Table; import 
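// --- A minimal sketch, not part of the patch: the builder default these hunks
// rewire. With no explicit setDbName(), the built table lands in
// MetaStoreUtils.DEFAULT_DATABASE_NAME ("default"), the same constant the
// builders previously read from Warehouse. Table and column names are made up.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;

public class BuilderDefaultDb {
  public static void main(String[] args) throws Exception {
    Table t = new TableBuilder()
        .setTableName("sketch_tbl")
        .addCol("a", "int")
        .build(new Configuration());
    System.out.println(t.getDbName()); // default
  }
}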
org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; @@ -493,7 +492,7 @@ private static void makeFilterForEquals(String keyName, String value, String par // If a partition has multiple partition keys, we make the assumption that // makePartName with one key will return a substring of the name made // with both all the keys. - String escapedNameFragment = Warehouse.makePartName(partKeyToVal, false); + String escapedNameFragment = MetaStoreUtils.makePartName(partKeyToVal, false); if (keyCount == 1) { // Case where this is no other partition columns params.put(paramName, escapedNameFragment); diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/partition/spec/CompositePartitionSpecProxy.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/partition/spec/CompositePartitionSpecProxy.java index 91d790aa64..cd3d2dfd30 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/partition/spec/CompositePartitionSpecProxy.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/partition/spec/CompositePartitionSpecProxy.java @@ -26,7 +26,7 @@ import java.util.List; import java.util.Map; -import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DEFAULT_CATALOG_NAME; /** * Implementation of PartitionSpecProxy that composes a list of PartitionSpecProxy. diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java index 015dfd9ec9..9fd1c29622 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java @@ -62,7 +62,6 @@ import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.common.classification.RetrySemantics; import org.apache.hadoop.hive.metastore.DatabaseProduct; -import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.MetaStoreListenerNotifier; import org.apache.hadoop.hive.metastore.TransactionalMetaStoreEventListener; import org.apache.hadoop.hive.metastore.api.*; @@ -81,6 +80,7 @@ import org.apache.hadoop.hive.metastore.tools.SQLGenerator; import org.apache.hadoop.hive.metastore.utils.JavaUtils; import org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.metastore.utils.StringableMap; import org.apache.hadoop.util.StringUtils; import org.slf4j.Logger; @@ -1769,7 +1769,7 @@ public LockResponse lockMaterializationRebuild(String dbName, String tableName, throws MetaException { if (LOG.isDebugEnabled()) { - LOG.debug("Acquiring lock for materialization rebuild with txnId={} for {}", txnId, Warehouse.getQualifiedName(dbName,tableName)); + LOG.debug("Acquiring lock for materialization rebuild with txnId={} for {}", txnId, MetaStoreUtils.getQualifiedName(dbName,tableName)); } TxnStore.MutexAPI.LockHandle handle = null; @@ -1839,7 +1839,7 @@ public boolean heartbeatLockMaterializationRebuild(String dbName, String tableNa if (rc < 1) { LOG.debug("Going to rollback"); dbConn.rollback(); - LOG.info("No lock found for rebuild of " + 
Warehouse.getQualifiedName(dbName, tableName) + + LOG.info("No lock found for rebuild of " + MetaStoreUtils.getQualifiedName(dbName, tableName) + " when trying to heartbeat"); // It could not be renewed, return that information return false; @@ -1852,7 +1852,7 @@ public boolean heartbeatLockMaterializationRebuild(String dbName, String tableNa LOG.debug("Going to rollback"); rollbackDBConn(dbConn); checkRetryable(dbConn, e, - "heartbeatLockMaterializationRebuild(" + Warehouse.getQualifiedName(dbName, tableName) + ", " + txnId + ")"); + "heartbeatLockMaterializationRebuild(" + MetaStoreUtils.getQualifiedName(dbName, tableName) + ", " + txnId + ")"); throw new MetaException("Unable to heartbeat rebuild lock due to " + StringUtils.stringifyException(e)); } finally { @@ -3009,7 +3009,7 @@ public void cleanupRecords(HiveObjectType type, Database db, Table table, while (partitionIterator.hasNext()) { Partition p = partitionIterator.next(); partVals = p.getValues(); - partName = Warehouse.makePartName(partCols, partVals); + partName = MetaStoreUtils.makePartName(partCols, partVals); buff.append("delete from TXN_COMPONENTS where tc_database='"); buff.append(dbName); @@ -4304,7 +4304,7 @@ private void ensureAllTxnsValid(String dbName, String tblName, List txnIds boolean isAborted = false; StringBuilder errorMsg = new StringBuilder(); errorMsg.append("Write ID allocation on ") - .append(Warehouse.getQualifiedName(dbName, tblName)) + .append(MetaStoreUtils.getQualifiedName(dbName, tblName)) .append(" failed for input txns: "); for (String query : queries) { LOG.debug("Going to execute query <" + query + ">"); @@ -4323,7 +4323,7 @@ private void ensureAllTxnsValid(String dbName, String tblName, List txnIds if (isAborted || isCommitted) { LOG.error(errorMsg.toString()); throw new IllegalStateException("Write ID allocation failed on " - + Warehouse.getQualifiedName(dbName, tblName) + + MetaStoreUtils.getQualifiedName(dbName, tblName) + " as not all input txns in open state"); } } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java index 5233bee592..dfb06cae9c 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java @@ -17,24 +17,23 @@ */ package org.apache.hadoop.hive.metastore.utils; -import org.apache.hadoop.hive.metastore.api.WMPoolSchedulingPolicy; - import com.google.common.base.Joiner; - -import org.apache.hadoop.conf.Configuration; import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.StatsSetupConst; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.metastore.ColumnType; import org.apache.hadoop.hive.metastore.TableType; -import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.EnvironmentContext; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import 
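// --- A minimal sketch, not part of the patch: the name formatting TxnHandler
// now resolves from MetaStoreUtils (db and table names are made up). The
// (dbName, tableName) overload delegates to TableName.getDbTable and is marked
// @Deprecated in favor of using TableName directly.
import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;

public class QualifiedNameSketch {
  public static void main(String[] args) {
    // Prints "default.acid_tbl", the db.table form used in the lock and
    // heartbeat log messages above.
    System.out.println(MetaStoreUtils.getQualifiedName("default", "acid_tbl"));
  }
}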
org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.api.WMPoolSchedulingPolicy; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; @@ -43,18 +42,21 @@ import org.slf4j.LoggerFactory; import javax.annotation.Nullable; - import java.io.File; +import java.io.IOException; import java.net.URL; import java.net.URLClassLoader; import java.text.DateFormat; import java.text.SimpleDateFormat; +import java.util.AbstractList; import java.util.ArrayList; import java.util.Arrays; import java.util.Iterator; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Properties; +import java.util.Set; import java.util.TimeZone; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -73,6 +75,13 @@ protected DateFormat initialValue() { }; // Indicates a type was derived from the deserializer rather than Hive's metadata. public static final String TYPE_FROM_DESERIALIZER = ""; + public static final String DEFAULT_CATALOG_NAME = "hive"; + public static final String DEFAULT_CATALOG_COMMENT = "Default catalog, for Hive"; + public static final String DEFAULT_DATABASE_NAME = "default"; + public static final String DEFAULT_DATABASE_COMMENT = "Default Hive database"; + public static final String DEFAULT_SERIALIZATION_FORMAT = "1"; + public static final String DATABASE_WAREHOUSE_SUFFIX = ".db"; + public static final Pattern pat = Pattern.compile("([^/]+)=([^/]+)"); private static final Logger LOG = LoggerFactory.getLogger(MetaStoreUtils.class); @@ -107,6 +116,7 @@ protected DateFormat initialValue() { // configuration parameter documentation // HIVE_SUPPORT_SPECICAL_CHARACTERS_IN_TABLE_NAMES in HiveConf as well. private static final char[] specialCharactersInTableNames = new char[] { '/' }; + private static final Pattern slash = Pattern.compile("/"); /** * Catches exceptions that can't be handled and bundles them to MetaException @@ -330,7 +340,7 @@ public static String makePartNameMatcher(Table table, List partVals) thr // or a regex of the form ".*" // This works because the "=" and "/" separating key names and partition key/values // are not escaped. - String partNameMatcher = Warehouse.makePartName(partCols, partVals, ".*"); + String partNameMatcher = makePartName(partCols, partVals, ".*"); // add ".*" to the regex to match anything else afterwards the partial spec. 
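// --- A minimal sketch, not part of the patch, of the partial-spec matcher
// construction above. It assumes, as makePartNameMatcher relies on, that
// FileUtils.makePartName substitutes defaultStr for an empty value; column
// names and values are illustrative.
import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;

public class PartNameMatcherSketch {
  public static void main(String[] args) throws MetaException {
    List<FieldSchema> partCols = Arrays.asList(
        new FieldSchema("ds", "string", null),
        new FieldSchema("hr", "string", null));
    // Pins ds and wildcards hr: "ds=2018-08-14/hr=.*"
    String matcher = MetaStoreUtils.makePartName(
        partCols, Arrays.asList("2018-08-14", ""), ".*");
    System.out.println("ds=2018-08-14/hr=13".matches(matcher + ".*")); // true
  }
}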
if (partVals.size() < numPartKeys) { partNameMatcher += ".*"; @@ -932,11 +942,287 @@ public static String prependCatalogToDbName(String dbName, Configuration conf) { public static String getDefaultCatalog(Configuration conf) { if (conf == null) { LOG.warn("Configuration is null, so going with default catalog."); - return Warehouse.DEFAULT_CATALOG_NAME; + return DEFAULT_CATALOG_NAME; } String catName = MetastoreConf.getVar(conf, MetastoreConf.ConfVars.CATALOG_DEFAULT); - if (catName == null || "".equals(catName)) catName = Warehouse.DEFAULT_CATALOG_NAME; + if (catName == null || "".equals(catName)) catName = DEFAULT_CATALOG_NAME; return catName; } + @Deprecated // Use TableName + public static String getQualifiedName(Table table) { + return TableName.getDbTable(table.getDbName(), table.getTableName()); + } + + @Deprecated // Use TableName + public static String getQualifiedName(String dbName, String tableName) { + return TableName.getDbTable(dbName, tableName); + } + + public static String getQualifiedName(Partition partition) { + return partition.getDbName() + "." + partition.getTableName() + partition.getValues(); + } + + /** + * Get table name in cat.db.table format. + * @param table table object + * @return fully qualified name. + */ + public static String getCatalogQualifiedTableName(Table table) { + return TableName.getQualified(table.getCatName(), table.getDbName(), table.getTableName()); + } + + public static String makePartName(List partCols, + List vals) throws MetaException { + return makePartName(partCols, vals, null); + } + + /** + * Makes a valid partition name. + * @param partCols The partition columns + * @param vals The partition values + * @param defaultStr + * The default name given to a partition value if the respective value is empty or null. + * @return An escaped, valid partition name. + * @throws MetaException + */ + public static String makePartName(List partCols, + List vals, String defaultStr) throws MetaException { + if ((partCols.size() != vals.size()) || (partCols.size() == 0)) { + String errorStr = "Invalid partition key & values; keys ["; + for (FieldSchema fs : partCols) { + errorStr += (fs.getName() + ", "); + } + errorStr += "], values ["; + for (String val : vals) { + errorStr += (val + ", "); + } + throw new MetaException(errorStr + "]"); + } + List colNames = new ArrayList<>(); + for (FieldSchema col: partCols) { + colNames.add(col.getName()); + } + return FileUtils.makePartName(colNames, vals, defaultStr); + } + + public static String unescapePathName(String path) { + return FileUtils.unescapePathName(path); + } + + /** + * Extracts values from partition name without the column names. + * @param name Partition name. + * @param result The result. Must be pre-sized to the expected number of columns. 
+ */ + public static AbstractList makeValsFromName( + String name, AbstractList result) throws MetaException { + assert name != null; + String[] parts = slash.split(name, 0); + if (result == null) { + result = new ArrayList<>(parts.length); + for (int i = 0; i < parts.length; ++i) { + result.add(null); + } + } else if (parts.length != result.size()) { + throw new MetaException( + "Expected " + result.size() + " components, got " + parts.length + " (" + name + ")"); + } + for (int i = 0; i < parts.length; ++i) { + int eq = parts[i].indexOf('='); + if (eq <= 0) { + throw new MetaException("Unexpected component " + parts[i]); + } + result.set(i, unescapePathName(parts[i].substring(eq + 1))); + } + return result; + } + + public static LinkedHashMap makeSpecFromName(String name) + throws MetaException { + if (name == null || name.isEmpty()) { + throw new MetaException("Partition name is invalid. " + name); + } + LinkedHashMap partSpec = new LinkedHashMap<>(); + makeSpecFromName(partSpec, new Path(name), null); + return partSpec; + } + + public static boolean makeSpecFromName(Map partSpec, Path currPath, + Set requiredKeys) { + List kvs = new ArrayList<>(); + do { + String component = currPath.getName(); + Matcher m = pat.matcher(component); + if (m.matches()) { + String k = unescapePathName(m.group(1)); + String v = unescapePathName(m.group(2)); + String[] kv = new String[2]; + kv[0] = k; + kv[1] = v; + kvs.add(kv); + } + currPath = currPath.getParent(); + } while (currPath != null && !currPath.getName().isEmpty()); + + // reverse the list since we checked the part from leaf dir to table's base dir + for (int i = kvs.size(); i > 0; i--) { + String key = kvs.get(i - 1)[0]; + if (requiredKeys != null) { + requiredKeys.remove(key); + } + partSpec.put(key, kvs.get(i - 1)[1]); + } + if (requiredKeys == null || requiredKeys.isEmpty()) return true; + LOG.warn("Cannot create partition spec from " + currPath + "; missing keys " + requiredKeys); + return false; + } + + public static String escapePathName(String path) { + return FileUtils.escapePathName(path); + } + + /** + * Given a partition specification, return the path corresponding to the + * partition spec. By default, the specification does not include dynamic partitions. + * @param spec + * @return string representation of the partition specification. + * @throws MetaException + */ + public static String makePartPath(Map spec) + throws MetaException { + return makePartName(spec, true); + } + + /** + * Makes a partition name from a specification + * @param spec + * @param addTrailingSeperator if true, adds a trailing separator e.g. 'ds=1/' + * @return partition name + * @throws MetaException + */ + public static String makePartName(Map spec, + boolean addTrailingSeperator) + throws MetaException { + StringBuilder suffixBuf = new StringBuilder(); + int i = 0; + for (Map.Entry e : spec.entrySet()) { + if (e.getValue() == null || e.getValue().length() == 0) { + throw new MetaException("Partition spec is incorrect. 
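// --- A minimal sketch, not part of the patch: parsing with makeValsFromName
// as added above (the partition name is illustrative). Passing null for the
// result list lets the helper size it to the number of name components.
import java.util.List;

import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;

public class PartNameValues {
  public static void main(String[] args) throws MetaException {
    List<String> vals = MetaStoreUtils.makeValsFromName("ds=2018-08-14/hr=13", null);
    System.out.println(vals); // [2018-08-14, 13]
  }
}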
" + spec); + } + if (i>0) { + suffixBuf.append(Path.SEPARATOR); + } + suffixBuf.append(escapePathName(e.getKey())); + suffixBuf.append('='); + suffixBuf.append(escapePathName(e.getValue())); + i++; + } + if (addTrailingSeperator) { + suffixBuf.append(Path.SEPARATOR); + } + return suffixBuf.toString(); + } + + /** + * Helper functions to convert IOException to MetaException + */ + public static FileSystem getFs(Path f, Configuration conf) throws MetaException { + try { + return f.getFileSystem(conf); + } catch (IOException e) { + logAndThrowMetaException(e); + } + return null; + } + + /** + * Hadoop File System reverse lookups paths with raw ip addresses The File + * System URI always contains the canonical DNS name of the Namenode. + * Subsequently, operations on paths with raw ip addresses cause an exception + * since they don't match the file system URI. + * + * This routine solves this problem by replacing the scheme and authority of a + * path with the scheme and authority of the FileSystem that it maps to. + * + * @param path + * Path to be canonicalized + * @return Path with canonical scheme and authority + */ + public static Path getDnsPath(Path path, Configuration conf) throws MetaException { + FileSystem fs = getFs(path, conf); + return (new Path(fs.getUri().getScheme(), fs.getUri().getAuthority(), path + .toUri().getPath())); + } + + /** + * Given a dynamic partition specification, return the path corresponding to the + * static part of partition specification. This is basically a copy of makePartName + * but we get rid of MetaException since it is not serializable. + * @param spec + * @return string representation of the static part of the partition specification. + */ + public static String makeDynamicPartName(Map spec) { + StringBuilder suffixBuf = new StringBuilder(); + for (Map.Entry e : spec.entrySet()) { + if (e.getValue() != null && e.getValue().length() > 0) { + suffixBuf.append(escapePathName(e.getKey())); + suffixBuf.append('='); + suffixBuf.append(escapePathName(e.getValue())); + suffixBuf.append(Path.SEPARATOR); + } else { // stop once we see a dynamic partition + break; + } + } + return suffixBuf.toString(); + } + + public static Map makeSpecFromValues(List partCols, + List values) { + Map spec = new LinkedHashMap<>(); + for (int i = 0; i < values.size(); i++) { + spec.put(partCols.get(i).getName(), values.get(i)); + } + return spec; + } + + public static Map makeEscSpecFromName(String name) throws MetaException { + + if (name == null || name.isEmpty()) { + throw new MetaException("Partition name is invalid. 
" + name); + } + LinkedHashMap partSpec = new LinkedHashMap<>(); + + Path currPath = new Path(name); + + List kvs = new ArrayList<>(); + do { + String component = currPath.getName(); + Matcher m = pat.matcher(component); + if (m.matches()) { + String k = m.group(1); + String v = m.group(2); + String[] kv = new String[2]; + kv[0] = k; + kv[1] = v; + kvs.add(kv); + } + currPath = currPath.getParent(); + } while (currPath != null && !currPath.getName().isEmpty()); + + // reverse the list since we checked the part from leaf dir to table's base dir + for (int i = kvs.size(); i > 0; i--) { + partSpec.put(kvs.get(i - 1)[0], kvs.get(i - 1)[1]); + } + + return partSpec; + } + + public static List getPartValuesFromPartName(String partName) + throws MetaException { + LinkedHashMap partSpec = makeSpecFromName(partName); + List values = new ArrayList<>(); + values.addAll(partSpec.values()); + return values; + } } diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java index 35abd006d4..6273e2006b 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java @@ -19,7 +19,7 @@ package org.apache.hadoop.hive.metastore; import static org.apache.hadoop.hive.metastore.HiveMetaStoreClient.callEmbeddedMetastore; -import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DEFAULT_DATABASE_NAME; import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog; import java.io.IOException; diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/MetaStoreTestUtils.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/MetaStoreTestUtils.java index 3d36b60ec9..37e0750b70 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/MetaStoreTestUtils.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/MetaStoreTestUtils.java @@ -39,7 +39,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DEFAULT_CATALOG_NAME; public class MetaStoreTestUtils { private static final Logger LOG = LoggerFactory.getLogger(MetaStoreTestUtils.class); diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/NonCatCallsWithCatalog.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/NonCatCallsWithCatalog.java index f750ca2a9b..10b9738c69 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/NonCatCallsWithCatalog.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/NonCatCallsWithCatalog.java @@ -77,7 +77,7 @@ import java.util.Set; import java.util.TreeSet; -import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DEFAULT_DATABASE_NAME; public abstract class 
NonCatCallsWithCatalog { diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestAggregateStatsCache.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestAggregateStatsCache.java index 75ab4e01ee..2fe177ac72 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestAggregateStatsCache.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestAggregateStatsCache.java @@ -41,7 +41,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DEFAULT_CATALOG_NAME; @Category(MetastoreUnitTest.class) public class TestAggregateStatsCache { diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestCatalogOldClient.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestCatalogOldClient.java index fc996c8c71..c93062cc88 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestCatalogOldClient.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestCatalogOldClient.java @@ -19,7 +19,7 @@ import org.apache.hadoop.hive.metastore.api.MetaException; -import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DEFAULT_CATALOG_NAME; /** * This tests calls with an older client, to make sure that if the client supplies no catalog diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java index 7dc69bc4e9..a0ccefe46c 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java @@ -38,6 +38,7 @@ import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; @@ -160,11 +161,11 @@ public static void setUp() throws Exception { msc.dropDatabase(DBNAME2, true, true, true); Database db1 = new DatabaseBuilder() .setName(DBNAME1) - .setCatalogName(Warehouse.DEFAULT_CATALOG_NAME) + .setCatalogName(MetaStoreUtils.DEFAULT_CATALOG_NAME) .create(msc, conf); Database db2 = new DatabaseBuilder() .setName(DBNAME2) - .setCatalogName(Warehouse.DEFAULT_CATALOG_NAME) + .setCatalogName(MetaStoreUtils.DEFAULT_CATALOG_NAME) .create(msc, conf); new TableBuilder() .setDbName(DBNAME1) diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java index 60beab6350..95a9949777 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java +++ 
b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java @@ -46,6 +46,7 @@ import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.apache.hadoop.hive.metastore.utils.FileUtils; import org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.metastore.utils.SecurityUtils; import org.datanucleus.api.jdo.JDOPersistenceManager; import org.datanucleus.api.jdo.JDOPersistenceManagerFactory; @@ -985,7 +986,7 @@ public void testDatabase() throws Throwable { warehouse.getDatabasePath(db).toString(), db.getLocationUri()); assertEquals(db.getOwnerName(), SecurityUtils.getUser()); assertEquals(db.getOwnerType(), PrincipalType.USER); - assertEquals(Warehouse.DEFAULT_CATALOG_NAME, db.getCatalogName()); + assertEquals(MetaStoreUtils.DEFAULT_CATALOG_NAME, db.getCatalogName()); Database db2 = new DatabaseBuilder() .setName(TEST_DB2_NAME) .create(client, conf); @@ -2773,7 +2774,7 @@ private void createMultiPartitionTableSchema(String dbName, String tblName, @Test public void testDBOwner() throws TException { - Database db = client.getDatabase(Warehouse.DEFAULT_DATABASE_NAME); + Database db = client.getDatabase(MetaStoreUtils.DEFAULT_DATABASE_NAME); assertEquals(db.getOwnerName(), HiveMetaStore.PUBLIC); assertEquals(db.getOwnerType(), PrincipalType.ROLE); } diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreSchemaMethods.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreSchemaMethods.java index 3d48c5f542..4c60152aaf 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreSchemaMethods.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreSchemaMethods.java @@ -66,8 +66,8 @@ import java.util.List; import java.util.Map; -import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; -import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DEFAULT_CATALOG_NAME; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DEFAULT_DATABASE_NAME; // This does the testing using a remote metastore, as that finds more issues in thrift @Category(MetastoreCheckinTest.class) diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java index 38b3f6e531..40a5a35669 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java @@ -46,7 +46,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DEFAULT_CATALOG_NAME; import static org.junit.Assert.assertEquals; /** diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java 
b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java
index b919eeffe2..17cb19f6e4 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java
@@ -22,12 +22,12 @@
 import org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
 import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
 import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -72,7 +72,7 @@ public void testEndFunctionListener() throws Exception {
     Database db = new DatabaseBuilder()
         .setName(dbName)
-        .setCatalogName(Warehouse.DEFAULT_CATALOG_NAME)
+        .setCatalogName(MetaStoreUtils.DEFAULT_CATALOG_NAME)
         .create(msc, conf);
 
     try {
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java
index 546422d476..30ba019142 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java
@@ -23,7 +23,6 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest;
 import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
 import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder;
@@ -32,6 +31,7 @@
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
 import org.apache.hadoop.hive.metastore.events.ListenerEvent;
 import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 
 import org.junit.Before;
 import org.junit.Test;
@@ -77,7 +77,7 @@ public void testEventStatus() throws Exception {
     String dbName = "tmpDb";
     Database db = new DatabaseBuilder()
         .setName(dbName)
-        .setCatalogName(Warehouse.DEFAULT_CATALOG_NAME)
+        .setCatalogName(MetaStoreUtils.DEFAULT_CATALOG_NAME)
         .create(msc, conf);
 
     listSize += 1;
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
index b74c3048fa..cf585cc8fc 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
@@ -65,6 +65,7 @@
 import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
 import org.apache.hadoop.hive.metastore.model.MNotificationLog;
 import org.apache.hadoop.hive.metastore.model.MNotificationNextId;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.junit.Assert;
 import org.junit.Assume;
 import org.junit.Before;
@@ -94,7 +95,7 @@
 import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
 
-import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DEFAULT_CATALOG_NAME;
 
 @Category(MetastoreUnitTest.class)
 public class TestObjectStore {
@@ -159,7 +160,7 @@ public void catalogs() throws MetaException, NoSuchObjectException {
     }
     Catalog cat = objectStore.getCatalog(fetchedNames.get(2));
     Assert.assertEquals(DEFAULT_CATALOG_NAME, cat.getName());
-    Assert.assertEquals(Warehouse.DEFAULT_CATALOG_COMMENT, cat.getDescription());
+    Assert.assertEquals(MetaStoreUtils.DEFAULT_CATALOG_COMMENT, cat.getDescription());
     // Location will vary by system.
     for (int i = 0; i < names.length; i++) objectStore.dropCatalog(names[i]);
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStoreSchemaMethods.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStoreSchemaMethods.java
index 137082f863..6381a055dd 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStoreSchemaMethods.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStoreSchemaMethods.java
@@ -50,8 +50,8 @@
 import java.util.List;
 import java.util.Random;
 
-import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
-import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DEFAULT_CATALOG_NAME;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DEFAULT_DATABASE_NAME;
 
 @Category(MetastoreCheckinTest.class)
 public class TestObjectStoreSchemaMethods {
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java
index 36f91eb01d..3d6fac47a0 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java
@@ -26,7 +26,6 @@
 import org.apache.hadoop.hive.common.ndv.hll.HyperLogLog;
 import org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest;
 import org.apache.hadoop.hive.metastore.api.AggrStats;
-import org.apache.hadoop.hive.metastore.api.Catalog;
 import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
@@ -55,7 +54,7 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DEFAULT_CATALOG_NAME;
 
 @Category(MetastoreUnitTest.class)
 public class TestOldSchema {
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestStats.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestStats.java
index 1b01432db4..67252445ad 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestStats.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestStats.java
@@ -33,7 +33,6 @@
 import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData;
 import org.apache.hadoop.hive.metastore.api.LongColumnStatsData;
 import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
 import org.apache.hadoop.hive.metastore.api.StringColumnStatsData;
 import org.apache.hadoop.hive.metastore.api.Table;
@@ -42,6 +41,7 @@
 import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder;
 import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.thrift.TException;
 import org.junit.After;
 import org.junit.Assert;
@@ -60,8 +60,8 @@
 import java.util.Map;
 import java.util.Random;
 
-import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
-import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DEFAULT_CATALOG_NAME;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DEFAULT_DATABASE_NAME;
 
 @Category(MetastoreCheckinTest.class)
 public class TestStats {
@@ -96,7 +96,7 @@ public void tearDown() throws TException {
     } else {
       List<String> databases = client.getAllDatabases(catName);
       for (String db : databases) {
-        if (!db.equalsIgnoreCase(Warehouse.DEFAULT_DATABASE_NAME)) {
+        if (!db.equalsIgnoreCase(MetaStoreUtils.DEFAULT_DATABASE_NAME)) {
           client.dropDatabase(catName, db, true, false, true);
         }
       }
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/VerifyingObjectStore.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/VerifyingObjectStore.java
index c9a6a471cb..319e56bd71 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/VerifyingObjectStore.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/VerifyingObjectStore.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.hive.metastore;
 
 import static org.apache.commons.lang.StringUtils.repeat;
-import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
 
 import java.lang.reflect.AccessibleObject;
 import java.lang.reflect.Array;
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java
index bb20d9f42a..fddd6ed377 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java
@@ -52,15 +52,12 @@
 import org.apache.hadoop.hive.metastore.columnstats.cache.LongColumnStatsDataInspector;
 import org.apache.hadoop.hive.metastore.columnstats.cache.StringColumnStatsDataInspector;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
-import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import jline.internal.Log;
-
-import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DEFAULT_CATALOG_NAME;
 
 @Category(MetastoreCheckinTest.class)
 public class TestCachedStore {
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCatalogCaching.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCatalogCaching.java
index 423dce8a68..fe1ce9293f 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCatalogCaching.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCatalogCaching.java
@@ -29,6 +29,7 @@
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -92,7 +93,7 @@ public void defaultHiveOnly() throws Exception {
     // Only the hive catalog should be cached
     List<String> cachedCatalogs = cachedStore.getCatalogs();
     Assert.assertEquals(1, cachedCatalogs.size());
-    Assert.assertEquals(Warehouse.DEFAULT_CATALOG_NAME, cachedCatalogs.get(0));
+    Assert.assertEquals(MetaStoreUtils.DEFAULT_CATALOG_NAME, cachedCatalogs.get(0));
   }
 
   @Test
@@ -115,7 +116,7 @@ public void cacheAll() throws Exception {
     cachedCatalogs.sort(Comparator.naturalOrder());
     Assert.assertEquals(CAT1_NAME, cachedCatalogs.get(0));
     Assert.assertEquals(CAT2_NAME, cachedCatalogs.get(1));
-    Assert.assertEquals(Warehouse.DEFAULT_CATALOG_NAME, cachedCatalogs.get(2));
+    Assert.assertEquals(MetaStoreUtils.DEFAULT_CATALOG_NAME, cachedCatalogs.get(2));
   }
 
   @Test
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java
index 4fc3688f2e..1cf97c5f0e 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.hive.metastore.client;
 
-import java.net.ProtocolException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
@@ -48,7 +47,6 @@
 import com.google.common.collect.Lists;
 
 import org.junit.After;
-import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
@@ -57,7 +55,7 @@
 import org.junit.runners.Parameterized;
 
 import static java.util.stream.Collectors.joining;
-import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DEFAULT_DATABASE_NAME;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotEquals;
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestCatalogs.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestCatalogs.java
index 28eb1fadca..e4237a14cd 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestCatalogs.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestCatalogs.java
@@ -2,7 +2,6 @@
 
 import org.apache.hadoop.hive.metastore.IMetaStoreClient;
 import org.apache.hadoop.hive.metastore.MetaStoreTestUtils;
-import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest;
 import org.apache.hadoop.hive.metastore.api.Catalog;
 import org.apache.hadoop.hive.metastore.api.Database;
@@ -13,6 +12,7 @@
 import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
 import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
 import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.thrift.TException;
 import org.junit.After;
 import org.junit.Assert;
@@ -31,7 +31,7 @@
 import java.util.Comparator;
 import java.util.List;
 
-import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DEFAULT_DATABASE_NAME;
 
 /**
  * Licensed to the Apache Software Foundation (ASF) under one
@@ -73,7 +73,7 @@ public void tearDown() throws Exception {
     // Drop any left over catalogs
     List<String> catalogs = client.getCatalogs();
     for (String catName : catalogs) {
-      if (!catName.equalsIgnoreCase(Warehouse.DEFAULT_CATALOG_NAME)) {
+      if (!catName.equalsIgnoreCase(MetaStoreUtils.DEFAULT_CATALOG_NAME)) {
         // First drop any databases in catalog
         List<String> databases = client.getAllDatabases(catName);
         for (String db : databases) {
@@ -139,7 +139,7 @@ public void catalogOperations() throws TException {
     Assert.assertEquals(4, catalogs.size());
     catalogs.sort(Comparator.naturalOrder());
     List<String> expected = new ArrayList<>(catNames.length + 1);
-    expected.add(Warehouse.DEFAULT_CATALOG_NAME);
+    expected.add(MetaStoreUtils.DEFAULT_CATALOG_NAME);
     expected.addAll(Arrays.asList(catNames));
     expected.sort(Comparator.naturalOrder());
     for (int i = 0; i < catalogs.size(); i++) {
@@ -175,7 +175,7 @@ public void catalogOperations() throws TException {
 
     catalogs = client.getCatalogs();
     Assert.assertEquals(1, catalogs.size());
-    Assert.assertTrue(catalogs.get(0).equalsIgnoreCase(Warehouse.DEFAULT_CATALOG_NAME));
+    Assert.assertTrue(catalogs.get(0).equalsIgnoreCase(MetaStoreUtils.DEFAULT_CATALOG_NAME));
   }
 
   @Test(expected = NoSuchObjectException.class)
@@ -200,7 +200,7 @@ public void dropNonExistentCatalog() throws TException {
 
   @Test(expected = MetaException.class)
   public void dropHiveCatalog() throws TException {
-    client.dropCatalog(Warehouse.DEFAULT_CATALOG_NAME);
+    client.dropCatalog(MetaStoreUtils.DEFAULT_CATALOG_NAME);
   }
 
   @Test(expected = InvalidOperationException.class)
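For reference, the test changes above all track the same relocation: the default catalog and database constants that Warehouse used to expose now live on MetaStoreUtils. A minimal sketch of what the relocated declarations are assumed to look like (the values mirror what Warehouse historically declared; this is not an excerpt from the patch itself):

    // Assumed post-move declarations in
    // org.apache.hadoop.hive.metastore.utils.MetaStoreUtils (sketch only):
    public static final String DEFAULT_CATALOG_NAME = "hive";
    public static final String DEFAULT_CATALOG_COMMENT = "Default catalog, for Hive";
    public static final String DEFAULT_DATABASE_NAME = "default";

Callers therefore change only an import or qualifier, e.g. Warehouse.DEFAULT_CATALOG_NAME becomes MetaStoreUtils.DEFAULT_CATALOG_NAME; the values themselves are unchanged.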
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestCheckConstraint.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestCheckConstraint.java
index efac4cf2da..d79853f67b 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestCheckConstraint.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestCheckConstraint.java
@@ -44,8 +44,8 @@
 
 import java.util.List;
 
-import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
-import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DEFAULT_CATALOG_NAME;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DEFAULT_DATABASE_NAME;
 
 @RunWith(Parameterized.class)
 @Category(MetastoreCheckinTest.class)
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestDatabases.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestDatabases.java
index d323ac6c90..6dfd60a7cb 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestDatabases.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestDatabases.java
@@ -21,7 +21,6 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.metastore.IMetaStoreClient;
 import org.apache.hadoop.hive.metastore.MetaStoreTestUtils;
-import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest;
 import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
 import org.apache.hadoop.hive.metastore.api.Catalog;
@@ -38,6 +37,7 @@
 import org.apache.hadoop.hive.metastore.client.builder.FunctionBuilder;
 import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
 import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.utils.SecurityUtils;
 import org.apache.thrift.TException;
 import org.junit.After;
@@ -56,7 +56,7 @@
 import java.util.List;
 import java.util.Set;
 
-import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DEFAULT_CATALOG_NAME;
 
 /**
  * Test class for IMetaStoreClient API. Testing the Database related functions.
@@ -538,12 +538,12 @@ public void databasesInCatalogs() throws TException, URISyntaxException {
 
     fetchedDbs = new HashSet<>(client.getAllDatabases());
     Assert.assertEquals(5, fetchedDbs.size());
-    Assert.assertTrue(fetchedDbs.contains(Warehouse.DEFAULT_DATABASE_NAME));
+    Assert.assertTrue(fetchedDbs.contains(MetaStoreUtils.DEFAULT_DATABASE_NAME));
 
     // Intentionally using the deprecated method to make sure it returns correct results.
     fetchedDbs = new HashSet<>(client.getAllDatabases());
     Assert.assertEquals(5, fetchedDbs.size());
-    Assert.assertTrue(fetchedDbs.contains(Warehouse.DEFAULT_DATABASE_NAME));
+    Assert.assertTrue(fetchedDbs.contains(MetaStoreUtils.DEFAULT_DATABASE_NAME));
 
     fetchedDbs = new HashSet<>(client.getDatabases(catName, "d*"));
     Assert.assertEquals(3, fetchedDbs.size());
@@ -551,12 +551,12 @@ public void databasesInCatalogs() throws TException, URISyntaxException {
 
     fetchedDbs = new HashSet<>(client.getDatabases("d*"));
     Assert.assertEquals(1, fetchedDbs.size());
-    Assert.assertTrue(fetchedDbs.contains(Warehouse.DEFAULT_DATABASE_NAME));
+    Assert.assertTrue(fetchedDbs.contains(MetaStoreUtils.DEFAULT_DATABASE_NAME));
 
     // Intentionally using the deprecated method to make sure it returns correct results.
     fetchedDbs = new HashSet<>(client.getDatabases("d*"));
     Assert.assertEquals(1, fetchedDbs.size());
-    Assert.assertTrue(fetchedDbs.contains(Warehouse.DEFAULT_DATABASE_NAME));
+    Assert.assertTrue(fetchedDbs.contains(MetaStoreUtils.DEFAULT_DATABASE_NAME));
 
     fetchedDbs = new HashSet<>(client.getDatabases(catName, "*1"));
     Assert.assertEquals(1, fetchedDbs.size());
@@ -612,12 +612,12 @@ public void createDatabaseInNonExistentCatalog() throws TException {
 
   @Test(expected = NoSuchObjectException.class)
   public void fetchDatabaseInNonExistentCatalog() throws TException {
-    client.getDatabase("nosuch", Warehouse.DEFAULT_DATABASE_NAME);
+    client.getDatabase("nosuch", MetaStoreUtils.DEFAULT_DATABASE_NAME);
   }
 
   @Test(expected = NoSuchObjectException.class)
   public void dropDatabaseInNonExistentCatalog() throws TException {
-    client.dropDatabase("nosuch", Warehouse.DEFAULT_DATABASE_NAME, true, false, false);
+    client.dropDatabase("nosuch", MetaStoreUtils.DEFAULT_DATABASE_NAME, true, false, false);
   }
 
   private Database getDatabaseWithAllParametersSet() throws Exception {
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestDefaultConstraint.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestDefaultConstraint.java
index f3e026c9fb..3be2149ca2 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestDefaultConstraint.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestDefaultConstraint.java
@@ -44,8 +44,8 @@
 
 import java.util.List;
 
-import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
-import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DEFAULT_CATALOG_NAME;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DEFAULT_DATABASE_NAME;
 
 @RunWith(Parameterized.class)
 @Category(MetastoreCheckinTest.class)
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestDropPartitions.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestDropPartitions.java
index 2a566e8813..7485d4d3ae 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestDropPartitions.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestDropPartitions.java
@@ -54,8 +54,6 @@
 
 import com.google.common.collect.Lists;
 
-import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
-
 /**
  * Tests for dropping partitions.
 */
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestExchangePartitions.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestExchangePartitions.java
index 1a2b7e4f9f..346361391f 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestExchangePartitions.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestExchangePartitions.java
@@ -25,7 +25,6 @@
 
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.metastore.IMetaStoreClient;
-import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.MetaException;
@@ -36,6 +35,7 @@
 import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder;
 import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
 import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.thrift.TException;
 import org.junit.After;
 import org.junit.Assert;
@@ -113,8 +113,8 @@ public void testExchangePartitions() throws Exception {
     Assert.assertEquals(1, exchangedPartitions.size());
     String partitionName =
-        Warehouse.makePartName(sourceTable.getPartitionKeys(), partitions[1].getValues());
-    String exchangedPartitionName = Warehouse.makePartName(sourceTable.getPartitionKeys(),
+        MetaStoreUtils.makePartName(sourceTable.getPartitionKeys(), partitions[1].getValues());
+    String exchangedPartitionName = MetaStoreUtils.makePartName(sourceTable.getPartitionKeys(),
         exchangedPartitions.get(0).getValues());
     Assert.assertEquals(partitionName, exchangedPartitionName);
@@ -163,7 +163,7 @@ public void testExchangePartitionsYearSet() throws Exception {
     List<String> exchangedPartNames = new ArrayList<>();
     for (Partition exchangedPartition : exchangedPartitions) {
       String partName =
-          Warehouse.makePartName(sourceTable.getPartitionKeys(), exchangedPartition.getValues());
+          MetaStoreUtils.makePartName(sourceTable.getPartitionKeys(), exchangedPartition.getValues());
       exchangedPartNames.add(partName);
     }
     Assert.assertTrue(exchangedPartNames.contains("year=2017/month=march/day=15"));
@@ -1262,7 +1262,7 @@ private void checkExchangedPartitions(Table sourceTable, Table destTable,
       // Check the location of the result partition. It should be located in the destination table
       // folder.
       String partName =
-          Warehouse.makePartName(sourceTable.getPartitionKeys(), partition.getValues());
+          MetaStoreUtils.makePartName(sourceTable.getPartitionKeys(), partition.getValues());
       Assert.assertEquals(destTable.getSd().getLocation() + "/" + partName,
           resultPart.getSd().getLocation());
       Assert.assertTrue(metaStore.isPathExists(new Path(resultPart.getSd().getLocation())));
@@ -1306,7 +1306,7 @@ private void checkRemainingPartitions(Table sourceTable, Table destTable,
       // Expected exception
     }
     String partName =
-        Warehouse.makePartName(sourceTable.getPartitionKeys(), partition.getValues());
+        MetaStoreUtils.makePartName(sourceTable.getPartitionKeys(), partition.getValues());
     Assert.assertFalse(
         metaStore.isPathExists(new Path(destTable.getSd().getLocation() + "/" + partName)));
   }
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestForeignKey.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestForeignKey.java
index b058dd21fd..00c5d05d30 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestForeignKey.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestForeignKey.java
@@ -47,7 +47,7 @@
 
 import java.util.List;
 
-import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DEFAULT_DATABASE_NAME;
 
 @RunWith(Parameterized.class)
 @Category(MetastoreCheckinTest.class)
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestFunctions.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestFunctions.java
index db6b599ae7..654f119f79 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestFunctions.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestFunctions.java
@@ -50,7 +50,7 @@
 import java.util.List;
 import java.util.Set;
 
-import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DEFAULT_DATABASE_NAME;
 
 /**
  * Test class for IMetaStoreClient API. Testing the Function related functions.
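The TestExchangePartitions hunks above lean on makePartName, which renders a list of partition keys and values into the slash-separated, filesystem-safe name the metastore uses ("key=value/..."). A self-contained sketch of a call against the moved method, assuming it keeps Warehouse's makePartName(List<FieldSchema>, List<String>) signature:

    import java.util.Arrays;
    import java.util.List;
    import org.apache.hadoop.hive.metastore.api.FieldSchema;
    import org.apache.hadoop.hive.metastore.api.MetaException;
    import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;

    public class PartNameSketch {
      public static void main(String[] args) throws MetaException {
        List<FieldSchema> keys = Arrays.asList(
            new FieldSchema("year", "string", ""),
            new FieldSchema("month", "string", ""),
            new FieldSchema("day", "string", ""));
        // Yields "year=2017/month=march/day=15", the literal asserted in
        // testExchangePartitionsYearSet() above; values are path-escaped.
        String name = MetaStoreUtils.makePartName(keys, Arrays.asList("2017", "march", "15"));
        System.out.println(name);
      }
    }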
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestGetPartitions.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestGetPartitions.java
index 4d7f7c1220..814ce12c93 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestGetPartitions.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestGetPartitions.java
@@ -53,7 +53,6 @@
 
 import static junit.framework.TestCase.assertNotNull;
 import static junit.framework.TestCase.assertNull;
-import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.fail;
 
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestNotNullConstraint.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestNotNullConstraint.java
index b32eeda8e9..7054a93084 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestNotNullConstraint.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestNotNullConstraint.java
@@ -44,8 +44,8 @@
 
 import java.util.List;
 
-import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
-import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DEFAULT_CATALOG_NAME;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DEFAULT_DATABASE_NAME;
 
 @RunWith(Parameterized.class)
 @Category(MetastoreCheckinTest.class)
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestPrimaryKey.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestPrimaryKey.java
index c33572b1ab..929a6f3c35 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestPrimaryKey.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestPrimaryKey.java
@@ -45,7 +45,7 @@
 
 import java.util.List;
 
-import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DEFAULT_DATABASE_NAME;
 
 @RunWith(Parameterized.class)
 @Category(MetastoreCheckinTest.class)
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java
index bf302ed491..b266534d28 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java
@@ -51,7 +51,6 @@
 import org.apache.thrift.TApplicationException;
 import org.apache.thrift.TException;
 import org.apache.thrift.protocol.TProtocolException;
-import org.apache.thrift.transport.TTransportException;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -73,8 +72,8 @@
 import java.util.Map;
 import java.util.Set;
 
-import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
-import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DEFAULT_CATALOG_NAME;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DEFAULT_DATABASE_NAME;
 
 /**
  * Test class for IMetaStoreClient API. Testing the Table related functions for metadata
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesGetExists.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesGetExists.java
index e885c0aa2e..c437d8f310 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesGetExists.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesGetExists.java
@@ -49,7 +49,7 @@
 import java.util.List;
 import java.util.Set;
 
-import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DEFAULT_DATABASE_NAME;
 
 /**
  * Test class for IMetaStoreClient API. Testing the Table related functions for metadata
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesList.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesList.java
index 20c3af0b3c..d52b064944 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesList.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesList.java
@@ -44,7 +44,7 @@
 
 import java.util.List;
 
-import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DEFAULT_DATABASE_NAME;
 
 /**
  * Test class for IMetaStoreClient API. Testing the Table related functions for metadata
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestUniqueConstraint.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestUniqueConstraint.java
index 5842ec574d..16ef76bd2d 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestUniqueConstraint.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestUniqueConstraint.java
@@ -44,8 +44,8 @@
 
 import java.util.List;
 
-import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
-import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DEFAULT_CATALOG_NAME;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DEFAULT_DATABASE_NAME;
 
 @RunWith(Parameterized.class)
 @Category(MetastoreCheckinTest.class)
diff --git a/streaming/src/java/org/apache/hive/streaming/AbstractRecordWriter.java b/streaming/src/java/org/apache/hive/streaming/AbstractRecordWriter.java
index 9e90d36dae..0b69a69d4e 100644
--- a/streaming/src/java/org/apache/hive/streaming/AbstractRecordWriter.java
+++ b/streaming/src/java/org/apache/hive/streaming/AbstractRecordWriter.java
@@ -44,9 +44,9 @@
 import org.apache.hadoop.hive.common.JavaUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.llap.LlapUtil;
-import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.io.AcidOutputFormat;
 import org.apache.hadoop.hive.ql.io.RecordUpdater;
@@ -159,7 +159,7 @@ public void init(StreamingConnection conn, long minWriteId, long maxWriteId) thr
       this.inputTypes
           .addAll(table.getPartitionKeys().stream().map(FieldSchema::getType).collect(Collectors.toList()));
     }
-    this.fullyQualifiedTableName = Warehouse.getQualifiedName(table.getDbName(), table.getTableName());
+    this.fullyQualifiedTableName = MetaStoreUtils.getQualifiedName(table.getDbName(), table.getTableName());
     String outFormatName = this.table.getSd().getOutputFormat();
     try {
       this.acidOutputFormat = (AcidOutputFormat<?, ?>) ReflectionUtils
diff --git a/streaming/src/java/org/apache/hive/streaming/HiveStreamingConnection.java b/streaming/src/java/org/apache/hive/streaming/HiveStreamingConnection.java
index 6cf14b064f..e53ed7ae2d 100644
--- a/streaming/src/java/org/apache/hive/streaming/HiveStreamingConnection.java
+++ b/streaming/src/java/org/apache/hive/streaming/HiveStreamingConnection.java
@@ -43,7 +43,6 @@
 import org.apache.hadoop.hive.metastore.IMetaStoreClient;
 import org.apache.hadoop.hive.metastore.LockComponentBuilder;
 import org.apache.hadoop.hive.metastore.LockRequestBuilder;
-import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
 import org.apache.hadoop.hive.metastore.api.DataOperationType;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
@@ -57,6 +56,7 @@
 import org.apache.hadoop.hive.metastore.api.TxnAbortedException;
 import org.apache.hadoop.hive.metastore.api.TxnToWriteId;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.io.AcidUtils;
 import org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 import org.apache.hadoop.hive.ql.lockmgr.LockException;
@@ -370,10 +370,10 @@ public PartitionInfo createPartitionIfNotExists(final List<String> partitionValu
     String partName = null;
     boolean exists = false;
     try {
-      Map<String, String> partSpec = Warehouse.makeSpecFromValues(tableObject.getPartitionKeys(), partitionValues);
+      Map<String, String> partSpec = MetaStoreUtils.makeSpecFromValues(tableObject.getPartitionKeys(), partitionValues);
       AddPartitionDesc addPartitionDesc = new AddPartitionDesc(database, table, true);
-      partName = Warehouse.makePartName(tableObject.getPartitionKeys(), partitionValues);
-      partLocation = new Path(tableObject.getDataLocation(), Warehouse.makePartPath(partSpec)).toString();
+      partName = MetaStoreUtils.makePartName(tableObject.getPartitionKeys(), partitionValues);
+      partLocation = new Path(tableObject.getDataLocation(), MetaStoreUtils.makePartPath(partSpec)).toString();
       addPartitionDesc.addPartition(partSpec, partLocation);
       Partition partition = Hive.convertAddSpecToMetaPartition(tableObject, addPartitionDesc.getPartition(0), conf);
       getMSC().add_partition(partition);
@@ -646,7 +646,7 @@ private TransactionBatch(HiveStreamingConnection conn) throws StreamingException
     try {
       if (conn.isPartitionedTable() && !conn.isDynamicPartitioning()) {
        List<FieldSchema> partKeys = conn.tableObject.getPartitionKeys();
-        partNameForLock = Warehouse.makePartName(partKeys, conn.staticPartitionValues);
+        partNameForLock = MetaStoreUtils.makePartName(partKeys, conn.staticPartitionValues);
       }
       this.conn = conn;
       this.username = conn.username;
-- 
2.16.3
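A closing note on the streaming hunks: createPartitionIfNotExists composes three of the moved helpers — makeSpecFromValues to build the key/value spec, makePartName for the partition (and lock) name, and makePartPath for the directory under the table location — while AbstractRecordWriter uses getQualifiedName for "db.table". A rough, self-contained sketch of that composition, assuming the moved methods keep Warehouse's signatures (the table location and values below are hypothetical):

    import java.util.Arrays;
    import java.util.List;
    import java.util.Map;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.metastore.api.FieldSchema;
    import org.apache.hadoop.hive.metastore.api.MetaException;
    import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;

    public class PartitionPathSketch {
      public static void main(String[] args) throws MetaException {
        List<FieldSchema> partKeys = Arrays.asList(new FieldSchema("ds", "string", ""));
        List<String> values = Arrays.asList("2018-08-14");

        // {ds=2018-08-14}: ordered key/value spec for the new partition.
        Map<String, String> partSpec = MetaStoreUtils.makeSpecFromValues(partKeys, values);
        // "ds=2018-08-14": partition name, also used when naming locks.
        String partName = MetaStoreUtils.makePartName(partKeys, values);
        // Partition directory resolved under a hypothetical table location.
        Path partLocation = new Path("/warehouse/db.db/tbl", MetaStoreUtils.makePartPath(partSpec));
        // "db.tbl": qualified name as AbstractRecordWriter now derives it.
        String qualified = MetaStoreUtils.getQualifiedName("db", "tbl");

        System.out.println(qualified + ": " + partName + " -> " + partLocation);
      }
    }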