commit 59da41ddf311d3f335c7854194af96c9e6ec9413
Author: Alan Gates
Date:   Wed Aug 2 12:35:07 2017 -0700

    Moved ReplChangeManager and Warehouse to standalone.

diff --git cli/src/test/org/apache/hadoop/hive/cli/TestCliSessionState.java cli/src/test/org/apache/hadoop/hive/cli/TestCliSessionState.java
index d9718c694d..effef09a0e 100644
--- cli/src/test/org/apache/hadoop/hive/cli/TestCliSessionState.java
+++ cli/src/test/org/apache/hadoop/hive/cli/TestCliSessionState.java
@@ -20,7 +20,7 @@
 import static org.junit.Assert.assertEquals;
 
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.junit.Test;
 
@@ -35,7 +35,7 @@
   @Test
   public void testgetDbName() throws Exception {
     SessionState.start(new HiveConf());
-    assertEquals(MetaStoreUtils.DEFAULT_DATABASE_NAME,
+    assertEquals(Warehouse.DEFAULT_DATABASE_NAME,
         SessionState.get().getCurrentDatabase());
   }
 }
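The pattern repeated throughout this patch is a one-for-one constant relocation: DEFAULT_DATABASE_NAME (and, in later hunks, DEFAULT_DATABASE_COMMENT and DEFAULT_SERIALIZATION_FORMAT) now live on Warehouse rather than MetaStoreUtils, so callers only swap the import or static import. A minimal caller-side sketch, not part of the patch (the class name is hypothetical; the constant's value, "default", is confirmed by the removed MetaStoreUtils hunk below):

    import org.apache.hadoop.hive.metastore.Warehouse;

    public class DefaultDbExample {
      public static void main(String[] args) {
        // Previously MetaStoreUtils.DEFAULT_DATABASE_NAME; same value, new home.
        System.out.println(Warehouse.DEFAULT_DATABASE_NAME); // prints "default"
      }
    }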
diff --git hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/HCatDriver.java hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/HCatDriver.java
index 07abd4232d..48735959b0 100644
--- hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/HCatDriver.java
+++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/HCatDriver.java
@@ -22,7 +22,6 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.ql.CommandNeedRetryException;
 import org.apache.hadoop.hive.ql.Driver;
@@ -113,7 +112,7 @@ private int setFSPermsNGrp(SessionState ss) {
       }
     } else {
       // looks like a db operation
-      if (dbName.isEmpty() || dbName.equals(MetaStoreUtils.DEFAULT_DATABASE_NAME)) {
+      if (dbName.isEmpty() || dbName.equals(Warehouse.DEFAULT_DATABASE_NAME)) {
         // We dont set perms or groups for default dir.
         return 0;
       } else {
diff --git hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatUtil.java hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatUtil.java
index 8b927af8a2..107faf7c84 100644
--- hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatUtil.java
+++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatUtil.java
@@ -41,7 +41,7 @@
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
 import org.apache.hadoop.hive.metastore.IMetaStoreClient;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
@@ -426,7 +426,7 @@ public static HiveStorageHandler getStorageHandler(Configuration conf,
   public static Pair<String, String> getDbAndTableName(String tableName) throws IOException {
     String[] dbTableNametokens = tableName.split("\\.");
     if (dbTableNametokens.length == 1) {
-      return new Pair<String, String>(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName);
+      return new Pair<String, String>(Warehouse.DEFAULT_DATABASE_NAME, tableName);
     } else if (dbTableNametokens.length == 2) {
       return new Pair<String, String>(dbTableNametokens[0], dbTableNametokens[1]);
     } else {
diff --git hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatTableInfo.java hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatTableInfo.java
index 14c93ab945..b9a34255e2 100644
--- hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatTableInfo.java
+++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatTableInfo.java
@@ -24,7 +24,7 @@
 import java.util.List;
 
 import com.google.common.collect.Lists;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hive.hcatalog.common.HCatUtil;
 import org.apache.hive.hcatalog.data.schema.HCatFieldSchema;
@@ -76,7 +76,7 @@
                 HCatSchema partitionColumns,
                 StorerInfo storerInfo,
                 Table table) {
-    this.databaseName = (databaseName == null) ? MetaStoreUtils.DEFAULT_DATABASE_NAME : databaseName;
+    this.databaseName = (databaseName == null) ? Warehouse.DEFAULT_DATABASE_NAME : databaseName;
     this.tableName = tableName;
     this.dataColumns = dataColumns;
     this.table = table;
diff --git hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/InputJobInfo.java hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/InputJobInfo.java
index 7ec6ae3929..c593dca11b 100644
--- hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/InputJobInfo.java
+++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/InputJobInfo.java
@@ -20,7 +20,7 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.Warehouse;
 
 import java.io.IOException;
 import java.io.ObjectInputStream;
@@ -84,7 +84,7 @@
   private InputJobInfo(String databaseName,
                        String tableName,
                        String filter,
                        Properties properties) {
     this.databaseName = (databaseName == null) ?
-        MetaStoreUtils.DEFAULT_DATABASE_NAME : databaseName;
+        Warehouse.DEFAULT_DATABASE_NAME : databaseName;
     this.tableName = tableName;
     this.filter = filter;
     this.properties = properties == null ?
         new Properties() : properties;
diff --git hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/OutputJobInfo.java hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/OutputJobInfo.java
index ce0d6d1b38..4e8b93660e 100644
--- hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/OutputJobInfo.java
+++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/OutputJobInfo.java
@@ -26,7 +26,7 @@
 import java.util.Map;
 import java.util.Properties;
 
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hive.hcatalog.data.schema.HCatSchema;
 
 /** The class used to serialize and store the output related information */
@@ -91,7 +91,7 @@ public static OutputJobInfo create(String databaseName,
   private OutputJobInfo(String databaseName,
                         String tableName,
                         Map<String, String> partitionValues) {
-    this.databaseName = (databaseName == null) ? MetaStoreUtils.DEFAULT_DATABASE_NAME : databaseName;
+    this.databaseName = (databaseName == null) ? Warehouse.DEFAULT_DATABASE_NAME : databaseName;
     this.tableName = tableName;
     this.partitionValues = partitionValues;
     this.properties = new Properties();
diff --git hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java
index e863372e3c..374c1d2065 100644
--- hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java
+++ hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java
@@ -106,7 +106,7 @@ protected void setUp() throws Exception {
 
   public void testCustomPerms() throws Exception {
 
-    String dbName = MetaStoreUtils.DEFAULT_DATABASE_NAME;
+    String dbName = Warehouse.DEFAULT_DATABASE_NAME;
     String tblName = "simptbl";
     String typeName = "Person";
 
@@ -151,7 +151,7 @@ public void testCustomPerms() throws Exception {
       // And no metadata gets created.
       try {
-        msc.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tblName);
+        msc.getTable(Warehouse.DEFAULT_DATABASE_NAME, tblName);
         assert false;
       } catch (Exception e) {
         assertTrue(e instanceof NoSuchObjectException);
diff --git hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestSemanticAnalysis.java hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestSemanticAnalysis.java
index e41b1f1fec..f2598003c1 100644
--- hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestSemanticAnalysis.java
+++ hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestSemanticAnalysis.java
@@ -29,7 +29,7 @@
 import org.apache.hadoop.hive.cli.CliSessionState;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
@@ -95,7 +95,7 @@ public void testCreateTblWithLowerCasePartNames() throws CommandNeedRetryExcepti
     CommandProcessorResponse resp = driver.run("create table junit_sem_analysis (a int) partitioned by (B string) stored as TEXTFILE");
     assertEquals(resp.getResponseCode(), 0);
     assertEquals(null, resp.getErrorMessage());
-    Table tbl = client.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, TBL_NAME);
+    Table tbl = client.getTable(Warehouse.DEFAULT_DATABASE_NAME, TBL_NAME);
     assertEquals("Partition key name case problem", "b", tbl.getPartitionKeys().get(0).getName());
     driver.run("drop table junit_sem_analysis");
   }
@@ -108,13 +108,13 @@ public void testAlterTblFFpart() throws MetaException, TException, NoSuchObjectE
     driver.run("alter table junit_sem_analysis add partition (b='2010-10-10')");
     hcatDriver.run("alter table junit_sem_analysis partition (b='2010-10-10') set fileformat RCFILE");
 
-    Table tbl = client.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, TBL_NAME);
+    Table tbl = client.getTable(Warehouse.DEFAULT_DATABASE_NAME, TBL_NAME);
     assertEquals(TextInputFormat.class.getName(), tbl.getSd().getInputFormat());
     assertEquals(HiveIgnoreKeyTextOutputFormat.class.getName(), tbl.getSd().getOutputFormat());
     List<String> partVals = new ArrayList<String>(1);
     partVals.add("2010-10-10");
 
-    Partition part = client.getPartition(MetaStoreUtils.DEFAULT_DATABASE_NAME, TBL_NAME, partVals);
+    Partition part = client.getPartition(Warehouse.DEFAULT_DATABASE_NAME, TBL_NAME, partVals);
     assertEquals(RCFileInputFormat.class.getName(), part.getSd().getInputFormat());
     assertEquals(RCFileOutputFormat.class.getName(), part.getSd().getOutputFormat());
 
@@ -161,7 +161,7 @@ public void testCreateTableIfNotExists() throws MetaException, TException, NoSuc
     hcatDriver.run("drop table " + TBL_NAME);
     hcatDriver.run("create table " + TBL_NAME + " (a int) stored as RCFILE");
 
-    Table tbl = client.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, TBL_NAME);
+    Table tbl = client.getTable(Warehouse.DEFAULT_DATABASE_NAME, TBL_NAME);
     List<FieldSchema> cols = tbl.getSd().getCols();
     assertEquals(1, cols.size());
     assertTrue(cols.get(0).equals(new FieldSchema("a", "int", null)));
@@ -171,7 +171,7 @@ public void testCreateTableIfNotExists() throws MetaException, TException, NoSuc
     CommandProcessorResponse resp = hcatDriver.run("create table if not exists junit_sem_analysis (a int) stored as RCFILE");
     assertEquals(0, resp.getResponseCode());
     assertNull(resp.getErrorMessage());
-    tbl = client.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, TBL_NAME);
+    tbl = client.getTable(Warehouse.DEFAULT_DATABASE_NAME, TBL_NAME);
     cols = tbl.getSd().getCols();
     assertEquals(1, cols.size());
     assertTrue(cols.get(0).equals(new FieldSchema("a", "int", null)));
@@ -224,7 +224,7 @@ public void testAddReplaceCols() throws IOException, MetaException, TException,
     response = hcatDriver.run("describe extended junit_sem_analysis");
     assertEquals(0, response.getResponseCode());
 
-    Table tbl = client.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, TBL_NAME);
+    Table tbl = client.getTable(Warehouse.DEFAULT_DATABASE_NAME, TBL_NAME);
     List<FieldSchema> cols = tbl.getSd().getCols();
     assertEquals(2, cols.size());
     assertTrue(cols.get(0).equals(new FieldSchema("a1", "tinyint", null)));
@@ -247,11 +247,11 @@ public void testAlterTableRename() throws CommandNeedRetryException, TException
     hcatDriver.run("drop table oldname");
     hcatDriver.run("drop table newname");
     hcatDriver.run("create table oldname (a int)");
-    Table tbl = client.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, "oldname");
+    Table tbl = client.getTable(Warehouse.DEFAULT_DATABASE_NAME, "oldname");
     assertTrue("The old table location is: " + tbl.getSd().getLocation(), tbl.getSd().getLocation().contains("oldname"));
 
     hcatDriver.run("alter table oldname rename to newNAME");
-    tbl = client.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, "newname");
+    tbl = client.getTable(Warehouse.DEFAULT_DATABASE_NAME, "newname");
     // since the oldname table is not under its database (See HIVE-15059), the renamed oldname table will keep
     // its location after HIVE-14909. I changed to check the existence of the newname table and its name instead
     // of verifying its location
@@ -268,7 +268,7 @@ public void testAlterTableSetFF() throws IOException, MetaException, TException,
     hcatDriver.run("drop table junit_sem_analysis");
     hcatDriver.run("create table junit_sem_analysis (a int) partitioned by (b string) stored as RCFILE");
 
-    Table tbl = client.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, TBL_NAME);
+    Table tbl = client.getTable(Warehouse.DEFAULT_DATABASE_NAME, TBL_NAME);
     assertEquals(RCFileInputFormat.class.getName(), tbl.getSd().getInputFormat());
     assertEquals(RCFileOutputFormat.class.getName(), tbl.getSd().getOutputFormat());
 
@@ -276,7 +276,7 @@ public void testAlterTableSetFF() throws IOException, MetaException, TException,
         "'org.apache.hadoop.hive.ql.io.RCFileOutputFormat' inputdriver 'mydriver' outputdriver 'yourdriver'");
     hcatDriver.run("desc extended junit_sem_analysis");
 
-    tbl = client.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, TBL_NAME);
+    tbl = client.getTable(Warehouse.DEFAULT_DATABASE_NAME, TBL_NAME);
     assertEquals(RCFileInputFormat.class.getName(), tbl.getSd().getInputFormat());
     assertEquals(RCFileOutputFormat.class.getName(), tbl.getSd().getOutputFormat());
 
@@ -332,7 +332,7 @@ public void testAddDriverInfo() throws IOException, MetaException, TException, N
         "'org.apache.hadoop.hive.ql.io.RCFileOutputFormat' inputdriver 'mydriver' outputdriver 'yourdriver' ";
     assertEquals(0, hcatDriver.run(query).getResponseCode());
 
-    Table tbl = client.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, TBL_NAME);
+    Table tbl = client.getTable(Warehouse.DEFAULT_DATABASE_NAME, TBL_NAME);
     assertEquals(RCFileInputFormat.class.getName(), tbl.getSd().getInputFormat());
     assertEquals(RCFileOutputFormat.class.getName(), tbl.getSd().getOutputFormat());
diff --git hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/HCatMapReduceTest.java hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/HCatMapReduceTest.java
index deee3a05d6..ae56ff7c81 100644
--- hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/HCatMapReduceTest.java
+++ hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/HCatMapReduceTest.java
@@ -36,8 +36,8 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.TableType;
+import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.SerDeInfo;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
@@ -88,7 +88,7 @@ public abstract class HCatMapReduceTest extends HCatBaseTest {
   private static final Logger LOG = LoggerFactory.getLogger(HCatMapReduceTest.class);
 
-  protected static String dbName = MetaStoreUtils.DEFAULT_DATABASE_NAME;
+  protected static String dbName = Warehouse.DEFAULT_DATABASE_NAME;
   protected static final String TABLE_NAME = "testHCatMapReduceTable";
 
   private static List<HCatRecord> writeRecords = new ArrayList<HCatRecord>();
@@ -155,7 +155,7 @@ public static void setUpOneTime() throws Exception {
   @After
   public void deleteTable() throws Exception {
     try {
-      String databaseName = (dbName == null) ? MetaStoreUtils.DEFAULT_DATABASE_NAME : dbName;
+      String databaseName = (dbName == null) ? Warehouse.DEFAULT_DATABASE_NAME : dbName;
 
       client.dropTable(databaseName, tableName);
       // in case of external table, drop the table contents as well
@@ -176,7 +176,7 @@ public void createTable() throws Exception {
     // SerDe is in the disabled serdes list.
     Assume.assumeTrue(!DISABLED_SERDES.contains(serdeClass));
 
-    String databaseName = (dbName == null) ? MetaStoreUtils.DEFAULT_DATABASE_NAME : dbName;
+    String databaseName = (dbName == null) ? Warehouse.DEFAULT_DATABASE_NAME : dbName;
     try {
       client.dropTable(databaseName, tableName);
     } catch (Exception e) {
diff --git hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java
index 358dd50840..61b2f4156e 100644
--- hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java
+++ hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java
@@ -37,6 +37,7 @@
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.SerDeInfo;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
@@ -222,7 +223,7 @@ public void map(LongWritable key, Text value, Context context)
   }
 
   private void createTable(String dbName, String tableName) throws Exception {
-    String databaseName = (dbName == null) ? MetaStoreUtils.DEFAULT_DATABASE_NAME
+    String databaseName = (dbName == null) ? Warehouse.DEFAULT_DATABASE_NAME
         : dbName;
     try {
       msc.dropTable(databaseName, tableName);
diff --git hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestPassProperties.java hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestPassProperties.java
index 975f94b06b..31857bf643 100644
--- hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestPassProperties.java
+++ hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestPassProperties.java
@@ -29,7 +29,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.cli.CliSessionState;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.ql.Driver;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.io.LongWritable;
@@ -105,7 +105,7 @@ public void testSequenceTableWriteReadMR() throws Exception {
     TextInputFormat.setInputPaths(job, INPUT_FILE_NAME);
 
     HCatOutputFormat.setOutput(job, OutputJobInfo.create(
-        MetaStoreUtils.DEFAULT_DATABASE_NAME, "bad_props_table", null));
+        Warehouse.DEFAULT_DATABASE_NAME, "bad_props_table", null));
     job.setOutputFormatClass(HCatOutputFormat.class);
     HCatOutputFormat.setSchema(job, getSchema());
     job.setNumReduceTasks(0);
diff --git hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/PigHCatUtil.java hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/PigHCatUtil.java
index 337f4fb731..2e756b47d7 100644
--- hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/PigHCatUtil.java
+++ hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/PigHCatUtil.java
@@ -36,7 +36,7 @@
 import org.apache.hadoop.hive.common.type.HiveVarchar;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.IMetaStoreClient;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.mapreduce.Job;
@@ -70,7 +70,7 @@
   private static final Logger LOG = LoggerFactory.getLogger(PigHCatUtil.class);
 
   static final int PIG_EXCEPTION_CODE = 1115; // http://wiki.apache.org/pig/PigErrorHandlingFunctionalSpecification#Error_codes
-  private static final String DEFAULT_DB = MetaStoreUtils.DEFAULT_DATABASE_NAME;
+  private static final String DEFAULT_DB = Warehouse.DEFAULT_DATABASE_NAME;
 
   private final Map<Pair<String, String>, Table> hcatTableCache = new HashMap<Pair<String, String>, Table>();
diff --git hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderEncryption.java hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderEncryption.java
index 903578b279..496f3c85da 100644
--- hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderEncryption.java
+++ hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderEncryption.java
@@ -26,10 +26,9 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.cli.CliSessionState;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.ql.CommandNeedRetryException;
 import org.apache.hadoop.hive.ql.Driver;
-import org.apache.hadoop.hive.ql.io.IOConstants;
 import org.apache.hadoop.hive.ql.io.StorageFormats;
 import org.apache.hadoop.hive.ql.processors.CommandProcessor;
 import org.apache.hadoop.hive.ql.processors.CommandProcessorFactory;
@@ -44,7 +43,6 @@
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.Mapper;
 import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
-import org.apache.hadoop.util.Shell;
 import org.apache.hive.hcatalog.HcatTestUtils;
 import org.apache.hive.hcatalog.common.HCatUtil;
 import org.apache.hive.hcatalog.data.HCatRecord;
@@ -67,7 +65,6 @@
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
@@ -333,7 +330,7 @@ public void testReadDataFromEncryptedHiveTableByHCatMR() throws Exception {
     job.setInputFormatClass(HCatInputFormat.class);
     job.setOutputFormatClass(TextOutputFormat.class);
 
-    HCatInputFormat.setInput(job, MetaStoreUtils.DEFAULT_DATABASE_NAME, ENCRYPTED_TABLE, null);
+    HCatInputFormat.setInput(job, Warehouse.DEFAULT_DATABASE_NAME, ENCRYPTED_TABLE, null);
 
     job.setMapOutputKeyClass(BytesWritable.class);
     job.setMapOutputValueClass(Text.class);
diff --git hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java
index 4ab497e578..17b9d03a21 100644
--- hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java
+++ hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java
@@ -34,8 +34,8 @@
 import org.apache.hadoop.hive.common.classification.InterfaceStability;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.IMetaStoreClient;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.TableType;
+import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
 import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
 import org.apache.hadoop.hive.metastore.api.Database;
@@ -891,7 +891,7 @@ public void close() throws HCatException {
   private String checkDB(String name) {
     if (StringUtils.isEmpty(name)) {
-      return MetaStoreUtils.DEFAULT_DATABASE_NAME;
+      return Warehouse.DEFAULT_DATABASE_NAME;
     } else {
       return name;
     }
diff --git hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatPartition.java hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatPartition.java
index ed40af767e..5050014925 100644
--- hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatPartition.java
+++ hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatPartition.java
@@ -25,7 +25,6 @@
 import java.util.Map;
 
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.MetaException;
@@ -47,7 +46,7 @@
   private HCatTable hcatTable;
   private String tableName;
-  private String dbName = MetaStoreUtils.DEFAULT_DATABASE_NAME;
+  private String dbName = Warehouse.DEFAULT_DATABASE_NAME;
   private List<String> values;
   private int createTime;
   private int lastAccessTime;
diff --git hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatTable.java hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatTable.java
index c08942cb1e..85ab0e22f8 100644
--- hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatTable.java
+++ hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatTable.java
@@ -28,8 +28,8 @@
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.TableType;
+import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.Order;
 import org.apache.hadoop.hive.metastore.api.SerDeInfo;
@@ -104,7 +104,7 @@
   public static final String DEFAULT_INPUT_FORMAT_CLASS = org.apache.hadoop.mapred.TextInputFormat.class.getName();
   public static final String DEFAULT_OUTPUT_FORMAT_CLASS = org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat.class.getName();
 
-  private String dbName = MetaStoreUtils.DEFAULT_DATABASE_NAME;
+  private String dbName = Warehouse.DEFAULT_DATABASE_NAME;
   private String tableName;
   private HiveConf conf;
   private String tableType;
@@ -118,7 +118,7 @@
   private String owner;
 
   public HCatTable(String dbName, String tableName) {
-    this.dbName = StringUtils.isBlank(dbName)? MetaStoreUtils.DEFAULT_DATABASE_NAME : dbName;
+    this.dbName = StringUtils.isBlank(dbName)? Warehouse.DEFAULT_DATABASE_NAME : dbName;
     this.tableName = tableName;
     this.sd = new StorageDescriptor();
     this.sd.setInputFormat(DEFAULT_INPUT_FORMAT_CLASS);
diff --git itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/mapreduce/TestSequenceFileReadWrite.java itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/mapreduce/TestSequenceFileReadWrite.java
index 48aed790b0..d2dbe8f287 100644
--- itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/mapreduce/TestSequenceFileReadWrite.java
+++ itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/mapreduce/TestSequenceFileReadWrite.java
@@ -31,7 +31,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.cli.CliSessionState;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.ql.Driver;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.io.LongWritable;
@@ -179,7 +179,7 @@ public void testSequenceTableWriteReadMR() throws Exception {
     TextInputFormat.setInputPaths(job, inputFileName);
 
     HCatOutputFormat.setOutput(job, OutputJobInfo.create(
-        MetaStoreUtils.DEFAULT_DATABASE_NAME, "demo_table_2", null));
+        Warehouse.DEFAULT_DATABASE_NAME, "demo_table_2", null));
     job.setOutputFormatClass(HCatOutputFormat.class);
     HCatOutputFormat.setSchema(job, getSchema());
     job.setNumReduceTasks(0);
@@ -226,7 +226,7 @@ public void testTextTableWriteReadMR() throws Exception {
     TextInputFormat.setInputPaths(job, inputFileName);
 
     HCatOutputFormat.setOutput(job, OutputJobInfo.create(
-        MetaStoreUtils.DEFAULT_DATABASE_NAME, "demo_table_3", null));
+        Warehouse.DEFAULT_DATABASE_NAME, "demo_table_3", null));
     job.setOutputFormatClass(HCatOutputFormat.class);
     HCatOutputFormat.setSchema(job, getSchema());
     assertTrue(job.waitForCompletion(true));
diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
index 50e5274f00..98dad7a093 100644
--- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
+++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
@@ -3212,7 +3212,7 @@ private void createMultiPartitionTableSchema(String dbName, String tblName,
   @Test
   public void testDBOwner() throws NoSuchObjectException, MetaException, TException {
-    Database db = client.getDatabase(MetaStoreUtils.DEFAULT_DATABASE_NAME);
+    Database db = client.getDatabase(Warehouse.DEFAULT_DATABASE_NAME);
     assertEquals(db.getOwnerName(), HiveMetaStore.PUBLIC);
     assertEquals(db.getOwnerType(), PrincipalType.ROLE);
   }
diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java
index 8d5530fbbb..bec715d37a 100644
--- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java
+++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java
@@ -34,7 +34,7 @@
 import org.apache.hadoop.hive.common.LogUtils.LogInitializationException;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.ql.Driver;
 import org.apache.hadoop.hive.ql.history.HiveHistory.Keys;
 import org.apache.hadoop.hive.ql.history.HiveHistory.QueryInfo;
@@ -101,7 +101,7 @@ protected void setUp() {
       cols.add("key");
       cols.add("value");
       for (String src : srctables) {
-        db.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, src, true, true);
+        db.dropTable(Warehouse.DEFAULT_DATABASE_NAME, src, true, true);
         db.createTable(src, cols, null, TextInputFormat.class,
             IgnoreKeyTextOutputFormat.class);
         db.loadTable(hadoopDataFile[i], src, false, false, false, false, false);
diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/metadata/TestSemanticAnalyzerHookLoading.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/metadata/TestSemanticAnalyzerHookLoading.java
index 3027ef4f1f..2170ca3706 100644
--- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/metadata/TestSemanticAnalyzerHookLoading.java
+++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/metadata/TestSemanticAnalyzerHookLoading.java
@@ -24,7 +24,7 @@
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.ql.Driver;
 import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
 import org.apache.hadoop.hive.ql.session.SessionState;
@@ -47,7 +47,7 @@ public void testHookLoading() throws Exception{
     assertEquals(0, resp.getResponseCode());
     assertNull(resp.getErrorMessage());
 
-    Map<String, String> params = Hive.get(conf).getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, "testDL").getParameters();
+    Map<String, String> params = Hive.get(conf).getTable(Warehouse.DEFAULT_DATABASE_NAME, "testDL").getParameters();
     assertEquals(DummyCreateTableHook.class.getName(),params.get("createdBy"));
     assertEquals("Open Source rocks!!", params.get("Message"));
diff --git itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseQTestUtil.java itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseQTestUtil.java
index aeb72155e6..0cc9a89085 100644
--- itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseQTestUtil.java
+++ itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseQTestUtil.java
@@ -20,7 +20,7 @@
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.ql.QTestUtil;
 
 import java.util.List;
@@ -109,7 +109,7 @@ public void cleanUp(String tname) throws Exception {
     super.cleanUp(tname);
 
     // drop in case leftover from unsuccessful run
-    db.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, HBASE_SRC_NAME);
+    db.dropTable(Warehouse.DEFAULT_DATABASE_NAME, HBASE_SRC_NAME);
 
     HBaseAdmin admin = null;
     try {
diff --git itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
index b3677322ca..05f8a5ff2d 100644
--- itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
+++ itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.hive.ql;
 
-import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME;
+import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
 
 import java.io.BufferedInputStream;
 import java.io.BufferedOutputStream;
@@ -101,7 +101,7 @@
 import org.apache.hadoop.hive.llap.LlapItUtils;
 import org.apache.hadoop.hive.llap.daemon.MiniLlapCluster;
 import org.apache.hadoop.hive.llap.io.api.LlapProxy;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.Index;
 import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
 import org.apache.hadoop.hive.ql.exec.Task;
@@ -1423,7 +1423,7 @@ public void convertSequenceFileToTextFile() throws Exception {
         .run("FROM dest4_sequencefile INSERT OVERWRITE TABLE dest4 SELECT dest4_sequencefile.*");
 
     // Drop dest4_sequencefile
-    db.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, "dest4_sequencefile",
+    db.dropTable(Warehouse.DEFAULT_DATABASE_NAME, "dest4_sequencefile",
         true, true);
   }
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index cf33cca24f..f7f5cea073 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -18,8 +18,8 @@
 package org.apache.hadoop.hive.metastore;
 
 import static org.apache.commons.lang.StringUtils.join;
-import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_COMMENT;
-import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME;
+import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_COMMENT;
+import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
 import static org.apache.hadoop.hive.metastore.MetaStoreUtils.validateName;
 
 import java.io.IOException;
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index 75e4180391..494aa31646 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.hive.metastore;
 
-import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME;
+import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
 import static org.apache.hadoop.hive.metastore.MetaStoreUtils.isIndexTable;
 
 import java.io.IOException;
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
index bbe13fd77b..6cf9a5c48a 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
@@ -120,12 +120,6 @@
 
   protected static final Logger LOG = LoggerFactory.getLogger("hive.log");
 
-  public static final String DEFAULT_DATABASE_NAME = "default";
-  public static final String DEFAULT_DATABASE_COMMENT = "Default Hive database";
-  public static final String DEFAULT_SERIALIZATION_FORMAT = "1";
-
-  public static final String DATABASE_WAREHOUSE_SUFFIX = ".db";
-
   // Right now we only support one special character '/'.
   // More special characters can be added accordingly in the future.
   // NOTE:
@@ -151,7 +145,7 @@ public static Table createColumnsetSchema(String name, List<String> columns,
     serdeInfo.setSerializationLib(LazySimpleSerDe.class.getName());
     serdeInfo.setParameters(new HashMap<String, String>());
     serdeInfo.getParameters().put(org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT,
-        DEFAULT_SERIALIZATION_FORMAT);
+        Warehouse.DEFAULT_SERIALIZATION_FORMAT);
 
     List<FieldSchema> fields = new ArrayList<FieldSchema>(columns.size());
     sd.setCols(fields);
@@ -1887,22 +1881,6 @@ public static ClassLoader addToClassPath(ClassLoader cloader, String[] newPaths)
     return new URLClassLoader(curPath.toArray(new URL[0]), loader);
   }
 
-  public static String encodeTableName(String name) {
-    // The encoding method is simple, e.g., replace
-    // all the special characters with the corresponding number in ASCII.
-    // Note that unicode is not supported in table names. And we have explicit
-    // checks for it.
-    StringBuilder sb = new StringBuilder();
-    for (char ch : name.toCharArray()) {
-      if (Character.isLetterOrDigit(ch) || ch == '_') {
-        sb.append(ch);
-      } else {
-        sb.append('-').append((int) ch).append('-');
-      }
-    }
-    return sb.toString();
-  }
-
   // this function will merge csOld into csNew.
   public static void mergeColStats(ColumnStatistics csNew, ColumnStatistics csOld)
       throws InvalidObjectException {
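The hunk above deletes encodeTableName() from the old metastore-module MetaStoreUtils; as the ql hunks below show, callers now reach it at org.apache.hadoop.hive.metastore.utils.MetaStoreUtils. A self-contained sketch of the (unchanged) encoding logic, copied from the deleted body (the class and main method are illustrative only, not part of the patch):

    public class EncodeTableNameDemo {
      // Same logic as the deleted method: letters, digits and '_' pass through;
      // every other character is replaced with -<its ASCII code>-.
      static String encodeTableName(String name) {
        StringBuilder sb = new StringBuilder();
        for (char ch : name.toCharArray()) {
          if (Character.isLetterOrDigit(ch) || ch == '_') {
            sb.append(ch);
          } else {
            sb.append('-').append((int) ch).append('-');
          }
        }
        return sb.toString();
      }

      public static void main(String[] args) {
        System.out.println(encodeTableName("my_table")); // my_table (unchanged)
        System.out.println(encodeTableName("a/b"));      // a-47-b ('/' is ASCII 47)
      }
    }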
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index 3053dcb50b..63421c0bb2 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -7477,7 +7477,7 @@ public boolean deletePartitionColumnStatistics(String dbName, String tableName,
     boolean ret = false;
     Query query = null;
     if (dbName == null) {
-      dbName = MetaStoreUtils.DEFAULT_DATABASE_NAME;
+      dbName = Warehouse.DEFAULT_DATABASE_NAME;
     }
     if (tableName == null) {
       throw new InvalidInputException("Table name is null.");
@@ -7555,7 +7555,7 @@ public boolean deleteTableColumnStatistics(String dbName, String tableName, Stri
     boolean ret = false;
     Query query = null;
     if (dbName == null) {
-      dbName = MetaStoreUtils.DEFAULT_DATABASE_NAME;
+      dbName = Warehouse.DEFAULT_DATABASE_NAME;
     }
     if (tableName == null) {
       throw new InvalidInputException("Table name is null.");
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index 8aa2d90b76..714ea1f3fd 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -2413,7 +2413,7 @@ else if (sortCol.getOrder() == BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_DESC) {
       if (tbl.getStorageHandler() == null) {
         // If serialization.format property has the default value, it will not to be included in
         // SERDE properties
-        if (MetaStoreUtils.DEFAULT_SERIALIZATION_FORMAT.equals(serdeParams.get(
+        if (Warehouse.DEFAULT_SERIALIZATION_FORMAT.equals(serdeParams.get(
             serdeConstants.SERIALIZATION_FORMAT))){
           serdeParams.remove(serdeConstants.SERIALIZATION_FORMAT);
         }
@@ -4823,11 +4823,11 @@ public static void makeLocationQualified(String databaseName, StorageDescriptor
     if (!sd.isSetLocation())
     {
       // Location is not set, leave it as-is if this is not a default DB
-      if (databaseName.equalsIgnoreCase(MetaStoreUtils.DEFAULT_DATABASE_NAME))
+      if (databaseName.equalsIgnoreCase(Warehouse.DEFAULT_DATABASE_NAME))
       {
         // Default database name path is always ignored, use METASTOREWAREHOUSE and object name
         // instead
-        path = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.METASTOREWAREHOUSE), MetaStoreUtils.encodeTableName(name.toLowerCase()));
+        path = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.METASTOREWAREHOUSE), org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.encodeTableName(name.toLowerCase()));
       }
     }
     else
@@ -4855,7 +4855,7 @@ private void makeLocationQualified(CreateIndexDesc crtIndex, String name) throws
     if (crtIndex.getLocation() == null) {
       // Location is not set, leave it as-is if index doesn't belong to default DB
       // Currently all indexes are created in current DB only
-      if (Utilities.getDatabaseName(name).equalsIgnoreCase(MetaStoreUtils.DEFAULT_DATABASE_NAME)) {
+      if (Utilities.getDatabaseName(name).equalsIgnoreCase(Warehouse.DEFAULT_DATABASE_NAME)) {
         // Default database name path is always ignored, use METASTOREWAREHOUSE and object name
         // instead
         String warehouse = HiveConf.getVar(conf, ConfVars.METASTOREWAREHOUSE);
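Note that DDLTask above calls the relocated helper by its fully qualified name rather than adding an import; presumably this avoids a simple-name clash in files that still reference the legacy org.apache.hadoop.hive.metastore.MetaStoreUtils. A hypothetical sketch of that calling convention (class and method names are illustrative, not from the patch):

    class StatsPrefixExample {
      static String statsPrefix(String dbName, String tblName) {
        // Fully qualified call into the standalone metastore's utils class,
        // mirroring the style of the DDLTask and StatsTask hunks.
        return dbName + "."
            + org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.encodeTableName(tblName);
      }
    }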
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java
index c22d69bb19..4db68066d6 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java
@@ -22,7 +22,6 @@
 import java.io.Serializable;
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.Callable;
@@ -350,7 +349,7 @@ private String getAggregationPrefix(Table table, Partition partition)
       throws MetaException {
 
     // prefix is of the form dbName.tblName
-    String prefix = table.getDbName() + "." + MetaStoreUtils.encodeTableName(table.getTableName());
+    String prefix = table.getDbName() + "." + org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.encodeTableName(table.getTableName());
     if (partition != null) {
       return Utilities.join(prefix, Warehouse.makePartPath(partition.getSpec()));
     }
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java
index acf9746667..cef07ad99a 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java
@@ -18,10 +18,10 @@ Licensed to the Apache Software Foundation (ASF) under one
 package org.apache.hadoop.hive.ql.exec.repl.bootstrap.load.table;
 
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.exec.ReplCopyTask;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskFactory;
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java
index 9ffd152f40..2bf3784102 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java
@@ -18,10 +18,10 @@ Licensed to the Apache Software Foundation (ASF) under one
 package org.apache.hadoop.hive.ql.exec.repl.bootstrap.load.table;
 
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.exec.ReplCopyTask;
 import org.apache.hadoop.hive.ql.exec.Task;
diff --git ql/src/java/org/apache/hadoop/hive/ql/hooks/EnforceReadOnlyTables.java ql/src/java/org/apache/hadoop/hive/ql/hooks/EnforceReadOnlyTables.java
index 4569ed5e41..107ce683d1 100644
--- ql/src/java/org/apache/hadoop/hive/ql/hooks/EnforceReadOnlyTables.java
+++ ql/src/java/org/apache/hadoop/hive/ql/hooks/EnforceReadOnlyTables.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.hive.ql.hooks;
 
-import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME;
+import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
 
 import java.util.HashSet;
 import java.util.Set;
diff --git ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java
index 24df25b8ca..9c9a3ab41f 100644
--- ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java
+++ ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java
@@ -22,10 +22,8 @@
 import org.apache.hadoop.hive.common.ValidTxnList;
 import org.apache.hadoop.hive.common.ValidReadTxnList;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.ql.Context;
-import org.apache.hadoop.hive.ql.Driver.DriverState;
 import org.apache.hadoop.hive.ql.Driver.LockedDriverState;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.QueryPlan;
@@ -35,7 +33,6 @@
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.Partition;
 import org.apache.hadoop.hive.ql.metadata.Table;
-import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.util.ReflectionUtils;
 
 import java.util.*;
@@ -345,7 +342,7 @@ private HiveLockMode getWriteEntityLockMode (WriteEntity we) {
       try {
         locks.add(new HiveLockObj(
             new HiveLockObject(new DummyPartition(p.getTable(), p.getTable().getDbName()
-                + "/" + MetaStoreUtils.encodeTableName(p.getTable().getTableName())
+                + "/" + org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.encodeTableName(p.getTable().getTableName())
                 + "/" + partialName,
                 partialSpec), lockData), mode));
         partialName += "/";
diff --git ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveLockObject.java ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveLockObject.java
index a51433992d..eb66c31734 100644
--- ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveLockObject.java
+++ ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveLockObject.java
@@ -24,7 +24,6 @@
 import org.apache.commons.lang.builder.HashCodeBuilder;
 import org.apache.hadoop.hive.common.StringInternUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.metadata.DummyPartition;
 import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
@@ -198,12 +197,12 @@ public HiveLockObject(String[] paths, HiveLockObjectData lockData) {
   }
 
   public HiveLockObject(Table tbl, HiveLockObjectData lockData) {
-    this(new String[] {tbl.getDbName(), MetaStoreUtils.encodeTableName(tbl.getTableName())}, lockData);
+    this(new String[] {tbl.getDbName(), org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.encodeTableName(tbl.getTableName())}, lockData);
   }
 
   public HiveLockObject(Partition par, HiveLockObjectData lockData) {
     this(new String[] {par.getTable().getDbName(),
-        MetaStoreUtils.encodeTableName(par.getTable().getTableName()), par.getName()}, lockData);
+        org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.encodeTableName(par.getTable().getTableName()), par.getName()}, lockData);
   }
 
   public HiveLockObject(DummyPartition par, HiveLockObjectData lockData) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java
index 4add83600c..5737c66f24 100644
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java
@@ -48,7 +48,6 @@
 import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
@@ -101,7 +100,7 @@ public void checkMetastore(String dbName, String tableName,
       throws HiveException, IOException {
 
     if (dbName == null || "".equalsIgnoreCase(dbName)) {
-      dbName = MetaStoreUtils.DEFAULT_DATABASE_NAME;
+      dbName = Warehouse.DEFAULT_DATABASE_NAME;
     }
 
     try {
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteQueryUsingAggregateIndexCtx.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteQueryUsingAggregateIndexCtx.java
index dcea0e5a75..12e2e24340 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteQueryUsingAggregateIndexCtx.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteQueryUsingAggregateIndexCtx.java
@@ -27,7 +27,6 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.exec.ColumnInfo;
 import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
 import org.apache.hadoop.hive.ql.exec.GroupByOperator;
@@ -181,7 +180,7 @@ private void replaceTableScanProcess(TableScanOperator scanOperator) throws Sema
     TableScanDesc indexTableScanDesc = new TableScanDesc(indexTableHandle);
     indexTableScanDesc.setGatherStats(false);
 
-    String k = MetaStoreUtils.encodeTableName(indexTableName) + Path.SEPARATOR;
+    String k = org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.encodeTableName(indexTableName) + Path.SEPARATOR;
     indexTableScanDesc.setStatsAggPrefix(k);
     scanOperator.setConf(indexTableScanDesc);
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
index aa4c660c26..ed004fea5d 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
@@ -39,7 +39,6 @@
 import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.Database;
@@ -839,7 +838,7 @@ private static void createReplImportTasks(
       tblDesc.setLocation(
           wh.getDnsPath(new Path(
               wh.getDefaultDatabasePath(tblDesc.getDatabaseName()),
-              MetaStoreUtils.encodeTableName(tblDesc.getTableName().toLowerCase())
+              org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.encodeTableName(tblDesc.getTableName().toLowerCase())
           )
       ).toString());
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/MacroSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/MacroSemanticAnalyzer.java
index fe065f830b..0ae95beb39 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/MacroSemanticAnalyzer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/MacroSemanticAnalyzer.java
@@ -28,11 +28,11 @@
 import java.util.Stack;
 import java.util.LinkedHashSet;
 
+import org.apache.hadoop.hive.metastore.Warehouse;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.ql.ErrorMsg;
@@ -170,7 +170,7 @@ private void analyzeDropMacro(ASTNode ast) throws SemanticException {
   }
 
   private void addEntities() throws SemanticException {
-    Database database = getDatabase(MetaStoreUtils.DEFAULT_DATABASE_NAME);
+    Database database = getDatabase(Warehouse.DEFAULT_DATABASE_NAME);
     // This restricts macro creation to privileged users.
     outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL_NO_LOCK));
   }
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 1c74779dec..182678fdcb 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -10506,7 +10506,7 @@ private void setupStats(TableScanDesc tsDesc, QBParseInfo qbp, Table tab, String
       // db_name.table_name + partitionSec
       // as the prefix for easy of read during explain and debugging.
       // Currently, partition spec can only be static partition.
-      String k = MetaStoreUtils.encodeTableName(tblName) + Path.SEPARATOR;
+      String k = org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.encodeTableName(tblName) + Path.SEPARATOR;
       tsDesc.setStatsAggPrefix(tab.getDbName()+"."+k);
 
       // set up WriteEntity for replication
diff --git ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLAuthorizationUtils.java ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLAuthorizationUtils.java
index 462963ab09..6bd29b495f 100644
--- ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLAuthorizationUtils.java
+++ ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLAuthorizationUtils.java
@@ -29,6 +29,7 @@
 import java.util.Map;
 import java.util.Set;
 
+import org.apache.hadoop.hive.metastore.Warehouse;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.FileStatus;
@@ -39,7 +40,6 @@
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.IMetaStoreClient;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
 import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
@@ -274,7 +274,7 @@ private static boolean isOwner(IMetaStoreClient metastoreClient, String userName
       return userName.equals(thriftTableObj.getOwner());
     }
     case DATABASE: {
-      if (MetaStoreUtils.DEFAULT_DATABASE_NAME.equalsIgnoreCase(hivePrivObject.getDbname())) {
+      if (Warehouse.DEFAULT_DATABASE_NAME.equalsIgnoreCase(hivePrivObject.getDbname())) {
         return true;
       }
       Database db = null;
diff --git ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
index 8b64407d53..97c8124293 100644
--- ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
+++ ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hive.ql.session;
 
-import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME;
+import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
 
 import java.io.File;
 import java.io.IOException;
diff --git ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java
index b4898e2d33..c0c496feea 100644
--- ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java
+++ ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java
@@ -26,13 +26,13 @@
 
 import junit.framework.TestCase;
 
+import org.apache.hadoop.hive.metastore.Warehouse;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.CompilationOpContext;
 import org.apache.hadoop.hive.ql.DriverContext;
 import org.apache.hadoop.hive.ql.QueryState;
@@ -137,7 +137,7 @@
       cols.add("key");
       cols.add("value");
       for (String src : srctables) {
-        db.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, src, true, true);
+        db.dropTable(Warehouse.DEFAULT_DATABASE_NAME, src, true, true);
         db.createTable(src, cols, null, TextInputFormat.class,
             HiveIgnoreKeyTextOutputFormat.class);
         db.loadTable(hadoopDataFile[i], src, false, true, false, false, false);
@@ -492,7 +492,7 @@ private void executePlan() throws Exception {
   public void testMapPlan1() throws Exception {
 
     LOG.info("Beginning testMapPlan1");
-    populateMapPlan1(db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, "src"));
+    populateMapPlan1(db.getTable(Warehouse.DEFAULT_DATABASE_NAME, "src"));
     executePlan();
     fileDiff("lt100.txt.deflate", "mapplan1.out");
   }
@@ -500,7 +500,7 @@ public void testMapPlan1() throws Exception {
   public void testMapPlan2() throws Exception {
 
     LOG.info("Beginning testMapPlan2");
-    populateMapPlan2(db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, "src"));
+    populateMapPlan2(db.getTable(Warehouse.DEFAULT_DATABASE_NAME, "src"));
     executePlan();
     fileDiff("lt100.txt", "mapplan2.out");
   }
@@ -508,7 +508,7 @@ public void testMapPlan2() throws Exception {
   public void testMapRedPlan1() throws Exception {
 
     LOG.info("Beginning testMapRedPlan1");
-    populateMapRedPlan1(db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME,
+    populateMapRedPlan1(db.getTable(Warehouse.DEFAULT_DATABASE_NAME,
         "src"));
     executePlan();
     fileDiff("kv1.val.sorted.txt", "mapredplan1.out");
@@ -517,7 +517,7 @@ public void testMapRedPlan1() throws Exception {
   public void testMapRedPlan2() throws Exception {
 
     LOG.info("Beginning testMapPlan2");
-    populateMapRedPlan2(db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME,
+    populateMapRedPlan2(db.getTable(Warehouse.DEFAULT_DATABASE_NAME,
         "src"));
     executePlan();
     fileDiff("lt100.sorted.txt", "mapredplan2.out");
@@ -526,8 +526,8 @@ public void testMapRedPlan2() throws Exception {
   public void testMapRedPlan3() throws Exception {
 
     LOG.info("Beginning testMapPlan3");
-    populateMapRedPlan3(db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME,
-        "src"), db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, "src2"));
+    populateMapRedPlan3(db.getTable(Warehouse.DEFAULT_DATABASE_NAME,
+        "src"), db.getTable(Warehouse.DEFAULT_DATABASE_NAME, "src2"));
     executePlan();
     fileDiff("kv1kv2.cogroup.txt", "mapredplan3.out");
   }
@@ -535,7 +535,7 @@ public void testMapRedPlan3() throws Exception {
   public void testMapRedPlan4() throws Exception {
 
     LOG.info("Beginning testMapPlan4");
-    populateMapRedPlan4(db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME,
+    populateMapRedPlan4(db.getTable(Warehouse.DEFAULT_DATABASE_NAME,
        "src"));
     executePlan();
     fileDiff("kv1.string-sorted.txt", "mapredplan4.out");
testMapRedPlan5() throws Exception { LOG.info("Beginning testMapPlan5"); - populateMapRedPlan5(db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, + populateMapRedPlan5(db.getTable(Warehouse.DEFAULT_DATABASE_NAME, "src")); executePlan(); fileDiff("kv1.string-sorted.txt", "mapredplan5.out"); @@ -553,7 +553,7 @@ public void testMapRedPlan5() throws Exception { public void testMapRedPlan6() throws Exception { LOG.info("Beginning testMapPlan6"); - populateMapRedPlan6(db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, + populateMapRedPlan6(db.getTable(Warehouse.DEFAULT_DATABASE_NAME, "src")); executePlan(); fileDiff("lt100.sorted.txt", "mapredplan6.out"); diff --git ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java index 91eb033c87..dc7d51d77e 100755 --- ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java +++ ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hive.ql.metadata; -import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; import java.util.ArrayList; import java.util.Arrays; @@ -33,7 +33,6 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.PartitionDropOptions; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.Database; @@ -118,13 +117,13 @@ public void testTable() throws Throwable { // create a simple table and test create, drop, get String tableName = "table_for_testtable"; try { - hm.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName); + hm.dropTable(Warehouse.DEFAULT_DATABASE_NAME, tableName); } catch (HiveException e1) { e1.printStackTrace(); assertTrue("Unable to drop table", false); } - Table tbl = new Table(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName); + Table tbl = new Table(Warehouse.DEFAULT_DATABASE_NAME, tableName); List fields = tbl.getCols(); fields.add(new FieldSchema("col1", serdeConstants.INT_TYPE_NAME, "int -- first column")); @@ -184,9 +183,9 @@ public void testTable() throws Throwable { validateTable(tbl, tableName); try { - hm.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName, true, + hm.dropTable(Warehouse.DEFAULT_DATABASE_NAME, tableName, true, false); - Table ft2 = hm.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, + Table ft2 = hm.getTable(Warehouse.DEFAULT_DATABASE_NAME, tableName, false); assertNull("Unable to drop table ", ft2); } catch (HiveException e) { @@ -216,12 +215,12 @@ public void testThriftTable() throws Throwable { String tableName = "table_for_test_thrifttable"; try { try { - hm.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName); + hm.dropTable(Warehouse.DEFAULT_DATABASE_NAME, tableName); } catch (HiveException e1) { System.err.println(StringUtils.stringifyException(e1)); assertTrue("Unable to drop table", false); } - Table tbl = new Table(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName); + Table tbl = new Table(Warehouse.DEFAULT_DATABASE_NAME, tableName); tbl.setInputFormatClass(SequenceFileInputFormat.class.getName()); tbl.setOutputFormatClass(SequenceFileOutputFormat.class.getName()); tbl.setSerializationLib(ThriftDeserializer.class.getName()); @@ -308,7 +307,7 @@ private void validateTable(Table tbl, String tableName) throws MetaException { // (create 
table sets it to empty (non null) structures) tbl.getTTable().setPrivilegesIsSet(false); - ft = hm.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName); + ft = hm.getTable(Warehouse.DEFAULT_DATABASE_NAME, tableName); assertNotNull("Unable to fetch table", ft); ft.checkValidity(hiveConf); assertEquals("Table names didn't match for table: " + tableName, tbl @@ -526,7 +525,7 @@ private void cleanUpTableQuietly(String dbName, String tableName) { * @throws Exception on failure. */ public void testDropPartitionsWithPurge() throws Exception { - String dbName = MetaStoreUtils.DEFAULT_DATABASE_NAME; + String dbName = Warehouse.DEFAULT_DATABASE_NAME; String tableName = "table_for_testDropPartitionsWithPurge"; try { @@ -589,7 +588,7 @@ public void testDropPartitionsWithPurge() throws Exception { */ public void testAutoPurgeTablesAndPartitions() throws Throwable { - String dbName = MetaStoreUtils.DEFAULT_DATABASE_NAME; + String dbName = Warehouse.DEFAULT_DATABASE_NAME; String tableName = "table_for_testAutoPurgeTablesAndPartitions"; try { @@ -643,7 +642,7 @@ public void testPartition() throws Throwable { try { String tableName = "table_for_testpartition"; try { - hm.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName); + hm.dropTable(Warehouse.DEFAULT_DATABASE_NAME, tableName); } catch (HiveException e) { System.err.println(StringUtils.stringifyException(e)); assertTrue("Unable to drop table: " + tableName, false); @@ -664,7 +663,7 @@ public void testPartition() throws Throwable { } Table tbl = null; try { - tbl = hm.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName); + tbl = hm.getTable(Warehouse.DEFAULT_DATABASE_NAME, tableName); } catch (HiveException e) { System.err.println(StringUtils.stringifyException(e)); assertTrue("Unable to fetch table: " + tableName, false); @@ -679,7 +678,7 @@ public void testPartition() throws Throwable { System.err.println(StringUtils.stringifyException(e)); assertTrue("Unable to create partition for table: " + tableName, false); } - hm.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName); + hm.dropTable(Warehouse.DEFAULT_DATABASE_NAME, tableName); } catch (Throwable e) { System.err.println(StringUtils.stringifyException(e)); System.err.println("testPartition() failed"); @@ -696,15 +695,15 @@ public void testIndex() throws Throwable { try{ // create a simple table String tableName = "table_for_testindex"; - String qTableName = MetaStoreUtils.DEFAULT_DATABASE_NAME + "." + tableName; + String qTableName = Warehouse.DEFAULT_DATABASE_NAME + "." + tableName; try { - hm.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName); + hm.dropTable(Warehouse.DEFAULT_DATABASE_NAME, tableName); } catch (HiveException e) { e.printStackTrace(); assertTrue("Unable to drop table", false); } - Table tbl = new Table(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName); + Table tbl = new Table(Warehouse.DEFAULT_DATABASE_NAME, tableName); List fields = tbl.getCols(); fields.add(new FieldSchema("col1", serdeConstants.INT_TYPE_NAME, "int -- first column")); @@ -731,7 +730,7 @@ public void testIndex() throws Throwable { List indexedCols = new ArrayList(); indexedCols.add("col1"); String indexTableName = "index_on_table_for_testindex_table"; - String qIndexTableName = MetaStoreUtils.DEFAULT_DATABASE_NAME + "." + indexTableName; + String qIndexTableName = Warehouse.DEFAULT_DATABASE_NAME + "." 
+ indexTableName; boolean deferredRebuild = true; String inputFormat = SequenceFileInputFormat.class.getName(); String outputFormat = SequenceFileOutputFormat.class.getName(); @@ -776,7 +775,7 @@ public void testIndex() throws Throwable { // Drop index try { - hm.dropIndex(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName, indexName, false, true); + hm.dropIndex(Warehouse.DEFAULT_DATABASE_NAME, tableName, indexName, false, true); } catch (HiveException e) { System.err.println(StringUtils.stringifyException(e)); assertTrue("Unable to drop index: " + indexName, false); diff --git ql/src/test/org/apache/hadoop/hive/ql/session/TestSessionState.java ql/src/test/org/apache/hadoop/hive/ql/session/TestSessionState.java index fdebb94916..64a90c56d7 100644 --- ql/src/test/org/apache/hadoop/hive/ql/session/TestSessionState.java +++ ql/src/test/org/apache/hadoop/hive/ql/session/TestSessionState.java @@ -20,21 +20,18 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; -import java.io.BufferedReader; import java.io.File; import java.io.IOException; -import java.io.InputStreamReader; import java.lang.reflect.Method; import java.util.Arrays; import java.util.Collection; import org.apache.commons.io.FileUtils; -import org.apache.commons.io.IOUtils; +import org.apache.hadoop.hive.metastore.Warehouse; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hive.common.util.HiveTestUtils; import org.junit.After; import org.junit.Assert; @@ -112,7 +109,7 @@ public void tearDown(){ @Test public void testgetDbName() throws Exception { //check that we start with default db - assertEquals(MetaStoreUtils.DEFAULT_DATABASE_NAME, + assertEquals(Warehouse.DEFAULT_DATABASE_NAME, SessionState.get().getCurrentDatabase()); final String newdb = "DB_2"; @@ -123,7 +120,7 @@ public void testgetDbName() throws Exception { //verify that a new sessionstate has default db SessionState.start(new HiveConf()); - assertEquals(MetaStoreUtils.DEFAULT_DATABASE_NAME, + assertEquals(Warehouse.DEFAULT_DATABASE_NAME, SessionState.get().getCurrentDatabase()); } diff --git standalone-metastore/pom.xml standalone-metastore/pom.xml index 5cdbbcb4b3..d91b22de50 100644 --- standalone-metastore/pom.xml +++ standalone-metastore/pom.xml @@ -63,6 +63,11 @@ <version>${dropwizard.version}</version> </dependency> <dependency> + <groupId>org.apache.commons</groupId> + <artifactId>commons-lang3</artifactId> + <version>${commons-lang3.version}</version> + </dependency> + <dependency> <groupId>org.apache.hadoop</groupId> <artifactId>hadoop-common</artifactId> <version>${hadoop.version}</version> @@ -80,6 +85,12 @@ <groupId>org.apache.hadoop</groupId> + <artifactId>hadoop-distcp</artifactId> + <version>${hadoop.version}</version> + <scope>provided</scope> + </dependency> + <dependency> + <groupId>org.apache.hadoop</groupId> <artifactId>hadoop-hdfs</artifactId> <version>${hadoop.version}</version> <optional>true</optional> diff --git metastore/src/java/org/apache/hadoop/hive/metastore/ReplChangeManager.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ReplChangeManager.java similarity index 89% rename from metastore/src/java/org/apache/hadoop/hive/metastore/ReplChangeManager.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ReplChangeManager.java index 88d6a7a1e8..3ad0a57f4d 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/ReplChangeManager.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ReplChangeManager.java @@ -33,10 +33,10 @@ import org.apache.hadoop.fs.Trash; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hive.common.FileUtils; -import 
org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; +import org.apache.hadoop.hive.metastore.utils.FileUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.StringUtils; import org.slf4j.Logger; @@ -49,7 +49,7 @@ private static boolean inited = false; private static boolean enabled = false; private static Path cmroot; - private static HiveConf hiveConf; + private static Configuration conf; private String msUser; private String msGroup; private FileSystem fs; @@ -63,22 +63,22 @@ COPY } - public static ReplChangeManager getInstance(HiveConf hiveConf) throws MetaException { + public static ReplChangeManager getInstance(Configuration conf) throws MetaException { if (instance == null) { - instance = new ReplChangeManager(hiveConf); + instance = new ReplChangeManager(conf); } return instance; } - private ReplChangeManager(HiveConf hiveConf) throws MetaException { + private ReplChangeManager(Configuration conf) throws MetaException { try { if (!inited) { - if (hiveConf.getBoolVar(HiveConf.ConfVars.REPLCMENABLED)) { + if (MetastoreConf.getBoolVar(conf, ConfVars.REPLCMENABLED)) { ReplChangeManager.enabled = true; - ReplChangeManager.cmroot = new Path(hiveConf.get(HiveConf.ConfVars.REPLCMDIR.varname)); - ReplChangeManager.hiveConf = hiveConf; + ReplChangeManager.cmroot = new Path(MetastoreConf.getVar(conf, ConfVars.REPLCMDIR)); + ReplChangeManager.conf = conf; - fs = cmroot.getFileSystem(hiveConf); + fs = cmroot.getFileSystem(conf); // Create cmroot with permission 700 if not exist if (!fs.exists(cmroot)) { fs.mkdirs(cmroot); @@ -132,7 +132,7 @@ int recycle(Path path, RecycleType type, boolean ifPurge) throws MetaException { } } else { String fileCheckSum = checksumFor(path, fs); - Path cmPath = getCMPath(hiveConf, fileCheckSum); + Path cmPath = getCMPath(conf, fileCheckSum); // set timestamp before moving to cmroot, so we can // avoid race condition CM remove the file before setting @@ -164,7 +164,7 @@ int recycle(Path path, RecycleType type, boolean ifPurge) throws MetaException { // It is possible to have a file with same checksum in cmPath but the content is // partially copied or corrupted. In this case, just overwrite the existing file with // new one. 
- success = FileUtils.copy(fs, path, fs, cmPath, false, true, hiveConf); + success = FileUtils.copy(fs, path, fs, cmPath, false, true, conf); break; } default: @@ -257,26 +257,26 @@ static Path getCMPath(Configuration conf, String checkSum) throws IOException, M * matches, return the file; otherwise, use chksumString to retrieve it from cmroot * @param src Original file location * @param checksumString Checksum of the original file - * @param hiveConf + * @param conf * @return Corresponding FileStatus object */ static public FileStatus getFileStatus(Path src, String checksumString, - HiveConf hiveConf) throws MetaException { + Configuration conf) throws MetaException { try { - FileSystem srcFs = src.getFileSystem(hiveConf); + FileSystem srcFs = src.getFileSystem(conf); if (checksumString == null) { return srcFs.getFileStatus(src); } if (!srcFs.exists(src)) { - return srcFs.getFileStatus(getCMPath(hiveConf, checksumString)); + return srcFs.getFileStatus(getCMPath(conf, checksumString)); } String currentChecksumString = checksumFor(src, srcFs); if (currentChecksumString == null || checksumString.equals(currentChecksumString)) { return srcFs.getFileStatus(src); } else { - return srcFs.getFileStatus(getCMPath(hiveConf, checksumString)); + return srcFs.getFileStatus(getCMPath(conf, checksumString)); } } catch (IOException e) { throw new MetaException(StringUtils.stringifyException(e)); @@ -325,12 +325,12 @@ public static boolean isCMFileUri(Path fromPath, FileSystem srcFs) { static class CMClearer implements Runnable { private Path cmroot; private long secRetain; - private HiveConf hiveConf; + private Configuration conf; - CMClearer(String cmrootString, long secRetain, HiveConf hiveConf) { + CMClearer(String cmrootString, long secRetain, Configuration conf) { this.cmroot = new Path(cmrootString); this.secRetain = secRetain; - this.hiveConf = hiveConf; + this.conf = conf; } @Override @@ -339,7 +339,7 @@ public void run() { LOG.info("CMClearer started"); long now = System.currentTimeMillis(); - FileSystem fs = cmroot.getFileSystem(hiveConf); + FileSystem fs = cmroot.getFileSystem(conf); FileStatus[] files = fs.listStatus(cmroot); for (FileStatus file : files) { @@ -347,7 +347,7 @@ public void run() { if (now - modifiedTime > secRetain*1000) { try { if (fs.getXAttrs(file.getPath()).containsKey(REMAIN_IN_TRASH_TAG)) { - boolean succ = Trash.moveToAppropriateTrash(fs, file.getPath(), hiveConf); + boolean succ = Trash.moveToAppropriateTrash(fs, file.getPath(), conf); if (succ) { if (LOG.isDebugEnabled()) { LOG.debug("Move " + file.toString() + " to trash"); @@ -377,16 +377,16 @@ public void run() { } // Schedule CMClearer thread. 
Will be invoked by metastore - static void scheduleCMClearer(HiveConf hiveConf) { - if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.REPLCMENABLED)) { + static void scheduleCMClearer(Configuration conf) { + if (MetastoreConf.getBoolVar(conf, ConfVars.REPLCMENABLED)) { ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor( new BasicThreadFactory.Builder() .namingPattern("cmclearer-%d") .daemon(true) .build()); - executor.scheduleAtFixedRate(new CMClearer(hiveConf.get(HiveConf.ConfVars.REPLCMDIR.varname), - hiveConf.getTimeVar(ConfVars.REPLCMRETIAN, TimeUnit.SECONDS), hiveConf), - 0, hiveConf.getTimeVar(ConfVars.REPLCMINTERVAL, TimeUnit.SECONDS), TimeUnit.SECONDS); + executor.scheduleAtFixedRate(new CMClearer(MetastoreConf.getVar(conf, ConfVars.REPLCMDIR), + MetastoreConf.getTimeVar(conf, ConfVars.REPLCMRETIAN, TimeUnit.SECONDS), conf), + 0, MetastoreConf.getTimeVar(conf, ConfVars.REPLCMINTERVAL, TimeUnit.SECONDS), TimeUnit.SECONDS); } } } diff --git metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java similarity index 88% rename from metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java index 1dd50ded6c..649437facb 100755 --- metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,9 +18,6 @@ package org.apache.hadoop.hive.metastore; -import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DATABASE_WAREHOUSE_SUFFIX; -import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME; - import java.io.FileNotFoundException; import java.io.IOException; import java.util.AbstractList; @@ -33,7 +30,12 @@ import java.util.regex.Pattern; import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; +import org.apache.hadoop.hive.metastore.utils.FileUtils; import org.apache.hadoop.hive.metastore.utils.HdfsUtils; +import org.apache.hadoop.hive.metastore.utils.JavaUtils; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -42,10 +44,6 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsAction; -import org.apache.hadoop.hive.common.FileUtils; -import org.apache.hadoop.hive.common.HiveStatsUtils; -import org.apache.hadoop.hive.common.JavaUtils; -import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.ReplChangeManager.RecycleType; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; @@ -59,6 +57,11 @@ * This class represents a warehouse where data of Hive tables is stored */ public class Warehouse { + public static final String DEFAULT_DATABASE_NAME = "default"; + public static final String DEFAULT_DATABASE_COMMENT = "Default Hive database"; + public static final String DEFAULT_SERIALIZATION_FORMAT = "1"; + public static final String DATABASE_WAREHOUSE_SUFFIX = ".db"; + 
private Path whRoot; private final Configuration conf; private final String whRootString; @@ -71,21 +74,19 @@ public Warehouse(Configuration conf) throws MetaException { this.conf = conf; - whRootString = HiveConf.getVar(conf, HiveConf.ConfVars.METASTOREWAREHOUSE); + whRootString = MetastoreConf.getVar(conf, ConfVars.WAREHOUSE); if (StringUtils.isBlank(whRootString)) { - throw new MetaException(HiveConf.ConfVars.METASTOREWAREHOUSE.varname + throw new MetaException(ConfVars.WAREHOUSE.varname + " is not set in the config or blank"); } fsHandler = getMetaStoreFsHandler(conf); - cm = ReplChangeManager.getInstance((HiveConf)conf); - storageAuthCheck = HiveConf.getBoolVar(conf, - HiveConf.ConfVars.METASTORE_AUTHORIZATION_STORAGE_AUTH_CHECKS); + cm = ReplChangeManager.getInstance(conf); + storageAuthCheck = MetastoreConf.getBoolVar(conf, ConfVars.AUTHORIZATION_STORAGE_AUTH_CHECKS); } private MetaStoreFS getMetaStoreFsHandler(Configuration conf) throws MetaException { - String handlerClassStr = HiveConf.getVar(conf, - HiveConf.ConfVars.HIVE_METASTORE_FS_HANDLER_CLS); + String handlerClassStr = MetastoreConf.getVar(conf, ConfVars.FS_HANDLER_CLS); try { Class handlerClass = (Class) Class .forName(handlerClassStr, true, JavaUtils.getClassLoader()); @@ -187,10 +188,10 @@ public static String getQualifiedName(Partition partition) { } public boolean mkdirs(Path f) throws MetaException { - FileSystem fs = null; + FileSystem fs; try { fs = getFs(f); - return FileUtils.mkdir(fs, f, conf); + return FileUtils.mkdir(fs, f); } catch (IOException e) { MetaStoreUtils.logAndThrowMetaException(e); } @@ -205,7 +206,7 @@ public boolean renameDir(Path sourcePath, Path destPath, boolean needCmRecycle) cm.recycle(sourcePath, RecycleType.COPY, true); } FileSystem fs = getFs(sourcePath); - return FileUtils.rename(fs, sourcePath, destPath, conf); + return FileUtils.rename(fs, sourcePath, destPath); } catch (Exception ex) { MetaStoreUtils.logAndThrowMetaException(ex); } @@ -267,38 +268,11 @@ public boolean isWritable(Path path) throws IOException { } } - /* - // NOTE: This is for generating the internal path name for partitions. Users - // should always use the MetaStore API to get the path name for a partition. - // Users should not directly take partition values and turn it into a path - // name by themselves, because the logic below may change in the future. - // - // In the future, it's OK to add new chars to the escape list, and old data - // won't be corrupt, because the full path name in metastore is stored. - // In that case, Hive will continue to read the old data, but when it creates - // new partitions, it will use new names. - static BitSet charToEscape = new BitSet(128); - static { - for (char c = 0; c < ' '; c++) { - charToEscape.set(c); - } - char[] clist = new char[] { '"', '#', '%', '\'', '*', '/', ':', '=', '?', - '\\', '\u00FF' }; - for (char c : clist) { - charToEscape.set(c); - } - } - - static boolean needsEscaping(char c) { - return c >= 0 && c < charToEscape.size() && charToEscape.get(c); - } - */ - - static String escapePathName(String path) { + private static String escapePathName(String path) { return FileUtils.escapePathName(path); } - static String unescapePathName(String path) { + private static String unescapePathName(String path) { return FileUtils.unescapePathName(path); } @@ -402,13 +376,13 @@ public static String makeDynamicPartName(Map spec) { if (name == null || name.isEmpty()) { throw new MetaException("Partition name is invalid. 
" + name); } - LinkedHashMap partSpec = new LinkedHashMap(); + LinkedHashMap partSpec = new LinkedHashMap<>(); makeSpecFromName(partSpec, new Path(name)); return partSpec; } public static void makeSpecFromName(Map partSpec, Path currPath) { - List kvs = new ArrayList(); + List kvs = new ArrayList<>(); do { String component = currPath.getName(); Matcher m = pat.matcher(component); @@ -434,11 +408,11 @@ public static void makeSpecFromName(Map partSpec, Path currPath) if (name == null || name.isEmpty()) { throw new MetaException("Partition name is invalid. " + name); } - LinkedHashMap partSpec = new LinkedHashMap(); + LinkedHashMap partSpec = new LinkedHashMap<>(); Path currPath = new Path(name); - List kvs = new ArrayList(); + List kvs = new ArrayList<>(); do { String component = currPath.getName(); Matcher m = pat.matcher(component); @@ -523,7 +497,7 @@ public Path getPartitionPath(Database db, Table table, List vals) } public boolean isDir(Path f) throws MetaException { - FileSystem fs = null; + FileSystem fs; try { fs = getFs(f); FileStatus fstatus = fs.getFileStatus(f); @@ -563,7 +537,7 @@ public static String makePartName(List partCols, try { Path path = new Path(location); FileSystem fileSys = path.getFileSystem(conf); - return HiveStatsUtils.getFileStatusRecurse(path, -1, fileSys); + return FileUtils.getFileStatusRecurse(path, -1, fileSys); } catch (IOException ioe) { MetaStoreUtils.logAndThrowMetaException(ioe); } @@ -571,7 +545,8 @@ public static String makePartName(List partCols, } /** - * @param table + * @param db database + * @param table table * @return array of FileStatus objects corresponding to the files making up the passed * unpartitioned table */ @@ -580,7 +555,7 @@ public static String makePartName(List partCols, Path tablePath = getDnsPath(new Path(table.getSd().getLocation())); try { FileSystem fileSys = tablePath.getFileSystem(conf); - return HiveStatsUtils.getFileStatusRecurse(tablePath, -1, fileSys); + return FileUtils.getFileStatusRecurse(tablePath, -1, fileSys); } catch (IOException ioe) { MetaStoreUtils.logAndThrowMetaException(ioe); } @@ -609,7 +584,7 @@ public static String makePartName(List partCols, } throw new MetaException(errorStr + "]"); } - List colNames = new ArrayList(); + List colNames = new ArrayList<>(); for (FieldSchema col: partCols) { colNames.add(col.getName()); } @@ -619,14 +594,14 @@ public static String makePartName(List partCols, public static List getPartValuesFromPartName(String partName) throws MetaException { LinkedHashMap partSpec = Warehouse.makeSpecFromName(partName); - List values = new ArrayList(); + List values = new ArrayList<>(); values.addAll(partSpec.values()); return values; } public static Map makeSpecFromValues(List partCols, List values) { - Map spec = new LinkedHashMap(); + Map spec = new LinkedHashMap<>(); for (int i = 0; i < values.size(); i++) { spec.put(partCols.get(i).getName(), values.get(i)); } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java index 0fb878a267..5b7e6e192f 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java @@ -534,6 +534,14 @@ public static ConfVars getMetaConf(String name) { "Inteval for cmroot cleanup thread."), REPLCMENABLED("metastore.repl.cm.enabled", "hive.repl.cm.enabled", false, "Turn on ChangeManager, so 
delete files will go to cmrootdir."), + REPL_COPYFILE_MAXNUMFILES("metastore.repl.copyfile.maxnumfiles", + "hive.exec.copyfile.maxnumfiles", 1L, + "Maximum number of files Hive uses to do sequential HDFS copies between directories. " + + "Distributed copies (distcp) will be used instead for larger numbers of files so that copies can be done faster."), + REPL_COPYFILE_MAXSIZE("metastore.repl.copyfile.maxsize", + "hive.exec.copyfile.maxsize", 32L * 1024 * 1024 /*32M*/, + "Maximum file size (in bytes) that Hive uses to do single HDFS copies between directories. " + + "Distributed copies (distcp) will be used instead for bigger files so that copies can be done faster."), SCHEMA_INFO_CLASS("metastore.schema.info.class", "hive.metastore.schema.info.class", "org.apache.hadoop.hive.metastore.MetaStoreSchemaInfo", "Fully qualified class name for the metastore schema information class \n" diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/FileUtils.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/FileUtils.java index 2310df6c82..da0ee80a9e 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/FileUtils.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/FileUtils.java @@ -18,29 +18,47 @@ package org.apache.hadoop.hive.metastore.utils; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.ContentSummary; +import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.fs.Trash; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; +import java.util.ArrayList; +import java.util.BitSet; +import java.util.Collections; +import java.util.List; public class FileUtils { private static final Logger LOG = LoggerFactory.getLogger(FileUtils.class); + public static final PathFilter HIDDEN_FILES_PATH_FILTER = new PathFilter() { + @Override + public boolean accept(Path p) { + String name = p.getName(); + return !name.startsWith("_") && !name.startsWith("."); + } + }; + /** * Move a particular file or directory to the trash. * @param fs FileSystem to use * @param f path of file or directory to move to trash. - * @param conf + * @param conf configuration object * @return true if move successful * @throws IOException */ public static boolean moveToTrash(FileSystem fs, Path f, Configuration conf, boolean purge) throws IOException { LOG.debug("deleting " + f); - boolean result = false; + boolean result; try { if(purge) { LOG.debug("purge is set to true. Not moving to Trash " + f); @@ -63,4 +81,286 @@ public static boolean moveToTrash(FileSystem fs, Path f, Configuration conf, boo } return result; } + + /** + * Copies files between filesystems. 
+ */ + public static boolean copy(FileSystem srcFS, Path src, + FileSystem dstFS, Path dst, + boolean deleteSource, + boolean overwrite, + Configuration conf) throws IOException { + boolean copied = false; + boolean triedDistcp = false; + + /* Run distcp if source file/dir is too big */ + if (srcFS.getUri().getScheme().equals("hdfs")) { + ContentSummary srcContentSummary = srcFS.getContentSummary(src); + if (srcContentSummary.getFileCount() > + MetastoreConf.getLongVar(conf, ConfVars.REPL_COPYFILE_MAXNUMFILES) + && srcContentSummary.getLength() > + MetastoreConf.getLongVar(conf, ConfVars.REPL_COPYFILE_MAXSIZE)) { + + LOG.info("Source is " + srcContentSummary.getLength() + " bytes. (MAX: " + + MetastoreConf.getLongVar(conf, ConfVars.REPL_COPYFILE_MAXSIZE) + ")"); + LOG.info("Source is " + srcContentSummary.getFileCount() + " files. (MAX: " + + MetastoreConf.getLongVar(conf, ConfVars.REPL_COPYFILE_MAXNUMFILES) + ")"); + LOG.info("Launch distributed copy (distcp) job."); + triedDistcp = true; + copied = distCp(srcFS, Collections.singletonList(src), dst, deleteSource, null, conf); + } + } + if (!triedDistcp) { + // Note : Currently, this implementation does not "fall back" to regular copy if distcp + // is tried and it fails. We depend upon that behaviour in cases like replication, + // wherein if distcp fails, there is good reason to not plod along with a trivial + // implementation, and fail instead. + copied = FileUtil.copy(srcFS, src, dstFS, dst, deleteSource, overwrite, conf); + } + return copied; + } + + private static boolean distCp(FileSystem srcFS, List<Path> srcPaths, Path dst, + boolean deleteSource, String doAsUser, + Configuration conf) throws IOException { + boolean copied; + if (doAsUser == null){ + copied = HdfsUtils.runDistCp(srcPaths, dst, conf); + } else { + copied = HdfsUtils.runDistCpAs(srcPaths, dst, conf, doAsUser); + } + if (copied && deleteSource) { + for (Path path : srcPaths) { + srcFS.delete(path, true); + } + } + return copied; + } + + /** + * Creates the directory and all necessary parent directories. + * @param fs FileSystem to use + * @param f path to create. + * @return true if directory created successfully. False otherwise, including if it exists. + * @throws IOException exception in creating the directory + */ + public static boolean mkdir(FileSystem fs, Path f) throws IOException { + LOG.info("Creating directory if it doesn't exist: " + f); + return fs.mkdirs(f); + } + + /** + * Rename a file. Unlike {@link FileSystem#rename(Path, Path)}, if the destPath already exists + * and is a directory, this will NOT move the sourcePath into it. It will throw an IOException + * instead. + * @param fs file system paths are on + * @param sourcePath source file or directory to move + * @param destPath destination file name. This must be a file and not an existing directory. + * @return result of fs.rename. + * @throws IOException if fs.rename throws it, or if destPath already exists. + */ + public static boolean rename(FileSystem fs, Path sourcePath, Path destPath) throws IOException { + LOG.info("Renaming " + sourcePath + " to " + destPath); + + // If destPath directory exists, rename call will move the sourcePath + // into destPath without failing. So check it before renaming. + if (fs.exists(destPath)) { + throw new IOException("Cannot rename the source path. The destination " + + "path already exists."); + } + return fs.rename(sourcePath, destPath); + } + + // NOTE: This is for generating the internal path name for partitions. 
Users + // should always use the MetaStore API to get the path name for a partition. + // Users should not directly take partition values and turn it into a path + // name by themselves, because the logic below may change in the future. + // + // In the future, it's OK to add new chars to the escape list, and old data + // won't be corrupt, because the full path name in metastore is stored. + // In that case, Hive will continue to read the old data, but when it creates + // new partitions, it will use new names. + // edit : There are some use cases for which adding new chars does not seem + // to be backward compatible - Eg. if partition was created with name having + // a special char that you want to start escaping, and then you try dropping + // the partition with a hive version that now escapes the special char using + // the list below, then the drop partition fails to work. + + private static BitSet charToEscape = new BitSet(128); + static { + for (char c = 0; c < ' '; c++) { + charToEscape.set(c); + } + + /* + * ASCII 01-1F are HTTP control characters that need to be escaped. + * \u000A and \u000D are \n and \r, respectively. + */ + char[] clist = new char[] {'\u0001', '\u0002', '\u0003', '\u0004', + '\u0005', '\u0006', '\u0007', '\u0008', '\u0009', '\n', '\u000B', + '\u000C', '\r', '\u000E', '\u000F', '\u0010', '\u0011', '\u0012', + '\u0013', '\u0014', '\u0015', '\u0016', '\u0017', '\u0018', '\u0019', + '\u001A', '\u001B', '\u001C', '\u001D', '\u001E', '\u001F', + '"', '#', '%', '\'', '*', '/', ':', '=', '?', '\\', '\u007F', '{', + '[', ']', '^'}; + + for (char c : clist) { + charToEscape.set(c); + } + } + + private static boolean needsEscaping(char c) { + return c >= 0 && c < charToEscape.size() && charToEscape.get(c); + } + + public static String escapePathName(String path) { + return escapePathName(path, null); + } + + /** + * Escapes a path name. + * @param path The path to escape. + * @param defaultPath + * The default name for the path, if the given path is empty or null. + * @return An escaped path name. + */ + public static String escapePathName(String path, String defaultPath) { + + // __HIVE_DEFAULT_NULL__ is the system default value for null and empty string. + // TODO: we should allow user to specify default partition or HDFS file location. + if (path == null || path.length() == 0) { + if (defaultPath == null) { + //previously, when path is empty or null and no default path is specified, + // __HIVE_DEFAULT_PARTITION__ was the return value for escapePathName + return "__HIVE_DEFAULT_PARTITION__"; + } else { + return defaultPath; + } + } + + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < path.length(); i++) { + char c = path.charAt(i); + if (needsEscaping(c)) { + sb.append('%'); + sb.append(String.format("%1$02X", (int) c)); + } else { + sb.append(c); + } + } + return sb.toString(); + } + + public static String unescapePathName(String path) { + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < path.length(); i++) { + char c = path.charAt(i); + if (c == '%' && i + 2 < path.length()) { + int code = -1; + try { + code = Integer.parseInt(path.substring(i + 1, i + 3), 16); + } catch (Exception e) { + code = -1; + } + if (code >= 0) { + sb.append((char) code); + i += 2; + continue; + } + } + sb.append(c); + } + return sb.toString(); + } + + /** + * Get all file status from a root path and recursively go deep into certain levels. 
+ * + * @param path + * the root path + * @param level + * the depth of directory to explore + * @param fs + * the file system + * @return array of FileStatus + * @throws IOException + */ + public static FileStatus[] getFileStatusRecurse(Path path, int level, FileSystem fs) + throws IOException { + + // if level is < 0, then return all files/directories under the specified path + if (level < 0) { + List<FileStatus> result = new ArrayList<>(); + try { + FileStatus fileStatus = fs.getFileStatus(path); + FileUtils.listStatusRecursively(fs, fileStatus, result); + } catch (IOException e) { + // globStatus() API returns empty FileStatus[] when the specified path + // does not exist. But getFileStatus() throws IOException. To mimic that + // behavior we will return an empty array on exception. For external + // tables, the path of the table will not exist during table creation + return new FileStatus[0]; + } + return result.toArray(new FileStatus[result.size()]); + } + + // construct a path pattern (e.g., /*/*) to find all dynamically generated paths + StringBuilder sb = new StringBuilder(path.toUri().getPath()); + for (int i = 0; i < level; i++) { + sb.append(Path.SEPARATOR).append("*"); + } + Path pathPattern = new Path(path, sb.toString()); + return fs.globStatus(pathPattern, FileUtils.HIDDEN_FILES_PATH_FILTER); + } + + /** + * Recursively lists status for all files starting from a particular directory (or individual file + * as base case). + * + * @param fs + * file system + * + * @param fileStatus + * starting point in file system + * + * @param results + * receives enumeration of all files found + */ + public static void listStatusRecursively(FileSystem fs, FileStatus fileStatus, + List<FileStatus> results) throws IOException { + + if (fileStatus.isDir()) { + for (FileStatus stat : fs.listStatus(fileStatus.getPath(), HIDDEN_FILES_PATH_FILTER)) { + listStatusRecursively(fs, stat, results); + } + } else { + results.add(fileStatus); + } + } + + public static String makePartName(List<String> partCols, List<String> vals) { + return makePartName(partCols, vals, null); + } + + /** + * Makes a valid partition name. + * @param partCols The partition keys' names + * @param vals The partition values + * @param defaultStr + * The default name given to a partition value if the respective value is empty or null. + * @return An escaped, valid partition name. 
+ */ + public static String makePartName(List<String> partCols, List<String> vals, + String defaultStr) { + StringBuilder name = new StringBuilder(); + for (int i = 0; i < partCols.size(); i++) { + if (i > 0) { + name.append(Path.SEPARATOR); + } + name.append(escapePathName((partCols.get(i)).toLowerCase(), defaultStr)); + name.append('='); + name.append(escapePathName(vals.get(i), defaultStr)); + } + return name.toString(); + } } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/HdfsUtils.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/HdfsUtils.java index 7588c9f119..c10e36f94a 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/HdfsUtils.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/HdfsUtils.java @@ -27,6 +27,8 @@ import org.apache.hadoop.hdfs.client.HdfsAdmin; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.tools.DistCp; +import org.apache.hadoop.tools.DistCpOptions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -34,9 +36,14 @@ import java.io.FileNotFoundException; import java.io.IOException; import java.net.URI; +import java.security.PrivilegedExceptionAction; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; public class HdfsUtils { private static final Logger LOG = LoggerFactory.getLogger(HdfsUtils.class); + private static final String DISTCP_OPTIONS_PREFIX = "distcp.options."; /** * Check the permissions on a file. @@ -122,4 +129,72 @@ private static boolean arrayContains(String[] array, String value) { return false; } + public static boolean runDistCpAs(List<Path> srcPaths, Path dst, Configuration conf, + String doAsUser) throws IOException { + UserGroupInformation proxyUser = UserGroupInformation.createProxyUser( + doAsUser, UserGroupInformation.getLoginUser()); + try { + return proxyUser.doAs(new PrivilegedExceptionAction<Boolean>() { + @Override + public Boolean run() throws Exception { + return runDistCp(srcPaths, dst, conf); + } + }); + } catch (InterruptedException e) { + throw new IOException(e); + } + } + + public static boolean runDistCp(List<Path> srcPaths, Path dst, Configuration conf) + throws IOException { + DistCpOptions options = new DistCpOptions(srcPaths, dst); + options.setSyncFolder(true); + options.setSkipCRC(true); + options.preserve(DistCpOptions.FileAttribute.BLOCKSIZE); + + // Creates the command-line parameters for distcp + List<String> params = constructDistCpParams(srcPaths, dst, conf); + + try { + conf.setBoolean("mapred.mapper.new-api", true); + DistCp distcp = new DistCp(conf, options); + + // HIVE-13704 states that we should use run() instead of execute() due to a hadoop known issue + // added by HADOOP-10459 + if (distcp.run(params.toArray(new String[params.size()])) == 0) { + return true; + } else { + return false; + } + } catch (Exception e) { + throw new IOException("Cannot execute DistCp process: " + e, e); + } finally { + conf.setBoolean("mapred.mapper.new-api", false); + } + } + + private static List<String> constructDistCpParams(List<Path> srcPaths, Path dst, + Configuration conf) { + List<String> params = new ArrayList<>(); + for (Map.Entry<String, String> entry : conf.getPropsWithPrefix(DISTCP_OPTIONS_PREFIX).entrySet()){ + String distCpOption = entry.getKey(); + String distCpVal = entry.getValue(); + params.add("-" + distCpOption); + if ((distCpVal != null) && (!distCpVal.isEmpty())){ + params.add(distCpVal); + } + } + if (params.size() == 0){ + // if no 
entries were added via conf, we fall back to our defaults + params.add("-update"); + params.add("-skipcrccheck"); + params.add("-pb"); + } + for (Path src : srcPaths) { + params.add(src.toString()); + } + params.add(dst.toString()); + return params; + } + } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java index 3ef7e514fd..37fc56bda8 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java @@ -38,4 +38,19 @@ public static void logAndThrowMetaException(Exception e) throws MetaException { throw new MetaException(exInfo); } + public static String encodeTableName(String name) { + // The encoding method is simple, i.e., replace + // all the special characters with the corresponding number in ASCII. + // Note that Unicode is not supported in table names, and we have explicit + // checks for it. + StringBuilder sb = new StringBuilder(); + for (char ch : name.toCharArray()) { + if (Character.isLetterOrDigit(ch) || ch == '_') { + sb.append(ch); + } else { + sb.append('-').append((int) ch).append('-'); + } + } + return sb.toString(); + } }
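
ReplChangeManager.getFileStatus (above) resolves a replicated file either from its original location or from the cmroot/<checksum> archive, depending on whether the source still exists and still matches the recorded checksum. A self-contained sketch of that decision order; useCmrootCopy is a hypothetical helper name, not part of the patch:

    public class CmLookupSketch {
      // Mirrors the lookup order in ReplChangeManager.getFileStatus: returns true
      // when the caller should read the archived cmroot/<checksum> copy instead
      // of the original path.
      static boolean useCmrootCopy(boolean srcExists, String recorded, String current) {
        if (recorded == null) return false;  // no checksum recorded: trust the source
        if (!srcExists) return true;         // source deleted: fall back to cmroot
        if (current == null) return false;   // current checksum unavailable: trust the source
        return !recorded.equals(current);    // source rewritten: fall back to cmroot
      }

      public static void main(String[] args) {
        System.out.println(useCmrootCopy(true, "abc", "abc"));  // false: source intact
        System.out.println(useCmrootCopy(false, "abc", null));  // true: source gone
        System.out.println(useCmrootCopy(true, "abc", "xyz"));  // true: source changed
      }
    }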
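
FileUtils.copy launches distcp only when the source exceeds both REPL_COPYFILE_MAXNUMFILES and REPL_COPYFILE_MAXSIZE, and only for hdfs sources; everything else stays on the sequential FileUtil.copy path. A sketch of that gate, assuming Hadoop's ContentSummary.Builder and hard-coding the default limits (1 file, 32 MB); shouldDistCp is a hypothetical helper:

    import org.apache.hadoop.fs.ContentSummary;

    public class CopyGateSketch {
      // distcp is chosen only when BOTH the file count and the total size
      // exceed their configured maximums.
      static boolean shouldDistCp(ContentSummary src, long maxNumFiles, long maxSizeBytes) {
        return src.getFileCount() > maxNumFiles && src.getLength() > maxSizeBytes;
      }

      public static void main(String[] args) {
        ContentSummary big = new ContentSummary.Builder()
            .fileCount(100).length(1024L * 1024 * 1024).build();    // 100 files, 1 GB
        ContentSummary small = new ContentSummary.Builder()
            .fileCount(1).length(10).build();                       // 1 file, 10 bytes
        long maxFiles = 1L, maxSize = 32L * 1024 * 1024;            // the defaults above
        System.out.println(shouldDistCp(big, maxFiles, maxSize));   // true  -> distcp
        System.out.println(shouldDistCp(small, maxFiles, maxSize)); // false -> FileUtil.copy
      }
    }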
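
HdfsUtils.constructDistCpParams maps every distcp.options.* property in the Configuration to a distcp command-line flag, falling back to -update -skipcrccheck -pb only when none are set. A usage sketch with made-up option values:

    import org.apache.hadoop.conf.Configuration;

    public class DistCpOptionsSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Each distcp.options.<flag> entry becomes "-<flag> [<value>]":
        conf.set("distcp.options.m", "20");  // -m 20 : cap the copy at 20 mappers
        conf.set("distcp.options.pb", "");   // -pb   : preserve block size, no argument
        // HdfsUtils.runDistCp(srcPaths, dst, conf) would now pass "-m 20 -pb"
        // to DistCp instead of the built-in defaults.
      }
    }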
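
The partition-path escaping now in FileUtils replaces each reserved character with a %XX hex code and reverses it on read. A trimmed, self-contained round-trip sketch; the escape set below is only a small subset of the patch's charToEscape:

    public class EscapeSketch {
      // Illustrative subset of the characters FileUtils escapes.
      private static boolean needsEscaping(char c) {
        return c < ' ' || "\"#%'*/:=?\\".indexOf(c) >= 0;
      }

      static String escapePathName(String path) {
        if (path == null || path.isEmpty()) {
          return "__HIVE_DEFAULT_PARTITION__"; // default when no value is given
        }
        StringBuilder sb = new StringBuilder();
        for (char c : path.toCharArray()) {
          if (needsEscaping(c)) {
            sb.append('%').append(String.format("%1$02X", (int) c));
          } else {
            sb.append(c);
          }
        }
        return sb.toString();
      }

      static String unescapePathName(String path) {
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < path.length(); i++) {
          char c = path.charAt(i);
          if (c == '%' && i + 2 < path.length()) {
            int code;
            try {
              code = Integer.parseInt(path.substring(i + 1, i + 3), 16);
            } catch (NumberFormatException e) {
              code = -1; // not a hex escape, keep the '%' literally
            }
            if (code >= 0) {
              sb.append((char) code);
              i += 2;
              continue;
            }
          }
          sb.append(c);
        }
        return sb.toString();
      }

      public static void main(String[] args) {
        String escaped = escapePathName("2017/08");    // '/' must not split the value
        System.out.println(escaped);                   // 2017%2F08
        System.out.println(unescapePathName(escaped)); // 2017/08
      }
    }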
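
Building on that escaping, FileUtils.makePartName lower-cases the key names, escapes the values, and joins the components with '/'. A usage sketch, assuming the standalone-metastore classes are on the classpath; the column names and values are made up:

    import java.util.Arrays;
    import org.apache.hadoop.hive.metastore.utils.FileUtils;

    public class PartNameSketch {
      public static void main(String[] args) {
        // "DS" is lower-cased; ':' in the value is escaped to %3A.
        String name = FileUtils.makePartName(Arrays.asList("DS", "hr"),
            Arrays.asList("2017-08-02", "12:00"));
        System.out.println(name); // ds=2017-08-02/hr=12%3A00
      }
    }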
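
For a non-negative level, getFileStatusRecurse does not walk the tree; it globs with one '*' wildcard per directory level and lets HIDDEN_FILES_PATH_FILTER drop names starting with '_' or '.'. A sketch of the pattern construction for a hypothetical two-level partition tree:

    import org.apache.hadoop.fs.Path;

    public class GlobPatternSketch {
      public static void main(String[] args) {
        Path root = new Path("/warehouse/t");
        int level = 2; // e.g. two partition columns, ds= and hr=
        StringBuilder sb = new StringBuilder(root.toUri().getPath());
        for (int i = 0; i < level; i++) {
          sb.append(Path.SEPARATOR).append("*");
        }
        // globStatus("/warehouse/t/*/*") then matches paths such as
        // /warehouse/t/ds=2017-08-02/hr=12
        System.out.println(new Path(root, sb.toString())); // /warehouse/t/*/*
      }
    }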
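
MetaStoreUtils.encodeTableName replaces every character that is not a letter, digit, or underscore with -NN-, where NN is the character's ASCII code. A self-contained sketch; the logic mirrors the patch and the sample names are made up:

    public class EncodeTableNameSketch {
      static String encodeTableName(String name) {
        StringBuilder sb = new StringBuilder();
        for (char ch : name.toCharArray()) {
          if (Character.isLetterOrDigit(ch) || ch == '_') {
            sb.append(ch);
          } else {
            // Any other character is replaced by -<ASCII code>-
            sb.append('-').append((int) ch).append('-');
          }
        }
        return sb.toString();
      }

      public static void main(String[] args) {
        System.out.println(encodeTableName("sales_2017")); // unchanged: sales_2017
        System.out.println(encodeTableName("sales.2017")); // '.' is ASCII 46: sales-46-2017
      }
    }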