diff --git bin/ext/metatool.sh bin/ext/metatool.sh new file mode 100755 index 0000000..3befb1f --- /dev/null +++ bin/ext/metatool.sh @@ -0,0 +1,29 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +THISSERVICE=metatool +export SERVICE_LIST="${SERVICE_LIST}${THISSERVICE} " + +metatool () { + + CLASS=org.apache.hadoop.hive.metastore.tools.HiveMetaTool + execHiveCmd $CLASS "$@" +} + +metatool_help () { + CLASS=org.apache.hadoop.hive.metastore.tools.HiveMetaTool + execHiveCmd $CLASS "--help" +} + diff --git bin/metatool bin/metatool new file mode 100755 index 0000000..df85300 --- /dev/null +++ bin/metatool @@ -0,0 +1,18 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +hive --service metatool "$@" diff --git build.xml build.xml index 6712af9..1d378bc 100644 --- build.xml +++ build.xml @@ -419,6 +419,7 @@ + diff --git conf/hive-default.xml.template conf/hive-default.xml.template index cc1565f..f97b8a8 100644 --- conf/hive-default.xml.template +++ conf/hive-default.xml.template @@ -222,7 +222,7 @@ datanucleus.identifierFactory datanucleus - Name of the identifier factory to use when generating table/column names etc. 'datanucleus' is used for backward compatibility + Name of the identifier factory to use when generating table/column names etc. 
'datanucleus1' is used for backward compatibility diff --git eclipse-templates/TestHiveMetaTool.launchtemplate eclipse-templates/TestHiveMetaTool.launchtemplate new file mode 100644 index 0000000..107d26f --- /dev/null +++ eclipse-templates/TestHiveMetaTool.launchtemplate @@ -0,0 +1,43 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git metastore/ivy.xml metastore/ivy.xml index 3011d2f..1e42550 100644 --- metastore/ivy.xml +++ metastore/ivy.xml @@ -45,7 +45,7 @@ - diff --git metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java index 6f4716c..795cb65 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java @@ -3893,6 +3893,184 @@ public class ObjectStore implements RawStore, Configurable { return join(storedVals,','); } + /** The following APIs + * + * - executeJDOQLSelect + * - executeJDOQLUpdate + * - listHDFSRoots + * - upgradeForHA + * + * are used by HiveMetaTool. These APIs **shouldn't** be exposed via Thrift. + * + */ + public void executeJDOQLSelect(String query) { + + boolean committed = false; + + System.out.println("Query to be executed: " + query); + + try { + openTransaction(); + Query q = pm.newQuery(query); + Collection result = (Collection) q.execute(); + committed = commitTransaction(); + + Iterator iter = result.iterator(); + + while (iter.hasNext()) { + System.out.println(iter.next().toString()); + } + + + } finally { + if (!committed) { + rollbackTransaction(); + } + } + + } + + + public void executeJDOQLUpdate(String query) { + + boolean committed = false; + long numUpdated = 0; + + System.out.println("Query to be executed: " + query); + + try { + openTransaction(); + Query q = pm.newQuery(query); + numUpdated = (Long) q.execute(); + committed = commitTransaction(); + + System.out.println("Num updated: " + numUpdated); + + } finally { + if (!committed) { + rollbackTransaction(); + } + } + + } + + public void listHDFSRoots() { + + boolean committed = false; + + Query query = pm.newQuery(MDatabase.class); + + List mDBs = (List) query.execute(); + pm.retrieveAll(mDBs); + + Iterator iter = mDBs.iterator(); + + System.out.println("HDFS root locations:"); + + while (iter.hasNext()) { + + MDatabase mDB = (MDatabase) iter.next(); + System.out.println(mDB.getLocationUri()); + } + + } + + public void upgradeForHA(String oldLoc, String newLoc) + { + + boolean committed = false; + + oldLoc = oldLoc.toLowerCase(); + newLoc = newLoc.toLowerCase(); + + System.out.println("Old HDFS root location: " + oldLoc + " New HDFS root location: " + newLoc); + System.out.println("Performing HA upgrade..."); + + int count = 0; + + try { + + openTransaction(); + + // upgrade locationURI in mDatabase + Query query = pm.newQuery(MDatabase.class, "locationUri == oldLoc"); + query.declareParameters("java.lang.String oldLoc"); + + List mDBs = (List) query.execute(oldLoc); + pm.retrieveAll(mDBs); + + Iterator iter = mDBs.iterator(); + + while (iter.hasNext()) { + + MDatabase mDB = (MDatabase) iter.next(); + mDB.setLocationUri(newLoc); + count++; + } + + // upgrade location in mStorageDescriptor + query = pm.newQuery(MStorageDescriptor.class); + + List mSDSs = (List) query.execute(); + pm.retrieveAll(mSDSs); + + iter = mSDSs.iterator(); + + while (iter.hasNext()) { + + MStorageDescriptor mSDS = (MStorageDescriptor) iter.next(); + + if (mSDS.getLocation().startsWith(oldLoc)) { + String tblLoc = 
mSDS.getLocation().replaceAll(oldLoc, newLoc); + mSDS.setLocation(tblLoc); + count++; + } + + } + + // upgrade schema.url for avro serde + + query = pm.newQuery(MSerDeInfo.class); + + List mSerdes = (List) query.execute(); + pm.retrieveAll(mSerdes); + + iter = mSerdes.iterator(); + + String key = new String("schema.url"); + + while (iter.hasNext()) { + + MSerDeInfo mSerde = (MSerDeInfo) iter.next(); + + String schemaLoc = mSerde.getParameters().get(key); + + if (schemaLoc != null) { + + if (schemaLoc.startsWith(oldLoc)) { + mSerde.getParameters().put(key, schemaLoc.replaceAll(oldLoc, newLoc)); + count++; + } + } + } + + committed = commitTransaction(); + + } finally { + if (!committed) { + rollbackTransaction(); + System.out.println("HA upgrade failed"); + } else { + if (count > 0) { + System.out.println("HA upgrade successful"); + System.out.println("Num entries updated: " + count); + } else { + System.out.println("No entries found to update"); + } + } + } + } + @Override public long cleanupEvents() { boolean commited = false; diff --git metastore/src/java/org/apache/hadoop/hive/metastore/tools/HiveMetaTool.java metastore/src/java/org/apache/hadoop/hive/metastore/tools/HiveMetaTool.java new file mode 100644 index 0000000..8040981 --- /dev/null +++ metastore/src/java/org/apache/hadoop/hive/metastore/tools/HiveMetaTool.java @@ -0,0 +1,143 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package org.apache.hadoop.hive.metastore.tools;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.CommandLineParser;
+import org.apache.commons.cli.GnuParser;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.OptionBuilder;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.ObjectStore;
+
+/**
+ * This class provides Hive admins a tool to
+ * - execute JDOQL against the metastore database using DataNucleus
+ * - perform an HA name node upgrade
+ */
+public class HiveMetaTool {
+
+  private static final Log LOG = LogFactory.getLog(HiveMetaTool.class.getName());
+  private final Options cmdLineOptions = new Options();
+
+  public HiveMetaTool() {
+  }
+
+  @SuppressWarnings("static-access")
+  private void init() {
+
+    System.out.println("Initializing HiveMetaTool..");
+
+    Option help = new Option("help", "prints this message");
+    Option listHDFSRoot = new Option("ListHDFSRoot", "prints the current HDFS root location");
+    Option executeJDOQL =
+        OptionBuilder.withArgName("query-string")
+            .hasArgs()
+            .withDescription("executes the given JDOQL query")
+            .create("ExecuteJDOQL");
+
+    Option HAUpgrade =
+        OptionBuilder.withArgName("new-loc old-loc")
+            .hasArgs(2)
+            .withDescription("upgrades HA root to new location")
+            .create("HAUpgrade");
+
+    cmdLineOptions.addOption(help);
+    cmdLineOptions.addOption(listHDFSRoot);
+    cmdLineOptions.addOption(executeJDOQL);
+    cmdLineOptions.addOption(HAUpgrade);
+  }
+
+  public static void main(String[] args) throws Exception {
+    int ret = run(args);
+    //System.exit(ret);
+  }
+
+  private static int run(String[] args) throws Exception {
+
+    HiveConf hiveConf = new HiveConf(HiveMetaTool.class);
+
+    HiveMetaTool metaTool = new HiveMetaTool();
+    ObjectStore objStore = null;
+    metaTool.init();
+
+    CommandLineParser parser = new GnuParser();
+
+    try {
+      CommandLine line = parser.parse(metaTool.cmdLineOptions, args);
+
+      if (line.hasOption("help")) {
+        HelpFormatter formatter = new HelpFormatter();
+        formatter.printHelp("metatool", metaTool.cmdLineOptions);
+      } else if (line.hasOption("ListHDFSRoot")) {
+        objStore = new ObjectStore();
+        objStore.setConf(hiveConf);
+        objStore.listHDFSRoots();
+      } else if (line.hasOption("ExecuteJDOQL")) {
+        String query = line.getOptionValue("ExecuteJDOQL");
+
+        objStore = new ObjectStore();
+        objStore.setConf(hiveConf);
+
+        if (query.toLowerCase().trim().startsWith("select")) {
+          objStore.executeJDOQLSelect(query);
+        } else if (query.toLowerCase().trim().startsWith("update")) {
+          objStore.executeJDOQLUpdate(query);
+        } else {
+          throw new Exception("Unsupported statement type");
+        }
+      } else if (line.hasOption("HAUpgrade")) {
+        String[] loc = line.getOptionValues("HAUpgrade");
+
+        if (loc.length != 2) {
+          throw new Exception("HAUpgrade takes 2 arguments but was passed "
+              + loc.length + " arguments");
+        }
+
+        objStore = new ObjectStore();
+        objStore.setConf(hiveConf);
+
+        objStore.upgradeForHA(loc[1], loc[0]);
+      } else {
+        throw new Exception("Invalid option");
+      }
+    } catch (ParseException e) {
+      System.err.println("Parsing failed. 
Reason: " + e.getMessage()); + } finally { + if (objStore != null) { + System.out.println("HiveMetaTool shutdown.."); + objStore.shutdown(); + } + } + return 1; + } +} diff --git metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java index 5ec7a67..1c3f962 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java +++ metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java @@ -1558,166 +1558,167 @@ public abstract class TestHiveMetaStore extends TestCase { } } - /** - * Tests for list partition by filter functionality. - * @throws Exception - */ public void testPartitionFilter() throws Exception { - String dbName = "filterdb"; - String tblName = "filtertbl"; - - List vals = new ArrayList(3); - vals.add("p11"); - vals.add("p21"); - vals.add("p31"); - List vals2 = new ArrayList(3); - vals2.add("p11"); - vals2.add("p22"); - vals2.add("p31"); - List vals3 = new ArrayList(3); - vals3.add("p12"); - vals3.add("p21"); - vals3.add("p31"); - List vals4 = new ArrayList(3); - vals4.add("p12"); - vals4.add("p23"); - vals4.add("p31"); - List vals5 = new ArrayList(3); - vals5.add("p13"); - vals5.add("p24"); - vals5.add("p31"); - List vals6 = new ArrayList(3); - vals6.add("p13"); - vals6.add("p25"); - vals6.add("p31"); + String dbName = "filterdb"; + String tblName = "filtertbl"; + + List vals = new ArrayList(3); + vals.add("p11"); + vals.add("p21"); + vals.add("p31"); + List vals2 = new ArrayList(3); + vals2.add("p11"); + vals2.add("p22"); + vals2.add("p31"); + List vals3 = new ArrayList(3); + vals3.add("p12"); + vals3.add("p21"); + vals3.add("p31"); + List vals4 = new ArrayList(3); + vals4.add("p12"); + vals4.add("p23"); + vals4.add("p31"); + List vals5 = new ArrayList(3); + vals5.add("p13"); + vals5.add("p24"); + vals5.add("p31"); + List vals6 = new ArrayList(3); + vals6.add("p13"); + vals6.add("p25"); + vals6.add("p31"); + + silentDropDatabase(dbName); - silentDropDatabase(dbName); - - Database db = new Database(); - db.setName(dbName); - client.createDatabase(db); - - ArrayList cols = new ArrayList(2); - cols.add(new FieldSchema("c1", Constants.STRING_TYPE_NAME, "")); - cols.add(new FieldSchema("c2", Constants.INT_TYPE_NAME, "")); - - ArrayList partCols = new ArrayList(3); - partCols.add(new FieldSchema("p1", Constants.STRING_TYPE_NAME, "")); - partCols.add(new FieldSchema("p2", Constants.STRING_TYPE_NAME, "")); - partCols.add(new FieldSchema("p3", Constants.INT_TYPE_NAME, "")); + Database db = new Database(); + db.setName(dbName); + client.createDatabase(db); - Table tbl = new Table(); - tbl.setDbName(dbName); - tbl.setTableName(tblName); - StorageDescriptor sd = new StorageDescriptor(); - tbl.setSd(sd); - sd.setCols(cols); - sd.setCompressed(false); - sd.setNumBuckets(1); - sd.setParameters(new HashMap()); - sd.setBucketCols(new ArrayList()); - sd.setSerdeInfo(new SerDeInfo()); - sd.getSerdeInfo().setName(tbl.getTableName()); - sd.getSerdeInfo().setParameters(new HashMap()); - sd.getSerdeInfo().getParameters() - .put(Constants.SERIALIZATION_FORMAT, "1"); - sd.setSortCols(new ArrayList()); + ArrayList cols = new ArrayList(2); + cols.add(new FieldSchema("c1", Constants.STRING_TYPE_NAME, "")); + cols.add(new FieldSchema("c2", Constants.INT_TYPE_NAME, "")); - tbl.setPartitionKeys(partCols); - client.createTable(tbl); + ArrayList partCols = new ArrayList(3); + partCols.add(new FieldSchema("p1", Constants.STRING_TYPE_NAME, "")); + partCols.add(new FieldSchema("p2", 
Constants.STRING_TYPE_NAME, "")); + partCols.add(new FieldSchema("p3", Constants.INT_TYPE_NAME, "")); - tbl = client.getTable(dbName, tblName); + Table tbl = new Table(); + tbl.setDbName(dbName); + tbl.setTableName(tblName); + StorageDescriptor sd = new StorageDescriptor(); + tbl.setSd(sd); + sd.setCols(cols); + sd.setCompressed(false); + sd.setNumBuckets(1); + sd.setParameters(new HashMap()); + sd.setBucketCols(new ArrayList()); + sd.setSerdeInfo(new SerDeInfo()); + sd.getSerdeInfo().setName(tbl.getTableName()); + sd.getSerdeInfo().setParameters(new HashMap()); + sd.getSerdeInfo().getParameters() + .put(Constants.SERIALIZATION_FORMAT, "1"); + sd.setSortCols(new ArrayList()); - add_partition(client, tbl, vals, "part1"); - add_partition(client, tbl, vals2, "part2"); - add_partition(client, tbl, vals3, "part3"); - add_partition(client, tbl, vals4, "part4"); - add_partition(client, tbl, vals5, "part5"); - add_partition(client, tbl, vals6, "part6"); + tbl.setPartitionKeys(partCols); + client.createTable(tbl); - checkFilter(client, dbName, tblName, "p1 = \"p11\"", 2); - checkFilter(client, dbName, tblName, "p1 = \"p12\"", 2); - checkFilter(client, dbName, tblName, "p2 = \"p21\"", 2); - checkFilter(client, dbName, tblName, "p2 = \"p23\"", 1); - checkFilter(client, dbName, tblName, "p1 = \"p11\" and p2=\"p22\"", 1); - checkFilter(client, dbName, tblName, "p1 = \"p11\" or p2=\"p23\"", 3); - checkFilter(client, dbName, tblName, "p1 = \"p11\" or p1=\"p12\"", 4); - - checkFilter(client, dbName, tblName, - "p1 = \"p11\" or (p1=\"p12\" and p2=\"p21\")", 3); - checkFilter(client, dbName, tblName, - "p1 = \"p11\" or (p1=\"p12\" and p2=\"p21\") Or " + - "(p1=\"p13\" aNd p2=\"p24\")", 4); - //test for and or precedence - checkFilter(client, dbName, tblName, - "p1=\"p12\" and (p2=\"p27\" Or p2=\"p21\")", 1); - checkFilter(client, dbName, tblName, - "p1=\"p12\" and p2=\"p27\" Or p2=\"p21\"", 2); - - checkFilter(client, dbName, tblName, "p1 > \"p12\"", 2); - checkFilter(client, dbName, tblName, "p1 >= \"p12\"", 4); - checkFilter(client, dbName, tblName, "p1 < \"p12\"", 2); - checkFilter(client, dbName, tblName, "p1 <= \"p12\"", 4); - checkFilter(client, dbName, tblName, "p1 <> \"p12\"", 4); - checkFilter(client, dbName, tblName, "p1 like \"p1.*\"", 6); - checkFilter(client, dbName, tblName, "p2 like \"p.*3\"", 1); - - //Test for setting the maximum partition count - List partitions = client.listPartitionsByFilter(dbName, - tblName, "p1 >= \"p12\"", (short) 2); - assertEquals("User specified row limit for partitions", - 2, partitions.size()); + tbl = client.getTable(dbName, tblName); + + add_partition(client, tbl, vals, "part1"); + add_partition(client, tbl, vals2, "part2"); + add_partition(client, tbl, vals3, "part3"); + add_partition(client, tbl, vals4, "part4"); + add_partition(client, tbl, vals5, "part5"); + add_partition(client, tbl, vals6, "part6"); + + checkFilter(client, dbName, tblName, "p1 = \"p11\"", 2); + checkFilter(client, dbName, tblName, "p1 = \"p12\"", 2); + checkFilter(client, dbName, tblName, "p2 = \"p21\"", 2); + checkFilter(client, dbName, tblName, "p2 = \"p23\"", 1); + checkFilter(client, dbName, tblName, "p1 = \"p11\" and p2=\"p22\"", 1); + checkFilter(client, dbName, tblName, "p1 = \"p11\" or p2=\"p23\"", 3); + checkFilter(client, dbName, tblName, "p1 = \"p11\" or p1=\"p12\"", 4); + + checkFilter(client, dbName, tblName, + "p1 = \"p11\" or (p1=\"p12\" and p2=\"p21\")", 3); + checkFilter(client, dbName, tblName, + "p1 = \"p11\" or (p1=\"p12\" and p2=\"p21\") Or " + + "(p1=\"p13\" aNd 
p2=\"p24\")", 4); + //test for and or precedence + checkFilter(client, dbName, tblName, + "p1=\"p12\" and (p2=\"p27\" Or p2=\"p21\")", 1); + checkFilter(client, dbName, tblName, + "p1=\"p12\" and p2=\"p27\" Or p2=\"p21\"", 2); + + checkFilter(client, dbName, tblName, "p1 > \"p12\"", 2); + checkFilter(client, dbName, tblName, "p1 >= \"p12\"", 4); + checkFilter(client, dbName, tblName, "p1 < \"p12\"", 2); + checkFilter(client, dbName, tblName, "p1 <= \"p12\"", 4); + checkFilter(client, dbName, tblName, "p1 <> \"p12\"", 4); + checkFilter(client, dbName, tblName, "p1 like \"p1.*\"", 6); + checkFilter(client, dbName, tblName, "p2 like \"p.*3\"", 1); + + //Test for setting the maximum partition count + List partitions = client.listPartitionsByFilter(dbName, + tblName, "p1 >= \"p12\"", (short) 2); + assertEquals("User specified row limit for partitions", + 2, partitions.size()); - //Negative tests - Exception me = null; - try { - client.listPartitionsByFilter(dbName, - tblName, "p3 >= \"p12\"", (short) -1); - } catch(MetaException e) { - me = e; - } - assertNotNull(me); - assertTrue("Filter on int partition key", me.getMessage().contains( - "Filtering is supported only on partition keys of type string")); + //Negative tests + Exception me = null; + try { + client.listPartitionsByFilter(dbName, + tblName, "p3 >= \"p12\"", (short) -1); + } catch(MetaException e) { + me = e; + } + assertNotNull(me); + assertTrue("Filter on int partition key", me.getMessage().contains( + "Filtering is supported only on partition keys of type string")); - me = null; - try { - client.listPartitionsByFilter(dbName, - tblName, "c1 >= \"p12\"", (short) -1); - } catch(MetaException e) { - me = e; - } - assertNotNull(me); - assertTrue("Filter on invalid key", me.getMessage().contains( - " is not a partitioning key for the table")); + me = null; + try { + client.listPartitionsByFilter(dbName, + tblName, "c1 >= \"p12\"", (short) -1); + } catch(MetaException e) { + me = e; + } + assertNotNull(me); + assertTrue("Filter on invalid key", me.getMessage().contains( + " is not a partitioning key for the table")); - me = null; - try { - client.listPartitionsByFilter(dbName, - tblName, "c1 >= ", (short) -1); - } catch(MetaException e) { - me = e; - } - assertNotNull(me); - assertTrue("Invalid filter string", me.getMessage().contains( - "Error parsing partition filter")); + me = null; + try { + client.listPartitionsByFilter(dbName, + tblName, "c1 >= ", (short) -1); + } catch(MetaException e) { + me = e; + } + assertNotNull(me); + assertTrue("Invalid filter string", me.getMessage().contains( + "Error parsing partition filter")); - me = null; - try { - client.listPartitionsByFilter("invDBName", - "invTableName", "p1 = \"p11\"", (short) -1); - } catch(NoSuchObjectException e) { - me = e; - } - assertNotNull(me); - assertTrue("NoSuchObject exception", me.getMessage().contains( - "database/table does not exist")); + me = null; + try { + client.listPartitionsByFilter("invDBName", + "invTableName", "p1 = \"p11\"", (short) -1); + } catch(NoSuchObjectException e) { + me = e; + } + assertNotNull(me); + assertTrue("NoSuchObject exception", me.getMessage().contains( + "database/table does not exist")); - client.dropTable(dbName, tblName); - client.dropDatabase(dbName); + client.dropTable(dbName, tblName); + client.dropDatabase(dbName); } /** + * Tests for list partition by filter functionality. 
+ * @throws Exception + */ + + /** * Test filtering on table with single partition * @throws Exception */ @@ -2288,5 +2289,4 @@ public abstract class TestHiveMetaStore extends TestCase { createPartitions(dbName, tbl, values); } - } diff --git metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaTool.java metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaTool.java new file mode 100644 index 0000000..16fbee1 --- /dev/null +++ metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaTool.java @@ -0,0 +1,236 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore; + +import java.io.ByteArrayOutputStream; +import java.io.OutputStream; +import java.io.PrintStream; +import java.util.ArrayList; +import java.util.HashMap; + +import junit.framework.TestCase; + +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.InvalidOperationException; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.SerDeInfo; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.api.Type; +import org.apache.hadoop.hive.metastore.tools.HiveMetaTool; +import org.apache.hadoop.hive.serde.Constants; +import org.apache.hadoop.util.StringUtils; + +public class TestHiveMetaTool extends TestCase { + + private HiveMetaStoreClient client; + private Warehouse wh; + private PrintStream originalOut; + private OutputStream os; + private PrintStream ps; + + + private void dropDatabase(String dbName) throws Exception { + + try { + client.dropDatabase(dbName); + } catch (NoSuchObjectException e) { + } catch (InvalidOperationException e) { + } catch (Exception e) { + throw e; + } + } + + @Override + protected void setUp() throws Exception { + + super.setUp(); + + try { + + HiveConf hiveConf = new HiveConf(HiveMetaTool.class); + wh = new Warehouse(hiveConf); + client = new HiveMetaStoreClient(hiveConf, null); + + // Setup output stream to redirect output to + os = new ByteArrayOutputStream(); + ps = new PrintStream(os); + + // create a dummy database and a couple of dummy tables + String dbName = "testDB"; + String typeName = "Person"; + String tblName = "simpleTbl"; + + Database db = new Database(); + db.setName(dbName); + db.setLocationUri("file:/user/hive/warehouse"); + client.dropTable(dbName, tblName); + dropDatabase(dbName); + client.createDatabase(db); + + client.dropType(typeName); + Type typ1 = new Type(); + typ1.setName(typeName); + typ1.setFields(new ArrayList(2)); + typ1.getFields().add( + new 
FieldSchema("name", Constants.STRING_TYPE_NAME, ""));
+      typ1.getFields().add(
+          new FieldSchema("income", Constants.INT_TYPE_NAME, ""));
+      client.createType(typ1);
+
+      Table tbl = new Table();
+      tbl.setDbName(dbName);
+      tbl.setTableName(tblName);
+      StorageDescriptor sd = new StorageDescriptor();
+      tbl.setSd(sd);
+      sd.setCols(typ1.getFields());
+      sd.setCompressed(false);
+      sd.setNumBuckets(1);
+      sd.setParameters(new HashMap<String, String>());
+      sd.getParameters().put("test_param_1", "Use this for comments etc");
+      sd.setBucketCols(new ArrayList<String>(2));
+      sd.getBucketCols().add("name");
+      sd.setSerdeInfo(new SerDeInfo());
+      sd.getSerdeInfo().setName(tbl.getTableName());
+      sd.getSerdeInfo().setParameters(new HashMap<String, String>());
+      sd.getSerdeInfo().getParameters().put(
+          org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT, "1");
+      sd.getSerdeInfo().setSerializationLib(
+          org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class.getName());
+      tbl.setPartitionKeys(new ArrayList<FieldSchema>());
+
+      client.createTable(tbl);
+      client.close();
+    } catch (Throwable e) {
+      System.err.println("Unable to set up the hive metatool test");
+      System.err.println(StringUtils.stringifyException(e));
+      throw new Exception(e);
+    }
+  }
+
+  private void redirectOutputStream() {
+    originalOut = System.out;
+    System.setOut(ps);
+  }
+
+  private void restoreOutputStream() {
+    System.setOut(originalOut);
+  }
+
+  public void testListHDFSRoot() {
+    redirectOutputStream();
+    String[] args = new String[1];
+    args[0] = new String("-ListHDFSRoot");
+
+    try {
+      HiveMetaTool.main(args);
+      String out = os.toString();
+      boolean b = out.contains("file:/user/hive/warehouse");
+      assertTrue(b);
+    } catch (Exception e) {
+      System.err.println("Exception during testListHDFSRoot");
+      System.err.println(StringUtils.stringifyException(e));
+      e.printStackTrace();
+    } finally {
+      restoreOutputStream();
+      System.out.println("Completed testListHDFSRoot");
+    }
+  }
+
+  public void testExecuteJDOQL() {
+    redirectOutputStream();
+    String[] args = new String[2];
+    args[0] = new String("-ExecuteJDOQL");
+    args[1] = new String("select locationUri from org.apache.hadoop.hive.metastore.model.MDatabase");
+
+    try {
+      HiveMetaTool.main(args);
+      String out = os.toString();
+      boolean b = out.contains("file:/user/hive/warehouse");
+      assertTrue(b);
+    } catch (Exception e) {
+      System.err.println("Exception during testExecuteJDOQL");
+      System.err.println(StringUtils.stringifyException(e));
+      e.printStackTrace();
+    } finally {
+      restoreOutputStream();
+      System.out.println("Completed testExecuteJDOQL");
+    }
+  }
+
+  public void testHAUpgrade() {
+    redirectOutputStream();
+    String[] args = new String[3];
+    args[0] = new String("-HAUpgrade");
+    args[1] = new String("file:/user/hive/warehouse2");
+    args[2] = new String("file:/user/hive/warehouse");
+
+    String[] args2 = new String[1];
+    args2[0] = new String("-ListHDFSRoot");
+
+    try {
+      // perform HA upgrade
+      HiveMetaTool.main(args);
+
+      // obtain the new HDFS root
+      HiveMetaTool.main(args2);
+
+      // assert that all locations were updated
+      String out = os.toString();
+      boolean b = out.contains("file:/user/hive/warehouse2");
+      assertTrue(b);
+
+      // restore the original HDFS root
+      args[1] = new String("file:/user/hive/warehouse");
+      args[2] = new String("file:/user/hive/warehouse2");
+      HiveMetaTool.main(args);
+    } catch (Exception e) {
+      System.err.println("Exception during testHAUpgrade");
+      System.err.println(StringUtils.stringifyException(e));
+      e.printStackTrace();
+    } finally {
+      restoreOutputStream();
+      System.out.println("Completed testHAUpgrade");
+    }
+  }
+
+  @Override
+  protected void tearDown() throws Exception {
+    try {
+      super.tearDown();
+    } catch (Throwable e) {
+      System.err.println("Unable to close metastore");
+      System.err.println(StringUtils.stringifyException(e));
+      throw new Exception(e);
+    }
+  }
+}
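Reviewer note: a usage sketch of the new service, based on the options registered in HiveMetaTool.init() and the arguments exercised in TestHiveMetaTool; the hdfs:// URIs below are illustrative placeholders, not values taken from this patch. The bin/metatool wrapper simply forwards to hive --service metatool, so the two invocations are interchangeable.

  hive --service metatool -help
  hive --service metatool -ListHDFSRoot
  hive --service metatool -ExecuteJDOQL "select locationUri from org.apache.hadoop.hive.metastore.model.MDatabase"
  # -HAUpgrade takes the new root first, then the old root (run() calls upgradeForHA(loc[1], loc[0]))
  hive --service metatool -HAUpgrade hdfs://new-nn:8020/user/hive/warehouse hdfs://old-nn:8020/user/hive/warehouse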