diff --git bin/ext/metatool.sh bin/ext/metatool.sh
new file mode 100755
index 0000000..3befb1f
--- /dev/null
+++ bin/ext/metatool.sh
@@ -0,0 +1,29 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+THISSERVICE=metatool
+export SERVICE_LIST="${SERVICE_LIST}${THISSERVICE} "
+
+metatool () {
+  CLASS=org.apache.hadoop.hive.metastore.tools.HiveMetaTool
+  execHiveCmd $CLASS "$@"
+}
+
+metatool_help () {
+ CLASS=org.apache.hadoop.hive.metastore.tools.HiveMetaTool
+ execHiveCmd $CLASS "--help"
+}
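+
+# The bin/metatool wrapper added in this same patch reaches the metatool()
+# function above through Hive's standard service dispatch, i.e.:
+#   hive --service metatool <args>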
+
diff --git bin/metatool bin/metatool
new file mode 100755
index 0000000..df85300
--- /dev/null
+++ bin/metatool
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
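+# Example invocations (a sketch; the option names are the ones HiveMetaTool
+# registers, and the query string is the one used in TestHiveMetaTool):
+#   metatool -ListHDFSRoot
+#   metatool -ExecuteJDOQL "select locationUri from org.apache.hadoop.hive.metastore.model.MDatabase"
+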
+hive --service metatool "$@"
diff --git build.xml build.xml
index 6712af9..1d378bc 100644
--- build.xml
+++ build.xml
@@ -419,6 +419,7 @@
+
diff --git conf/hive-default.xml.template conf/hive-default.xml.template
index cc1565f..f97b8a8 100644
--- conf/hive-default.xml.template
+++ conf/hive-default.xml.template
@@ -222,7 +222,7 @@
 <property>
   <name>datanucleus.identifierFactory</name>
   <value>datanucleus</value>
-  <description>Name of the identifier factory to use when generating table/column names etc. 'datanucleus' is used for backward compatibility</description>
+  <description>Name of the identifier factory to use when generating table/column names etc. 'datanucleus1' is used for backward compatibility</description>
 </property>
diff --git eclipse-templates/TestHiveMetaTool.launchtemplate eclipse-templates/TestHiveMetaTool.launchtemplate
new file mode 100644
index 0000000..107d26f
--- /dev/null
+++ eclipse-templates/TestHiveMetaTool.launchtemplate
@@ -0,0 +1,43 @@
diff --git metastore/ivy.xml metastore/ivy.xml
index 3011d2f..1e42550 100644
--- metastore/ivy.xml
+++ metastore/ivy.xml
@@ -45,7 +45,7 @@
-
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index 045b550..8f788ba 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -3974,6 +3974,184 @@ public class ObjectStore implements RawStore, Configurable {
return join(storedVals,',');
}
+  /** The following APIs
+   *
+   *  - executeJDOQLSelect
+   *  - executeJDOQLUpdate
+   *  - listHDFSRoots
+   *  - updateHDFSRootLocation
+   *
+   * are used by HiveMetaTool. These APIs are for administrative use only and
+   * should not be exposed via Thrift.
+   */
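+  // Example JDOQL strings these entry points accept. The select is the query
+  // exercised by TestHiveMetaTool; the update is illustrative only (DataNucleus
+  // supports bulk update via JDOQL, returning the number of updated rows):
+  //   select locationUri from org.apache.hadoop.hive.metastore.model.MDatabase
+  //   update org.apache.hadoop.hive.metastore.model.MDatabase
+  //       set locationUri = "file:/tmp/warehouse"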
+  public void executeJDOQLSelect(String query) {
+    boolean committed = false;
+
+    System.out.println("Query to be executed: " + query);
+
+    try {
+      openTransaction();
+      Query q = pm.newQuery(query);
+      Collection<?> result = (Collection<?>) q.execute();
+      committed = commitTransaction();
+
+      Iterator<?> iter = result.iterator();
+      while (iter.hasNext()) {
+        System.out.println(iter.next().toString());
+      }
+    } finally {
+      if (!committed) {
+        rollbackTransaction();
+      }
+    }
+  }
+
+  public void executeJDOQLUpdate(String query) {
+    boolean committed = false;
+    long numUpdated = 0;
+
+    System.out.println("Query to be executed: " + query);
+
+    try {
+      openTransaction();
+      Query q = pm.newQuery(query);
+      numUpdated = (Long) q.execute();
+      committed = commitTransaction();
+
+      System.out.println("Num updated: " + numUpdated);
+    } finally {
+      if (!committed) {
+        rollbackTransaction();
+      }
+    }
+  }
+
+  public void listHDFSRoots() {
+    boolean committed = false;
+
+    try {
+      openTransaction();
+      Query query = pm.newQuery(MDatabase.class);
+      List<?> mDBs = (List<?>) query.execute();
+      pm.retrieveAll(mDBs);
+      committed = commitTransaction();
+
+      System.out.println("HDFS root locations:");
+      Iterator<?> iter = mDBs.iterator();
+      while (iter.hasNext()) {
+        MDatabase mDB = (MDatabase) iter.next();
+        System.out.println(mDB.getLocationUri());
+      }
+    } finally {
+      if (!committed) {
+        rollbackTransaction();
+      }
+    }
+  }
+
+  public void updateHDFSRootLocation(String oldLoc, String newLoc) {
+    boolean committed = false;
+
+    // locations are normalized to lower case before comparing
+    oldLoc = oldLoc.toLowerCase();
+    newLoc = newLoc.toLowerCase();
+
+    System.out.println("Old HDFS root location: " + oldLoc +
+        " New HDFS root location: " + newLoc);
+    System.out.println("Updating HDFS root location...");
+
+    int count = 0;
+
+    try {
+      openTransaction();
+
+      // update locationUri in mDatabase
+      Query query = pm.newQuery(MDatabase.class, "locationUri == oldLoc");
+      query.declareParameters("java.lang.String oldLoc");
+
+      List<?> mDBs = (List<?>) query.execute(oldLoc);
+      pm.retrieveAll(mDBs);
+
+      Iterator<?> iter = mDBs.iterator();
+      while (iter.hasNext()) {
+        MDatabase mDB = (MDatabase) iter.next();
+        mDB.setLocationUri(newLoc);
+        count++;
+      }
+
+      // update location in mStorageDescriptor
+      query = pm.newQuery(MStorageDescriptor.class);
+
+      List<?> mSDSs = (List<?>) query.execute();
+      pm.retrieveAll(mSDSs);
+
+      iter = mSDSs.iterator();
+      while (iter.hasNext()) {
+        MStorageDescriptor mSDS = (MStorageDescriptor) iter.next();
+        if (mSDS.getLocation().startsWith(oldLoc)) {
+          // replace() rather than replaceAll(): the old location is a plain
+          // prefix, not a regular expression
+          mSDS.setLocation(mSDS.getLocation().replace(oldLoc, newLoc));
+          count++;
+        }
+      }
+
+      // update schema.url for the avro serde
+      query = pm.newQuery(MSerDeInfo.class);
+
+      List<?> mSerdes = (List<?>) query.execute();
+      pm.retrieveAll(mSerdes);
+
+      String key = "schema.url";
+      iter = mSerdes.iterator();
+      while (iter.hasNext()) {
+        MSerDeInfo mSerde = (MSerDeInfo) iter.next();
+        String schemaLoc = mSerde.getParameters().get(key);
+        if (schemaLoc != null && schemaLoc.startsWith(oldLoc)) {
+          mSerde.getParameters().put(key, schemaLoc.replace(oldLoc, newLoc));
+          count++;
+        }
+      }
+
+      committed = commitTransaction();
+    } finally {
+      if (!committed) {
+        rollbackTransaction();
+        System.out.println("HDFS root location update failed");
+      } else {
+        if (count > 0) {
+          System.out.println("Successfully updated HDFS root location");
+          System.out.println("Num entries updated: " + count);
+        } else {
+          System.out.println("No entries found to update");
+        }
+      }
+    }
+  }
+
@Override
public long cleanupEvents() {
boolean commited = false;
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/tools/HiveMetaTool.java metastore/src/java/org/apache/hadoop/hive/metastore/tools/HiveMetaTool.java
new file mode 100644
index 0000000..8b1fe95
--- /dev/null
+++ metastore/src/java/org/apache/hadoop/hive/metastore/tools/HiveMetaTool.java
@@ -0,0 +1,142 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.tools;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.CommandLineParser;
+import org.apache.commons.cli.GnuParser;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.OptionBuilder;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.ObjectStore;
+
+/**
+ * This class provides Hive admins with a tool to
+ * - execute JDOQL against the metastore database using DataNucleus
+ * - perform an HA NameNode upgrade by updating the HDFS root location
+ *   recorded in the metastore
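+ *
+ * Supported options (defined in init() below; see also the tests and the
+ * bin/metatool wrapper added in this patch):
+ *   -help
+ *   -ListHDFSRoot
+ *   -ExecuteJDOQL <query-string>
+ *   -UpdateLocation <new-loc> <old-loc>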
+ */
+
+public class HiveMetaTool {
+
+ private static final Log LOG = LogFactory.getLog(HiveMetaTool.class.getName());
+ private final Options cmdLineOptions = new Options();
+
+ public HiveMetaTool() {
+ }
+
+  @SuppressWarnings("static-access")
+  private void init() {
+    System.out.println("Initializing HiveMetaTool...");
+
+    Option help = new Option("help", "prints this message");
+    Option listHDFSRoot = new Option("ListHDFSRoot", "prints the current HDFS root location");
+    Option executeJDOQL =
+        OptionBuilder.withArgName("query-string")
+            .hasArgs()
+            .withDescription("executes the given JDOQL query")
+            .create("ExecuteJDOQL");
+    Option haUpgrade =
+        OptionBuilder.withArgName("new-loc old-loc")
+            .hasArgs(2)
+            .withDescription("updates the HDFS root location in the metastore to the new location")
+            .create("UpdateLocation");
+
+    cmdLineOptions.addOption(help);
+    cmdLineOptions.addOption(listHDFSRoot);
+    cmdLineOptions.addOption(executeJDOQL);
+    cmdLineOptions.addOption(haUpgrade);
+  }
+
+  public static void main(String[] args) throws Exception {
+    // do not call System.exit() here: TestHiveMetaTool invokes main() directly
+    run(args);
+  }
+
+  private static int run(String[] args) throws Exception {
+    HiveConf hiveConf = new HiveConf(HiveMetaTool.class);
+    HiveMetaTool metaTool = new HiveMetaTool();
+    ObjectStore objStore = null;
+    metaTool.init();
+
+    CommandLineParser parser = new GnuParser();
+
+    try {
+      CommandLine line = parser.parse(metaTool.cmdLineOptions, args);
+
+      if (line.hasOption("help")) {
+        HelpFormatter formatter = new HelpFormatter();
+        formatter.printHelp("metatool", metaTool.cmdLineOptions);
+      } else if (line.hasOption("ListHDFSRoot")) {
+        objStore = new ObjectStore();
+        objStore.setConf(hiveConf);
+        objStore.listHDFSRoots();
+      } else if (line.hasOption("ExecuteJDOQL")) {
+        String query = line.getOptionValue("ExecuteJDOQL");
+
+        objStore = new ObjectStore();
+        objStore.setConf(hiveConf);
+
+        if (query.toLowerCase().trim().startsWith("select")) {
+          objStore.executeJDOQLSelect(query);
+        } else if (query.toLowerCase().trim().startsWith("update")) {
+          objStore.executeJDOQLUpdate(query);
+        } else {
+          throw new Exception("Unsupported statement type");
+        }
+      } else if (line.hasOption("UpdateLocation")) {
+        String[] loc = line.getOptionValues("UpdateLocation");
+
+        if (loc.length != 2) {
+          throw new Exception("UpdateLocation takes 2 arguments but was passed " +
+              loc.length + " arguments");
+        }
+
+        objStore = new ObjectStore();
+        objStore.setConf(hiveConf);
+
+        // the option takes <new-loc> <old-loc>; updateHDFSRootLocation
+        // expects (oldLoc, newLoc)
+        objStore.updateHDFSRootLocation(loc[1], loc[0]);
+      } else {
+        throw new Exception("Invalid option");
+      }
+    } catch (ParseException e) {
+      System.err.println("Parsing failed. Reason: " + e.getMessage());
+      return 1;
+    } finally {
+      if (objStore != null) {
+        System.out.println("Shutting down HiveMetaTool...");
+        objStore.shutdown();
+      }
+    }
+    return 0;
+  }
+}
diff --git metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaTool.java metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaTool.java
new file mode 100644
index 0000000..3c962bd
--- /dev/null
+++ metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaTool.java
@@ -0,0 +1,236 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import java.io.ByteArrayOutputStream;
+import java.io.OutputStream;
+import java.io.PrintStream;
+import java.util.ArrayList;
+import java.util.HashMap;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.Type;
+import org.apache.hadoop.hive.metastore.tools.HiveMetaTool;
+import org.apache.hadoop.hive.serde.Constants;
+import org.apache.hadoop.util.StringUtils;
+
+public class TestHiveMetaTool extends TestCase {
+
+ private HiveMetaStoreClient client;
+ private Warehouse wh;
+ private PrintStream originalOut;
+ private OutputStream os;
+ private PrintStream ps;
+
+
+  private void dropDatabase(String dbName) throws Exception {
+    try {
+      client.dropDatabase(dbName);
+    } catch (NoSuchObjectException e) {
+      // the database does not exist yet: nothing to drop
+    } catch (InvalidOperationException e) {
+      // ignore: the database cannot be dropped in its current state
+    }
+  }
+
+ @Override
+ protected void setUp() throws Exception {
+
+ super.setUp();
+
+ try {
+
+ HiveConf hiveConf = new HiveConf(HiveMetaTool.class);
+ wh = new Warehouse(hiveConf);
+ client = new HiveMetaStoreClient(hiveConf, null);
+
+      // set up a stream to capture the tool's stdout for assertions
+ os = new ByteArrayOutputStream();
+ ps = new PrintStream(os);
+
+ // create a dummy database and a couple of dummy tables
+ String dbName = "testDB";
+ String typeName = "Person";
+ String tblName = "simpleTbl";
+
+ Database db = new Database();
+ db.setName(dbName);
+ db.setLocationUri("file:/user/hive/warehouse");
+ client.dropTable(dbName, tblName);
+ dropDatabase(dbName);
+ client.createDatabase(db);
+
+ client.dropType(typeName);
+ Type typ1 = new Type();
+ typ1.setName(typeName);
+      typ1.setFields(new ArrayList<FieldSchema>(2));
+ typ1.getFields().add(
+ new FieldSchema("name", Constants.STRING_TYPE_NAME, ""));
+ typ1.getFields().add(
+ new FieldSchema("income", Constants.INT_TYPE_NAME, ""));
+ client.createType(typ1);
+
+ Table tbl = new Table();
+ tbl.setDbName(dbName);
+ tbl.setTableName(tblName);
+ StorageDescriptor sd = new StorageDescriptor();
+ tbl.setSd(sd);
+ sd.setCols(typ1.getFields());
+ sd.setCompressed(false);
+ sd.setNumBuckets(1);
+      sd.setParameters(new HashMap<String, String>());
+ sd.getParameters().put("test_param_1", "Use this for comments etc");
+      sd.setBucketCols(new ArrayList<String>(2));
+ sd.getBucketCols().add("name");
+ sd.setSerdeInfo(new SerDeInfo());
+ sd.getSerdeInfo().setName(tbl.getTableName());
+      sd.getSerdeInfo().setParameters(new HashMap<String, String>());
+ sd.getSerdeInfo().getParameters().put(
+ org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT, "1");
+ sd.getSerdeInfo().setSerializationLib(
+ org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class.getName());
+      tbl.setPartitionKeys(new ArrayList<FieldSchema>());
+
+ client.createTable(tbl);
+ client.close();
+
+ } catch (Throwable e) {
+ System.err.println("Unable to setup the hive metatool test");
+ System.err.println(StringUtils.stringifyException(e));
+ throw new Exception(e);
+ }
+ }
+
+ private void redirectOutputStream() {
+
+ originalOut = System.out;
+ System.setOut(ps);
+
+ }
+
+ private void restoreOutputStream() {
+
+ System.setOut(originalOut);
+ }
+
+  public void testListHDFSRoot() {
+    redirectOutputStream();
+    String[] args = new String[1];
+    args[0] = "-ListHDFSRoot";
+
+    try {
+      HiveMetaTool.main(args);
+      String out = os.toString();
+      assertTrue(out.contains("file:/user/hive/warehouse"));
+    } catch (Exception e) {
+      System.err.println("Exception during testListHDFSRoot");
+      System.err.println(StringUtils.stringifyException(e));
+      fail("testListHDFSRoot failed: " + e.getMessage());
+    } finally {
+      restoreOutputStream();
+      System.out.println("Completed testListHDFSRoot");
+    }
+  }
+
+  public void testExecuteJDOQL() {
+    redirectOutputStream();
+    String[] args = new String[2];
+    args[0] = "-ExecuteJDOQL";
+    args[1] = "select locationUri from org.apache.hadoop.hive.metastore.model.MDatabase";
+
+    try {
+      HiveMetaTool.main(args);
+      String out = os.toString();
+      assertTrue(out.contains("file:/user/hive/warehouse"));
+    } catch (Exception e) {
+      System.err.println("Exception during testExecuteJDOQL");
+      System.err.println(StringUtils.stringifyException(e));
+      fail("testExecuteJDOQL failed: " + e.getMessage());
+    } finally {
+      restoreOutputStream();
+      System.out.println("Completed testExecuteJDOQL");
+    }
+  }
+
+  public void testUpdateHDFSRootLocation() {
+    redirectOutputStream();
+    String[] args = new String[3];
+    args[0] = "-UpdateLocation";
+    args[1] = "file:/user/hive/warehouse2";
+    args[2] = "file:/user/hive/warehouse";
+
+    String[] args2 = new String[1];
+    args2[0] = "-ListHDFSRoot";
+
+    try {
+      // perform the HA upgrade
+      HiveMetaTool.main(args);
+
+      // obtain the new HDFS root
+      HiveMetaTool.main(args2);
+
+      // assert that the root location was updated
+      String out = os.toString();
+      assertTrue(out.contains("file:/user/hive/warehouse2"));
+
+      // restore the original HDFS root
+      args[1] = "file:/user/hive/warehouse";
+      args[2] = "file:/user/hive/warehouse2";
+      HiveMetaTool.main(args);
+    } catch (Exception e) {
+      System.err.println("Exception during testUpdateHDFSRootLocation");
+      System.err.println(StringUtils.stringifyException(e));
+      fail("testUpdateHDFSRootLocation failed: " + e.getMessage());
+    } finally {
+      restoreOutputStream();
+      System.out.println("Completed testUpdateHDFSRootLocation");
+    }
+  }
+
+ @Override
+ protected void tearDown() throws Exception {
+ try {
+ super.tearDown();
+
+ } catch (Throwable e) {
+ System.err.println("Unable to close metastore");
+ System.err.println(StringUtils.stringifyException(e));
+ throw new Exception(e);
+ }
+ }
+}