Index: conf/hive-default.xml
===================================================================
--- conf/hive-default.xml (revision 1208829)
+++ conf/hive-default.xml (working copy)
@@ -239,6 +239,12 @@
+<property>
+  <name>hive.metastore.end.function.listeners</name>
+  <value></value>
+  <description>List of comma-separated listeners for the end of metastore functions.</description>
+</property>
+
  <name>hive.metastore.event.expiry.duration</name>
  <value>0L</value>
  <description>Duration after which events expire from events table (in seconds)</description>
Index: metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreEndFunctionListener.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreEndFunctionListener.java (revision 0)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreEndFunctionListener.java (revision 0)
@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import java.util.AbstractMap;
+
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * Base class for metastore end-of-function listeners. Extend it to implement the actions
+ * that should run whenever a metastore function ends, whether it succeeded or not.
+ *
+ * It also provides a way to export fb303 counters through the exportCounters method.
+ */
+
+public abstract class MetaStoreEndFunctionListener implements Configurable {
+
+ private Configuration conf;
+
+ public MetaStoreEndFunctionListener(Configuration config){
+ this.conf = config;
+ }
+
+ public abstract void onEndFunction(String functionName, boolean successful);
+
+ // Does nothing by default; override it to publish fb303 counters.
+ public void exportCounters(AbstractMap<String, Long> counters) {
+ }
+
+ @Override
+ public Configuration getConf() {
+ return this.conf;
+ }
+
+ @Override
+ public void setConf(Configuration config) {
+ this.conf = config;
+ }
+
+
+}
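
For reference, a minimal sketch of a concrete end-function listener (not part of this patch; the package and class names below are hypothetical). It counts failed calls per metastore function and publishes the totals through exportCounters, assuming the AbstractMap<String, Long> counter map exposed by fb303's getCounters():

package org.example.metastore;

import java.util.AbstractMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.MetaStoreEndFunctionListener;

// Hypothetical example listener: tracks the number of failed calls per function.
public class FailureCountingEndFunctionListener extends MetaStoreEndFunctionListener {

  private final ConcurrentHashMap<String, AtomicLong> failures =
      new ConcurrentHashMap<String, AtomicLong>();

  public FailureCountingEndFunctionListener(Configuration config) {
    super(config);
  }

  @Override
  public void onEndFunction(String functionName, boolean successful) {
    if (!successful) {
      AtomicLong count = failures.get(functionName);
      if (count == null) {
        AtomicLong fresh = new AtomicLong();
        AtomicLong existing = failures.putIfAbsent(functionName, fresh);
        count = (existing == null) ? fresh : existing;
      }
      count.incrementAndGet();
    }
  }

  @Override
  public void exportCounters(AbstractMap<String, Long> counters) {
    // Merge the collected totals into the metastore's fb303 counter map.
    for (Map.Entry<String, AtomicLong> e : failures.entrySet()) {
      counters.put("metastore_failed_" + e.getKey(), e.getValue().get());
    }
  }
}

Such a class would be enabled through the hive.metastore.end.function.listeners property added above.
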
Index: metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java (revision 1208829)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java (working copy)
@@ -24,6 +24,7 @@
import static org.apache.hadoop.hive.metastore.MetaStoreUtils.validateName;
import java.io.IOException;
+import java.util.AbstractMap;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Formatter;
@@ -230,6 +231,7 @@
private ClassLoader classLoader;
private AlterHandler alterHandler;
private List<MetaStoreEventListener> listeners;
+ private List<MetaStoreEndFunctionListener> endFunctionListeners;
{
classLoader = Thread.currentThread().getContextClassLoader();
@@ -270,7 +272,13 @@
}
}
- listeners = MetaStoreUtils.getMetaStoreListener(hiveConf);
+
+ listeners = MetaStoreUtils.getMetaStoreListeners(MetaStoreEventListener.class, hiveConf,
+ hiveConf.getVar(HiveConf.ConfVars.METASTORE_EVENT_LISTENERS));
+ endFunctionListeners = MetaStoreUtils.getMetaStoreListeners(
+ MetaStoreEndFunctionListener.class, hiveConf,
+ hiveConf.getVar(HiveConf.ConfVars.METASTORE_END_FUNCTION_LISTENERS));
+
long cleanFreq = hiveConf.getLongVar(ConfVars.METASTORE_EVENT_CLEAN_FREQ) * 1000L;
if(cleanFreq > 0){
// In default config, there is no timer.
@@ -558,12 +566,16 @@
return startFunction(function, " : db=" + db + " tbl=" + tbl + "partition=" + partName);
}
- public void endFunction(String function) {
+ public void endFunction(String function, boolean successful) {
try {
Metrics.endScope(function);
} catch (IOException e) {
LOG.debug("Exception when closing metrics scope" + e);
}
+
+ for (MetaStoreEndFunctionListener listener : endFunctionListeners) {
+ listener.onEndFunction(function, successful);
+ }
}
@Override
@@ -582,6 +594,20 @@
logInfo("Metastore shutdown complete.");
}
+ @Override
+ public AbstractMap<String, Long> getCounters() {
+ AbstractMap<String, Long> counters = super.getCounters();
+
+ // Allow endFunctionListeners to add any counters they have collected
+ if (endFunctionListeners != null) {
+ for (MetaStoreEndFunctionListener listener : endFunctionListeners) {
+ listener.exportCounters(counters);
+ }
+ }
+
+ return counters;
+ }
+
private static final String DATABASE_WAREHOUSE_SUFFIX = ".db";
private Path getDefaultDatabasePath(String dbName) throws MetaException {
@@ -636,6 +662,7 @@
+ db.getName() + " "
+ db.getLocationUri() + " "
+ db.getDescription());
+ boolean success = false;
try {
try {
if(null != get_database(db.getName())) {
@@ -644,7 +671,7 @@
} catch (NoSuchObjectException e) {
// expected
}
- executeWithRetry(new Command<Boolean>() {
+ success = executeWithRetry(new Command<Boolean>() {
@Override
public Boolean run(RawStore ms) throws Exception {
create_database_core(ms, db);
@@ -661,7 +688,7 @@
assert(e instanceof RuntimeException);
throw (RuntimeException)e;
} finally {
- endFunction("create_database");
+ endFunction("create_database", success);
}
}
@@ -684,7 +711,7 @@
assert(e instanceof RuntimeException);
throw (RuntimeException)e;
} finally {
- endFunction("get_database");
+ endFunction("get_database", db != null);
}
return db;
}
@@ -692,8 +719,9 @@
public void alter_database(final String dbName, final Database db)
throws NoSuchObjectException, TException, MetaException {
startFunction("alter_database" + dbName);
+ boolean success = false;
try {
- executeWithRetry(new Command<Boolean>() {
+ success = executeWithRetry(new Command<Boolean>() {
@Override
public Boolean run(RawStore ms) throws Exception {
return ms.alterDatabase(dbName, db);
@@ -709,7 +737,7 @@
assert(e instanceof RuntimeException);
throw (RuntimeException) e;
} finally {
- endFunction("alter_database");
+ endFunction("alter_database", success);
}
}
@@ -753,12 +781,13 @@
startFunction("drop_database", ": " + dbName);
if (DEFAULT_DATABASE_NAME.equalsIgnoreCase(dbName)) {
- endFunction("drop_database");
+ endFunction("drop_database", false);
throw new MetaException("Can not drop default database");
}
+ boolean success = false;
try {
- executeWithRetry(new Command<Boolean>() {
+ success = executeWithRetry(new Command<Boolean>() {
@Override
public Boolean run(RawStore ms) throws Exception {
drop_database_core(ms, dbName, deleteData, cascade);
@@ -775,7 +804,7 @@
assert(e instanceof RuntimeException);
throw (RuntimeException)e;
} finally {
- endFunction("drop_database");
+ endFunction("drop_database", success);
}
}
@@ -796,7 +825,7 @@
assert(e instanceof RuntimeException);
throw (RuntimeException)e;
} finally {
- endFunction("get_databases");
+ endFunction("get_databases", ret != null);
}
return ret;
}
@@ -818,7 +847,7 @@
assert(e instanceof RuntimeException);
throw (RuntimeException)e;
} finally {
- endFunction("get_all_databases");
+ endFunction("get_all_databases", ret != null);
}
return ret;
}
@@ -847,7 +876,7 @@
public boolean create_type(final Type type) throws AlreadyExistsException,
MetaException, InvalidObjectException {
startFunction("create_type", ": " + type.getName());
- Boolean ret = null;
+ boolean ret = false;
try {
ret = executeWithRetry(new Command() {
@Override
@@ -866,16 +895,16 @@
assert(e instanceof RuntimeException);
throw (RuntimeException)e;
} finally {
- endFunction("create_type");
+ endFunction("create_type", ret);
}
- return ret.booleanValue();
+ return ret;
}
public Type get_type(final String name) throws MetaException, NoSuchObjectException {
startFunction("get_type", ": " + name);
- Type ret;
+ Type ret = null;
try {
ret = executeWithRetry(new Command() {
@Override
@@ -895,7 +924,7 @@
assert(e instanceof RuntimeException);
throw (RuntimeException)e;
} finally {
- endFunction("get_type");
+ endFunction("get_type", ret != null);
}
return ret;
}
@@ -929,7 +958,7 @@
public boolean drop_type(final String name) throws MetaException {
startFunction("drop_type", ": " + name);
- Boolean ret = null;
+ boolean ret = false;
try {
ret = executeWithRetry(new Command() {
@Override
@@ -944,7 +973,7 @@
assert(e instanceof RuntimeException);
throw (RuntimeException)e;
} finally {
- endFunction("drop_type");
+ endFunction("drop_type", ret);
}
return ret;
}
@@ -952,7 +981,7 @@
public Map get_type_all(String name) throws MetaException {
// TODO Auto-generated method stub
startFunction("get_type_all", ": " + name);
- endFunction("get_type_all");
+ endFunction("get_type_all", false);
throw new MetaException("Not yet implemented");
}
@@ -1034,8 +1063,9 @@
MetaException, InvalidObjectException {
startFunction("create_table", ": db=" + tbl.getDbName() + " tbl="
+ tbl.getTableName());
+ boolean success = false;
try {
- executeWithRetry(new Command<Boolean>() {
+ success = executeWithRetry(new Command<Boolean>() {
@Override
public Boolean run(RawStore ms) throws Exception {
create_table_core(ms, tbl);
@@ -1054,7 +1084,7 @@
assert(e instanceof RuntimeException);
throw (RuntimeException)e;
} finally {
- endFunction("create_table");
+ endFunction("create_table", success);
}
}
@@ -1134,8 +1164,9 @@
throws NoSuchObjectException, MetaException {
startTableFunction("drop_table", dbname, name);
+ boolean success = false;
try {
- executeWithRetry(new Command<Boolean>() {
+ success = executeWithRetry(new Command<Boolean>() {
@Override
public Boolean run(RawStore ms) throws Exception {
drop_table_core(ms, dbname, name, deleteData);
@@ -1150,7 +1181,7 @@
assert(e instanceof RuntimeException);
throw (RuntimeException)e;
} finally {
- endFunction("drop_table");
+ endFunction("drop_table", success);
}
}
@@ -1194,7 +1225,7 @@
assert(e instanceof RuntimeException);
throw (RuntimeException)e;
} finally {
- endFunction("get_table");
+ endFunction("get_table", t != null);
}
return t;
}
@@ -1216,7 +1247,7 @@
*/
public List get_table_objects_by_name(final String dbname, final List names)
throws MetaException, InvalidOperationException, UnknownDBException {
- List<Table> tables = new ArrayList<Table>();
+ List<Table> tables = null;
startMultiTableFunction("get_multi_table", dbname, names);
try {
tables = executeWithRetry(new Command<List<Table>>() {
@@ -1242,7 +1273,7 @@
} catch (Exception e) {
throw new MetaException(e.toString());
} finally {
- endFunction("get_multi_table");
+ endFunction("get_multi_table", tables != null);
}
return tables;
}
@@ -1251,7 +1282,7 @@
public List get_table_names_by_filter(
final String dbName, final String filter, final short maxTables)
throws MetaException, InvalidOperationException, UnknownDBException {
- List<String> tables = new ArrayList<String>();
+ List<String> tables = null;
startFunction("get_table_names_by_filter", ": db = " + dbName + ", filter = " + filter);
try {
tables = executeWithRetry(new Command<List<String>>() {
@@ -1276,14 +1307,14 @@
} catch (Exception e) {
throw new MetaException(e.toString());
} finally {
- endFunction("get_table_names_by_filter");
+ endFunction("get_table_names_by_filter", tables != null);
}
return tables;
}
public boolean set_table_parameters(String dbname, String name,
Map params) throws NoSuchObjectException, MetaException {
- endFunction(startTableFunction("set_table_parameters", dbname, name));
+ endFunction(startTableFunction("set_table_parameters", dbname, name), false);
// TODO Auto-generated method stub
return false;
}
@@ -1384,7 +1415,7 @@
assert(e instanceof RuntimeException);
throw (RuntimeException)e;
} finally {
- endFunction("append_partition");
+ endFunction("append_partition", ret != null);
}
return ret;
}
@@ -1445,7 +1476,7 @@
assert(e instanceof RuntimeException);
throw (RuntimeException)e;
} finally {
- endFunction("add_partition");
+ endFunction("add_partition", ret != null);
}
return ret;
}
@@ -1584,7 +1615,7 @@
assert(e instanceof RuntimeException);
throw (RuntimeException)e;
} finally {
- endFunction("add_partition");
+ endFunction("add_partition", ret != null);
}
return ret;
@@ -1661,7 +1692,7 @@
startPartitionFunction("drop_partition", db_name, tbl_name, part_vals);
LOG.info("Partition values:" + part_vals);
- Boolean ret = null;
+ boolean ret = false;
try {
ret = executeWithRetry(new Command() {
@Override
@@ -1680,9 +1711,9 @@
assert(e instanceof RuntimeException);
throw (RuntimeException)e;
} finally {
- endFunction("drop_partition");
+ endFunction("drop_partition", ret);
}
- return ret.booleanValue();
+ return ret;
}
@@ -1706,7 +1737,7 @@
assert(e instanceof RuntimeException);
throw (RuntimeException)e;
} finally {
- endFunction("get_partition");
+ endFunction("get_partition", ret != null);
}
return ret;
}
@@ -1736,7 +1767,7 @@
assert (e instanceof RuntimeException);
throw (RuntimeException) e;
} finally {
- endFunction("get_partition_with_auth");
+ endFunction("get_partition_with_auth", ret != null);
}
return ret;
}
@@ -1761,7 +1792,7 @@
assert(e instanceof RuntimeException);
throw (RuntimeException)e;
} finally {
- endFunction("get_partitions");
+ endFunction("get_partitions", ret != null);
}
return ret;
@@ -1791,7 +1822,7 @@
assert (e instanceof RuntimeException);
throw (RuntimeException) e;
} finally {
- endFunction("get_partitions_with_auth");
+ endFunction("get_partitions_with_auth", ret != null);
}
return ret;
@@ -1815,7 +1846,7 @@
assert(e instanceof RuntimeException);
throw (RuntimeException)e;
} finally {
- endFunction("get_partition_names");
+ endFunction("get_partition_names", ret != null);
}
return ret;
}
@@ -1837,8 +1868,9 @@
LOG.info("Old Partition values:" + part_vals);
}
+ boolean success = false;
try {
- executeWithRetry(new Command<Boolean>() {
+ success = executeWithRetry(new Command<Boolean>() {
@Override
public Boolean run(RawStore ms) throws Exception {
alter_partition_core(ms, db_name, tbl_name, part_vals, new_part);
@@ -1857,7 +1889,7 @@
assert(e instanceof RuntimeException);
throw (RuntimeException)e;
} finally {
- endFunction("alter_partition");
+ endFunction("alter_partition", success);
}
return;
}
@@ -2007,7 +2039,7 @@
public boolean create_index(Index index_def)
throws IndexAlreadyExistsException, MetaException {
- endFunction(startFunction("create_index"));
+ endFunction(startFunction("create_index"), false);
// TODO Auto-generated method stub
throw new MetaException("Not yet implemented");
}
@@ -2020,8 +2052,9 @@
newIndex.putToParameters(Constants.DDL_TIME, Long.toString(System
.currentTimeMillis() / 1000));
+ boolean success = false;
try {
- executeWithRetry(new Command<Boolean>() {
+ success = executeWithRetry(new Command<Boolean>() {
@Override
public Boolean run(RawStore ms) throws Exception {
ms.alterIndex(dbname, base_table_name, index_name, newIndex);
@@ -2036,13 +2069,13 @@
assert(e instanceof RuntimeException);
throw (RuntimeException)e;
} finally {
- endFunction("alter_index");
+ endFunction("alter_index", false);
}
return;
}
public String getVersion() throws TException {
- endFunction(startFunction("getVersion"));
+ endFunction(startFunction("getVersion"), true);
return "3.0";
}
@@ -2057,9 +2090,10 @@
newTable.putToParameters(Constants.DDL_TIME, Long.toString(System
.currentTimeMillis() / 1000));
}
+ boolean success = false;
try {
Table oldt = get_table(dbname, name);
- boolean success = executeWithRetry(new Command<Boolean>() {
+ success = executeWithRetry(new Command<Boolean>() {
@Override
public Boolean run(RawStore ms) throws Exception {
alterHandler.alterTable(ms, wh, dbname, name, newTable);
@@ -2080,7 +2114,7 @@
assert(e instanceof RuntimeException);
throw (RuntimeException)e;
} finally {
- endFunction("alter_table");
+ endFunction("alter_table", success);
}
}
@@ -2088,7 +2122,7 @@
throws MetaException {
startFunction("get_tables", ": db=" + dbname + " pat=" + pattern);
- List<String> ret;
+ List<String> ret = null;
try {
ret = executeWithRetry(new Command<List<String>>() {
@Override
@@ -2102,7 +2136,7 @@
assert(e instanceof RuntimeException);
throw (RuntimeException)e;
} finally {
- endFunction("get_tables");
+ endFunction("get_tables", ret != null);
}
return ret;
}
@@ -2110,7 +2144,7 @@
public List get_all_tables(final String dbname) throws MetaException {
startFunction("get_all_tables", ": db=" + dbname);
- List<String> ret;
+ List<String> ret = null;
try {
ret = executeWithRetry(new Command<List<String>>() {
@Override
@@ -2124,7 +2158,7 @@
assert(e instanceof RuntimeException);
throw (RuntimeException)e;
} finally {
- endFunction("get_all_tables");
+ endFunction("get_all_tables", ret != null);
}
return ret;
}
@@ -2136,6 +2170,7 @@
String base_table_name = names[0];
Table tbl;
+ List<FieldSchema> ret = null;
try {
try {
tbl = get_table(db, base_table_name);
@@ -2145,19 +2180,21 @@
boolean getColsFromSerDe = SerDeUtils.shouldGetColsFromSerDe(
tbl.getSd().getSerdeInfo().getSerializationLib());
if (!getColsFromSerDe) {
- return tbl.getSd().getCols();
+ ret = tbl.getSd().getCols();
} else {
try {
Deserializer s = MetaStoreUtils.getDeserializer(hiveConf, tbl);
- return MetaStoreUtils.getFieldsFromDeserializer(tableName, s);
+ ret = MetaStoreUtils.getFieldsFromDeserializer(tableName, s);
} catch (SerDeException e) {
StringUtils.stringifyException(e);
throw new MetaException(e.getMessage());
}
}
} finally {
- endFunction("get_fields");
+ endFunction("get_fields", ret != null);
}
+
+ return ret;
}
/**
@@ -2176,6 +2213,7 @@
public List get_schema(String db, String tableName)
throws MetaException, UnknownTableException, UnknownDBException {
startFunction("get_schema", ": db=" + db + "tbl=" + tableName);
+ boolean success = false;
try {
String[] names = tableName.split("\\.");
String base_table_name = names[0];
@@ -2197,9 +2235,10 @@
// whole schema
fieldSchemas.addAll(tbl.getPartitionKeys());
}
+ success = true;
return fieldSchemas;
} finally {
- endFunction("get_schema");
+ endFunction("get_schema", success);
}
}
@@ -2217,8 +2256,10 @@
throws TException, ConfigValSecurityException {
startFunction("get_config_value", ": name=" + name + " defaultValue="
+ defaultValue);
+ boolean success = false;
try {
if (name == null) {
+ success = true;
return defaultValue;
}
// Allow only keys that start with hive.*, hdfs.*, mapred.* for security
@@ -2236,9 +2277,10 @@
+ "RuntimeException thrown in get_config_value - msg: "
+ e.getMessage() + " cause: " + e.getCause());
}
+ success = true;
return toReturn;
} finally {
- endFunction("get_config_value");
+ endFunction("get_config_value", success);
}
}
@@ -2310,7 +2352,7 @@
assert(e instanceof RuntimeException);
throw (RuntimeException)e;
} finally {
- endFunction("get_partition_by_name");
+ endFunction("get_partition_by_name", ret != null);
}
return ret;
}
@@ -2342,7 +2384,7 @@
assert(e instanceof RuntimeException);
throw (RuntimeException)e;
} finally {
- endFunction("append_partition_by_name");
+ endFunction("append_partition_by_name", ret != null);
}
return ret;
}
@@ -2369,7 +2411,7 @@
startFunction("drop_partition_by_name", ": db=" + db_name + " tbl="
+ tbl_name + " part=" + part_name);
- Boolean ret = null;
+ boolean ret = false;
try {
ret = executeWithRetry(new Command() {
@Override
@@ -2388,10 +2430,10 @@
assert(e instanceof RuntimeException);
throw (RuntimeException)e;
} finally {
- endFunction("drop_partition_by_name");
+ endFunction("drop_partition_by_name", ret);
}
- return ret.booleanValue();
+ return ret;
}
@Override
@@ -2399,13 +2441,17 @@
final String tbl_name, final List part_vals,
final short max_parts) throws MetaException, TException {
startPartitionFunction("get_partitions_ps", db_name, tbl_name, part_vals);
+
+ List<Partition> ret = null;
try {
- return get_partitions_ps_with_auth(db_name, tbl_name, part_vals,
+ ret = get_partitions_ps_with_auth(db_name, tbl_name, part_vals,
max_parts, null, null);
}
finally {
- endFunction("get_partitions_ps");
+ endFunction("get_partitions_ps", ret != null);
}
+
+ return ret;
}
@Override
@@ -2415,7 +2461,7 @@
final List groupNames) throws MetaException, TException {
startPartitionFunction("get_partitions_ps_with_auth", db_name, tbl_name,
part_vals);
- List<Partition> ret;
+ List<Partition> ret = null;
try {
ret = executeWithRetry(new Command<List<Partition>>() {
@Override
@@ -2432,7 +2478,7 @@
assert(e instanceof RuntimeException);
throw (RuntimeException)e;
} finally {
- endFunction("get_partitions_ps_with_auth");
+ endFunction("get_partitions_ps_with_auth", ret != null);
}
return ret;
}
@@ -2442,7 +2488,7 @@
final String tbl_name, final List part_vals, final short max_parts)
throws MetaException, TException {
startPartitionFunction("get_partitions_names_ps", db_name, tbl_name, part_vals);
- List<String> ret;
+ List<String> ret = null;
try {
ret = executeWithRetry(new Command<List<String>>() {
@Override
@@ -2456,7 +2502,7 @@
assert(e instanceof RuntimeException);
throw (RuntimeException)e;
} finally {
- endFunction("get_partitions_names_ps");
+ endFunction("get_partitions_names_ps", ret != null);
}
return ret;
}
@@ -2505,7 +2551,7 @@
assert(e instanceof RuntimeException);
throw (RuntimeException)e;
} finally {
- endFunction("add_index");
+ endFunction("add_index", ret != null);
}
return ret;
}
@@ -2574,7 +2620,7 @@
startFunction("drop_index_by_name", ": db=" + dbName + " tbl="
+ tblName + " index=" + indexName);
- Boolean ret = null;
+ boolean ret = false;
try {
ret = executeWithRetry(new Command() {
@Override
@@ -2593,10 +2639,10 @@
assert(e instanceof RuntimeException);
throw (RuntimeException)e;
} finally {
- endFunction("drop_index_by_name");
+ endFunction("drop_index_by_name", ret);
}
- return ret.booleanValue();
+ return ret;
}
private boolean drop_index_by_name_core(final RawStore ms,
@@ -2677,7 +2723,7 @@
assert(e instanceof RuntimeException);
throw (RuntimeException)e;
} finally {
- endFunction("drop_index_by_name");
+ endFunction("drop_index_by_name", ret != null);
}
return ret;
}
@@ -2713,7 +2759,7 @@
assert(e instanceof RuntimeException);
throw (RuntimeException)e;
} finally {
- endFunction("get_index_names");
+ endFunction("get_index_names", ret != null);
}
return ret;
}
@@ -2738,7 +2784,7 @@
assert(e instanceof RuntimeException);
throw (RuntimeException)e;
} finally {
- endFunction("get_indexes");
+ endFunction("get_indexes", ret != null);
}
return ret;
}
@@ -2765,7 +2811,7 @@
assert(e instanceof RuntimeException);
throw (RuntimeException)e;
} finally {
- endFunction("get_partitions_by_filter");
+ endFunction("get_partitions_by_filter", ret != null);
}
return ret;
}
@@ -2793,7 +2839,7 @@
assert(e instanceof RuntimeException);
throw (RuntimeException)e;
} finally {
- endFunction("get_partitions_by_names");
+ endFunction("get_partitions_by_names", ret != null);
}
return ret;
}
@@ -3394,12 +3440,14 @@
public void cancel_delegation_token(String token_str_form)
throws MetaException, TException {
startFunction("cancel_delegation_token");
+ boolean success = false;
try {
HiveMetaStore.cancelDelegationToken(token_str_form);
+ success = true;
} catch(IOException e) {
throw new MetaException(e.getMessage());
} finally {
- endFunction("cancel_delegation_token");
+ endFunction("cancel_delegation_token", success);
}
}
@@ -3407,13 +3455,15 @@
public long renew_delegation_token(String token_str_form)
throws MetaException, TException {
startFunction("renew_delegation_token");
+ Long ret = null;
try {
- return HiveMetaStore.renewDelegationToken(token_str_form);
+ ret = HiveMetaStore.renewDelegationToken(token_str_form);
} catch(IOException e) {
throw new MetaException(e.getMessage());
} finally {
- endFunction("renew_delegation_token");
+ endFunction("renew_delegation_token", ret != null);
}
+ return ret;
}
@Override
@@ -3421,8 +3471,9 @@
String renewer_kerberos_principal_name)
throws MetaException, TException {
startFunction("get_delegation_token");
+ String ret = null;
try {
- return
+ ret =
HiveMetaStore.getDelegationToken(token_owner,
renewer_kerberos_principal_name);
} catch(IOException e) {
@@ -3430,8 +3481,9 @@
} catch (InterruptedException e) {
throw new MetaException(e.getMessage());
} finally {
- endFunction("get_delegation_token");
+ endFunction("get_delegation_token", ret != null);
}
+ return ret;
}
@Override
@@ -3440,9 +3492,9 @@
MetaException,TException, NoSuchObjectException, UnknownDBException, UnknownTableException,
InvalidPartitionException, UnknownPartitionException {
+ Table tbl = null;
try {
startPartitionFunction("markPartitionForEvent", db_name, tbl_name, partName);
- Table tbl = null;
try{
tbl = executeWithRetry(new Command(){
@Override
@@ -3479,7 +3531,7 @@
}
}
finally{
- endFunction("markPartitionForEvent");
+ endFunction("markPartitionForEvent", tbl != null);
}
}
@@ -3490,8 +3542,9 @@
TException, UnknownPartitionException, InvalidPartitionException {
startPartitionFunction("isPartitionMarkedForEvent", db_name, tbl_name, partName);
+ Boolean ret = null;
try {
- return executeWithRetry(new Command<Boolean>(){
+ ret = executeWithRetry(new Command<Boolean>(){
@Override
public Boolean run(RawStore ms) throws Exception {
return ms.isPartitionMarkedForEvent(db_name, tbl_name, partName, evtType);
@@ -3519,8 +3572,10 @@
}
}
finally{
- endFunction("isPartitionMarkedForEvent");
+ endFunction("isPartitionMarkedForEvent", ret != null);
}
+
+ return ret;
}
}
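
The HiveMetaStore changes above all follow one pattern: each handler records whether the call succeeded (either the boolean returned by executeWithRetry or a null check on the result) and reports it to endFunction from the finally block, so end-function listeners are notified exactly once per call, including when an exception propagates. A condensed, hypothetical sketch of that shape (not actual patch code; the method name is made up):

// Hypothetical handler illustrating the success-reporting pattern used throughout the patch.
public void some_metastore_call(final String arg) throws MetaException {
  startFunction("some_metastore_call", ": arg=" + arg);
  boolean success = false;
  try {
    // ... perform the real work (executeWithRetry(...) in the actual handlers) ...
    success = true; // only reached if the work completed without throwing
  } finally {
    // Runs on both the success and the exception path.
    endFunction("some_metastore_call", success);
  }
}
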
Index: metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java (revision 1208829)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java (working copy)
@@ -960,27 +960,28 @@
/**
* create listener instances as per the configuration.
+ *
+ * @param clazz the listener class to instantiate
* @param conf
+ * @param listenerImplList comma-separated list of listener implementation class names
* @return
* @throws MetaException
*/
- static List<MetaStoreEventListener> getMetaStoreListener (HiveConf conf)
- throws MetaException {
+ static <T> List<T> getMetaStoreListeners(Class<T> clazz,
+ HiveConf conf, String listenerImplList) throws MetaException {
- List<MetaStoreEventListener> listeners = new ArrayList<MetaStoreEventListener>();
- String listenerImplList = conf.getVar(HiveConf.ConfVars.METASTORE_EVENT_LISTENERS);
+ List<T> listeners = new ArrayList<T>();
listenerImplList = listenerImplList.trim();
if (listenerImplList.equals("")) {
return listeners;
-}
+ }
String[] listenerImpls = listenerImplList.split(",");
for (String listenerImpl : listenerImpls) {
try {
- MetaStoreEventListener listener = (MetaStoreEventListener) Class.forName(
+ T listener = (T) Class.forName(
listenerImpl.trim(), true, JavaUtils.getClassLoader()).getConstructor(
Configuration.class).newInstance(conf);
- listener.setConf(conf);
listeners.add(listener);
} catch (Exception e) {
throw new MetaException("Failed to instantiate listener named: "+
Index: common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
===================================================================
--- common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (revision 1208829)
+++ common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (working copy)
@@ -92,6 +92,7 @@
HiveConf.ConfVars.METASTORE_EVENT_CLEAN_FREQ,
HiveConf.ConfVars.METASTORE_EVENT_EXPIRY_DURATION,
HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL,
+ HiveConf.ConfVars.METASTORE_END_FUNCTION_LISTENERS,
};
/**
@@ -245,6 +246,7 @@
METASTORE_DETACH_ALL_ON_COMMIT("javax.jdo.option.DetachAllOnCommit", true),
METASTORE_NON_TRANSACTIONAL_READ("javax.jdo.option.NonTransactionalRead", true),
METASTORE_CONNECTION_USER_NAME("javax.jdo.option.ConnectionUserName", "APP"),
+ METASTORE_END_FUNCTION_LISTENERS("hive.metastore.end.function.listeners", ""),
// CLI
CLIIGNOREERRORS("hive.cli.errors.ignore", false),
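
With the new ConfVars entry, end-function listeners are enabled by listing their class names under hive.metastore.end.function.listeners, typically in hive-site.xml. A small sketch of setting it programmatically instead (the listener class name is the hypothetical one from the example earlier):

import org.apache.hadoop.hive.conf.HiveConf;

public class EnableEndFunctionListeners {
  public static void main(String[] args) {
    HiveConf conf = new HiveConf();
    // Comma-separated list; each class must provide a constructor taking a
    // Configuration, which getMetaStoreListeners uses to instantiate it.
    conf.setVar(HiveConf.ConfVars.METASTORE_END_FUNCTION_LISTENERS,
        "org.example.metastore.FailureCountingEndFunctionListener");
  }
}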