diff --git metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index 02b39fb..6fd3057 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -45,7 +45,6 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.common.JavaUtils;
 import org.apache.hadoop.hive.common.LogUtils;
 import org.apache.hadoop.hive.common.LogUtils.LogInitializationException;
 import org.apache.hadoop.hive.common.classification.InterfaceAudience;
@@ -92,7 +91,6 @@ import org.apache.hadoop.hive.metastore.events.DropPartitionEvent;
 import org.apache.hadoop.hive.metastore.events.DropTableEvent;
 import org.apache.hadoop.hive.metastore.events.EventCleanerTask;
 import org.apache.hadoop.hive.metastore.events.LoadPartitionDoneEvent;
-import org.apache.hadoop.hive.metastore.hooks.JDOConnectionURLHook;
 import org.apache.hadoop.hive.metastore.model.MDBPrivilege;
 import org.apache.hadoop.hive.metastore.model.MGlobalPrivilege;
 import org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege;
@@ -127,7 +125,7 @@ import com.facebook.fb303.fb_status;
  */
 public class HiveMetaStore extends ThriftHiveMetastore {
   public static final Log LOG = LogFactory.getLog(
-      HiveMetaStore.class);
+    HiveMetaStore.class);
   /**
    * default port on which to start the Hive server
    */
@@ -148,35 +146,35 @@ public class HiveMetaStore extends ThriftHiveMetastore {
   private Warehouse wh; // hdfs warehouse
   private final ThreadLocal<RawStore> threadLocalMS = new ThreadLocal<RawStore>() {
-      @Override
-      protected synchronized RawStore initialValue() {
-        return null;
-      }
-    };
+    @Override
+    protected synchronized RawStore initialValue() {
+      return null;
+    }
+  };
 
   // Thread local configuration is needed as many threads could make changes
   // to the conf using the connection hook
   private final ThreadLocal<Configuration> threadLocalConf = new ThreadLocal<Configuration>() {
-      @Override
-      protected synchronized Configuration initialValue() {
-        return null;
-      }
-    };
+    @Override
+    protected synchronized Configuration initialValue() {
+      return null;
+    }
+  };
 
   public static final String AUDIT_FORMAT =
-    "ugi=%s\t" + // ugi
-    "ip=%s\t" + // remote IP
-    "cmd=%s\t"; // command
+      "ugi=%s\t" + // ugi
+      "ip=%s\t" + // remote IP
+      "cmd=%s\t"; // command
   public static final Log auditLog = LogFactory.getLog(
     HiveMetaStore.class.getName() + ".audit");
 
   private static final ThreadLocal<Formatter> auditFormatter =
-    new ThreadLocal<Formatter>() {
-      @Override
-      protected Formatter initialValue() {
-        return new Formatter(new StringBuilder(AUDIT_FORMAT.length() * 4));
-      }
-    };
+      new ThreadLocal<Formatter>() {
+        @Override
+        protected Formatter initialValue() {
+          return new Formatter(new StringBuilder(AUDIT_FORMAT.length() * 4));
+        }
+      };
 
   private final void logAuditEvent(String cmd) {
     if (!useSasl || cmd == null) {
@@ -190,27 +188,21 @@
       throw new RuntimeException(ex);
     }
     final Formatter fmt = auditFormatter.get();
-    ((StringBuilder)fmt.out()).setLength(0);
+    ((StringBuilder) fmt.out()).setLength(0);
     auditLog.info(fmt.format(AUDIT_FORMAT, ugi.getUserName(),
-      saslServer.getRemoteAddress().toString(), cmd).toString());
+        saslServer.getRemoteAddress().toString(), cmd).toString());
   }
 
   // The next serial number to be assigned
   private boolean checkForDefaultDb;
   private static int nextSerialNum = 0;
-  private static ThreadLocal threadLocalId = new ThreadLocal() {
+  private static ThreadLocal<Integer> threadLocalId = new ThreadLocal<Integer>() {
     @Override
-    protected synchronized Object initialValue() {
+    protected synchronized Integer initialValue() {
       return new Integer(nextSerialNum++);
     }
   };
 
-  // Used for retrying JDO calls on datastore failures
-  private int retryInterval = 0;
-  private int retryLimit = 0;
-  private JDOConnectionURLHook urlHook = null;
-  private String urlHookClassName = "";
-
   public static Integer get() {
     return threadLocalId.get();
   }
@@ -253,15 +245,6 @@
           alterHandlerName, AlterHandler.class), hiveConf);
       wh = new Warehouse(hiveConf);
-      retryInterval = HiveConf.getIntVar(hiveConf,
-          HiveConf.ConfVars.METASTOREINTERVAL);
-      retryLimit = HiveConf.getIntVar(hiveConf,
-          HiveConf.ConfVars.METASTOREATTEMPTS);
-      // Using the hook on startup ensures that the hook always has priority
-      // over settings in *.xml. We can use hiveConf as only a single thread
-      // will be calling the constructor.
-      updateConnectionURL(hiveConf, null);
-
       createDefaultDB();
 
       if (hiveConf.getBoolean("hive.metastore.metrics.enabled", false)) {
@@ -283,7 +266,7 @@
           hiveConf.getVar(HiveConf.ConfVars.METASTORE_END_FUNCTION_LISTENERS));
       long cleanFreq = hiveConf.getLongVar(ConfVars.METASTORE_EVENT_CLEAN_FREQ) * 1000L;
-      if(cleanFreq > 0){
+      if (cleanFreq > 0) {
         // In default config, there is no timer.
         Timer cleaner = new Timer("Metastore Events Cleaner Thread", true);
         cleaner.schedule(new EventCleanerTask(this), cleanFreq, cleanFreq);
@@ -295,92 +278,6 @@
       return threadLocalId.get() + ": " + s;
     }
 
-    /**
-     * A Command is a closure used to pass a block of code from individual
-     * functions to executeWithRetry, which centralizes connection error
-     * handling. Command is parameterized on the return type of the function.
-     *
-     * The general transformation is:
-     *
-     * From:
-     * String foo(int a) throws ExceptionB {
-     *
-     * }
-     *
-     * To:
-     * String foo(final int a) throws ExceptionB {
-     *   String ret = null;
-     *   try {
-     *     ret = executeWithRetry(new Command<String>() {
-     *       String run(RawStore ms) {
-     *
-     *       }
-     *     }
-     *   } catch (ExceptionB e) {
-     *     throw e;
-     *   } catch (Exception e) {
-     *     // Since run is only supposed to throw ExceptionB it could only
-     *     // be a runtime exception
-     *     throw (RuntimeException)e;
-     *   }
-     * }
-     *
-     * The catch blocks are used to ensure that the exceptions thrown by the
-     * follow the function definition.
-     */
-    @InterfaceAudience.LimitedPrivate({"HCATALOG"})
-    @InterfaceStability.Evolving
-    public static class Command<T> {
-
-      @InterfaceAudience.LimitedPrivate({"HCATALOG"})
-      @InterfaceStability.Evolving
-      public T run(RawStore ms) throws Exception {
-        return null;
-      }
-    }
-    @InterfaceAudience.LimitedPrivate({"HCATALOG"})
-    @InterfaceStability.Evolving
-    public <T> T executeWithRetry(Command<T> cmd) throws Exception {
-      T ret = null;
-
-      boolean gotNewConnectUrl = false;
-      boolean reloadConf = HiveConf.getBoolVar(hiveConf,
-          HiveConf.ConfVars.METASTOREFORCERELOADCONF);
-
-      if (reloadConf) {
-        updateConnectionURL(getConf(), null);
-      }
-
-      int retryCount = 0;
-      Exception caughtException = null;
-      while(true) {
-        try {
-          RawStore ms = getMS(reloadConf || gotNewConnectUrl);
-          ret = cmd.run(ms);
-          break;
-        } catch (javax.jdo.JDOException e) {
-          caughtException = e;
-        }
-
-        if (retryCount >= retryLimit) {
-          throw caughtException;
-        }
-
-        assert(retryInterval >= 0);
-        retryCount++;
-        LOG.error(
-            String.format(
-                "JDO datastore error. Retrying metastore command " +
-                "after %d ms (attempt %d of %d)", retryInterval, retryCount, retryLimit));
-        Thread.sleep(retryInterval);
-        // If we have a connection error, the JDO connection URL hook might
-        // provide us with a new URL to access the datastore.
-        String lastUrl = getConnectionURL(getConf());
-        gotNewConnectUrl = updateConnectionURL(getConf(), lastUrl);
-      }
-      return ret;
-    }
-
     private Configuration getConf() {
       Configuration conf = threadLocalConf.get();
       if (conf == null) {
@@ -398,85 +295,25 @@
      */
     @InterfaceAudience.LimitedPrivate({"HCATALOG"})
     @InterfaceStability.Evolving
-    public RawStore getMS(boolean reloadConf) throws MetaException {
+    public RawStore getMS() throws MetaException {
       RawStore ms = threadLocalMS.get();
       if (ms == null) {
-        LOG.info(addPrefix("Opening raw store with implemenation class:"
-            + rawStoreClassName));
-        ms = (RawStore) ReflectionUtils.newInstance(getClass(rawStoreClassName,
-            RawStore.class), getConf());
+        ms = newRawStore();
         threadLocalMS.set(ms);
         ms = threadLocalMS.get();
       }
-      if (reloadConf) {
-        ms.setConf(getConf());
-      }
-
       return ms;
     }
 
-    /**
-     * Updates the connection URL in hiveConf using the hook
-     * @return true if a new connection URL was loaded into the thread local
-     *         configuration
-     */
-    private boolean updateConnectionURL(Configuration conf, String badUrl)
-        throws MetaException {
-      String connectUrl = null;
-      String currentUrl = getConnectionURL(conf);
-      try {
-        // We always call init because the hook name in the configuration could
-        // have changed.
-        initConnectionUrlHook();
-        if (urlHook != null) {
-          if (badUrl != null) {
-            urlHook.notifyBadConnectionUrl(badUrl);
-          }
-          connectUrl = urlHook.getJdoConnectionUrl(hiveConf);
-        }
-      } catch (Exception e) {
-        LOG.error("Exception while getting connection URL from the hook: " +
-            e);
-      }
-
-      if (connectUrl != null && !connectUrl.equals(currentUrl)) {
-        LOG.error(addPrefix(
-            String.format("Overriding %s with %s",
-                HiveConf.ConfVars.METASTORECONNECTURLKEY.toString(),
-                connectUrl)));
-        conf.set(HiveConf.ConfVars.METASTORECONNECTURLKEY.toString(),
-            connectUrl);
-        return true;
-      }
-      return false;
-    }
-
-    private static String getConnectionURL(Configuration conf) {
-      return conf.get(
-          HiveConf.ConfVars.METASTORECONNECTURLKEY.toString(), "");
-    }
-
-    // Multiple threads could try to initialize at the same time.
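// A minimal sketch of the approach that supersedes the removed executeWithRetry/Command
// machinery above: the patch builds the RawStore through RetryingRawStore.getProxy(...)
// (see newRawStore() below), i.e. a dynamic proxy that retries datastore calls instead of
// wrapping each metastore method in a retry closure. RetryingRawStore itself is not part
// of this excerpt, so everything here is illustrative only; the class name, the
// JDOException trigger and the retry settings (cf. the removed
// HiveConf.ConfVars.METASTOREATTEMPTS / METASTOREINTERVAL lookups above) are assumptions.
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;

class RetryingProxySketch implements InvocationHandler {
  private final Object base;           // the real RawStore implementation
  private final int retryLimit;        // how many times to retry a failed call
  private final long retryIntervalMs;  // sleep between attempts, in milliseconds

  private RetryingProxySketch(Object base, int retryLimit, long retryIntervalMs) {
    this.base = base;
    this.retryLimit = retryLimit;
    this.retryIntervalMs = retryIntervalMs;
  }

  // Wraps an interface-typed object so that every call is retried on JDO datastore errors.
  static <T> T wrap(Class<T> iface, T base, int retryLimit, long retryIntervalMs) {
    return iface.cast(Proxy.newProxyInstance(iface.getClassLoader(),
        new Class<?>[] { iface },
        new RetryingProxySketch(base, retryLimit, retryIntervalMs)));
  }

  @Override
  public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
    int attempt = 0;
    while (true) {
      try {
        return method.invoke(base, args);
      } catch (InvocationTargetException e) {
        Throwable cause = e.getCause();
        // Retry only datastore-level failures; rethrow everything else unchanged so the
        // caller still sees the exceptions declared on the RawStore method signature.
        if (!(cause instanceof javax.jdo.JDOException) || attempt >= retryLimit) {
          throw cause;
        }
        attempt++;
        Thread.sleep(retryIntervalMs);
      }
    }
  }
}
// Hypothetical usage, by analogy with the newRawStore() introduced below:
//   RawStore ms = RetryingProxySketch.wrap(RawStore.class, rawStoreImpl, 1, 1000L);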
- synchronized private void initConnectionUrlHook() - throws ClassNotFoundException { + private RawStore newRawStore() throws MetaException { + LOG.info(addPrefix("Opening raw store with implemenation class:" + + rawStoreClassName)); + Configuration conf = getConf(); + RawStore ms = (RawStore) ReflectionUtils.newInstance(getClass(rawStoreClassName, + RawStore.class), conf); - String className = - hiveConf.get(HiveConf.ConfVars.METASTORECONNECTURLHOOK.toString(), "").trim(); - if (className.equals("")) { - urlHookClassName = ""; - urlHook = null; - return; - } - boolean urlHookChanged = !urlHookClassName.equals(className); - if (urlHook == null || urlHookChanged) { - urlHookClassName = className.trim(); - - Class urlHookClass = Class.forName(urlHookClassName, true, - JavaUtils.getClassLoader()); - urlHook = (JDOConnectionURLHook) ReflectionUtils.newInstance(urlHookClass, null); - } - return; + return RetryingRawStore.getProxy(hiveConf, conf, ms, threadLocalId.get()); } private void createDefaultDB_core(RawStore ms) throws MetaException, InvalidObjectException { @@ -489,6 +326,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { } HMSHandler.createDefaultDB = true; } + /** * create default database if it doesn't exist * @@ -500,20 +338,14 @@ public class HiveMetaStore extends ThriftHiveMetastore { } try { - executeWithRetry(new Command() { - @Override - public Boolean run(RawStore ms) throws Exception { - createDefaultDB_core(ms); - return Boolean.TRUE; - } - }); + createDefaultDB_core(getMS()); } catch (InvalidObjectException e) { throw new MetaException(e.getMessage()); } catch (MetaException e) { throw e; } catch (Exception e) { - assert(e instanceof RuntimeException); - throw (RuntimeException)e; + assert (e instanceof RuntimeException); + throw (RuntimeException) e; } } @@ -539,7 +371,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { Metrics.startScope(function); } catch (IOException e) { LOG.debug("Exception when starting metrics scope" - + e.getClass().getName() + " " + e.getMessage()); + + e.getClass().getName() + " " + e.getMessage()); MetaStoreUtils.printStackTrace(e); } return function; @@ -561,11 +393,11 @@ public class HiveMetaStore extends ThriftHiveMetastore { public String startPartitionFunction(String function, String db, String tbl, List partVals) { return startFunction(function, " : db=" + db + " tbl=" + tbl - + "[" + join(partVals, ",") + "]" ); + + "[" + join(partVals, ",") + "]"); } public String startPartitionFunction(String function, String db, String tbl, - Map partName) { + Map partName) { return startFunction(function, " : db=" + db + " tbl=" + tbl + "partition=" + partName); } @@ -625,8 +457,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { } private void create_database_core(RawStore ms, final Database db) - throws AlreadyExistsException, InvalidObjectException, MetaException, - IOException { + throws AlreadyExistsException, InvalidObjectException, MetaException { if (!validateName(db.getName())) { throw new InvalidObjectException(db.getName() + " is not a valid database name"); } @@ -666,34 +497,21 @@ public class HiveMetaStore extends ThriftHiveMetastore { public void create_database(final Database db) throws AlreadyExistsException, InvalidObjectException, MetaException { startFunction("create_database", ": " - + db.getName() + " " - + db.getLocationUri() + " " - + db.getDescription()); + + db.getName() + " " + + db.getLocationUri() + " " + + db.getDescription()); boolean success = false; try { try { - if(null != 
get_database(db.getName())) { + if (null != get_database(db.getName())) { throw new AlreadyExistsException("Database " + db.getName() + " already exists"); } } catch (NoSuchObjectException e) { // expected } - success = executeWithRetry(new Command() { - @Override - public Boolean run(RawStore ms) throws Exception { - create_database_core(ms, db); - return Boolean.TRUE; - } - }); - } catch (AlreadyExistsException e) { - throw e; - } catch (InvalidObjectException e) { - throw e; - } catch (MetaException e) { - throw e; - } catch (Exception e) { - assert(e instanceof RuntimeException); - throw (RuntimeException)e; + + create_database_core(getMS(), db); + success = true; } finally { endFunction("create_database", success); } @@ -704,19 +522,14 @@ public class HiveMetaStore extends ThriftHiveMetastore { startFunction("get_database", ": " + name); Database db = null; try { - db = executeWithRetry(new Command() { - @Override - public Database run(RawStore ms) throws Exception { - return ms.getDatabase(name); - } - }); + db = getMS().getDatabase(name); } catch (MetaException e) { throw e; } catch (NoSuchObjectException e) { throw e; } catch (Exception e) { - assert(e instanceof RuntimeException); - throw (RuntimeException)e; + assert (e instanceof RuntimeException); + throw (RuntimeException) e; } finally { endFunction("get_database", db != null); } @@ -728,21 +541,8 @@ public class HiveMetaStore extends ThriftHiveMetastore { startFunction("alter_database" + dbName); boolean success = false; try { - success = executeWithRetry(new Command() { - @Override - public Boolean run(RawStore ms) throws Exception { - return ms.alterDatabase(dbName, db); - } - }); - } catch (MetaException e) { - throw e; - } catch (NoSuchObjectException e) { - throw e; - } catch (TException e) { - throw e; - } catch (Exception e) { - assert(e instanceof RuntimeException); - throw (RuntimeException) e; + getMS().alterDatabase(dbName, db); + success = true; } finally { endFunction("alter_database", success); } @@ -794,22 +594,10 @@ public class HiveMetaStore extends ThriftHiveMetastore { boolean success = false; try { - success = executeWithRetry(new Command() { - @Override - public Boolean run(RawStore ms) throws Exception { - drop_database_core(ms, dbName, deleteData, cascade); - return Boolean.TRUE; - } - }); - } catch (NoSuchObjectException e) { - throw e; - } catch (InvalidOperationException e) { - throw e; - } catch (MetaException e) { - throw e; - } catch (Exception e) { - assert(e instanceof RuntimeException); - throw (RuntimeException)e; + drop_database_core(getMS(), dbName, deleteData, cascade); + success = true; + } catch (IOException e) { + throw new MetaException(e.getMessage()); } finally { endFunction("drop_database", success); } @@ -820,17 +608,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { List ret = null; try { - ret = executeWithRetry(new Command>() { - @Override - public List run(RawStore ms) throws Exception { - return ms.getDatabases(pattern); - } - }); - } catch (MetaException e) { - throw e; - } catch (Exception e) { - assert(e instanceof RuntimeException); - throw (RuntimeException)e; + ret = getMS().getDatabases(pattern); } finally { endFunction("get_databases", ret != null); } @@ -842,17 +620,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { List ret = null; try { - ret = executeWithRetry(new Command>() { - @Override - public List run(RawStore ms) throws Exception { - return ms.getAllDatabases(); - } - }); - } catch (MetaException e) { - throw e; - } catch (Exception e) { 
- assert(e instanceof RuntimeException); - throw (RuntimeException)e; + ret = getMS().getAllDatabases(); } finally { endFunction("get_all_databases", ret != null); } @@ -860,7 +628,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { } private void create_type_core(final RawStore ms, final Type type) - throws AlreadyExistsException, MetaException, InvalidObjectException { + throws AlreadyExistsException, MetaException, InvalidObjectException { if (!MetaStoreUtils.validateName(type.getName())) { throw new InvalidObjectException("Invalid type name"); } @@ -883,29 +651,15 @@ public class HiveMetaStore extends ThriftHiveMetastore { public boolean create_type(final Type type) throws AlreadyExistsException, MetaException, InvalidObjectException { startFunction("create_type", ": " + type.getName()); - boolean ret = false; + boolean success = false; try { - ret = executeWithRetry(new Command() { - @Override - public Boolean run(RawStore ms) throws Exception { - create_type_core(ms, type); - return Boolean.TRUE; - } - }); - } catch (AlreadyExistsException e) { - throw e; - } catch (MetaException e) { - throw e; - } catch (InvalidObjectException e) { - throw e; - } catch (Exception e) { - assert(e instanceof RuntimeException); - throw (RuntimeException)e; + create_type_core(getMS(), type); + success = true; } finally { - endFunction("create_type", ret); + endFunction("create_type", success); } - return ret; + return success; } public Type get_type(final String name) throws MetaException, NoSuchObjectException { @@ -913,23 +667,10 @@ public class HiveMetaStore extends ThriftHiveMetastore { Type ret = null; try { - ret = executeWithRetry(new Command() { - @Override - public Type run(RawStore ms) throws Exception { - Type type = ms.getType(name); - if (null == type) { - throw new NoSuchObjectException("Type \"" + name + "\" not found."); - } - return type; - } - }); - } catch (NoSuchObjectException e) { - throw e; - } catch (MetaException e) { - throw e; - } catch (Exception e) { - assert(e instanceof RuntimeException); - throw (RuntimeException)e; + ret = getMS().getType(name); + if (null == ret) { + throw new NoSuchObjectException("Type \"" + name + "\" not found."); + } } finally { endFunction("get_type", ret != null); } @@ -961,28 +702,17 @@ public class HiveMetaStore extends ThriftHiveMetastore { } } - - public boolean drop_type(final String name) throws MetaException { + public boolean drop_type(final String name) throws MetaException, NoSuchObjectException { startFunction("drop_type", ": " + name); - boolean ret = false; + boolean success = false; try { - ret = executeWithRetry(new Command() { - @Override - public Boolean run(RawStore ms) throws Exception { - // TODO:pc validate that there are no types that refer to this - return ms.dropType(name); - } - }); - } catch (MetaException e) { - throw e; - } catch (Exception e) { - assert(e instanceof RuntimeException); - throw (RuntimeException)e; + // TODO:pc validate that there are no types that refer to this + success = getMS().dropType(name); } finally { - endFunction("drop_type", ret); + endFunction("drop_type", success); } - return ret; + return success; } public Map get_type_all(String name) throws MetaException { @@ -1008,7 +738,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { try { ms.openTransaction(); - if(ms.getDatabase(tbl.getDbName()) == null){ + if (ms.getDatabase(tbl.getDbName()) == null) { throw new NoSuchObjectException("The database " + tbl.getDbName() + " does not exist"); } @@ -1020,13 +750,13 @@ public 
class HiveMetaStore extends ThriftHiveMetastore { if (!TableType.VIRTUAL_VIEW.toString().equals(tbl.getTableType())) { if (tbl.getSd().getLocation() == null - || tbl.getSd().getLocation().isEmpty()) { + || tbl.getSd().getLocation().isEmpty()) { tblPath = wh.getTablePath( ms.getDatabase(tbl.getDbName()), tbl.getTableName()); } else { if (!isExternal(tbl) && !MetaStoreUtils.isNonNativeTable(tbl)) { LOG.warn("Location: " + tbl.getSd().getLocation() - + " specified for non-external table:" + tbl.getTableName()); + + " specified for non-external table:" + tbl.getTableName()); } tblPath = wh.getDnsPath(new Path(tbl.getSd().getLocation())); } @@ -1064,9 +794,9 @@ public class HiveMetaStore extends ThriftHiveMetastore { } for (MetaStoreEventListener listener : listeners) { listener.onCreateTable(new CreateTableEvent(tbl, success, this)); + } } } - } public void create_table(final Table tbl) throws AlreadyExistsException, MetaException, InvalidObjectException { @@ -1074,24 +804,10 @@ public class HiveMetaStore extends ThriftHiveMetastore { + tbl.getTableName()); boolean success = false; try { - success = executeWithRetry(new Command() { - @Override - public Boolean run(RawStore ms) throws Exception { - create_table_core(ms, tbl); - return Boolean.TRUE; - } - }); - } catch (AlreadyExistsException e) { - throw e; - } catch (MetaException e) { - throw e; - } catch (InvalidObjectException e) { - throw e; - }catch (NoSuchObjectException e) { + create_table_core(getMS(), tbl); + success = true; + } catch (NoSuchObjectException e) { throw new InvalidObjectException(e.getMessage()); - } catch (Exception e) { - assert(e instanceof RuntimeException); - throw (RuntimeException)e; } finally { endFunction("create_table", success); } @@ -1132,7 +848,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { if (!isIndexTable) { try { List indexes = ms.getIndexes(dbname, name, Short.MAX_VALUE); - while(indexes != null && indexes.size()>0) { + while (indexes != null && indexes.size() > 0) { for (Index idx : indexes) { this.drop_index_by_name(dbname, name, idx.getIndexName(), true); } @@ -1163,11 +879,11 @@ public class HiveMetaStore extends ThriftHiveMetastore { wh.deleteDir(tblPath, true); // ok even if the data is not deleted } - for(MetaStoreEventListener listener : listeners){ + for (MetaStoreEventListener listener : listeners) { listener.onDropTable(new DropTableEvent(tbl, success, this)); + } } } - } public void drop_table(final String dbname, final String name, final boolean deleteData) throws NoSuchObjectException, MetaException { @@ -1175,20 +891,10 @@ public class HiveMetaStore extends ThriftHiveMetastore { boolean success = false; try { - success = executeWithRetry(new Command() { - @Override - public Boolean run(RawStore ms) throws Exception { - drop_table_core(ms, dbname, name, deleteData); - return Boolean.TRUE; - } - }); - } catch (NoSuchObjectException e) { - throw e; - } catch (MetaException e) { - throw e; - } catch (Exception e) { - assert(e instanceof RuntimeException); - throw (RuntimeException)e; + drop_table_core(getMS(), dbname, name, deleteData); + success = true; + } catch (IOException e) { + throw new MetaException(e.getMessage()); } finally { endFunction("drop_table", success); } @@ -1206,7 +912,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { return MetaStoreUtils.isExternalTable(table); } - private boolean isIndexTable (Table table) { + private boolean isIndexTable(Table table) { return MetaStoreUtils.isIndexTable(table); } @@ -1215,24 +921,11 @@ public class 
HiveMetaStore extends ThriftHiveMetastore { Table t = null; startTableFunction("get_table", dbname, name); try { - t = executeWithRetry(new Command() { - @Override - public Table run(RawStore ms) throws Exception { - Table t = ms.getTable(dbname, name); - if (t == null) { - throw new NoSuchObjectException(dbname + "." + name - + " table not found"); - } - return t; - } - }); - } catch (NoSuchObjectException e) { - throw e; - } catch (MetaException e) { - throw e; - } catch (Exception e) { - assert(e instanceof RuntimeException); - throw (RuntimeException)e; + t = getMS().getTable(dbname, name); + if (t == null) { + throw new NoSuchObjectException(dbname + "." + name + + " table not found"); + } } finally { endFunction("get_table", t != null); } @@ -1241,46 +934,34 @@ public class HiveMetaStore extends ThriftHiveMetastore { /** * Gets multiple tables from the hive metastore. + * * @param dbname - * The name of the database in which the tables reside + * The name of the database in which the tables reside * @param names - * The names of the tables to get. + * The names of the tables to get. * * @return A list of tables whose names are in the the list "names" and - * are retrievable from the database specified by "dbnames." - * There is no guarantee of the order of the returned tables. - * If there are duplicate names, only one instance of the table will be returned. + * are retrievable from the database specified by "dbnames." + * There is no guarantee of the order of the returned tables. + * If there are duplicate names, only one instance of the table will be returned. * @throws MetaException * @throws InvalidOperationException * @throws UnknownDBException */ public List
get_table_objects_by_name(final String dbname, final List names) - throws MetaException, InvalidOperationException, UnknownDBException { + throws MetaException, InvalidOperationException, UnknownDBException { List
tables = null; startMultiTableFunction("get_multi_table", dbname, names); try { - tables = executeWithRetry(new Command>() { - @Override - public List
run(RawStore ms) throws Exception { - if (dbname == null || dbname.isEmpty()) { - throw new UnknownDBException("DB name is null or empty"); - } - if (names == null) - { - throw new InvalidOperationException(dbname + " cannot find null tables"); - } - List
foundTables = ms.getTableObjectsByName(dbname, names); - return foundTables; - } - }); - } catch (MetaException e) { - throw e; - } catch (InvalidOperationException e) { - throw e; - } catch (UnknownDBException e) { - throw e; - } catch (Exception e) { - throw new MetaException(e.toString()); + + if (dbname == null || dbname.isEmpty()) { + throw new UnknownDBException("DB name is null or empty"); + } + if (names == null) + { + throw new InvalidOperationException(dbname + " cannot find null tables"); + } + tables = getMS().getTableObjectsByName(dbname, names); } finally { endFunction("get_multi_table", tables != null); } @@ -1294,27 +975,13 @@ public class HiveMetaStore extends ThriftHiveMetastore { List tables = null; startFunction("get_table_names_by_filter", ": db = " + dbName + ", filter = " + filter); try { - tables = executeWithRetry(new Command>() { - @Override - public List run(RawStore ms) throws Exception { - if (dbName == null || dbName.isEmpty()) { - throw new UnknownDBException("DB name is null or empty"); - } - if (filter == null) { - throw new InvalidOperationException(filter + " cannot apply null filter"); - } - List tables = ms.listTableNamesByFilter(dbName, filter, maxTables); - return tables; - } - }); - } catch (MetaException e) { - throw e; - } catch (InvalidOperationException e) { - throw e; - } catch (UnknownDBException e) { - throw e; - } catch (Exception e) { - throw new MetaException(e.toString()); + if (dbName == null || dbName.isEmpty()) { + throw new UnknownDBException("DB name is null or empty"); + } + if (filter == null) { + throw new InvalidOperationException(filter + " cannot apply null filter"); + } + tables = getMS().listTableNamesByFilter(dbName, filter, maxTables); } finally { endFunction("get_table_names_by_filter", tables != null); } @@ -1347,7 +1014,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { "Unable to add partition because table or database do not exist"); } if (tbl.getSd().getLocation() == null) { - throw new MetaException( + throw new MetaException( "Cannot append a partition to a view"); } @@ -1361,7 +1028,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { Partition old_part = null; try { old_part = ms.getPartition(part.getDbName(), part - .getTableName(), part.getValues()); + .getTableName(), part.getValues()); } catch (NoSuchObjectException e) { // this means there is no existing partition old_part = null; @@ -1410,21 +1077,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { Partition ret = null; try { - ret = executeWithRetry(new Command() { - @Override - public Partition run(RawStore ms) throws Exception { - return append_partition_common(ms, dbName, tableName, part_vals); - } - }); - } catch (MetaException e) { - throw e; - } catch (InvalidObjectException e) { - throw e; - } catch (AlreadyExistsException e) { - throw e; - } catch (Exception e) { - assert(e instanceof RuntimeException); - throw (RuntimeException)e; + ret = append_partition_common(getMS(), dbName, tableName, part_vals); } finally { endFunction("append_partition", ret != null); } @@ -1438,20 +1091,20 @@ public class HiveMetaStore extends ThriftHiveMetastore { logInfo("add_partitions : db=" + db + " tbl=" + tbl); boolean success = false; - Map addedPartitions = new HashMap(); + Map addedPartitions = new HashMap(); try { ms.openTransaction(); for (Partition part : parts) { - Entry e = add_partition_core_notxn(ms,part); - addedPartitions.put(e.getKey(),e.getValue()); + Entry e = add_partition_core_notxn(ms, part); + 
addedPartitions.put(e.getKey(), e.getValue()); } success = true; ms.commitTransaction(); } finally { if (!success) { ms.rollbackTransaction(); - for (Entry e : addedPartitions.entrySet()){ - if (e.getValue()){ + for (Entry e : addedPartitions.entrySet()) { + if (e.getValue()) { wh.deleteDir(new Path(e.getKey().getSd().getLocation()), true); // we just created this directory - it's not a case of pre-creation, so we nuke } @@ -1470,22 +1123,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { Integer ret = null; try { - ret = executeWithRetry(new Command() { - @Override - public Integer run(RawStore ms) throws Exception { - int ret = add_partitions_core(ms, parts); - return Integer.valueOf(ret); - } - }); - } catch (InvalidObjectException e) { - throw e; - } catch (AlreadyExistsException e) { - throw e; - } catch (MetaException e) { - throw e; - } catch (Exception e) { - assert(e instanceof RuntimeException); - throw (RuntimeException)e; + ret = add_partitions_core(getMS(), parts); } finally { endFunction("add_partition", ret != null); } @@ -1496,6 +1134,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { * An implementation of add_partition_core that does not commit * transaction or rollback transaction as part of its operation * - it is assumed that will be tended to from outside this call + * * @param ms * @param part * @return @@ -1503,17 +1142,17 @@ public class HiveMetaStore extends ThriftHiveMetastore { * @throws AlreadyExistsException * @throws MetaException */ - private Entry add_partition_core_notxn( + private Entry add_partition_core_notxn( final RawStore ms, final Partition part) - throws InvalidObjectException, AlreadyExistsException, MetaException { + throws InvalidObjectException, AlreadyExistsException, MetaException { boolean success = false, madeDir = false; Path partLocation = null; try { Partition old_part = null; try { old_part = ms.getPartition(part.getDbName(), part - .getTableName(), part.getValues()); - } catch(NoSuchObjectException e) { + .getTableName(), part.getValues()); + } catch (NoSuchObjectException e) { // this means there is no existing partition old_part = null; } @@ -1537,13 +1176,13 @@ public class HiveMetaStore extends ThriftHiveMetastore { // a physical table partition (not a view) if (tbl.getSd().getLocation() != null) { partLocation = new Path(tbl.getSd().getLocation(), Warehouse - .makePartName(tbl.getPartitionKeys(), part.getValues())); + .makePartName(tbl.getPartitionKeys(), part.getValues())); } } else { if (tbl.getSd().getLocation() == null) { throw new MetaException( - "Cannot specify location for a view partition"); + "Cannot specify location for a view partition"); } partLocation = wh.getDnsPath(new Path(partLocationStr)); } @@ -1558,7 +1197,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { if (!wh.isDir(partLocation)) { if (!wh.mkdirs(partLocation)) { throw new MetaException(partLocation - + " is not a directory or unable to create one"); + + " is not a directory or unable to create one"); } madeDir = true; } @@ -1599,27 +1238,27 @@ public class HiveMetaStore extends ThriftHiveMetastore { wh.deleteDir(partLocation, true); } } - for(MetaStoreEventListener listener : listeners){ + for (MetaStoreEventListener listener : listeners) { listener.onAddPartition(new AddPartitionEvent(part, success, this)); } } - Map returnVal = new HashMap(); + Map returnVal = new HashMap(); returnVal.put(part, madeDir); return returnVal.entrySet().iterator().next(); } private Partition add_partition_core(final RawStore ms, final 
Partition part) - throws InvalidObjectException, AlreadyExistsException, MetaException { + throws InvalidObjectException, AlreadyExistsException, MetaException { boolean success = false; Partition retPtn = null; - try{ + try { ms.openTransaction(); - retPtn = add_partition_core_notxn(ms,part).getKey(); + retPtn = add_partition_core_notxn(ms, part).getKey(); // we proceed only if we'd actually succeeded anyway, otherwise, // we'd have thrown an exception success = ms.commitTransaction(); - }finally{ - if (!success){ + } finally { + if (!success) { ms.rollbackTransaction(); } } @@ -1632,21 +1271,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { Partition ret = null; try { - ret = executeWithRetry(new Command() { - @Override - public Partition run(RawStore ms) throws Exception { - return add_partition_core(ms, part); - } - }); - } catch (InvalidObjectException e) { - throw e; - } catch (AlreadyExistsException e) { - throw e; - } catch (MetaException e) { - throw e; - } catch (Exception e) { - assert(e instanceof RuntimeException); - throw (RuntimeException)e; + ret = add_partition_core(getMS(), part); } finally { endFunction("add_partition", ret != null); } @@ -1656,7 +1281,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { private boolean drop_partition_common(RawStore ms, String db_name, String tbl_name, List part_vals, final boolean deleteData) - throws MetaException, NoSuchObjectException, IOException { + throws MetaException, NoSuchObjectException, IOException { boolean success = false; Path partPath = null; @@ -1704,21 +1329,22 @@ public class HiveMetaStore extends ThriftHiveMetastore { // Archived partitions have har:/to_har_file as their location. // The original directory was saved in params if (isArchived) { - assert(archiveParentDir != null); + assert (archiveParentDir != null); wh.deleteDir(archiveParentDir, true); } else { - assert(partPath != null); + assert (partPath != null); wh.deleteDir(partPath, true); } // ok even if the data is not deleted } } - for(MetaStoreEventListener listener : listeners){ + for (MetaStoreEventListener listener : listeners) { listener.onDropPartition(new DropPartitionEvent(part, success, this)); - } + } } return true; } + public boolean drop_partition(final String db_name, final String tbl_name, final List part_vals, final boolean deleteData) throws NoSuchObjectException, MetaException, TException { @@ -1727,22 +1353,9 @@ public class HiveMetaStore extends ThriftHiveMetastore { boolean ret = false; try { - ret = executeWithRetry(new Command() { - @Override - public Boolean run(RawStore ms) throws Exception { - return Boolean.valueOf( - drop_partition_common(ms, db_name, tbl_name, part_vals, deleteData)); - } - }); - } catch (MetaException e) { - throw e; - } catch (NoSuchObjectException e) { - throw e; - } catch (TException e) { - throw e; - } catch (Exception e) { - assert(e instanceof RuntimeException); - throw (RuntimeException)e; + ret = drop_partition_common(getMS(), db_name, tbl_name, part_vals, deleteData); + } catch (IOException e) { + throw new MetaException(e.getMessage()); } finally { endFunction("drop_partition", ret); } @@ -1756,19 +1369,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { Partition ret = null; try { - ret = executeWithRetry(new Command() { - @Override - public Partition run(RawStore ms) throws Exception { - return ms.getPartition(db_name, tbl_name, part_vals); - } - }); - } catch (MetaException e) { - throw e; - } catch (NoSuchObjectException e) { - throw e; - } catch (Exception e) { - 
assert(e instanceof RuntimeException); - throw (RuntimeException)e; + ret = getMS().getPartition(db_name, tbl_name, part_vals); } finally { endFunction("get_partition", ret != null); } @@ -1785,20 +1386,10 @@ public class HiveMetaStore extends ThriftHiveMetastore { Partition ret = null; try { - ret = executeWithRetry(new Command() { - @Override - public Partition run(RawStore ms) throws Exception { - return ms.getPartitionWithAuth(db_name, tbl_name, part_vals, - user_name, group_names); - } - }); - } catch (MetaException e) { - throw e; - } catch (NoSuchObjectException e) { - throw e; - } catch (Exception e) { - assert (e instanceof RuntimeException); - throw (RuntimeException) e; + ret = getMS().getPartitionWithAuth(db_name, tbl_name, part_vals, + user_name, group_names); + } catch (InvalidObjectException e) { + throw new NoSuchObjectException(e.getMessage()); } finally { endFunction("get_partition_with_auth", ret != null); } @@ -1811,19 +1402,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { List ret = null; try { - ret = executeWithRetry(new Command>() { - @Override - public List run(RawStore ms) throws Exception { - return ms.getPartitions(db_name, tbl_name, max_parts); - } - }); - } catch (MetaException e) { - throw e; - } catch (NoSuchObjectException e) { - throw e; - } catch (Exception e) { - assert(e instanceof RuntimeException); - throw (RuntimeException)e; + ret = getMS().getPartitions(db_name, tbl_name, max_parts); } finally { endFunction("get_partitions", ret != null); } @@ -1840,20 +1419,10 @@ public class HiveMetaStore extends ThriftHiveMetastore { List ret = null; try { - ret = executeWithRetry(new Command>() { - @Override - public List run(RawStore ms) throws Exception { - return ms.getPartitionsWithAuth(dbName, tblName, maxParts, + ret = getMS().getPartitionsWithAuth(dbName, tblName, maxParts, userName, groupNames); - } - }); - } catch (MetaException e) { - throw e; - } catch (NoSuchObjectException e) { - throw e; - } catch (Exception e) { - assert (e instanceof RuntimeException); - throw (RuntimeException) e; + } catch (InvalidObjectException e) { + throw new NoSuchObjectException(e.getMessage()); } finally { endFunction("get_partitions_with_auth", ret != null); } @@ -1867,17 +1436,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { List ret = null; try { - ret = executeWithRetry(new Command>() { - @Override - public List run(RawStore ms) throws Exception { - return ms.listPartitionNames(db_name, tbl_name, max_parts); - } - }); - } catch (MetaException e) { - throw e; - } catch (Exception e) { - assert(e instanceof RuntimeException); - throw (RuntimeException)e; + ret = getMS().listPartitionNames(db_name, tbl_name, max_parts); } finally { endFunction("get_partition_names", ret != null); } @@ -1896,45 +1455,31 @@ public class HiveMetaStore extends ThriftHiveMetastore { throws InvalidOperationException, MetaException, TException { startTableFunction("alter_partition", db_name, tbl_name); - LOG.info("New partition values:" + new_part.getValues()); - if (part_vals != null && part_vals.size() > 0) { - LOG.info("Old Partition values:" + part_vals); + + if(LOG.isInfoEnabled()) { + LOG.info("New partition values:" + new_part.getValues()); + if (part_vals != null && part_vals.size() > 0) { + LOG.info("Old Partition values:" + part_vals); + } } Partition oldPart = null; try { - oldPart = - executeWithRetry(new Command() { - @Override - public Partition run(RawStore ms) throws Exception { - return alterHandler.alterPartition(ms, wh, db_name, tbl_name, 
part_vals, new_part); - } - }); + oldPart = alterHandler.alterPartition(getMS(), wh, db_name, tbl_name, part_vals, new_part); + for (MetaStoreEventListener listener : listeners) { listener.onAlterPartition(new AlterPartitionEvent(oldPart, new_part, true, this)); } - } catch (NoSuchObjectException e){ - //old partition does not exist - throw new InvalidOperationException("alter is not possible"); } catch (InvalidObjectException e) { throw new InvalidOperationException(e.getMessage()); } catch (AlreadyExistsException e) { throw new InvalidOperationException(e.getMessage()); - } catch (MetaException e) { - throw e; - } catch (TException e) { - throw e; - } catch (Exception e) { - assert(e instanceof RuntimeException); - throw (RuntimeException)e; } finally { endFunction("alter_partition", oldPart != null); } return; } - - public boolean create_index(Index index_def) throws IndexAlreadyExistsException, MetaException { endFunction(startFunction("create_index"), false); @@ -1952,22 +1497,12 @@ public class HiveMetaStore extends ThriftHiveMetastore { boolean success = false; try { - success = executeWithRetry(new Command() { - @Override - public Boolean run(RawStore ms) throws Exception { - ms.alterIndex(dbname, base_table_name, index_name, newIndex); - return Boolean.TRUE; - } - }); - } catch (MetaException e) { - throw e; - } catch (InvalidOperationException e) { - throw e; - } catch (Exception e) { - assert(e instanceof RuntimeException); - throw (RuntimeException)e; + getMS().alterIndex(dbname, base_table_name, index_name, newIndex); + success = true; + } catch (InvalidObjectException e) { + throw new InvalidOperationException(e.getMessage()); } finally { - endFunction("alter_index", false); + endFunction("alter_index", success); } return; } @@ -1991,26 +1526,15 @@ public class HiveMetaStore extends ThriftHiveMetastore { boolean success = false; try { Table oldt = get_table(dbname, name); - success = executeWithRetry(new Command() { - @Override - public Boolean run(RawStore ms) throws Exception { - alterHandler.alterTable(ms, wh, dbname, name, newTable); - return Boolean.TRUE; - } - }); + alterHandler.alterTable(getMS(), wh, dbname, name, newTable); + success = true; + for (MetaStoreEventListener listener : listeners) { listener.onAlterTable(new AlterTableEvent(oldt, newTable, success, this)); } - } catch (MetaException e) { - throw e; - } catch (InvalidOperationException e) { - throw e; } catch (NoSuchObjectException e) { - //thrown when the table to be altered does not exist + // thrown when the table to be altered does not exist throw new InvalidOperationException(e.getMessage()); - } catch (Exception e) { - assert(e instanceof RuntimeException); - throw (RuntimeException)e; } finally { endFunction("alter_table", success); } @@ -2022,17 +1546,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { List ret = null; try { - ret = executeWithRetry(new Command>() { - @Override - public List run(RawStore ms) throws Exception { - return ms.getTables(dbname, pattern); - } - }); - } catch (MetaException e) { - throw e; - } catch (Exception e) { - assert(e instanceof RuntimeException); - throw (RuntimeException)e; + ret = getMS().getTables(dbname, pattern); } finally { endFunction("get_tables", ret != null); } @@ -2044,17 +1558,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { List ret = null; try { - ret = executeWithRetry(new Command>() { - @Override - public List run(RawStore ms) throws Exception { - return ms.getAllTables(dbname); - } - }); - } catch (MetaException e) { - 
throw e; - } catch (Exception e) { - assert(e instanceof RuntimeException); - throw (RuntimeException)e; + ret = getMS().getAllTables(dbname); } finally { endFunction("get_all_tables", ret != null); } @@ -2196,7 +1700,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { } List partVals = new ArrayList(); - for(FieldSchema field : t.getPartitionKeys()) { + for (FieldSchema field : t.getPartitionKeys()) { String key = field.getName(); String val = hm.get(key); if (val == null) { @@ -2234,21 +1738,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { Partition ret = null; try { - ret = executeWithRetry(new Command() { - @Override - public Partition run(RawStore ms) throws Exception { - return get_partition_by_name_core(ms, db_name, tbl_name, part_name); - } - }); - } catch (MetaException e) { - throw e; - } catch (NoSuchObjectException e) { - throw e; - } catch (TException e) { - throw e; - } catch (Exception e) { - assert(e instanceof RuntimeException); - throw (RuntimeException)e; + ret = get_partition_by_name_core(getMS(), db_name, tbl_name, part_name); } finally { endFunction("get_partition_by_name", ret != null); } @@ -2263,24 +1753,9 @@ public class HiveMetaStore extends ThriftHiveMetastore { Partition ret = null; try { - ret = executeWithRetry(new Command() { - @Override - public Partition run(RawStore ms) throws Exception { - List partVals = getPartValsFromName(ms, db_name, tbl_name, part_name); - return append_partition_common(ms, db_name, tbl_name, partVals); - } - }); - } catch (InvalidObjectException e) { - throw e; - } catch (AlreadyExistsException e) { - throw e; - } catch (MetaException e) { - throw e; - } catch (TException e) { - throw e; - } catch (Exception e) { - assert(e instanceof RuntimeException); - throw (RuntimeException)e; + RawStore ms = getMS(); + List partVals = getPartValsFromName(ms, db_name, tbl_name, part_name); + ret = append_partition_common(ms, db_name, tbl_name, partVals); } finally { endFunction("append_partition_by_name", ret != null); } @@ -2311,22 +1786,10 @@ public class HiveMetaStore extends ThriftHiveMetastore { boolean ret = false; try { - ret = executeWithRetry(new Command() { - @Override - public Boolean run(RawStore ms) throws Exception { - return drop_partition_by_name_core(ms, db_name, tbl_name, + ret = drop_partition_by_name_core(getMS(), db_name, tbl_name, part_name, deleteData); - } - }); - } catch (NoSuchObjectException e) { - throw e; - } catch (MetaException e) { - throw e; - } catch (TException e) { - throw e; - } catch (Exception e) { - assert(e instanceof RuntimeException); - throw (RuntimeException)e; + } catch (IOException e) { + throw new MetaException(e.getMessage()); } finally { endFunction("drop_partition_by_name", ret); } @@ -2344,8 +1807,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { try { ret = get_partitions_ps_with_auth(db_name, tbl_name, part_vals, max_parts, null, null); - } - finally { + } finally { endFunction("get_partitions_ps", ret != null); } @@ -2361,22 +1823,10 @@ public class HiveMetaStore extends ThriftHiveMetastore { part_vals); List ret = null; try { - ret = executeWithRetry(new Command>() { - @Override - public List run(RawStore ms) throws Exception { - return ms.listPartitionsPsWithAuth(db_name, tbl_name, part_vals, max_parts, + ret = getMS().listPartitionsPsWithAuth(db_name, tbl_name, part_vals, max_parts, userName, groupNames); - } - }); - } catch (MetaException e) { - throw e; } catch (InvalidObjectException e) { - throw new MetaException(e.getMessage()); - } catch 
(NoSuchObjectException e) { - throw e; - } catch (Exception e) { - assert(e instanceof RuntimeException); - throw (RuntimeException)e; + throw new MetaException(e.getMessage()); } finally { endFunction("get_partitions_ps_with_auth", ret != null); } @@ -2390,19 +1840,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { startPartitionFunction("get_partitions_names_ps", db_name, tbl_name, part_vals); List ret = null; try { - ret = executeWithRetry(new Command>() { - @Override - public List run(RawStore ms) throws Exception { - return ms.listPartitionNamesPs(db_name, tbl_name, part_vals, max_parts); - } - }); - } catch (MetaException e) { - throw e; - } catch (NoSuchObjectException e) { - throw e; - } catch (Exception e) { - assert(e instanceof RuntimeException); - throw (RuntimeException)e; + ret = getMS().listPartitionNamesPs(db_name, tbl_name, part_vals, max_parts); } finally { endFunction("get_partitions_names_ps", ret != null); } @@ -2437,21 +1875,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { + newIndex.getOrigTableName() + " index=" + newIndex.getIndexName()); Index ret = null; try { - ret = executeWithRetry(new Command() { - @Override - public Index run(RawStore ms) throws Exception { - return add_index_core(ms, newIndex, indexTable); - } - }); - } catch (InvalidObjectException e) { - throw e; - } catch (AlreadyExistsException e) { - throw e; - } catch (MetaException e) { - throw e; - } catch (Exception e) { - assert(e instanceof RuntimeException); - throw (RuntimeException)e; + ret = add_index_core(getMS(), newIndex, indexTable); } finally { endFunction("add_index", ret != null); } @@ -2524,22 +1948,10 @@ public class HiveMetaStore extends ThriftHiveMetastore { boolean ret = false; try { - ret = executeWithRetry(new Command() { - @Override - public Boolean run(RawStore ms) throws Exception { - return drop_index_by_name_core(ms, dbName, tblName, + ret = drop_index_by_name_core(getMS(), dbName, tblName, indexName, deleteData); - } - }); - } catch (NoSuchObjectException e) { - throw e; - } catch (MetaException e) { - throw e; - } catch (TException e) { - throw e; - } catch (Exception e) { - assert(e instanceof RuntimeException); - throw (RuntimeException)e; + } catch (IOException e) { + throw new MetaException(e.getMessage()); } finally { endFunction("drop_index_by_name", ret); } @@ -2557,7 +1969,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { try { ms.openTransaction(); - //drop the underlying index table + // drop the underlying index table Index index = get_index_by_name(dbName, tblName, indexName); if (index == null) { throw new NoSuchObjectException(indexName + " doesn't exist"); @@ -2609,21 +2021,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { Index ret = null; try { - ret = executeWithRetry(new Command() { - @Override - public Index run(RawStore ms) throws Exception { - return get_index_by_name_core(ms, dbName, tblName, indexName); - } - }); - } catch (MetaException e) { - throw e; - } catch (NoSuchObjectException e) { - throw e; - } catch (TException e) { - throw e; - } catch (Exception e) { - assert(e instanceof RuntimeException); - throw (RuntimeException)e; + ret = get_index_by_name_core(getMS(), dbName, tblName, indexName); } finally { endFunction("drop_index_by_name", ret != null); } @@ -2649,17 +2047,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { List ret = null; try { - ret = executeWithRetry(new Command>() { - @Override - public List run(RawStore ms) throws Exception { - return ms.listIndexNames(dbName, 
tblName, maxIndexes); - } - }); - } catch (MetaException e) { - throw e; - } catch (Exception e) { - assert(e instanceof RuntimeException); - throw (RuntimeException)e; + ret = getMS().listIndexNames(dbName, tblName, maxIndexes); } finally { endFunction("get_index_names", ret != null); } @@ -2674,17 +2062,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { List ret = null; try { - ret = executeWithRetry(new Command>() { - @Override - public List run(RawStore ms) throws Exception { - return ms.getIndexes(dbName, tblName, maxIndexes); - } - }); - } catch (MetaException e) { - throw e; - } catch (Exception e) { - assert(e instanceof RuntimeException); - throw (RuntimeException)e; + ret = getMS().getIndexes(dbName, tblName, maxIndexes); } finally { endFunction("get_indexes", ret != null); } @@ -2699,19 +2077,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { List ret = null; try { - ret = executeWithRetry(new Command>() { - @Override - public List run(RawStore ms) throws Exception { - return ms.getPartitionsByFilter(dbName, tblName, filter, maxParts); - } - }); - } catch (MetaException e) { - throw e; - } catch (NoSuchObjectException e) { - throw e; - } catch (Exception e) { - assert(e instanceof RuntimeException); - throw (RuntimeException)e; + ret = getMS().getPartitionsByFilter(dbName, tblName, filter, maxParts); } finally { endFunction("get_partitions_by_filter", ret != null); } @@ -2727,19 +2093,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { List ret = null; try { - ret = executeWithRetry(new Command>() { - @Override - public List run(RawStore ms) throws Exception { - return ms.getPartitionsByNames(dbName, tblName, partNames); - } - }); - } catch (MetaException e) { - throw e; - } catch (NoSuchObjectException e) { - throw e; - } catch (Exception e) { - assert(e instanceof RuntimeException); - throw (RuntimeException)e; + ret = getMS().getPartitionsByNames(dbName, tblName, partNames); } finally { endFunction("get_partitions_by_names", ret != null); } @@ -2795,13 +2149,8 @@ public class HiveMetaStore extends ThriftHiveMetastore { PrincipalPrivilegeSet ret = null; try { - ret = executeWithRetry(new Command() { - @Override - public PrincipalPrivilegeSet run(RawStore ms) throws Exception { - return ms.getColumnPrivilegeSet( - dbName, tableName, partName, columnName, userName, groupNames); - } - }); + ret = getMS().getColumnPrivilegeSet( + dbName, tableName, partName, columnName, userName, groupNames); } catch (MetaException e) { throw e; } catch (Exception e) { @@ -2817,12 +2166,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { PrincipalPrivilegeSet ret = null; try { - ret = executeWithRetry(new Command() { - @Override - public PrincipalPrivilegeSet run(RawStore ms) throws Exception { - return ms.getDBPrivilegeSet(dbName, userName, groupNames); - } - }); + ret = getMS().getDBPrivilegeSet(dbName, userName, groupNames); } catch (MetaException e) { throw e; } catch (Exception e) { @@ -2839,13 +2183,8 @@ public class HiveMetaStore extends ThriftHiveMetastore { PrincipalPrivilegeSet ret = null; try { - ret = executeWithRetry(new Command() { - @Override - public PrincipalPrivilegeSet run(RawStore ms) throws Exception { - return ms.getPartitionPrivilegeSet(dbName, tableName, partName, + ret = getMS().getPartitionPrivilegeSet(dbName, tableName, partName, userName, groupNames); - } - }); } catch (MetaException e) { throw e; } catch (Exception e) { @@ -2861,13 +2200,8 @@ public class HiveMetaStore extends ThriftHiveMetastore { PrincipalPrivilegeSet ret = 
null; try { - ret = executeWithRetry(new Command() { - @Override - public PrincipalPrivilegeSet run(RawStore ms) throws Exception { - return ms.getTablePrivilegeSet(dbName, tableName, userName, + ret = getMS().getTablePrivilegeSet(dbName, tableName, userName, groupNames); - } - }); } catch (MetaException e) { throw e; } catch (Exception e) { @@ -2885,13 +2219,9 @@ public class HiveMetaStore extends ThriftHiveMetastore { Boolean ret = null; try { - ret = executeWithRetry(new Command() { - @Override - public Boolean run(RawStore ms) throws Exception { - Role role = ms.getRole(roleName); - return ms.grantRole(role, userName, principalType, grantor, grantorType, grantOption); - } - }); + RawStore ms = getMS(); + Role role = ms.getRole(roleName); + ret = ms.grantRole(role, userName, principalType, grantor, grantorType, grantOption); } catch (MetaException e) { throw e; } catch (Exception e) { @@ -2906,26 +2236,23 @@ public class HiveMetaStore extends ThriftHiveMetastore { List ret = null; try { - ret = executeWithRetry(new Command>() { - @Override - public List run(RawStore ms) throws Exception { - List result = new ArrayList(); - List roleMap = ms.listRoles(principalName, principalType); - if (roleMap!=null) { - for (MRoleMap role : roleMap) { - MRole r = role.getRole(); - result.add(new Role(r.getRoleName(), r - .getCreateTime(), r.getOwnerName())); - } - } - return result; + + List result = new ArrayList(); + List roleMap = getMS().listRoles(principalName, principalType); + if (roleMap != null) { + for (MRoleMap role : roleMap) { + MRole r = role.getRole(); + result.add(new Role(r.getRoleName(), r + .getCreateTime(), r.getOwnerName())); } - }); + } + ret = result; } catch (MetaException e) { throw e; } catch (Exception e) { throw new RuntimeException(e); } + return ret; } @@ -2936,13 +2263,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { Boolean ret = null; try { - - ret = executeWithRetry(new Command() { - @Override - public Boolean run(RawStore ms) throws Exception { - return ms.addRole(role.getRoleName(), role.getOwnerName()); - } - }); + ret = getMS().addRole(role.getRoleName(), role.getOwnerName()); } catch (MetaException e) { throw e; } catch (Exception e) { @@ -2958,12 +2279,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { Boolean ret = null; try { - ret = executeWithRetry(new Command() { - @Override - public Boolean run(RawStore ms) throws Exception { - return ms.removeRole(roleName); - } - }); + ret = getMS().removeRole(roleName); } catch (MetaException e) { throw e; } catch (Exception e) { @@ -2978,17 +2294,11 @@ public class HiveMetaStore extends ThriftHiveMetastore { List ret = null; try { - ret = executeWithRetry(new Command>() { - @Override - public List run(RawStore ms) throws Exception { - return ms.listRoleNames(); - } - }); + ret = getMS().listRoleNames(); } catch (MetaException e) { throw e; } catch (Exception e) { - assert(e instanceof RuntimeException); - throw (RuntimeException)e; + throw new RuntimeException(e); } return ret; } @@ -3000,14 +2310,8 @@ public class HiveMetaStore extends ThriftHiveMetastore { Boolean ret = null; try { - ret = executeWithRetry(new Command() { - @Override - public Boolean run(RawStore ms) throws Exception { - return ms.grantPrivileges(privileges); - } - }); + ret = getMS().grantPrivileges(privileges); } catch (MetaException e) { - e.printStackTrace(); throw e; } catch (Exception e) { throw new RuntimeException(e); @@ -3022,13 +2326,9 @@ public class HiveMetaStore extends ThriftHiveMetastore { Boolean ret = null; 
try { - ret = executeWithRetry(new Command() { - @Override - public Boolean run(RawStore ms) throws Exception { - Role mRole = ms.getRole(roleName); - return ms.revokeRole(mRole, userName, principalType); - } - }); + RawStore ms = getMS(); + Role mRole = ms.getRole(roleName); + ret = ms.revokeRole(mRole, userName, principalType); } catch (MetaException e) { throw e; } catch (Exception e) { @@ -3044,12 +2344,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { Boolean ret = null; try { - ret = executeWithRetry(new Command() { - @Override - public Boolean run(RawStore ms) throws Exception { - return ms.revokePrivileges(privileges); - } - }); + ret = getMS().revokePrivileges(privileges); } catch (MetaException e) { throw e; } catch (Exception e) { @@ -3064,12 +2359,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { PrincipalPrivilegeSet ret = null; try { - ret = executeWithRetry(new Command() { - @Override - public PrincipalPrivilegeSet run(RawStore ms) throws Exception { - return ms.getUserPrivilegeSet(userName, groupNames); - } - }); + ret = getMS().getUserPrivilegeSet(userName, groupNames); } catch (MetaException e) { throw e; } catch (Exception e) { @@ -3078,7 +2368,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { return ret; } - public PrincipalType getPrincipalType (String principalType) { + public PrincipalType getPrincipalType(String principalType) { return PrincipalType.valueOf(principalType); } @@ -3114,64 +2404,60 @@ public class HiveMetaStore extends ThriftHiveMetastore { List ret = null; try { - ret = executeWithRetry(new Command>() { - @Override - public List run(RawStore ms) throws Exception { - String partName = null; - if (partValues != null && partValues.size()>0) { - Table tbl = get_table(dbName, tableName); - partName = Warehouse.makePartName(tbl.getPartitionKeys(), partValues); + RawStore ms = getMS(); + String partName = null; + if (partValues != null && partValues.size() > 0) { + Table tbl = get_table(dbName, tableName); + partName = Warehouse.makePartName(tbl.getPartitionKeys(), partValues); + } + + List result = Collections. emptyList(); + + if (partName != null) { + Partition part = null; + part = get_partition_by_name(dbName, tableName, partName); + List mPartitionCols = ms.listPrincipalPartitionColumnGrants( + principalName, + principalType, dbName, tableName, partName, columnName); + if (mPartitionCols.size() > 0) { + result = new ArrayList(); + for (int i = 0; i < mPartitionCols.size(); i++) { + MPartitionColumnPrivilege sCol = mPartitionCols.get(i); + HiveObjectRef objectRef = new HiveObjectRef( + HiveObjectType.COLUMN, dbName, tableName, + part == null ? 
null : part.getValues(), sCol + .getColumnName()); + HiveObjectPrivilege secObj = new HiveObjectPrivilege(objectRef, + sCol.getPrincipalName(), principalType, + new PrivilegeGrantInfo(sCol.getPrivilege(), sCol + .getCreateTime(), sCol.getGrantor(), PrincipalType + .valueOf(sCol.getGrantorType()), sCol.getGrantOption())); + result.add(secObj); } - - List result = Collections.emptyList(); - - if (partName != null) { - Partition part = null; - part = get_partition_by_name(dbName, tableName, partName); - List mPartitionCols - = ms.listPrincipalPartitionColumnGrants(principalName, - principalType, dbName, tableName, partName, columnName); - if (mPartitionCols.size() > 0) { - result = new ArrayList(); - for (int i = 0; i < mPartitionCols.size(); i++) { - MPartitionColumnPrivilege sCol = mPartitionCols.get(i); - HiveObjectRef objectRef = new HiveObjectRef( - HiveObjectType.COLUMN, dbName, tableName, - part == null ? null : part.getValues(), sCol - .getColumnName()); - HiveObjectPrivilege secObj = new HiveObjectPrivilege(objectRef, - sCol.getPrincipalName(), principalType, - new PrivilegeGrantInfo(sCol.getPrivilege(), sCol - .getCreateTime(), sCol.getGrantor(), PrincipalType - .valueOf(sCol.getGrantorType()), sCol.getGrantOption())); - result.add(secObj); - } - } - } else { - List mTableCols = ms - .listPrincipalTableColumnGrants(principalName, principalType, - dbName, tableName, columnName); - if (mTableCols.size() > 0) { - result = new ArrayList(); - for (int i = 0; i < mTableCols.size(); i++) { - MTableColumnPrivilege sCol = mTableCols.get(i); - HiveObjectRef objectRef = new HiveObjectRef( - HiveObjectType.COLUMN, dbName, tableName, null, sCol - .getColumnName()); - HiveObjectPrivilege secObj = new HiveObjectPrivilege( - objectRef, sCol.getPrincipalName(), principalType, - new PrivilegeGrantInfo(sCol.getPrivilege(), sCol - .getCreateTime(), sCol.getGrantor(), PrincipalType - .valueOf(sCol.getGrantorType()), sCol - .getGrantOption())); - result.add(secObj); - } - } + } + } else { + List mTableCols = ms + .listPrincipalTableColumnGrants(principalName, principalType, + dbName, tableName, columnName); + if (mTableCols.size() > 0) { + result = new ArrayList(); + for (int i = 0; i < mTableCols.size(); i++) { + MTableColumnPrivilege sCol = mTableCols.get(i); + HiveObjectRef objectRef = new HiveObjectRef( + HiveObjectType.COLUMN, dbName, tableName, null, sCol + .getColumnName()); + HiveObjectPrivilege secObj = new HiveObjectPrivilege( + objectRef, sCol.getPrincipalName(), principalType, + new PrivilegeGrantInfo(sCol.getPrivilege(), sCol + .getCreateTime(), sCol.getGrantor(), PrincipalType + .valueOf(sCol.getGrantorType()), sCol + .getGrantOption())); + result.add(secObj); } - - return result; } - }); + } + + ret = result; } catch (MetaException e) { throw e; } catch (Exception e) { @@ -3185,37 +2471,31 @@ public class HiveMetaStore extends ThriftHiveMetastore { throws MetaException, TException { incrementCounter("list_security_db_grant"); - List ret = null; try { - ret = executeWithRetry(new Command>() { - @Override - public List run(RawStore ms) throws Exception { - List mDbs = ms.listPrincipalDBGrants( - principalName, principalType, dbName); - if (mDbs.size() > 0) { - List result = new ArrayList(); - for (int i = 0; i < mDbs.size(); i++) { - MDBPrivilege sDB = mDbs.get(i); - HiveObjectRef objectRef = new HiveObjectRef( - HiveObjectType.DATABASE, dbName, null, null, null); - HiveObjectPrivilege secObj = new HiveObjectPrivilege(objectRef, - sDB.getPrincipalName(), principalType, - new 
PrivilegeGrantInfo(sDB.getPrivilege(), sDB - .getCreateTime(), sDB.getGrantor(), PrincipalType - .valueOf(sDB.getGrantorType()), sDB.getGrantOption())); - result.add(secObj); - } - return result; - } - return Collections.emptyList(); - } - }); + RawStore ms = getMS(); + List mDbs = ms.listPrincipalDBGrants( + principalName, principalType, dbName); + if (mDbs.size() > 0) { + List result = new ArrayList(); + for (int i = 0; i < mDbs.size(); i++) { + MDBPrivilege sDB = mDbs.get(i); + HiveObjectRef objectRef = new HiveObjectRef( + HiveObjectType.DATABASE, dbName, null, null, null); + HiveObjectPrivilege secObj = new HiveObjectPrivilege(objectRef, + sDB.getPrincipalName(), principalType, + new PrivilegeGrantInfo(sDB.getPrivilege(), sDB + .getCreateTime(), sDB.getGrantor(), PrincipalType + .valueOf(sDB.getGrantorType()), sDB.getGrantOption())); + result.add(secObj); + } + return result; + } + return Collections. emptyList(); } catch (MetaException e) { throw e; } catch (Exception e) { throw new RuntimeException(e); } - return ret; } public List list_partition_privileges( @@ -3224,42 +2504,36 @@ public class HiveMetaStore extends ThriftHiveMetastore { throws MetaException, TException { incrementCounter("list_security_partition_grant"); - List ret = null; try { - ret = executeWithRetry(new Command>() { - @Override - public List run(RawStore ms) throws Exception { - Table tbl = get_table(dbName, tableName); - String partName = Warehouse.makePartName(tbl.getPartitionKeys(), partValues); - List mParts = ms.listPrincipalPartitionGrants( - principalName, principalType, dbName, tableName, partName); - if (mParts.size() > 0) { - List result = new ArrayList(); - for (int i = 0; i < mParts.size(); i++) { - MPartitionPrivilege sPart = mParts.get(i); - HiveObjectRef objectRef = new HiveObjectRef( - HiveObjectType.PARTITION, dbName, tableName, partValues, - null); - HiveObjectPrivilege secObj = new HiveObjectPrivilege(objectRef, - sPart.getPrincipalName(), principalType, - new PrivilegeGrantInfo(sPart.getPrivilege(), sPart - .getCreateTime(), sPart.getGrantor(), PrincipalType - .valueOf(sPart.getGrantorType()), sPart - .getGrantOption())); - - result.add(secObj); - } - return result; - } - return Collections.emptyList(); - } - }); + RawStore ms = getMS(); + Table tbl = get_table(dbName, tableName); + String partName = Warehouse.makePartName(tbl.getPartitionKeys(), partValues); + List mParts = ms.listPrincipalPartitionGrants( + principalName, principalType, dbName, tableName, partName); + if (mParts.size() > 0) { + List result = new ArrayList(); + for (int i = 0; i < mParts.size(); i++) { + MPartitionPrivilege sPart = mParts.get(i); + HiveObjectRef objectRef = new HiveObjectRef( + HiveObjectType.PARTITION, dbName, tableName, partValues, + null); + HiveObjectPrivilege secObj = new HiveObjectPrivilege(objectRef, + sPart.getPrincipalName(), principalType, + new PrivilegeGrantInfo(sPart.getPrivilege(), sPart + .getCreateTime(), sPart.getGrantor(), PrincipalType + .valueOf(sPart.getGrantorType()), sPart + .getGrantOption())); + + result.add(secObj); + } + return result; + } + return Collections. 
emptyList(); } catch (MetaException e) { throw e; } catch (Exception e) { throw new RuntimeException(e); } - return ret; } public List list_table_privileges( @@ -3268,37 +2542,30 @@ public class HiveMetaStore extends ThriftHiveMetastore { TException { incrementCounter("list_security_table_grant"); - List ret = null; try { - ret = executeWithRetry(new Command>() { - @Override - public List run(RawStore ms) throws Exception { - List mTbls = ms - .listAllTableGrants(principalName, principalType, dbName, tableName); - if (mTbls.size() > 0) { - List result = new ArrayList(); - for (int i = 0; i < mTbls.size(); i++) { - MTablePrivilege sTbl = mTbls.get(i); - HiveObjectRef objectRef = new HiveObjectRef( - HiveObjectType.TABLE, dbName, tableName, null, null); - HiveObjectPrivilege secObj = new HiveObjectPrivilege(objectRef, - sTbl.getPrincipalName(), principalType, - new PrivilegeGrantInfo(sTbl.getPrivilege(), sTbl.getCreateTime(), sTbl - .getGrantor(), PrincipalType.valueOf(sTbl - .getGrantorType()), sTbl.getGrantOption())); - result.add(secObj); - } - return result; - } - return Collections.emptyList(); - } - }); + List mTbls = getMS() + .listAllTableGrants(principalName, principalType, dbName, tableName); + if (mTbls.size() > 0) { + List result = new ArrayList(); + for (int i = 0; i < mTbls.size(); i++) { + MTablePrivilege sTbl = mTbls.get(i); + HiveObjectRef objectRef = new HiveObjectRef( + HiveObjectType.TABLE, dbName, tableName, null, null); + HiveObjectPrivilege secObj = new HiveObjectPrivilege(objectRef, + sTbl.getPrincipalName(), principalType, + new PrivilegeGrantInfo(sTbl.getPrivilege(), sTbl.getCreateTime(), sTbl + .getGrantor(), PrincipalType.valueOf(sTbl + .getGrantorType()), sTbl.getGrantOption())); + result.add(secObj); + } + return result; + } + return Collections. 
emptyList(); } catch (MetaException e) { throw e; } catch (Exception e) { throw new RuntimeException(e); } - return ret; } public List list_global_privileges( @@ -3306,47 +2573,41 @@ public class HiveMetaStore extends ThriftHiveMetastore { throws MetaException, TException { incrementCounter("list_security_user_grant"); - List ret = null; try { - ret = executeWithRetry(new Command>() { - @Override - public List run(RawStore ms) throws Exception { - List mUsers = ms.listPrincipalGlobalGrants( - principalName, principalType); - if (mUsers.size() > 0) { - List result = new ArrayList(); - for (int i = 0; i < mUsers.size(); i++) { - MGlobalPrivilege sUsr = mUsers.get(i); - HiveObjectRef objectRef = new HiveObjectRef( - HiveObjectType.GLOBAL, null, null, null, null); - HiveObjectPrivilege secUser = new HiveObjectPrivilege( - objectRef, sUsr.getPrincipalName(), principalType, - new PrivilegeGrantInfo(sUsr.getPrivilege(), sUsr - .getCreateTime(), sUsr.getGrantor(), PrincipalType - .valueOf(sUsr.getGrantorType()), sUsr.getGrantOption())); - result.add(secUser); - } - return result; - } - return Collections.emptyList(); - } - }); + List mUsers = getMS().listPrincipalGlobalGrants( + principalName, principalType); + if (mUsers.size() > 0) { + List result = new ArrayList(); + for (int i = 0; i < mUsers.size(); i++) { + MGlobalPrivilege sUsr = mUsers.get(i); + HiveObjectRef objectRef = new HiveObjectRef( + HiveObjectType.GLOBAL, null, null, null, null); + HiveObjectPrivilege secUser = new HiveObjectPrivilege( + objectRef, sUsr.getPrincipalName(), principalType, + new PrivilegeGrantInfo(sUsr.getPrivilege(), sUsr + .getCreateTime(), sUsr.getGrantor(), PrincipalType + .valueOf(sUsr.getGrantorType()), sUsr.getGrantOption())); + result.add(secUser); + } + return result; + } + return Collections. 
emptyList(); } catch (MetaException e) { throw e; } catch (Exception e) { throw new RuntimeException(e); } - return ret; } + @Override public void cancel_delegation_token(String token_str_form) - throws MetaException, TException { + throws MetaException, TException { startFunction("cancel_delegation_token"); boolean success = false; try { HiveMetaStore.cancelDelegationToken(token_str_form); success = true; - } catch(IOException e) { + } catch (IOException e) { throw new MetaException(e.getMessage()); } finally { endFunction("cancel_delegation_token", success); @@ -3355,12 +2616,12 @@ public class HiveMetaStore extends ThriftHiveMetastore { @Override public long renew_delegation_token(String token_str_form) - throws MetaException, TException { + throws MetaException, TException { startFunction("renew_delegation_token"); Long ret = null; try { ret = HiveMetaStore.renewDelegationToken(token_str_form); - } catch(IOException e) { + } catch (IOException e) { throw new MetaException(e.getMessage()); } finally { endFunction("renew_delegation_token", ret != null); @@ -3371,14 +2632,14 @@ public class HiveMetaStore extends ThriftHiveMetastore { @Override public String get_delegation_token(String token_owner, String renewer_kerberos_principal_name) - throws MetaException, TException { + throws MetaException, TException { startFunction("get_delegation_token"); String ret = null; try { ret = - HiveMetaStore.getDelegationToken(token_owner, - renewer_kerberos_principal_name); - } catch(IOException e) { + HiveMetaStore.getDelegationToken(token_owner, + renewer_kerberos_principal_name); + } catch (IOException e) { throw new MetaException(e.getMessage()); } catch (InterruptedException e) { throw new MetaException(e.getMessage()); @@ -3390,90 +2651,78 @@ public class HiveMetaStore extends ThriftHiveMetastore { @Override public void markPartitionForEvent(final String db_name, final String tbl_name, - final Map partName, final PartitionEventType evtType) throws - MetaException,TException, NoSuchObjectException, UnknownDBException, UnknownTableException, - InvalidPartitionException, UnknownPartitionException { + final Map partName, final PartitionEventType evtType) throws + MetaException, TException, NoSuchObjectException, UnknownDBException, + UnknownTableException, + InvalidPartitionException, UnknownPartitionException { Table tbl = null; try { startPartitionFunction("markPartitionForEvent", db_name, tbl_name, partName); - try{ - tbl = executeWithRetry(new Command
(){ - @Override - public Table run(RawStore ms) throws Exception { - return ms.markPartitionForEvent(db_name, tbl_name, partName, evtType); - } - }); + try { + tbl = getMS().markPartitionForEvent(db_name, tbl_name, partName, evtType); } catch (Exception original) { LOG.error(original); if (original instanceof NoSuchObjectException) { - throw (NoSuchObjectException)original; - } else if(original instanceof UnknownTableException){ - throw (UnknownTableException)original; - } else if(original instanceof UnknownDBException){ - throw (UnknownDBException)original; - } else if(original instanceof UnknownPartitionException){ - throw (UnknownPartitionException)original; - } else if(original instanceof InvalidPartitionException){ - throw (InvalidPartitionException)original; - } else if(original instanceof MetaException){ - throw (MetaException)original; - } else{ + throw (NoSuchObjectException) original; + } else if (original instanceof UnknownTableException) { + throw (UnknownTableException) original; + } else if (original instanceof UnknownDBException) { + throw (UnknownDBException) original; + } else if (original instanceof UnknownPartitionException) { + throw (UnknownPartitionException) original; + } else if (original instanceof InvalidPartitionException) { + throw (InvalidPartitionException) original; + } else if (original instanceof MetaException) { + throw (MetaException) original; + } else { MetaException me = new MetaException(original.toString()); me.initCause(original); throw me; } } if (null == tbl) { - throw new UnknownTableException("Table: "+tbl_name + " not found."); - } else{ - for(MetaStoreEventListener listener : listeners){ + throw new UnknownTableException("Table: " + tbl_name + " not found."); + } else { + for (MetaStoreEventListener listener : listeners) { listener.onLoadPartitionDone(new LoadPartitionDoneEvent(true, this, tbl, partName)); } } - } - finally{ + } finally { endFunction("markPartitionForEvent", tbl != null); } } @Override public boolean isPartitionMarkedForEvent(final String db_name, final String tbl_name, - final Map partName, final PartitionEventType evtType) throws + final Map partName, final PartitionEventType evtType) throws MetaException, NoSuchObjectException, UnknownDBException, UnknownTableException, TException, UnknownPartitionException, InvalidPartitionException { startPartitionFunction("isPartitionMarkedForEvent", db_name, tbl_name, partName); Boolean ret = null; try { - ret = executeWithRetry(new Command(){ - @Override - public Boolean run(RawStore ms) throws Exception { - return ms.isPartitionMarkedForEvent(db_name, tbl_name, partName, evtType); - } - - }); + ret = getMS().isPartitionMarkedForEvent(db_name, tbl_name, partName, evtType); } catch (Exception original) { LOG.error(original); if (original instanceof NoSuchObjectException) { - throw (NoSuchObjectException)original; - } else if(original instanceof UnknownTableException){ - throw (UnknownTableException)original; - } else if(original instanceof UnknownDBException){ - throw (UnknownDBException)original; - } else if(original instanceof UnknownPartitionException){ - throw (UnknownPartitionException)original; - } else if(original instanceof InvalidPartitionException){ - throw (InvalidPartitionException)original; - } else if(original instanceof MetaException){ - throw (MetaException)original; - } else{ + throw (NoSuchObjectException) original; + } else if (original instanceof UnknownTableException) { + throw (UnknownTableException) original; + } else if (original instanceof UnknownDBException) { 
+ throw (UnknownDBException) original; + } else if (original instanceof UnknownPartitionException) { + throw (UnknownPartitionException) original; + } else if (original instanceof InvalidPartitionException) { + throw (InvalidPartitionException) original; + } else if (original instanceof MetaException) { + throw (MetaException) original; + } else { MetaException me = new MetaException(original.toString()); me.initCause(original); throw me; } - } - finally{ + } finally { endFunction("isPartitionMarkedForEvent", ret != null); } @@ -3482,36 +2731,43 @@ public class HiveMetaStore extends ThriftHiveMetastore { @Override public List set_ugi(String username, List groupNames) throws MetaException, - TException { + TException { Collections.addAll(groupNames, username); return groupNames; - } + } } /** * Discard a current delegation token. - * @param tokenStrForm the token in string form + * + * @param tokenStrForm + * the token in string form */ public static void cancelDelegationToken(String tokenStrForm - ) throws IOException { + ) throws IOException { saslServer.cancelDelegationToken(tokenStrForm); } /** * Get a new delegation token. - * @param renewer the designated renewer + * + * @param renewer + * the designated renewer */ public static String getDelegationToken(String owner, String renewer) - throws IOException, InterruptedException { + throws IOException, InterruptedException { return saslServer.getDelegationToken(owner, renewer); } + /** * Renew a delegation token to extend its lifetime. - * @param tokenStrForm the token in string form + * + * @param tokenStrForm + * the token in string form */ public static long renewDelegationToken(String tokenStrForm - ) throws IOException { + ) throws IOException { return saslServer.renewDelegationToken(tokenStrForm); } @@ -3546,7 +2802,7 @@ public class HiveMetaStore extends ThriftHiveMetastore { // complain about the deprecated syntax -- but still run System.err.println( "This usage has been deprecated, consider using the new command " - + "line syntax (run with -h to see usage information)"); + + "line syntax (run with -h to see usage information)"); port = new Integer(args[0]); } @@ -3606,27 +2862,30 @@ public class HiveMetaStore extends ThriftHiveMetastore { } catch (Throwable t) { // Catch the exception, log it and rethrow it. HMSHandler.LOG - .error("Metastore Thrift Server threw an exception...",t); + .error("Metastore Thrift Server threw an exception...", t); throw t; } } /** * Start Metastore based on a passed {@link HadoopThriftAuthBridge} + * * @param port * @param bridge * @throws Throwable */ public static void startMetaStore(int port, HadoopThriftAuthBridge bridge) - throws Throwable { + throws Throwable { startMetaStore(port, bridge, new HiveConf(HMSHandler.class)); } /** * Start Metastore based on a passed {@link HadoopThriftAuthBridge} + * * @param port * @param bridge - * @param hiveconf configuration overrides + * @param hiveconf + * configuration overrides * @throws Throwable */ public static void startMetaStore(int port, HadoopThriftAuthBridge bridge, @@ -3648,9 +2907,9 @@ public class HiveMetaStore extends ThriftHiveMetastore { TTransportFactory transFactory; if (useSasl) { // we are in secure mode. 
- saslServer = bridge.createServer( - conf.getVar(HiveConf.ConfVars.METASTORE_KERBEROS_KEYTAB_FILE), - conf.getVar(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL)); + saslServer = bridge.createServer( + conf.getVar(HiveConf.ConfVars.METASTORE_KERBEROS_KEYTAB_FILE), + conf.getVar(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL)); // start delegation token manager saslServer.startDelegationTokenSecretManager(conf); transFactory = saslServer.createTransportFactory(); @@ -3660,23 +2919,23 @@ public class HiveMetaStore extends ThriftHiveMetastore { } else { // we are in unsecure mode. HMSHandler handler = new HMSHandler("new db based metaserver", conf); - if (conf.getBoolVar(ConfVars.METASTORE_EXECUTE_SET_UGI)){ + if (conf.getBoolVar(ConfVars.METASTORE_EXECUTE_SET_UGI)) { transFactory = new TUGIContainingTransport.Factory(); processor = new TUGIBasedProcessor(handler); LOG.info("Starting DB backed MetaStore Server with SetUGI enabled"); - } else{ - transFactory = new TTransportFactory(); - processor = new ThriftHiveMetastore.Processor(handler); + } else { + transFactory = new TTransportFactory(); + processor = new ThriftHiveMetastore.Processor(handler); LOG.info("Starting DB backed MetaStore Server"); - } + } } TThreadPoolServer.Args args = new TThreadPoolServer.Args(serverTransport) - .processor(processor) - .transportFactory(transFactory) - .protocolFactory(new TBinaryProtocol.Factory()) - .minWorkerThreads(minWorkerThreads) - .maxWorkerThreads(maxWorkerThreads); + .processor(processor) + .transportFactory(transFactory) + .protocolFactory(new TBinaryProtocol.Factory()) + .minWorkerThreads(minWorkerThreads) + .maxWorkerThreads(maxWorkerThreads); TServer tServer = new TThreadPoolServer(args); HMSHandler.LOG.info("Started the new metaserver on port [" + port diff --git metastore/src/java/org/apache/hadoop/hive/metastore/RetryingRawStore.java metastore/src/java/org/apache/hadoop/hive/metastore/RetryingRawStore.java new file mode 100644 index 0000000..7847044 --- /dev/null +++ metastore/src/java/org/apache/hadoop/hive/metastore/RetryingRawStore.java @@ -0,0 +1,206 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.metastore; + +import java.lang.reflect.InvocationHandler; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.lang.reflect.Proxy; +import java.lang.reflect.UndeclaredThrowableException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.common.JavaUtils; +import org.apache.hadoop.hive.common.classification.InterfaceAudience; +import org.apache.hadoop.hive.common.classification.InterfaceStability; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.hooks.JDOConnectionURLHook; +import org.apache.hadoop.util.ReflectionUtils; + +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class RetryingRawStore implements InvocationHandler { + + private static final Log LOG = LogFactory.getLog(RetryingRawStore.class); + + private final RawStore base; + private int retryInterval = 0; + private int retryLimit = 0; + private JDOConnectionURLHook urlHook = null; + private String urlHookClassName = ""; + private final int id; + private final HiveConf hiveConf; + private final Configuration conf; // thread local conf from HMS + + protected RetryingRawStore(HiveConf hiveConf, Configuration conf, RawStore base, int id) + throws MetaException { + this.base = base; + this.conf = conf; + this.hiveConf = hiveConf; + this.id = id; + init(); + } + + public static RawStore getProxy(HiveConf hiveConf, Configuration conf, RawStore base, int id) + throws MetaException { + + RetryingRawStore handler = new RetryingRawStore(hiveConf, conf, base, id); + + return (RawStore) Proxy.newProxyInstance(RetryingRawStore.class.getClassLoader() + , base.getClass().getInterfaces(), handler); + } + + private void init() throws MetaException { + retryInterval = HiveConf.getIntVar(hiveConf, + HiveConf.ConfVars.METASTOREINTERVAL); + retryLimit = HiveConf.getIntVar(hiveConf, + HiveConf.ConfVars.METASTOREATTEMPTS); + // Using the hook on startup ensures that the hook always has priority + // over settings in *.xml. We can use hiveConf as only a single thread + // will be calling the constructor. + updateConnectionURL(hiveConf, null); + } + + private void initMS() { + base.setConf(getConf()); + } + + @Override + public Object invoke(Object proxy, Method method, Object[] args) throws Throwable { + Object ret = null; + + boolean gotNewConnectUrl = false; + boolean reloadConf = HiveConf.getBoolVar(hiveConf, + HiveConf.ConfVars.METASTOREFORCERELOADCONF); + + if (reloadConf) { + updateConnectionURL(getConf(), null); + } + + int retryCount = 0; + Exception caughtException = null; + while (true) { + try { + if (reloadConf || gotNewConnectUrl) { + initMS(); + } + ret = method.invoke(base, args); + break; + } catch (javax.jdo.JDOException e) { + caughtException = e; + } catch (UndeclaredThrowableException e) { + throw e.getCause(); + } catch (InvocationTargetException e) { + throw e.getCause(); + } + + if (retryCount >= retryLimit) { + throw caughtException; + } + + assert (retryInterval >= 0); + retryCount++; + LOG.error( + String.format( + "JDO datastore error. Retrying metastore command " + + "after %d ms (attempt %d of %d)", retryInterval, retryCount, retryLimit)); + Thread.sleep(retryInterval); + // If we have a connection error, the JDO connection URL hook might + // provide us with a new URL to access the datastore. 
+ String lastUrl = getConnectionURL(getConf()); + gotNewConnectUrl = updateConnectionURL(getConf(), lastUrl); + } + return ret; + } + + /** + * Updates the connection URL in hiveConf using the hook + * + * @return true if a new connection URL was loaded into the thread local + * configuration + */ + private boolean updateConnectionURL(Configuration conf, String badUrl) + throws MetaException { + String connectUrl = null; + String currentUrl = getConnectionURL(conf); + try { + // We always call init because the hook name in the configuration could + // have changed. + initConnectionUrlHook(); + if (urlHook != null) { + if (badUrl != null) { + urlHook.notifyBadConnectionUrl(badUrl); + } + connectUrl = urlHook.getJdoConnectionUrl(hiveConf); + } + } catch (Exception e) { + LOG.error("Exception while getting connection URL from the hook: " + + e); + } + + if (connectUrl != null && !connectUrl.equals(currentUrl)) { + LOG.error(addPrefix( + String.format("Overriding %s with %s", + HiveConf.ConfVars.METASTORECONNECTURLKEY.toString(), + connectUrl))); + conf.set(HiveConf.ConfVars.METASTORECONNECTURLKEY.toString(), + connectUrl); + return true; + } + return false; + } + + private static String getConnectionURL(Configuration conf) { + return conf.get( + HiveConf.ConfVars.METASTORECONNECTURLKEY.toString(), ""); + } + + // Multiple threads could try to initialize at the same time. + synchronized private void initConnectionUrlHook() + throws ClassNotFoundException { + + String className = + hiveConf.get(HiveConf.ConfVars.METASTORECONNECTURLHOOK.toString(), "").trim(); + if (className.equals("")) { + urlHookClassName = ""; + urlHook = null; + return; + } + boolean urlHookChanged = !urlHookClassName.equals(className); + if (urlHook == null || urlHookChanged) { + urlHookClassName = className.trim(); + + Class urlHookClass = Class.forName(urlHookClassName, true, + JavaUtils.getClassLoader()); + urlHook = (JDOConnectionURLHook) ReflectionUtils.newInstance(urlHookClass, null); + } + return; + } + + private String addPrefix(String s) { + return id + ": " + s; + } + + public Configuration getConf() { + return conf; + } + +} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/events/EventCleanerTask.java metastore/src/java/org/apache/hadoop/hive/metastore/events/EventCleanerTask.java index 557a8c4..df82bce 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/events/EventCleanerTask.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/events/EventCleanerTask.java @@ -23,7 +23,6 @@ import java.util.TimerTask; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler; -import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler.Command; import org.apache.hadoop.hive.metastore.RawStore; public class EventCleanerTask extends TimerTask{ @@ -40,12 +39,9 @@ public class EventCleanerTask extends TimerTask{ public void run() { try { - long deleteCnt = handler.executeWithRetry(new Command(){ - @Override - public Long run(RawStore ms) throws Exception { - return ms.cleanupEvents(); - } - }); + RawStore ms = handler.getMS(); + long deleteCnt = ms.cleanupEvents(); + if (deleteCnt > 0L){ LOG.info("Number of events deleted from event Table: "+deleteCnt); }
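
Note on the HMSHandler hunks above: they all repeat one mechanical change, replacing the anonymous Command handed to executeWithRetry with a direct call on the store returned by getMS(), since retrying now lives in the RetryingRawStore proxy added by this patch. The standalone sketch below contrasts the two call-site shapes; Store, Command, executeWithRetry and listNames are simplified stand-ins reconstructed for illustration, not the exact Hive signatures.

import java.util.Arrays;
import java.util.List;

public class CallSiteSketch {

  // Illustrative stand-in for RawStore.
  public interface Store {
    List<String> listNames(String db) throws Exception;
  }

  // Pre-patch shape: a one-shot command object run under a retry helper.
  public interface Command<T> {
    T run(Store ms) throws Exception;
  }

  static <T> T executeWithRetry(Store ms, Command<T> cmd) throws Exception {
    Exception last = null;
    for (int attempt = 0; attempt < 3; attempt++) { // fixed limit for the sketch
      try {
        return cmd.run(ms);
      } catch (Exception e) {
        last = e;
      }
    }
    throw last;
  }

  public static void main(String[] args) throws Exception {
    Store ms = db -> Arrays.asList(db + ".t1", db + ".t2");

    // Old style (removed by this patch): retry boilerplate at every call site.
    List<String> before = executeWithRetry(ms, new Command<List<String>>() {
      @Override
      public List<String> run(Store store) throws Exception {
        return store.listNames("default");
      }
    });

    // New style: call the (proxy-wrapped) store directly.
    List<String> after = ms.listNames("default");

    System.out.println(before.equals(after)); // true
  }
}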
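
The core mechanism of the new RetryingRawStore is a java.lang.reflect.Proxy whose InvocationHandler re-invokes the underlying RawStore method when it fails, sleeping between attempts and rethrowing once the retry limit is exceeded. Below is a minimal, self-contained sketch of that technique under simplifying assumptions: the Store interface and all class names are invented for the example, and every exception from the target is treated as retryable, whereas the real class retries only javax.jdo.JDOException and may also refresh the JDO connection URL between attempts.

import java.lang.reflect.InvocationHandler;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;

public class RetryingProxySketch implements InvocationHandler {

  // Illustrative stand-in for RawStore; not part of the patch.
  public interface Store {
    String fetch(String key) throws Exception;
  }

  private final Object base;          // the real (non-retrying) implementation
  private final int retryLimit;       // maximum number of retries after a failure
  private final long retryIntervalMs; // pause between attempts

  private RetryingProxySketch(Object base, int retryLimit, long retryIntervalMs) {
    this.base = base;
    this.retryLimit = retryLimit;
    this.retryIntervalMs = retryIntervalMs;
  }

  // Wraps 'base' in a dynamic proxy, mirroring what RetryingRawStore.getProxy() does.
  @SuppressWarnings("unchecked")
  public static <T> T wrap(T base, Class<T> iface, int retryLimit, long retryIntervalMs) {
    return (T) Proxy.newProxyInstance(iface.getClassLoader(),
        new Class<?>[] { iface },
        new RetryingProxySketch(base, retryLimit, retryIntervalMs));
  }

  @Override
  public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
    int attempt = 0;
    while (true) {
      try {
        return method.invoke(base, args);
      } catch (InvocationTargetException e) {
        // The real RetryingRawStore retries only javax.jdo.JDOException;
        // this sketch treats every exception thrown by the target as retryable.
        if (++attempt > retryLimit) {
          throw e.getCause();
        }
        Thread.sleep(retryIntervalMs);
      }
    }
  }

  public static void main(String[] args) throws Exception {
    // A deliberately flaky implementation that fails twice, then succeeds.
    Store flaky = new Store() {
      private int calls = 0;
      public String fetch(String key) throws Exception {
        if (++calls < 3) {
          throw new RuntimeException("transient failure " + calls);
        }
        return "value-of-" + key;
      }
    };
    Store store = wrap(flaky, Store.class, 5, 10);
    System.out.println(store.fetch("k1")); // succeeds after two retried failures
  }
}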
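
RetryingRawStore also carries over the JDO connection-URL-hook handling (updateConnectionURL / initConnectionUrlHook above): after a failed attempt the hook is told which URL went bad, asked for a replacement, and the configuration is overridden only when the hook returns a value that differs from the current one, which signals the caller to re-initialize the store. The following self-contained sketch shows that decision flow, assuming an invented ConnectionUrlHook interface and a plain Map with a made-up "datastore.url" key in place of the real JDOConnectionURLHook and HiveConf.

import java.util.HashMap;
import java.util.Map;

public class UrlHookSketch {

  // Illustrative stand-in for JDOConnectionURLHook.
  public interface ConnectionUrlHook {
    void notifyBadConnectionUrl(String badUrl);
    String getJdoConnectionUrl(Map<String, String> conf);
  }

  static boolean updateConnectionUrl(Map<String, String> conf,
                                     ConnectionUrlHook hook, String badUrl) {
    String current = conf.getOrDefault("datastore.url", "");
    if (badUrl != null) {
      hook.notifyBadConnectionUrl(badUrl);   // let the hook blacklist the failing URL
    }
    String candidate = hook.getJdoConnectionUrl(conf);
    if (candidate != null && !candidate.equals(current)) {
      conf.put("datastore.url", candidate);  // override the configured URL
      return true;                           // caller should re-init the store
    }
    return false;
  }

  public static void main(String[] args) {
    Map<String, String> conf = new HashMap<>();
    conf.put("datastore.url", "jdbc:derby:;databaseName=primary");

    ConnectionUrlHook failover = new ConnectionUrlHook() {
      public void notifyBadConnectionUrl(String badUrl) {
        System.out.println("marking bad: " + badUrl);
      }
      public String getJdoConnectionUrl(Map<String, String> c) {
        return "jdbc:derby:;databaseName=standby";
      }
    };

    boolean changed = updateConnectionUrl(conf, failover,
        "jdbc:derby:;databaseName=primary");
    System.out.println(changed + " -> " + conf.get("datastore.url"));
  }
}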