Index: metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java	(revision 901511)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java	(working copy)
@@ -18,10 +18,8 @@
 
 package org.apache.hadoop.hive.metastore;
 
-
 import java.util.List;
 import java.util.Map;
-import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
 import org.apache.commons.logging.Log;
@@ -31,6 +29,7 @@
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
 import org.apache.hadoop.hive.metastore.api.ConfigValSecurityException;
+import org.apache.hadoop.hive.metastore.api.Constants;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.Index;
@@ -50,11 +49,6 @@
 import org.apache.hadoop.hive.serde2.SerDeUtils;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.hive.metastore.api.Constants;
-
-import com.facebook.fb303.FacebookBase;
-import com.facebook.fb303.FacebookService;
-import com.facebook.fb303.fb_status;
 import org.apache.thrift.TException;
 import org.apache.thrift.protocol.TBinaryProtocol;
 import org.apache.thrift.server.TServer;
@@ -63,690 +57,744 @@
 import org.apache.thrift.transport.TServerTransport;
 import org.apache.thrift.transport.TTransportFactory;
 
+import com.facebook.fb303.FacebookBase;
+import com.facebook.fb303.FacebookService;
+import com.facebook.fb303.fb_status;
+
 /**
  * TODO:pc remove application logic to a separate interface.
  */
 public class HiveMetaStore extends ThriftHiveMetastore {
-  public static class HMSHandler extends FacebookBase implements ThriftHiveMetastore.Iface {
-    public static final Log LOG = LogFactory.getLog(HiveMetaStore.class.getName());
-    private static boolean createDefaultDB = false;
-    private String rawStoreClassName;
-    private HiveConf hiveConf; // stores datastore (jpox) properties, right now they come from jpox.properties
-    private Warehouse wh; // hdfs warehouse
-    private ThreadLocal threadLocalMS = new ThreadLocal() {
-      protected synchronized Object initialValue() {
-        return null;
-      }
-    };
-
-    // The next serial number to be assigned
-    private boolean checkForDefaultDb;
-    private static int nextSerialNum = 0;
-    private static ThreadLocal threadLocalId = new ThreadLocal() {
-      protected synchronized Object initialValue() {
-        return new Integer(nextSerialNum++);
-      }
-    };
-    public static Integer get() {
-      return threadLocalId.get();
+  public static class HMSHandler extends FacebookBase implements
+      ThriftHiveMetastore.Iface {
+    public static final Log LOG = LogFactory.getLog(HiveMetaStore.class
+        .getName());
+    private static boolean createDefaultDB = false;
+    private String rawStoreClassName;
+    private final HiveConf hiveConf; // stores datastore (jpox) properties,
+    // right now they come from jpox.properties
+    private Warehouse wh; // hdfs warehouse
+    private final ThreadLocal threadLocalMS = new ThreadLocal() {
+      @Override
+      protected synchronized Object initialValue() {
+        return null;
       }
+    };
 
-    public HMSHandler(String name) throws MetaException {
-      super(name);
-      hiveConf = new HiveConf(this.getClass());
-      init();
+    // The next serial number to be assigned
+    private boolean checkForDefaultDb;
+    private static int nextSerialNum = 0;
+    private static ThreadLocal threadLocalId = new ThreadLocal() {
+      @Override
+      protected synchronized Object initialValue() {
+        return new Integer(nextSerialNum++);
       }
+    };
 
-    public HMSHandler(String name, HiveConf conf) throws MetaException {
-      super(name);
-      hiveConf = conf;
-      init();
-    }
+    public static Integer get() {
+      return threadLocalId.get();
+    }
 
-    private ClassLoader classLoader;
-    private AlterHandler alterHandler;
-    {
-      classLoader = Thread.currentThread().getContextClassLoader();
-      if (classLoader == null) {
-        classLoader = Configuration.class.getClassLoader();
-      }
-    }
+    public HMSHandler(String name) throws MetaException {
+      super(name);
+      hiveConf = new HiveConf(this.getClass());
+      init();
+    }
 
-    private boolean init() throws MetaException {
-      rawStoreClassName = hiveConf.get("hive.metastore.rawstore.impl");
-      checkForDefaultDb = hiveConf.getBoolean("hive.metastore.checkForDefaultDb", true);
-      String alterHandlerName = hiveConf.get("hive.metastore.alter.impl", HiveAlterHandler.class.getName());
-      alterHandler = (AlterHandler) ReflectionUtils.newInstance(getClass(alterHandlerName, AlterHandler.class), hiveConf);
-      wh = new Warehouse(hiveConf);
-      createDefaultDB();
-      return true;
-    }
+    public HMSHandler(String name, HiveConf conf) throws MetaException {
+      super(name);
+      hiveConf = conf;
+      init();
+    }
-    /**
-     * @return
-     * @throws MetaException
-     */
-    private RawStore getMS() throws MetaException {
-      RawStore ms = threadLocalMS.get();
-      if(ms == null) {
-        LOG.info(threadLocalId.get() + ": Opening raw store with implemenation class:" + rawStoreClassName);
-        ms = (RawStore) ReflectionUtils.newInstance(getClass(rawStoreClassName, RawStore.class), hiveConf);
-        threadLocalMS.set(ms);
-        ms = threadLocalMS.get();
-      }
-      return ms;
+    private ClassLoader classLoader;
+    private AlterHandler alterHandler;
+    {
+      classLoader = Thread.currentThread().getContextClassLoader();
+      if (classLoader == null) {
+        classLoader = Configuration.class.getClassLoader();
       }
+    }
 
-    /**
-     * create default database if it doesn't exist
-     * @throws MetaException
-     */
-    private void createDefaultDB() throws MetaException {
-      if(HMSHandler.createDefaultDB || !checkForDefaultDb) {
-        return;
-      }
-      try {
-        getMS().getDatabase(MetaStoreUtils.DEFAULT_DATABASE_NAME);
-      } catch (NoSuchObjectException e) {
-        getMS().createDatabase(new Database(MetaStoreUtils.DEFAULT_DATABASE_NAME,
-            wh.getDefaultDatabasePath(MetaStoreUtils.DEFAULT_DATABASE_NAME).toString()));
-      }
-      HMSHandler.createDefaultDB = true;
-    }
+    private boolean init() throws MetaException {
+      rawStoreClassName = hiveConf.get("hive.metastore.rawstore.impl");
+      checkForDefaultDb = hiveConf.getBoolean(
+          "hive.metastore.checkForDefaultDb", true);
+      String alterHandlerName = hiveConf.get("hive.metastore.alter.impl",
+          HiveAlterHandler.class.getName());
+      alterHandler = (AlterHandler) ReflectionUtils.newInstance(getClass(
+          alterHandlerName, AlterHandler.class), hiveConf);
+      wh = new Warehouse(hiveConf);
+      createDefaultDB();
+      return true;
+    }
 
-    private Class getClass(String rawStoreClassName, Class class1) throws MetaException {
-      try {
-        return Class.forName(rawStoreClassName, true, classLoader);
-      } catch (ClassNotFoundException e) {
-        throw new MetaException(rawStoreClassName + " class not found");
-      }
+    /**
+     * @return
+     * @throws MetaException
+     */
+    private RawStore getMS() throws MetaException {
+      RawStore ms = threadLocalMS.get();
+      if (ms == null) {
+        LOG.info(threadLocalId.get()
+            + ": Opening raw store with implementation class:"
+            + rawStoreClassName);
+        ms = (RawStore) ReflectionUtils.newInstance(getClass(rawStoreClassName,
+            RawStore.class), hiveConf);
+        threadLocalMS.set(ms);
+        ms = threadLocalMS.get();
       }
+      return ms;
+    }
 
-    private void logStartFunction(String m) {
-      LOG.info(threadLocalId.get().toString() + ": " + m);
+    /**
+     * create default database if it doesn't exist
+     *
+     * @throws MetaException
+     */
+    private void createDefaultDB() throws MetaException {
+      if (HMSHandler.createDefaultDB || !checkForDefaultDb) {
+        return;
       }
-
-    private void logStartFunction(String f, String db, String tbl) {
-      LOG.info(threadLocalId.get().toString() + ": " + f + " : db=" + db + " tbl=" + tbl);
+      try {
+        getMS().getDatabase(MetaStoreUtils.DEFAULT_DATABASE_NAME);
+      } catch (NoSuchObjectException e) {
+        getMS().createDatabase(
+            new Database(MetaStoreUtils.DEFAULT_DATABASE_NAME, wh
+                .getDefaultDatabasePath(MetaStoreUtils.DEFAULT_DATABASE_NAME)
+                .toString()));
       }
+      HMSHandler.createDefaultDB = true;
+    }
 
-    @Override
-    public int getStatus() {
-      return fb_status.ALIVE;
+    private Class getClass(String rawStoreClassName, Class class1)
+        throws MetaException {
+      try {
+        return Class.forName(rawStoreClassName, true, classLoader);
+      } catch (ClassNotFoundException e) {
+        throw new MetaException(rawStoreClassName + " class not found");
       }
+    }
 
-    public void shutdown() {
-      logStartFunction("Shutting down the object store...");
-      try {
-        if(threadLocalMS.get() != null) {
-          getMS().shutdown();
-        }
-      } catch (MetaException e) {
-        LOG.error("unable to shutdown metastore", e);
-      }
-      System.exit(0);
-    }
+    private void logStartFunction(String m) {
+      LOG.info(threadLocalId.get().toString() + ": " + m);
+    }
 
-    public boolean create_database(String name, String location_uri)
-        throws AlreadyExistsException, MetaException {
-      this.incrementCounter("create_database");
-      logStartFunction("create_database: " + name);
-      boolean success = false;
-      try {
-        getMS().openTransaction();
-        Database db = new Database(name, location_uri);
-        if(getMS().createDatabase(db) && wh.mkdirs(wh.getDefaultDatabasePath(name))) {
-          success = getMS().commitTransaction();
-        }
-      } finally {
-        if(!success) {
-          getMS().rollbackTransaction();
-        }
+    private void logStartFunction(String f, String db, String tbl) {
+      LOG.info(threadLocalId.get().toString() + ": " + f + " : db=" + db
+          + " tbl=" + tbl);
+    }
+
+    @Override
+    public int getStatus() {
+      return fb_status.ALIVE;
+    }
+
+    public void shutdown() {
+      logStartFunction("Shutting down the object store...");
+      try {
+        if (threadLocalMS.get() != null) {
+          getMS().shutdown();
         }
-        return success;
+      } catch (MetaException e) {
+        LOG.error("unable to shutdown metastore", e);
       }
+      System.exit(0);
+    }
 
-    public Database get_database(String name) throws NoSuchObjectException, MetaException {
-      this.incrementCounter("get_database");
-      logStartFunction("get_database: " + name);
-      return getMS().getDatabase(name);
-    }
-
-    public boolean drop_database(String name) throws MetaException {
-      this.incrementCounter("drop_database");
-      logStartFunction("drop_database: " + name);
-      if(name.equalsIgnoreCase(MetaStoreUtils.DEFAULT_DATABASE_NAME)) {
-        throw new MetaException("Can't drop default database");
+    public boolean create_database(String name, String location_uri)
+        throws AlreadyExistsException, MetaException {
+      incrementCounter("create_database");
+      logStartFunction("create_database: " + name);
+      boolean success = false;
+      try {
+        getMS().openTransaction();
+        Database db = new Database(name, location_uri);
+        if (getMS().createDatabase(db)
+            && wh.mkdirs(wh.getDefaultDatabasePath(name))) {
+          success = getMS().commitTransaction();
         }
+      } finally {
+        if (!success) {
+          getMS().rollbackTransaction();
         }
       }
+      return success;
+    }
 
-    public List get_databases() throws MetaException {
-      this.incrementCounter("get_databases");
-      logStartFunction("get_databases");
-      return getMS().getDatabases();
+    public Database get_database(String name) throws NoSuchObjectException,
+        MetaException {
+      incrementCounter("get_database");
+      logStartFunction("get_database: " + name);
+      return getMS().getDatabase(name);
+    }
+
+    public boolean drop_database(String name) throws MetaException {
+      incrementCounter("drop_database");
+      logStartFunction("drop_database: " + name);
+      if (name.equalsIgnoreCase(MetaStoreUtils.DEFAULT_DATABASE_NAME)) {
+        throw new MetaException("Can't drop default database");
       }
-
-    public boolean create_type(Type type) throws AlreadyExistsException, MetaException, InvalidObjectException {
-      this.incrementCounter("create_type");
-      logStartFunction("create_type: " + type.getName());
-      // check whether type already exists
-      if(get_type(type.getName()) != null) {
-        throw new AlreadyExistsException("Type " + type.getName() + " already exists");
+      boolean success = false;
+      try {
+        getMS().openTransaction();
+        if (getMS().dropDatabase(name)) {
+          success = getMS().commitTransaction();
         }
-
-      //TODO:pc Validation of types should be done by clients or here????
-      return getMS().createType(type);
+      } finally {
+        if (!success) {
+          getMS().rollbackTransaction();
+        } else {
+          wh.deleteDir(wh.getDefaultDatabasePath(name), true);
+          // it is not a terrible thing even if the data is not deleted
+        }
       }
+      return success;
+    }
 
-    public Type get_type(String name) throws MetaException {
-      this.incrementCounter("get_type");
-      logStartFunction("get_type: " + name);
-      return getMS().getType(name);
-    }
+    public List get_databases() throws MetaException {
+      incrementCounter("get_databases");
+      logStartFunction("get_databases");
+      return getMS().getDatabases();
+    }
 
-    public boolean drop_type(String name) throws MetaException {
-      this.incrementCounter("drop_type");
-      logStartFunction("drop_type: " + name);
-      // TODO:pc validate that there are no types that refer to this
-      return getMS().dropType(name);
+    public boolean create_type(Type type) throws AlreadyExistsException,
+        MetaException, InvalidObjectException {
+      incrementCounter("create_type");
+      logStartFunction("create_type: " + type.getName());
+      // check whether type already exists
+      if (get_type(type.getName()) != null) {
+        throw new AlreadyExistsException("Type " + type.getName()
+            + " already exists");
       }
 
-    public Map get_type_all(String name) throws MetaException {
-      this.incrementCounter("get_type_all");
-      // TODO Auto-generated method stub
-      logStartFunction("get_type_all");
-      throw new MetaException("Not yet implemented");
-    }
+      // TODO:pc Validation of types should be done by clients or here????
+      return getMS().createType(type);
+    }
 
-    public void create_table(Table tbl) throws AlreadyExistsException, MetaException, InvalidObjectException {
-      this.incrementCounter("create_table");
-      logStartFunction("create_table: db=" + tbl.getDbName() + " tbl=" + tbl.getTableName());
+    public Type get_type(String name) throws MetaException {
+      incrementCounter("get_type");
+      logStartFunction("get_type: " + name);
+      return getMS().getType(name);
+    }
 
-      if(!MetaStoreUtils.validateName(tbl.getTableName()) ||
-          !MetaStoreUtils.validateColNames(tbl.getSd().getCols()) ||
-          (tbl.getPartitionKeys() != null &&
-           !MetaStoreUtils.validateColNames(tbl.getPartitionKeys()))) {
-        throw new InvalidObjectException(tbl.getTableName() + " is not a valid object name");
-      }
+    public boolean drop_type(String name) throws MetaException {
+      incrementCounter("drop_type");
+      logStartFunction("drop_type: " + name);
+      // TODO:pc validate that there are no types that refer to this
+      return getMS().dropType(name);
+    }
 
-      Path tblPath = null;
-      boolean success = false, madeDir = false;
-      try {
-        getMS().openTransaction();
-        if(tbl.getSd().getLocation() == null || tbl.getSd().getLocation().isEmpty()) {
-          tblPath = wh.getDefaultTablePath(tbl.getDbName(), tbl.getTableName());
-        } else {
-          if (!isExternal(tbl)) {
-            LOG.warn("Location: " + tbl.getSd().getLocation() +
-                "specified for non-external table:" + tbl.getTableName());
-          }
-          tblPath = wh.getDnsPath(new Path(tbl.getSd().getLocation()));
-        }
+    public Map get_type_all(String name) throws MetaException {
+      incrementCounter("get_type_all");
+      // TODO Auto-generated method stub
+      logStartFunction("get_type_all");
+      throw new MetaException("Not yet implemented");
+    }
+    public void create_table(Table tbl) throws AlreadyExistsException,
+        MetaException, InvalidObjectException {
+      incrementCounter("create_table");
+      logStartFunction("create_table: db=" + tbl.getDbName() + " tbl="
+          + tbl.getTableName());
 
-        tbl.getSd().setLocation(tblPath.toString());
+      if (!MetaStoreUtils.validateName(tbl.getTableName())
+          || !MetaStoreUtils.validateColNames(tbl.getSd().getCols())
+          || (tbl.getPartitionKeys() != null && !MetaStoreUtils
+              .validateColNames(tbl.getPartitionKeys()))) {
+        throw new InvalidObjectException(tbl.getTableName()
+            + " is not a valid object name");
+      }
 
-        // get_table checks whether database exists, it should be moved here
-        if(is_table_exists(tbl.getDbName(), tbl.getTableName())) {
-          throw new AlreadyExistsException("Table " + tbl.getTableName() + " already exists");
-        }
+      Path tblPath = null;
+      boolean success = false, madeDir = false;
+      try {
+        getMS().openTransaction();
+        if (tbl.getSd().getLocation() == null
+            || tbl.getSd().getLocation().isEmpty()) {
+          tblPath = wh.getDefaultTablePath(tbl.getDbName(), tbl.getTableName());
+        } else {
+          if (!isExternal(tbl)) {
+            LOG.warn("Location: " + tbl.getSd().getLocation()
+                + " specified for non-external table:" + tbl.getTableName());
+          }
+          tblPath = wh.getDnsPath(new Path(tbl.getSd().getLocation()));
+        }
 
-        if(!wh.isDir(tblPath)) {
-          if(!wh.mkdirs(tblPath)) {
-            throw new MetaException (tblPath + " is not a directory or unable to create one");
-          }
-          madeDir = true;
+        tbl.getSd().setLocation(tblPath.toString());
+
+        // get_table checks whether database exists, it should be moved here
+        if (is_table_exists(tbl.getDbName(), tbl.getTableName())) {
+          throw new AlreadyExistsException("Table " + tbl.getTableName()
+              + " already exists");
+        }
+
+        if (!wh.isDir(tblPath)) {
+          if (!wh.mkdirs(tblPath)) {
+            throw new MetaException(tblPath
+                + " is not a directory or unable to create one");
           }
+          madeDir = true;
+        }
 
-        // set create time
-        long time = System.currentTimeMillis() / 1000;
-        tbl.setCreateTime((int) time);
-        tbl.putToParameters(Constants.DDL_TIME, Long.toString(time));
+        // set create time
+        long time = System.currentTimeMillis() / 1000;
+        tbl.setCreateTime((int) time);
+        tbl.putToParameters(Constants.DDL_TIME, Long.toString(time));
 
-        getMS().createTable(tbl);
-        success = getMS().commitTransaction();
+        getMS().createTable(tbl);
+        success = getMS().commitTransaction();
 
-      } finally {
-        if(!success) {
-          getMS().rollbackTransaction();
-          if(madeDir) {
-            wh.deleteDir(tblPath, true);
-          }
+      } finally {
+        if (!success) {
+          getMS().rollbackTransaction();
+          if (madeDir) {
+            wh.deleteDir(tblPath, true);
           }
         }
       }
+    }
 
-    public boolean is_table_exists(String dbname, String name) throws MetaException {
-      try {
-        return (get_table(dbname, name) != null);
-      } catch (NoSuchObjectException e) {
-        return false;
-      }
-    }
+    public boolean is_table_exists(String dbname, String name)
+        throws MetaException {
+      try {
+        return (get_table(dbname, name) != null);
+      } catch (NoSuchObjectException e) {
+        return false;
+      }
+    }
 
-    public void drop_table(String dbname, String name, boolean deleteData) throws NoSuchObjectException, MetaException {
-      this.incrementCounter("drop_table");
-      logStartFunction("drop_table", dbname, name);
-      boolean success = false;
-      boolean isExternal = false;
-      Path tblPath = null;
-      Table tbl = null;
-      isExternal = false;
-      try {
-        getMS().openTransaction();
-        // drop any partitions
-        tbl = get_table(dbname, name);
-        if (tbl == null) {
-          throw new NoSuchObjectException(name + " doesn't exist");
-        }
-        if(tbl.getSd() == null || tbl.getSd().getLocation() == null) {
-          throw new MetaException("Table metadata is corrupted");
-        }
-        isExternal = isExternal(tbl);
-        tblPath = new Path(tbl.getSd().getLocation());
-        if(!getMS().dropTable(dbname, name)) {
-          throw new MetaException("Unable to drop table");
-        }
-        tbl = null; // table collections disappear after dropping
-        success = getMS().commitTransaction();
-      } finally {
-        if(!success) {
-          getMS().rollbackTransaction();
-        } else if(deleteData && (tblPath != null) && !isExternal) {
-          wh.deleteDir(tblPath, true);
-          // ok even if the data is not deleted
-        }
-      }
-    }
-
-    /**
-     * Is this an external table?
-     * @param table Check if this table is external.
-     * @return True if the table is external, otherwise false.
-     */
-    private boolean isExternal(Table table) {
-      if(table == null) {
-        return false;
+    public void drop_table(String dbname, String name, boolean deleteData)
+        throws NoSuchObjectException, MetaException {
+      incrementCounter("drop_table");
+      logStartFunction("drop_table", dbname, name);
+      boolean success = false;
+      boolean isExternal = false;
+      Path tblPath = null;
+      Table tbl = null;
+      isExternal = false;
+      try {
+        getMS().openTransaction();
+        // drop any partitions
+        tbl = get_table(dbname, name);
+        if (tbl == null) {
+          throw new NoSuchObjectException(name + " doesn't exist");
         }
-      Map params = table.getParameters();
-      if(params == null) {
-        return false;
+        if (tbl.getSd() == null || tbl.getSd().getLocation() == null) {
+          throw new MetaException("Table metadata is corrupted");
         }
+        isExternal = isExternal(tbl);
+        tblPath = new Path(tbl.getSd().getLocation());
+        if (!getMS().dropTable(dbname, name)) {
+          throw new MetaException("Unable to drop table");
+        }
+        tbl = null; // table collections disappear after dropping
+        success = getMS().commitTransaction();
+      } finally {
+        if (!success) {
+          getMS().rollbackTransaction();
+        } else if (deleteData && (tblPath != null) && !isExternal) {
+          wh.deleteDir(tblPath, true);
+          // ok even if the data is not deleted
+        }
+      }
+    }
 
-      return "TRUE".equalsIgnoreCase(params.get("EXTERNAL"));
+    /**
+     * Is this an external table?
+     *
+     * @param table
+     *          Check if this table is external.
+     * @return True if the table is external, otherwise false.
+     */
+    private boolean isExternal(Table table) {
+      if (table == null) {
+        return false;
       }
+      Map params = table.getParameters();
+      if (params == null) {
+        return false;
+      }
 
-    public Table get_table(String dbname, String name) throws MetaException, NoSuchObjectException {
-      this.incrementCounter("get_table");
-      logStartFunction("get_table", dbname, name);
-      Table t = getMS().getTable(dbname, name);
-      if(t == null) {
-        throw new NoSuchObjectException(dbname + "." + name + " table not found");
-      }
-      return t;
+      return "TRUE".equalsIgnoreCase(params.get("EXTERNAL"));
+    }
+
+    public Table get_table(String dbname, String name) throws MetaException,
+        NoSuchObjectException {
+      incrementCounter("get_table");
+      logStartFunction("get_table", dbname, name);
+      Table t = getMS().getTable(dbname, name);
+      if (t == null) {
+        throw new NoSuchObjectException(dbname + "." + name
+            + " table not found");
       }
+      return t;
+    }
 
-    public boolean set_table_parameters(String dbname, String name,
-        Map params) throws NoSuchObjectException,
-        MetaException {
-      this.incrementCounter("set_table_parameters");
-      logStartFunction("set_table_parameters", dbname, name);
-      // TODO Auto-generated method stub
-      return false;
+    public boolean set_table_parameters(String dbname, String name,
+        Map params) throws NoSuchObjectException, MetaException {
+      incrementCounter("set_table_parameters");
+      logStartFunction("set_table_parameters", dbname, name);
+      // TODO Auto-generated method stub
+      return false;
+    }
+
+    public Partition append_partition(String dbName, String tableName,
+        List part_vals) throws InvalidObjectException,
+        AlreadyExistsException, MetaException {
+      incrementCounter("append_partition");
+      logStartFunction("append_partition", dbName, tableName);
+      if (LOG.isDebugEnabled()) {
+        for (String part : part_vals) {
+          LOG.debug(part);
+        }
       }
+      Partition part = new Partition();
+      boolean success = false, madeDir = false;
+      Path partLocation = null;
+      try {
+        getMS().openTransaction();
+        part = new Partition();
+        part.setDbName(dbName);
+        part.setTableName(tableName);
+        part.setValues(part_vals);
 
-    public Partition append_partition(String dbName, String tableName, List part_vals)
-        throws InvalidObjectException, AlreadyExistsException, MetaException {
-      this.incrementCounter("append_partition");
-      logStartFunction("append_partition", dbName, tableName);
-      if(LOG.isDebugEnabled()) {
-        for (String part : part_vals) {
-          LOG.debug(part);
-        }
+        Table tbl = getMS().getTable(part.getDbName(), part.getTableName());
+        if (tbl == null) {
+          throw new InvalidObjectException(
+              "Unable to add partition because table or database do not exist");
         }
-      Partition part = new Partition();
-      boolean success = false, madeDir = false;
-      Path partLocation = null;
-      try {
-        getMS().openTransaction();
-        part = new Partition();
-        part.setDbName(dbName);
-        part.setTableName(tableName);
-        part.setValues(part_vals);
 
-        Table tbl = getMS().getTable(part.getDbName(), part.getTableName());
-        if(tbl == null) {
-          throw new InvalidObjectException("Unable to add partition because table or database do not exist");
-        }
+        part.setSd(tbl.getSd());
+        partLocation = new Path(tbl.getSd().getLocation(), Warehouse
+            .makePartName(tbl.getPartitionKeys(), part_vals));
+        part.getSd().setLocation(partLocation.toString());
 
-        part.setSd(tbl.getSd());
-        partLocation = new Path(tbl.getSd().getLocation(),
-            Warehouse.makePartName(tbl.getPartitionKeys(), part_vals));
-        part.getSd().setLocation(partLocation.toString());
+        Partition old_part = get_partition(part.getDbName(), part
+            .getTableName(), part.getValues());
+        if (old_part != null) {
+          throw new AlreadyExistsException("Partition already exists:" + part);
+        }
 
-        Partition old_part = this.get_partition(part.getDbName(),
-            part.getTableName(), part.getValues());
-        if( old_part != null) {
-          throw new AlreadyExistsException("Partition already exists:" + part);
+        if (!wh.isDir(partLocation)) {
+          if (!wh.mkdirs(partLocation)) {
+            throw new MetaException(partLocation
+                + " is not a directory or unable to create one");
           }
+          madeDir = true;
+        }
 
-        if(!wh.isDir(partLocation)) {
-          if(!wh.mkdirs(partLocation)) {
-            throw new MetaException (partLocation + " is not a directory or unable to create one");
-          }
-          madeDir = true;
-          }
+        // set create time
+        long time = System.currentTimeMillis() / 1000;
+        part.setCreateTime((int) time);
+        part.putToParameters(Constants.DDL_TIME, Long.toString(time));
 
-        // set create time
-        long time = System.currentTimeMillis() / 1000;
-        part.setCreateTime((int) time);
-        part.putToParameters(Constants.DDL_TIME, Long.toString(time));
-
-        success = getMS().addPartition(part);
-        if(success) {
-          success = getMS().commitTransaction();
+        success = getMS().addPartition(part);
+        if (success) {
+          success = getMS().commitTransaction();
+        }
+      } finally {
+        if (!success) {
+          getMS().rollbackTransaction();
+          if (madeDir) {
+            wh.deleteDir(partLocation, true);
           }
-      } finally {
-        if(!success) {
-          getMS().rollbackTransaction();
-          if(madeDir) {
-            wh.deleteDir(partLocation, true);
-          }
-          }
         }
-      return part;
       }
+      return part;
+    }
 
-    public int add_partitions(List parts) throws MetaException, InvalidObjectException, AlreadyExistsException {
-      this.incrementCounter("add_partition");
-      if(parts.size() == 0) {
-        return 0;
+    public int add_partitions(List parts) throws MetaException,
+        InvalidObjectException, AlreadyExistsException {
+      incrementCounter("add_partition");
+      if (parts.size() == 0) {
+        return 0;
+      }
+      String db = parts.get(0).getDbName();
+      String tbl = parts.get(0).getTableName();
+      logStartFunction("add_partitions", db, tbl);
+      boolean success = false;
+      try {
+        getMS().openTransaction();
+        for (Partition part : parts) {
+          add_partition(part);
         }
-      String db = parts.get(0).getDbName();
-      String tbl = parts.get(0).getTableName();
-      logStartFunction("add_partitions", db, tbl);
-      boolean success = false;
-      try {
-        getMS().openTransaction();
-        for (Partition part : parts) {
-          this.add_partition(part);
-        }
-        success = true;
-        getMS().commitTransaction();
-      } finally {
-        if(!success) {
-          getMS().rollbackTransaction();
-        }
+        success = true;
+        getMS().commitTransaction();
+      } finally {
+        if (!success) {
+          getMS().rollbackTransaction();
         }
-      return parts.size();
       }
+      return parts.size();
+    }
 
-    public Partition add_partition(Partition part) throws InvalidObjectException,
-        AlreadyExistsException, MetaException {
-      this.incrementCounter("add_partition");
-      logStartFunction("add_partition", part.getDbName(), part.getTableName());
-      boolean success = false, madeDir = false;
-      Path partLocation = null;
-      try {
-        getMS().openTransaction();
-        Partition old_part = this.get_partition(part.getDbName(), part.getTableName(), part.getValues());
-        if( old_part != null) {
-          throw new AlreadyExistsException("Partition already exists:" + part);
-        }
-        Table tbl = getMS().getTable(part.getDbName(), part.getTableName());
-        if(tbl == null) {
-          throw new InvalidObjectException("Unable to add partition because table or database do not exist");
-        }
+    public Partition add_partition(Partition part)
+        throws InvalidObjectException, AlreadyExistsException, MetaException {
+      incrementCounter("add_partition");
+      logStartFunction("add_partition", part.getDbName(), part.getTableName());
+      boolean success = false, madeDir = false;
+      Path partLocation = null;
+      try {
+        getMS().openTransaction();
+        Partition old_part = get_partition(part.getDbName(), part
+            .getTableName(), part.getValues());
+        if (old_part != null) {
+          throw new AlreadyExistsException("Partition already exists:" + part);
+        }
+        Table tbl = getMS().getTable(part.getDbName(), part.getTableName());
+        if (tbl == null) {
+          throw new InvalidObjectException(
+              "Unable to add partition because table or database do not exist");
+        }
 
-        String partLocationStr = part.getSd().getLocation();
-        if (partLocationStr == null || partLocationStr.isEmpty()) {
-          // set default location if not specified
-          partLocation = new
-              Path(tbl.getSd().getLocation(),
-              Warehouse.makePartName(tbl.getPartitionKeys(), part.getValues()));
+        String partLocationStr = part.getSd().getLocation();
+        if (partLocationStr == null || partLocationStr.isEmpty()) {
+          // set default location if not specified
+          partLocation = new Path(tbl.getSd().getLocation(), Warehouse
+              .makePartName(tbl.getPartitionKeys(), part.getValues()));
 
-        } else {
-          partLocation = wh.getDnsPath(new Path(partLocationStr));
-        }
+        } else {
+          partLocation = wh.getDnsPath(new Path(partLocationStr));
+        }
 
-        part.getSd().setLocation(partLocation.toString());
+        part.getSd().setLocation(partLocation.toString());
 
-        if(!wh.isDir(partLocation)) {
-          if(!wh.mkdirs(partLocation)) {
-            throw new MetaException (partLocation + " is not a directory or unable to create one");
-          }
-          madeDir = true;
+        if (!wh.isDir(partLocation)) {
+          if (!wh.mkdirs(partLocation)) {
+            throw new MetaException(partLocation
+                + " is not a directory or unable to create one");
           }
+          madeDir = true;
+        }
 
-        // set create time
-        long time = System.currentTimeMillis() / 1000;
-        part.setCreateTime((int) time);
-        part.putToParameters(Constants.DDL_TIME, Long.toString(time));
+        // set create time
+        long time = System.currentTimeMillis() / 1000;
+        part.setCreateTime((int) time);
+        part.putToParameters(Constants.DDL_TIME, Long.toString(time));
 
-        success = getMS().addPartition(part) && getMS().commitTransaction();
+        success = getMS().addPartition(part) && getMS().commitTransaction();
 
-      } finally {
-        if(!success) {
-          getMS().rollbackTransaction();
-          if(madeDir) {
-            wh.deleteDir(partLocation, true);
-          }
+      } finally {
+        if (!success) {
+          getMS().rollbackTransaction();
+          if (madeDir) {
+            wh.deleteDir(partLocation, true);
           }
         }
-      return part;
       }
+      return part;
+    }
 
-    public boolean drop_partition(String db_name, String tbl_name, List part_vals, boolean deleteData) throws NoSuchObjectException, MetaException,
-        TException {
-      this.incrementCounter("drop_partition");
-      logStartFunction("drop_partition", db_name, tbl_name);
-      LOG.info("Partition values:" + part_vals);
-      boolean success = false;
-      Path partPath = null;
-      Table tbl = null;
-      try {
-        getMS().openTransaction();
-        Partition part = this.get_partition(db_name, tbl_name, part_vals);
-        if(part == null) {
-          throw new NoSuchObjectException("Partition doesn't exist. " + part_vals);
+    public boolean drop_partition(String db_name, String tbl_name,
+        List part_vals, boolean deleteData)
+        throws NoSuchObjectException, MetaException, TException {
+      incrementCounter("drop_partition");
+      logStartFunction("drop_partition", db_name, tbl_name);
+      LOG.info("Partition values:" + part_vals);
+      boolean success = false;
+      Path partPath = null;
+      Table tbl = null;
+      try {
+        getMS().openTransaction();
+        Partition part = get_partition(db_name, tbl_name, part_vals);
+        if (part == null) {
+          throw new NoSuchObjectException("Partition doesn't exist. "
" + + part_vals); + } + if (part.getSd() == null || part.getSd().getLocation() == null) { + throw new MetaException("Partition metadata is corrupted"); + } + if (!getMS().dropPartition(db_name, tbl_name, part_vals)) { + throw new MetaException("Unable to drop partition"); + } + success = getMS().commitTransaction(); + partPath = new Path(part.getSd().getLocation()); + tbl = get_table(db_name, tbl_name); + } finally { + if (!success) { + getMS().rollbackTransaction(); + } else if (deleteData && (partPath != null)) { + if (tbl != null && !isExternal(tbl)) { + wh.deleteDir(partPath, true); + // ok even if the data is not deleted } - if(part.getSd() == null || part.getSd().getLocation() == null) { - throw new MetaException("Partition metadata is corrupted"); - } - if(!getMS().dropPartition(db_name, tbl_name, part_vals)) { - throw new MetaException("Unable to drop partition"); - } - success = getMS().commitTransaction(); - partPath = new Path(part.getSd().getLocation()); - tbl = get_table(db_name, tbl_name); - } finally { - if(!success) { - getMS().rollbackTransaction(); - } else if(deleteData && (partPath != null)) { - if(tbl != null && !isExternal(tbl)) { - wh.deleteDir(partPath, true); - // ok even if the data is not deleted - } - } } - return true; } + return true; + } - public Partition get_partition(String db_name, String tbl_name, List part_vals) - throws MetaException { - this.incrementCounter("get_partition"); - logStartFunction("get_partition", db_name, tbl_name); - return getMS().getPartition(db_name, tbl_name, part_vals); - } + public Partition get_partition(String db_name, String tbl_name, + List part_vals) throws MetaException { + incrementCounter("get_partition"); + logStartFunction("get_partition", db_name, tbl_name); + return getMS().getPartition(db_name, tbl_name, part_vals); + } - public List get_partitions(String db_name, String tbl_name, short max_parts) - throws NoSuchObjectException, MetaException { - this.incrementCounter("get_partitions"); - logStartFunction("get_partitions", db_name, tbl_name); - return getMS().getPartitions(db_name, tbl_name, max_parts); - } + public List get_partitions(String db_name, String tbl_name, + short max_parts) throws NoSuchObjectException, MetaException { + incrementCounter("get_partitions"); + logStartFunction("get_partitions", db_name, tbl_name); + return getMS().getPartitions(db_name, tbl_name, max_parts); + } - public List get_partition_names(String db_name, String tbl_name, short max_parts) throws MetaException { - this.incrementCounter("get_partition_names"); - logStartFunction("get_partition_names", db_name, tbl_name); - return getMS().listPartitionNames(db_name, tbl_name, max_parts); - } + public List get_partition_names(String db_name, String tbl_name, + short max_parts) throws MetaException { + incrementCounter("get_partition_names"); + logStartFunction("get_partition_names", db_name, tbl_name); + return getMS().listPartitionNames(db_name, tbl_name, max_parts); + } - public void alter_partition(String db_name, String tbl_name, - Partition new_part) throws InvalidOperationException, MetaException, - TException { - this.incrementCounter("alter_partition"); - logStartFunction("alter_partition", db_name, tbl_name); - LOG.info("Partition values:" + new_part.getValues()); - try { - new_part.putToParameters(Constants.DDL_TIME, Long.toString(System.currentTimeMillis() / 1000)); - getMS().alterPartition(db_name, tbl_name, new_part); - } catch(InvalidObjectException e) { - LOG.error(StringUtils.stringifyException(e)); - throw new 
InvalidOperationException("alter is not possible"); - } + public void alter_partition(String db_name, String tbl_name, + Partition new_part) throws InvalidOperationException, MetaException, + TException { + incrementCounter("alter_partition"); + logStartFunction("alter_partition", db_name, tbl_name); + LOG.info("Partition values:" + new_part.getValues()); + try { + new_part.putToParameters(Constants.DDL_TIME, Long.toString(System + .currentTimeMillis() / 1000)); + getMS().alterPartition(db_name, tbl_name, new_part); + } catch (InvalidObjectException e) { + LOG.error(StringUtils.stringifyException(e)); + throw new InvalidOperationException("alter is not possible"); } + } - public boolean create_index(Index index_def) - throws IndexAlreadyExistsException, MetaException { - this.incrementCounter("create_index"); - // TODO Auto-generated method stub - throw new MetaException("Not yet implemented"); - } + public boolean create_index(Index index_def) + throws IndexAlreadyExistsException, MetaException { + incrementCounter("create_index"); + // TODO Auto-generated method stub + throw new MetaException("Not yet implemented"); + } - public String getVersion() throws TException { - this.incrementCounter("getVersion"); - logStartFunction("getVersion"); - return "3.0"; - } + public String getVersion() throws TException { + incrementCounter("getVersion"); + logStartFunction("getVersion"); + return "3.0"; + } - public void alter_table(String dbname, String name, Table newTable) throws InvalidOperationException, - MetaException { - this.incrementCounter("alter_table"); - logStartFunction("truncate_table: db=" + dbname + " tbl=" + name + " newtbl=" + newTable.getTableName()); - newTable.putToParameters(Constants.DDL_TIME, Long.toString(System.currentTimeMillis() / 1000)); - alterHandler.alterTable(getMS(), wh, dbname, name, newTable); - } + public void alter_table(String dbname, String name, Table newTable) + throws InvalidOperationException, MetaException { + incrementCounter("alter_table"); + logStartFunction("truncate_table: db=" + dbname + " tbl=" + name + + " newtbl=" + newTable.getTableName()); + newTable.putToParameters(Constants.DDL_TIME, Long.toString(System + .currentTimeMillis() / 1000)); + alterHandler.alterTable(getMS(), wh, dbname, name, newTable); + } - public List get_tables(String dbname, String pattern) throws MetaException { - this.incrementCounter("get_tables"); - logStartFunction("get_tables: db=" + dbname + " pat=" + pattern); - return getMS().getTables(dbname, pattern); - } + public List get_tables(String dbname, String pattern) + throws MetaException { + incrementCounter("get_tables"); + logStartFunction("get_tables: db=" + dbname + " pat=" + pattern); + return getMS().getTables(dbname, pattern); + } + public List get_fields(String db, String tableName) + throws MetaException, UnknownTableException, UnknownDBException { + incrementCounter("get_fields"); + logStartFunction("get_fields: db=" + db + "tbl=" + tableName); + String[] names = tableName.split("\\."); + String base_table_name = names[0]; - public List get_fields(String db, String tableName) - throws MetaException,UnknownTableException, UnknownDBException { - this.incrementCounter("get_fields"); - logStartFunction("get_fields: db=" + db + "tbl=" + tableName); - String [] names = tableName.split("\\."); - String base_table_name = names[0]; - - Table tbl; + Table tbl; + try { + tbl = get_table(db, base_table_name); + } catch (NoSuchObjectException e) { + throw new UnknownTableException(e.getMessage()); + } + boolean isNative = 
-          SerDeUtils.isNativeSerDe(tbl.getSd().getSerdeInfo().getSerializationLib());
-      if (isNative)
-        return tbl.getSd().getCols();
-      else {
-        try {
-          Deserializer s = MetaStoreUtils.getDeserializer(this.hiveConf, tbl);
-          return MetaStoreUtils.getFieldsFromDeserializer(tableName, s);
-        } catch(SerDeException e) {
-          StringUtils.stringifyException(e);
-          throw new MetaException(e.getMessage());
-        }
-      }
+      Table tbl;
+      try {
+        tbl = get_table(db, base_table_name);
+      } catch (NoSuchObjectException e) {
+        throw new UnknownTableException(e.getMessage());
+      }
+      boolean isNative = SerDeUtils.isNativeSerDe(tbl.getSd().getSerdeInfo()
+          .getSerializationLib());
+      if (isNative) {
+        return tbl.getSd().getCols();
+      } else {
+        try {
+          Deserializer s = MetaStoreUtils.getDeserializer(hiveConf, tbl);
+          return MetaStoreUtils.getFieldsFromDeserializer(tableName, s);
+        } catch (SerDeException e) {
+          StringUtils.stringifyException(e);
+          throw new MetaException(e.getMessage());
+        }
+      }
     }
 
-    /**
-     * Return the schema of the table. This function includes partition columns
-     * in addition to the regular columns.
-     * @param db Name of the database
-     * @param tableName Name of the table
-     * @return List of columns, each column is a FieldSchema structure
-     * @throws MetaException
-     * @throws UnknownTableException
-     * @throws UnknownDBException
-     */
-    public List get_schema(String db, String tableName)
+    /**
+     * Return the schema of the table. This function includes partition columns
+     * in addition to the regular columns.
+     *
+     * @param db
+     *          Name of the database
+     * @param tableName
+     *          Name of the table
+     * @return List of columns, each column is a FieldSchema structure
+     * @throws MetaException
+     * @throws UnknownTableException
+     * @throws UnknownDBException
+     */
+    public List get_schema(String db, String tableName)
         throws MetaException, UnknownTableException, UnknownDBException {
-      this.incrementCounter("get_schema");
-      logStartFunction("get_schema: db=" + db + "tbl=" + tableName);
-      String [] names = tableName.split("\\.");
-      String base_table_name = names[0];
+      incrementCounter("get_schema");
+      logStartFunction("get_schema: db=" + db + " tbl=" + tableName);
+      String[] names = tableName.split("\\.");
+      String base_table_name = names[0];
 
-      Table tbl;
-      try {
-        tbl = this.get_table(db, base_table_name);
-      } catch (NoSuchObjectException e) {
-        throw new UnknownTableException(e.getMessage());
-      }
-      List fieldSchemas = this.get_fields(db, base_table_name);
+      Table tbl;
+      try {
+        tbl = get_table(db, base_table_name);
+      } catch (NoSuchObjectException e) {
+        throw new UnknownTableException(e.getMessage());
+      }
+      List fieldSchemas = get_fields(db, base_table_name);
 
-      if (tbl == null || fieldSchemas == null) {
-        throw new UnknownTableException(tableName + " doesn't exist");
-      }
+      if (tbl == null || fieldSchemas == null) {
+        throw new UnknownTableException(tableName + " doesn't exist");
+      }
 
-      if (tbl.getPartitionKeys() != null) {
-        // Combine the column field schemas and the partition keys to create the whole schema
-        fieldSchemas.addAll(tbl.getPartitionKeys());
-      }
-      return fieldSchemas;
+      if (tbl.getPartitionKeys() != null) {
+        // Combine the column field schemas and the partition keys to create the
+        // whole schema
+        fieldSchemas.addAll(tbl.getPartitionKeys());
       }
+      return fieldSchemas;
+    }
 
-    public String getCpuProfile(int profileDurationInSec) throws TException {
-      return "";
+    public String getCpuProfile(int profileDurationInSec) throws TException {
+      return "";
+    }
+
+    /**
+     * Returns the value of the given configuration variable name. If the
+     * configuration variable with the given name doesn't exist, or if there
+     * were an exception thrown while retrieving the variable, or if name is
+     * null, defaultValue is returned.
+     */
+    public String get_config_value(String name, String defaultValue)
+        throws TException, ConfigValSecurityException {
+      incrementCounter("get_config_value");
+      logStartFunction("get_config_value: name=" + name + " defaultValue="
+          + defaultValue);
+      if (name == null) {
+        return defaultValue;
       }
+      // Allow only keys that start with hive.*, hdfs.*, mapred.* for security
+      // i.e. don't allow access to db password
+      if (!Pattern.matches("(hive|hdfs|mapred).*", name)) {
+        throw new ConfigValSecurityException("For security reasons, the "
+            + "config key " + name + " cannot be accessed");
+      }
 
-    /**
-     * Returns the value of the given configuration variable name. If the
-     * configuration variable with the given name doesn't exist, or if there
-     * were an exception thrown while retrieving the variable, or if name is
-     * null, defaultValue is returned.
-     */
-    public String get_config_value(String name, String defaultValue)
-        throws TException, ConfigValSecurityException {
-      this.incrementCounter("get_config_value");
-      logStartFunction("get_config_value: name=" + name +
-          " defaultValue=" + defaultValue);
-      if(name == null) {
-        return defaultValue;
-      }
-      // Allow only keys that start with hive.*, hdfs.*, mapred.* for security
-      // i.e. don't allow access to db password
-      if(!Pattern.matches("(hive|hdfs|mapred).*", name)) {
-        throw new ConfigValSecurityException("For security reasons, the " +
-            "config key " + name + " cannot be accessed");
-      }
-
-      String toReturn = defaultValue;
-      try {
-        toReturn = hiveConf.get(name, defaultValue);
-      } catch(RuntimeException e) {
-        LOG.error(threadLocalId.get().toString() + ": " +
-            "RuntimeException thrown in get_config_value - msg: " +
-            e.getMessage() + " cause: " + e.getCause());
-      }
-      return toReturn;
+      String toReturn = defaultValue;
+      try {
+        toReturn = hiveConf.get(name, defaultValue);
+      } catch (RuntimeException e) {
+        LOG.error(threadLocalId.get().toString() + ": "
+            + "RuntimeException thrown in get_config_value - msg: "
+            + e.getMessage() + " cause: " + e.getCause());
       }
+      return toReturn;
+    }
   }
 
   /**
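A note on the per-thread state in the hunk above: HMSHandler keeps one RawStore and one serial id per Thrift worker thread through raw ThreadLocal fields, which is why initialValue() is overridden in both anonymous classes and why getMS() lazily constructs the store on a thread's first call. A minimal standalone sketch of the same pattern, using only the JDK (class and field names here are invented for illustration, and ThreadLocal.withInitial is a Java 8+ convenience the patch's older codebase could not use):

    import java.util.concurrent.atomic.AtomicInteger;

    public class PerThreadStore {
      // Each worker thread gets a stable id, assigned on first use,
      // mirroring threadLocalId/nextSerialNum above.
      private static final AtomicInteger nextSerialNum = new AtomicInteger(0);
      private static final ThreadLocal<Integer> threadId =
          ThreadLocal.withInitial(nextSerialNum::getAndIncrement);

      // Lazily created, per-thread store handle, mirroring getMS():
      // constructed on first access from a thread, then cached for that thread.
      private static final ThreadLocal<ExpensiveStore> store =
          ThreadLocal.withInitial(ExpensiveStore::new);

      static class ExpensiveStore {
        ExpensiveStore() {
          System.out.println(threadId.get() + ": opening store");
        }
      }

      public static void main(String[] args) throws InterruptedException {
        Runnable task = () -> store.get(); // first get() constructs the store
        Thread t1 = new Thread(task);
        Thread t2 = new Thread(task);
        t1.start(); t2.start();
        t1.join(); t2.join();
      }
    }

Run with two threads, this prints two "opening store" lines with distinct ids, the same behavior the handler relies on to give every Thrift worker its own JPOX-backed store.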
If the + * configuration variable with the given name doesn't exist, or if there + * were an exception thrown while retrieving the variable, or if name is + * null, defaultValue is returned. + */ + public String get_config_value(String name, String defaultValue) + throws TException, ConfigValSecurityException { + incrementCounter("get_config_value"); + logStartFunction("get_config_value: name=" + name + " defaultValue=" + + defaultValue); + if (name == null) { + return defaultValue; } + // Allow only keys that start with hive.*, hdfs.*, mapred.* for security + // i.e. don't allow access to db password + if (!Pattern.matches("(hive|hdfs|mapred).*", name)) { + throw new ConfigValSecurityException("For security reasons, the " + + "config key " + name + " cannot be accessed"); + } - /** - * Returns the value of the given configuration variable name. If the - * configuration variable with the given name doesn't exist, or if there - * were an exception thrown while retrieving the variable, or if name is - * null, defaultValue is returned. - */ - public String get_config_value(String name, String defaultValue) - throws TException, ConfigValSecurityException { - this.incrementCounter("get_config_value"); - logStartFunction("get_config_value: name=" + name + - " defaultValue=" + defaultValue); - if(name == null) { - return defaultValue; - } - // Allow only keys that start with hive.*, hdfs.*, mapred.* for security - // i.e. don't allow access to db password - if(!Pattern.matches("(hive|hdfs|mapred).*", name)) { - throw new ConfigValSecurityException("For security reasons, the " + - "config key " + name + " cannot be accessed"); - } - - String toReturn = defaultValue; - try { - toReturn = hiveConf.get(name, defaultValue); - } catch(RuntimeException e) { - LOG.error(threadLocalId.get().toString() + ": " + - "RuntimeException thrown in get_config_value - msg: " + - e.getMessage() + " cause: " + e.getCause()); - } - return toReturn; + String toReturn = defaultValue; + try { + toReturn = hiveConf.get(name, defaultValue); + } catch (RuntimeException e) { + LOG.error(threadLocalId.get().toString() + ": " + + "RuntimeException thrown in get_config_value - msg: " + + e.getMessage() + " cause: " + e.getCause()); } + return toReturn; + } } /** @@ -755,28 +803,32 @@ public static void main(String[] args) { int port = 9083; - if(args.length > 0) { + if (args.length > 0) { port = Integer.getInteger(args[0]); } try { TServerTransport serverTransport = new TServerSocket(port); Iface handler = new HMSHandler("new db based metaserver"); - FacebookService.Processor processor = new ThriftHiveMetastore.Processor(handler); + FacebookService.Processor processor = new ThriftHiveMetastore.Processor( + handler); TThreadPoolServer.Options options = new TThreadPoolServer.Options(); options.minWorkerThreads = 200; TServer server = new TThreadPoolServer(processor, serverTransport, new TTransportFactory(), new TTransportFactory(), new TBinaryProtocol.Factory(), new TBinaryProtocol.Factory(), options); - HMSHandler.LOG.info("Started the new metaserver on port [" + port + "]..."); - HMSHandler.LOG.info("Options.minWorkerThreads = " + options.minWorkerThreads); - HMSHandler.LOG.info("Options.maxWorkerThreads = " + options.maxWorkerThreads); + HMSHandler.LOG.info("Started the new metaserver on port [" + port + + "]..."); + HMSHandler.LOG.info("Options.minWorkerThreads = " + + options.minWorkerThreads); + HMSHandler.LOG.info("Options.maxWorkerThreads = " + + options.maxWorkerThreads); server.serve(); } catch (Throwable x) { 
Index: metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java	(revision 901511)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java	(working copy)
@@ -33,39 +33,40 @@
 import org.apache.hadoop.hive.metastore.api.Table;
 
 /**
- * Hive specific implementation of alter
+ * Hive specific implementation of alter
  */
-public class HiveAlterHandler implements AlterHandler {
+public class HiveAlterHandler implements AlterHandler {
   private Configuration hiveConf;
-  private static final Log LOG = LogFactory.getLog(HiveAlterHandler.class.getName());
+  private static final Log LOG = LogFactory.getLog(HiveAlterHandler.class
+      .getName());
 
-  public Configuration getConf() {
+  public Configuration getConf() {
     return hiveConf;
   }
 
   @SuppressWarnings("nls")
   public void setConf(Configuration conf) {
-    this.hiveConf = conf;
+    hiveConf = conf;
   }
 
-  public void alterTable(RawStore msdb, Warehouse wh, String dbname, String name, Table newt)
-      throws InvalidOperationException, MetaException {
+  public void alterTable(RawStore msdb, Warehouse wh, String dbname,
+      String name, Table newt) throws InvalidOperationException, MetaException {
     if (newt == null) {
       throw new InvalidOperationException("New table is invalid: " + newt);
     }
-
-    if(!MetaStoreUtils.validateName(newt.getTableName()) ||
-        !MetaStoreUtils.validateColNames(newt.getSd().getCols())) {
-      throw new InvalidOperationException(newt.getTableName() + " is not a valid object name");
+
+    if (!MetaStoreUtils.validateName(newt.getTableName())
+        || !MetaStoreUtils.validateColNames(newt.getSd().getCols())) {
+      throw new InvalidOperationException(newt.getTableName()
+          + " is not a valid object name");
     }
 
     if (newt.getViewExpandedText() != null) {
-      throw new InvalidOperationException(
-          newt.getTableName()
-          + " is a view, so it cannot be modified via ALTER TABLE");
+      throw new InvalidOperationException(newt.getTableName()
+          + " is a view, so it cannot be modified via ALTER TABLE");
     }
-
+
     Path srcPath = null;
     FileSystem srcFs = null;
     Path destPath = null;
@@ -84,63 +85,80 @@
     // check if table with the new name already exists
     if (!newt.getTableName().equalsIgnoreCase(name)
         || !newt.getDbName().equalsIgnoreCase(dbname)) {
-      if(msdb.getTable(newt.getDbName(), newt.getTableName()) != null) {
-        throw new InvalidOperationException("new table " + newt.getDbName()
+      if (msdb.getTable(newt.getDbName(), newt.getTableName()) != null) {
+        throw new InvalidOperationException("new table " + newt.getDbName()
             + "." + newt.getTableName() + " already exists");
       }
-      rename = true;
+      rename = true;
     }
 
     // get old table
     Table oldt = msdb.getTable(dbname, name);
-    if(oldt == null) {
-      throw new InvalidOperationException("table " + newt.getDbName()
-          + "." + newt.getTableName() + " doesn't exist");
+    if (oldt == null) {
+      throw new InvalidOperationException("table " + newt.getDbName() + "."
+          + newt.getTableName() + " doesn't exist");
    }
-
+
     // check that partition keys have not changed
-    if( oldt.getPartitionKeys().size() != newt.getPartitionKeys().size()
+    if (oldt.getPartitionKeys().size() != newt.getPartitionKeys().size()
         || !oldt.getPartitionKeys().containsAll(newt.getPartitionKeys())) {
-      throw new InvalidOperationException("partition keys can not be changed.");
+      throw new InvalidOperationException(
+          "partition keys can not be changed.");
     }
-
-    if (rename // if this alter is a rename
-        && (oldt.getSd().getLocation().compareTo(newt.getSd().getLocation()) == 0 // and user didn't change the default location
-        || StringUtils.isEmpty(newt.getSd().getLocation())) // or new location is empty
-        && !oldt.getParameters().containsKey("EXTERNAL")) { // and table is not an external table
-      // that means user is asking metastore to move data to new location corresponding to the new name
+
+    if (rename // if this alter is a rename
+        && (oldt.getSd().getLocation().compareTo(newt.getSd().getLocation()) == 0 // and
+        // user
+        // didn't
+        // change
+        // the
+        // default
+        // location
+        || StringUtils.isEmpty(newt.getSd().getLocation())) // or new location
+        // is empty
+        && !oldt.getParameters().containsKey("EXTERNAL")) { // and table is
+      // not an external
+      // table
+      // that means user is asking metastore to move data to new location
+      // corresponding to the new name
       // get new location
-      newTblLoc = wh.getDefaultTablePath(newt.getDbName(), newt.getTableName()).toString();
+      newTblLoc = wh.getDefaultTablePath(newt.getDbName(),
+          newt.getTableName()).toString();
       newt.getSd().setLocation(newTblLoc);
       oldTblLoc = oldt.getSd().getLocation();
       moveData = true;
-      // check that destination does not exist otherwise we will be overwriting data
+      // check that destination does not exist otherwise we will be
+      // overwriting data
       srcPath = new Path(oldTblLoc);
       srcFs = wh.getFs(srcPath);
       destPath = new Path(newTblLoc);
       destFs = wh.getFs(destPath);
       // check that src and dest are on the same file system
       if (srcFs != destFs) {
-        throw new InvalidOperationException("table new location " + destPath
-            + " is on a different file system than the old location " + srcPath
-            + ". This operation is not supported");
+        throw new InvalidOperationException("table new location " + destPath
+            + " is on a different file system than the old location "
+            + srcPath + ". This operation is not supported");
      }
       try {
-        srcFs.exists(srcPath); // check that src exists and also checks permissions necessary
-        if(destFs.exists(destPath)) {
-          throw new InvalidOperationException("New location for this table "+ newt.getDbName()
-              + "." + newt.getTableName() + " already exists : " + destPath);
+        srcFs.exists(srcPath); // check that src exists and also checks
+        // permissions necessary
+        if (destFs.exists(destPath)) {
+          throw new InvalidOperationException("New location for this table "
+              + newt.getDbName() + "." + newt.getTableName()
+              + " already exists : " + destPath);
         }
       } catch (IOException e) {
-        throw new InvalidOperationException("Unable to access new location " + destPath + " for table "
-            + newt.getDbName() + "." + newt.getTableName() );
+        throw new InvalidOperationException("Unable to access new location "
+            + destPath + " for table " + newt.getDbName() + "."
+            + newt.getTableName());
       }
       // also the location field in partition
       List parts = msdb.getPartitions(dbname, name, 0);
       for (Partition part : parts) {
         String oldPartLoc = part.getSd().getLocation();
         if (oldPartLoc.contains(oldTblLoc)) {
-          part.getSd().setLocation(part.getSd().getLocation().replace(oldTblLoc, newTblLoc));
+          part.getSd().setLocation(
+              part.getSd().getLocation().replace(oldTblLoc, newTblLoc));
           msdb.alterPartition(dbname, name, part);
         }
       }
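A note on the same-filesystem check in the hunk above: srcFs != destFs compares FileSystem references. That usually behaves correctly because Hadoop's FileSystem.get() caches instances per scheme, authority, and configuration, but it is an identity test, not an equality test. Comparing the filesystems' URIs states the intent directly; a sketch under that assumption (the helper name is invented, not part of the patch):

    import java.net.URI;
    import org.apache.hadoop.fs.FileSystem;

    // Illustrative helper: treat two FileSystems as "the same" when their
    // scheme and authority match, rather than relying on instance identity.
    final class FsCompat {
      static boolean sameFileSystem(FileSystem a, FileSystem b) {
        URI ua = a.getUri();
        URI ub = b.getUri();
        // String.valueOf guards the null authority of e.g. the local FS.
        return ua.getScheme().equalsIgnoreCase(ub.getScheme())
            && String.valueOf(ua.getAuthority())
                .equalsIgnoreCase(String.valueOf(ub.getAuthority()));
      }
    }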
+ + newt.getTableName() + " doesn't exist"); } - + // check that partition keys have not changed - if( oldt.getPartitionKeys().size() != newt.getPartitionKeys().size() + if (oldt.getPartitionKeys().size() != newt.getPartitionKeys().size() || !oldt.getPartitionKeys().containsAll(newt.getPartitionKeys())) { - throw new InvalidOperationException("partition keys can not be changed."); + throw new InvalidOperationException( + "partition keys can not be changed."); } - - if (rename // if this alter is a rename - && (oldt.getSd().getLocation().compareTo(newt.getSd().getLocation()) == 0 // and user didn't change the default location - || StringUtils.isEmpty(newt.getSd().getLocation())) // or new location is empty - && !oldt.getParameters().containsKey("EXTERNAL")) { // and table is not an external table - // that means user is asking metastore to move data to new location corresponding to the new name + + if (rename // if this alter is a rename + && (oldt.getSd().getLocation().compareTo(newt.getSd().getLocation()) == 0 // and + // user + // didn't + // change + // the + // default + // location + || StringUtils.isEmpty(newt.getSd().getLocation())) // or new location + // is empty + && !oldt.getParameters().containsKey("EXTERNAL")) { // and table is + // not an external + // table + // that means user is asking metastore to move data to new location + // corresponding to the new name // get new location - newTblLoc = wh.getDefaultTablePath(newt.getDbName(), newt.getTableName()).toString(); + newTblLoc = wh.getDefaultTablePath(newt.getDbName(), + newt.getTableName()).toString(); newt.getSd().setLocation(newTblLoc); oldTblLoc = oldt.getSd().getLocation(); moveData = true; - // check that destination does not exist otherwise we will be overwriting data + // check that destination does not exist otherwise we will be + // overwriting data srcPath = new Path(oldTblLoc); srcFs = wh.getFs(srcPath); destPath = new Path(newTblLoc); destFs = wh.getFs(destPath); // check that src and dest are on the same file system if (srcFs != destFs) { - throw new InvalidOperationException("table new location " + destPath - + " is on a different file system than the old location " + srcPath - + ". This operation is not supported"); + throw new InvalidOperationException("table new location " + destPath + + " is on a different file system than the old location " + + srcPath + ". This operation is not supported"); } try { - srcFs.exists(srcPath); // check that src exists and also checks permissions necessary - if(destFs.exists(destPath)) { - throw new InvalidOperationException("New location for this table "+ newt.getDbName() - + "." + newt.getTableName() + " already exists : " + destPath); + srcFs.exists(srcPath); // check that src exists and also checks + // permissions necessary + if (destFs.exists(destPath)) { + throw new InvalidOperationException("New location for this table " + + newt.getDbName() + "." + newt.getTableName() + + " already exists : " + destPath); } } catch (IOException e) { - throw new InvalidOperationException("Unable to access new location " + destPath + " for table " - + newt.getDbName() + "." + newt.getTableName() ); + throw new InvalidOperationException("Unable to access new location " + + destPath + " for table " + newt.getDbName() + "." 
Index: metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java	(revision 901511)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java	(working copy)
@@ -46,17 +46,17 @@
  */
 public class Warehouse {
   private Path whRoot;
-  private Configuration conf;
+  private final Configuration conf;
   String whRootString;
 
   public static final Log LOG = LogFactory.getLog("hive.metastore.warehouse");
 
   public Warehouse(Configuration conf) throws MetaException {
     this.conf = conf;
-    whRootString = HiveConf.getVar(conf, HiveConf.ConfVars.METASTOREWAREHOUSE);
-    if(StringUtils.isBlank(whRootString)) {
+    whRootString = HiveConf.getVar(conf, HiveConf.ConfVars.METASTOREWAREHOUSE);
+    if (StringUtils.isBlank(whRootString)) {
       throw new MetaException(HiveConf.ConfVars.METASTOREWAREHOUSE.varname
-          + " is not set in the config or blank");
+          + " is not set in the config or blank");
     }
   }
 
@@ -73,30 +73,29 @@
   }
 
   /**
-   * Hadoop File System reverse lookups paths with raw ip addresses
-   * The File System URI always contains the canonical DNS name of the
-   * Namenode. Subsequently, operations on paths with raw ip addresses
-   * cause an exception since they don't match the file system URI.
-   *
-   * This routine solves this problem by replacing the scheme and authority
-   * of a path with the scheme and authority of the FileSystem that it
-   * maps to.
-   *
-   * @param path Path to be canonicalized
+   * Hadoop File System reverse lookups paths with raw ip addresses The File
+   * System URI always contains the canonical DNS name of the Namenode.
+   * Subsequently, operations on paths with raw ip addresses cause an exception
+   * since they don't match the file system URI.
+   *
+   * This routine solves this problem by replacing the scheme and authority of a
+   * path with the scheme and authority of the FileSystem that it maps to.
+   *
+   * @param path
+   *          Path to be canonicalized
    * @return Path with canonical scheme and authority
    */
   public Path getDnsPath(Path path) throws MetaException {
-    FileSystem fs = getFs(path);
-    return (new Path(fs.getUri().getScheme(), fs.getUri().getAuthority(),
-        path.toUri().getPath()));
+    FileSystem fs = getFs(path);
+    return (new Path(fs.getUri().getScheme(), fs.getUri().getAuthority(), path
+        .toUri().getPath()));
   }
-
+
   /**
    * Resolve the configured warehouse root dir with respect to the configuration
-   * This involves opening the FileSystem corresponding to the warehouse root dir
-   * (but that should be ok given that this is only called during DDL statements
-   * for non-external tables).
+   * This involves opening the FileSystem corresponding to the warehouse root
+   * dir (but that should be ok given that this is only called during DDL
+   * statements for non-external tables).
    */
   private Path getWhRoot() throws MetaException {
     if (whRoot != null) {
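getDnsPath() exists because a path such as hdfs://10.0.0.5:8020/user/hive can fail against a FileSystem whose URI carries the namenode's canonical hostname; rebuilding the path from the filesystem's own scheme and authority sidesteps the mismatch. A self-contained illustration of the same rewrite (the HDFS address below is made up, and against a real cluster fs.getUri() would report the canonical namenode name):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class DnsPathDemo {
      // Same transformation as Warehouse.getDnsPath(): keep the path
      // component, replace scheme/authority with what the FileSystem reports.
      static Path toDnsPath(Path path, FileSystem fs) {
        URI u = fs.getUri();
        return new Path(u.getScheme(), u.getAuthority(), path.toUri().getPath());
      }

      public static void main(String[] args) throws Exception {
        Path raw = new Path("hdfs://10.0.0.5:8020/user/hive/warehouse");
        // Requires HDFS on the classpath and a reachable namenode;
        // with a real cluster this would print something like
        // hdfs://namenode.example.com:8020/user/hive/warehouse.
        FileSystem fs = raw.getFileSystem(new Configuration());
        System.out.println(toDnsPath(raw, fs));
      }
    }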
@@ -112,8 +111,9 @@
     }
     return new Path(getWhRoot(), dbName.toLowerCase() + ".db");
   }
-
-  public Path getDefaultTablePath(String dbName, String tableName) throws MetaException {
+
+  public Path getDefaultTablePath(String dbName, String tableName)
+      throws MetaException {
     return new Path(getDefaultDatabasePath(dbName), tableName.toLowerCase());
   }
 
@@ -127,16 +127,16 @@
     }
     return false;
   }
-
+
   public boolean deleteDir(Path f, boolean recursive) throws MetaException {
     LOG.info("deleting  " + f);
     try {
       FileSystem fs = getFs(f);
-      if(!fs.exists(f)) {
+      if (!fs.exists(f)) {
         return false;
       }
 
-      // older versions of Hadoop don't have a Trash constructor based on the
+      // older versions of Hadoop don't have a Trash constructor based on the
       // Path or FileSystem. So need to achieve this by creating a dummy conf.
       // this needs to be filtered out based on version
       Configuration dupConf = new Configuration(conf);
@@ -151,11 +151,11 @@
         LOG.info("Deleted the diretory " + f);
         return true;
       }
-      if(fs.exists(f)) {
+      if (fs.exists(f)) {
         throw new MetaException("Unable to delete directory: " + f);
       }
     } catch (FileNotFoundException e) {
-      return true; //ok even if there is not data
+      return true; // ok even if there is no data
    } catch (IOException e) {
      MetaStoreUtils.logAndThrowMetaException(e);
    }
@@ -164,7 +164,7 @@
 
   // NOTE: This is for generating the internal path name for partitions. Users
   // should always use the MetaStore API to get the path name for a partition.
-  // Users should not directly take partition values and turn it into a path
+  // Users should not directly take partition values and turn it into a path
   // name by themselves, because the logic below may change in the future.
   //
   // In the future, it's OK to add new chars to the escape list, and old data
@@ -173,34 +173,34 @@
   // new partitions, it will use new names.
   static BitSet charToEscape = new BitSet(128);
   static {
-    for (char c = 0; c < ' ' ; c++) {
+    for (char c = 0; c < ' '; c++) {
       charToEscape.set(c);
     }
-    char[] clist = new char[] { '"', '#', '%', '\'', '*', '/', ':',
-        '=', '?', '\\', '\u00FF'
-    };
+    char[] clist = new char[] { '"', '#', '%', '\'', '*', '/', ':', '=', '?',
+        '\\', '\u00FF' };
     for (char c : clist) {
       charToEscape.set(c);
     }
   }
+
   static boolean needsEscaping(char c) {
-    return c >= 0 && c < charToEscape.size()
-        && charToEscape.get(c);
+    return c >= 0 && c < charToEscape.size() && charToEscape.get(c);
   }
-
+
   static String escapePathName(String path) {
     StringBuilder sb = new StringBuilder();
     for (int i = 0; i < path.length(); i++) {
       char c = path.charAt(i);
       if (needsEscaping(c)) {
         sb.append('%');
-        sb.append(String.format("%1$02X", (int)c));
+        sb.append(String.format("%1$02X", (int) c));
       } else {
         sb.append(c);
       }
     }
     return sb.toString();
   }
+
   static String unescapePathName(String path) {
     StringBuilder sb = new StringBuilder();
     for (int i = 0; i < path.length(); i++) {
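The escaping scheme above percent-encodes every control character plus a small blacklist that would otherwise corrupt a partition directory name ('/', '=', ':', '#', '%', both quotes, '\', '*', '?' and '\u00FF'). A standalone copy of the logic you can run, mirroring the patch's code (only the wrapper class name is invented):

    import java.util.BitSet;

    public class PathEscapeDemo {
      static final BitSet charToEscape = new BitSet(128);
      static {
        for (char c = 0; c < ' '; c++) {
          charToEscape.set(c); // all control characters
        }
        char[] clist = new char[] { '"', '#', '%', '\'', '*', '/', ':', '=',
            '?', '\\', '\u00FF' };
        for (char c : clist) {
          charToEscape.set(c);
        }
      }

      static boolean needsEscaping(char c) {
        return c >= 0 && c < charToEscape.size() && charToEscape.get(c);
      }

      static String escapePathName(String path) {
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < path.length(); i++) {
          char c = path.charAt(i);
          if (needsEscaping(c)) {
            sb.append('%').append(String.format("%1$02X", (int) c));
          } else {
            sb.append(c);
          }
        }
        return sb.toString();
      }

      public static void main(String[] args) {
        // '=' and '/' must not leak into a partition directory name:
        System.out.println(escapePathName("ds=2010/01/15"));
        // prints: ds%3D2010%2F01%2F15
      }
    }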
  static BitSet charToEscape = new BitSet(128);
  static {
-    for (char c = 0; c < ' ' ; c++) {
+    for (char c = 0; c < ' '; c++) {
      charToEscape.set(c);
    }
-    char[] clist = new char[] { '"', '#', '%', '\'', '*', '/', ':',
-        '=', '?', '\\', '\u00FF'
-    };
+    char[] clist = new char[] { '"', '#', '%', '\'', '*', '/', ':', '=', '?',
+        '\\', '\u00FF' };
    for (char c : clist) {
      charToEscape.set(c);
    }
  }
+
  static boolean needsEscaping(char c) {
-    return c >= 0 && c < charToEscape.size()
-        && charToEscape.get(c);
+    return c >= 0 && c < charToEscape.size() && charToEscape.get(c);
  }
-
  static String escapePathName(String path) {
    StringBuilder sb = new StringBuilder();
    for (int i = 0; i < path.length(); i++) {
      char c = path.charAt(i);
      if (needsEscaping(c)) {
        sb.append('%');
-        sb.append(String.format("%1$02X", (int)c));
+        sb.append(String.format("%1$02X", (int) c));
      } else {
        sb.append(c);
      }
    }
    return sb.toString();
  }
+
  static String unescapePathName(String path) {
    StringBuilder sb = new StringBuilder();
    for (int i = 0; i < path.length(); i++) {
@@ -208,25 +208,26 @@
      if (c == '%' && i + 2 < path.length()) {
        int code = -1;
        try {
-          code = Integer.valueOf(path.substring(i+1, i+3), 16);
+          code = Integer.valueOf(path.substring(i + 1, i + 3), 16);
        } catch (Exception e) {
          code = -1;
        }
        if (code >= 0) {
-          sb.append((char)code);
+          sb.append((char) code);
          i += 2;
          continue;
        }
      }
      sb.append(c);
-    }
+    }
    return sb.toString();
  }
-
-  public static String makePartName(Map<String, String> spec) throws MetaException {
+
+  public static String makePartName(Map<String, String> spec)
+      throws MetaException {
    StringBuffer suffixBuf = new StringBuffer();
-    for(Entry<String, String> e: spec.entrySet()) {
-      if(e.getValue() == null || e.getValue().length() == 0) {
+    for (Entry<String, String> e : spec.entrySet()) {
+      if (e.getValue() == null || e.getValue().length() == 0) {
        throw new MetaException("Partition spec is incorrect. " + spec);
      }
      suffixBuf.append(escapePathName(e.getKey()));
@@ -236,9 +237,11 @@
    }
    return suffixBuf.toString();
  }
-
+
  static final Pattern pat = Pattern.compile("([^/]+)=([^/]+)");
-  public static LinkedHashMap<String, String> makeSpecFromName(String name) throws MetaException {
+
+  public static LinkedHashMap<String, String> makeSpecFromName(String name)
+      throws MetaException {
    LinkedHashMap<String, String> partSpec = new LinkedHashMap<String, String>();
    if (name == null || name.isEmpty()) {
      throw new MetaException("Partition name is invalid. " + name);
@@ -253,40 +256,42 @@
      String v = unescapePathName(m.group(2));
      if (partSpec.containsKey(k)) {
-        throw new MetaException("Partition name is invalid. Key " + k + " defined at two levels");
+        throw new MetaException("Partition name is invalid. Key " + k
+            + " defined at two levels");
      }
      String[] kv = new String[2];
      kv[0] = k;
      kv[1] = v;
      kvs.add(kv);
-      }
-      else {
+      } else {
        throw new MetaException("Partition name is invalid. 
" + name); } currPath = currPath.getParent(); - } while(currPath != null && !currPath.getName().isEmpty()); - - // reverse the list since we checked the part from leaf dir to table's base dir - for(int i = kvs.size(); i > 0; i--) { - partSpec.put(kvs.get(i-1)[0], kvs.get(i-1)[1]); + } while (currPath != null && !currPath.getName().isEmpty()); + + // reverse the list since we checked the part from leaf dir to table's base + // dir + for (int i = kvs.size(); i > 0; i--) { + partSpec.put(kvs.get(i - 1)[0], kvs.get(i - 1)[1]); } return partSpec; } + public Path getPartitionPath(String dbName, String tableName, + LinkedHashMap pm) throws MetaException { + return new Path(getDefaultTablePath(dbName, tableName), makePartName(pm)); + } - public Path getPartitionPath(String dbName, String tableName, LinkedHashMap pm) throws MetaException { - return new Path(getDefaultTablePath(dbName, tableName), makePartName(pm)); + public Path getPartitionPath(Path tblPath, LinkedHashMap pm) + throws MetaException { + return new Path(tblPath, makePartName(pm)); } - - public Path getPartitionPath(Path tblPath, LinkedHashMap pm) throws MetaException { - return new Path(tblPath, makePartName(pm)); - } - + public boolean isDir(Path f) throws MetaException { try { FileSystem fs = getFs(f); FileStatus fstatus = fs.getFileStatus(f); - if(!fstatus.isDir()) { + if (!fstatus.isDir()) { return false; } } catch (FileNotFoundException e) { @@ -297,13 +302,14 @@ return true; } - public static String makePartName(List partCols, List vals) throws MetaException { + public static String makePartName(List partCols, + List vals) throws MetaException { if ((partCols.size() != vals.size()) || (partCols.size() == 0)) { throw new MetaException("Invalid partition key & values"); } StringBuilder name = new StringBuilder(); - for(int i=0; i< partCols.size(); i++) { - if(i > 0) { + for (int i = 0; i < partCols.size(); i++) { + if (i > 0) { name.append(Path.SEPARATOR); } name.append(escapePathName((partCols.get(i)).getName().toLowerCase())); Index: metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java =================================================================== --- metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java (revision 901511) +++ metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java (working copy) @@ -61,47 +61,46 @@ import org.apache.hadoop.util.StringUtils; /** - * This class is the interface between the application logic and the database store that - * contains the objects. - * Refrain putting any logic in mode.M* objects or in this file as former could be auto - * generated and this class would need to be made into a interface that can read both - * from a database and a filestore. + * This class is the interface between the application logic and the database + * store that contains the objects. Refrain putting any logic in mode.M* objects + * or in this file as former could be auto generated and this class would need + * to be made into a interface that can read both from a database and a + * filestore. 
*/ public class ObjectStore implements RawStore, Configurable { - @SuppressWarnings("nls") - private static final String JPOX_CONFIG = "jpox.properties"; private static Properties prop = null; private static PersistenceManagerFactory pmf = null; private static final Log LOG = LogFactory.getLog(ObjectStore.class.getName()); + private static enum TXN_STATUS { - NO_STATE, - OPEN, - COMMITED, - ROLLBACK + NO_STATE, OPEN, COMMITED, ROLLBACK } + private boolean isInitialized = false; private PersistenceManager pm = null; private Configuration hiveConf; private int openTrasactionCalls = 0; private Transaction currentTransaction = null; private TXN_STATUS transactionStatus = TXN_STATUS.NO_STATE; - - public ObjectStore() {} + public ObjectStore() { + } + public Configuration getConf() { return hiveConf; } @SuppressWarnings("nls") public void setConf(Configuration conf) { - this.hiveConf = conf; - if(isInitialized) { + hiveConf = conf; + if (isInitialized) { return; } else { initialize(); } - if(!isInitialized) { - throw new RuntimeException("Unable to create persistence manager. Check dss.log for details"); + if (!isInitialized) { + throw new RuntimeException( + "Unable to create persistence manager. Check dss.log for details"); } else { LOG.info("Initialized ObjectStore"); } @@ -109,9 +108,9 @@ private ClassLoader classLoader; { - this.classLoader = Thread.currentThread().getContextClassLoader(); - if (this.classLoader == null) { - this.classLoader = ObjectStore.class.getClassLoader(); + classLoader = Thread.currentThread().getContextClassLoader(); + if (classLoader == null) { + classLoader = ObjectStore.class.getClassLoader(); } } @@ -120,47 +119,50 @@ LOG.info("ObjectStore, initialize called"); initDataSourceProps(); pm = getPersistenceManager(); - if(pm != null) + if (pm != null) { isInitialized = true; + } return; } /** - * Properties specified in hive-default.xml override the properties specified in - * jpox.properties. + * Properties specified in hive-default.xml override the properties specified + * in jpox.properties. 
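The override rule in that javadoc comes down to a key filter. A minimal sketch of the same filter that initDataSourceProps (below) applies, written stand-alone; the class and method names here are illustrative:

import java.util.Map;
import java.util.Properties;
import org.apache.hadoop.conf.Configuration;

public class JdoPropsSketch {
  // Copy every Configuration entry whose key mentions "datanucleus" or "jdo";
  // setProperty overwrites whatever value jpox.properties supplied earlier.
  static Properties overrideJdoProps(Configuration conf) {
    Properties prop = new Properties();
    for (Map.Entry<String, String> e : conf) {
      if (e.getKey().contains("datanucleus") || e.getKey().contains("jdo")) {
        prop.setProperty(e.getKey(), e.getValue());
      }
    }
    return prop;
  }
}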
   */
  @SuppressWarnings("nls")
  private void initDataSourceProps() {
-    if(prop != null) {
+    if (prop != null) {
      return;
    }
    prop = new Properties();
-
+
    Iterator<Map.Entry<String, String>> iter = hiveConf.iterator();
-    while(iter.hasNext()) {
+    while (iter.hasNext()) {
      Map.Entry<String, String> e = iter.next();
-      if(e.getKey().contains("datanucleus") || e.getKey().contains("jdo")) {
+      if (e.getKey().contains("datanucleus") || e.getKey().contains("jdo")) {
        Object prevVal = prop.setProperty(e.getKey(), e.getValue());
-        if(LOG.isDebugEnabled() && !e.getKey().equals(HiveConf.ConfVars.METASTOREPWD.varname)) {
-          LOG.debug("Overriding " + e.getKey() + " value " + prevVal
+        if (LOG.isDebugEnabled()
+            && !e.getKey().equals(HiveConf.ConfVars.METASTOREPWD.varname)) {
+          LOG.debug("Overriding " + e.getKey() + " value " + prevVal
              + " from  jpox.properties with " + e.getValue());
        }
      }
    }
-    if(LOG.isDebugEnabled()) {
-      for (Entry<Object, Object> e: prop.entrySet()) {
-        if(!e.getKey().equals(HiveConf.ConfVars.METASTOREPWD.varname))
+    if (LOG.isDebugEnabled()) {
+      for (Entry<Object, Object> e : prop.entrySet()) {
+        if (!e.getKey().equals(HiveConf.ConfVars.METASTOREPWD.varname)) {
          LOG.debug(e.getKey() + " = " + e.getValue());
+        }
      }
    }
  }

  private static PersistenceManagerFactory getPMF() {
-    if(pmf == null) {
+    if (pmf == null) {
      pmf = JDOHelper.getPersistenceManagerFactory(prop);
      DataStoreCache dsc = pmf.getDataStoreCache();
-      if(dsc != null) {
+      if (dsc != null) {
        dsc.pinAll(true, MTable.class);
        dsc.pinAll(true, MStorageDescriptor.class);
        dsc.pinAll(true, MSerDeInfo.class);
@@ -173,76 +175,83 @@
      }
      return pmf;
  }
-
+
  private PersistenceManager getPersistenceManager() {
    return getPMF().getPersistenceManager();
  }
-
+
  public void shutdown() {
-    if(pm != null) {
+    if (pm != null) {
      pm.close();
    }
  }

  /**
-   * Opens a new one or the one already created
-   * Every call of this function must have corresponding commit or rollback function call
+   * Opens a new one or the one already created Every call of this function must
+   * have corresponding commit or rollback function call
+   *
   * @return an active transaction
   */
-
+
  public boolean openTransaction() {
-    this.openTrasactionCalls++;
-    if(this.openTrasactionCalls == 1) {
+    openTrasactionCalls++;
+    if (openTrasactionCalls == 1) {
      currentTransaction = pm.currentTransaction();
      currentTransaction.begin();
      transactionStatus = TXN_STATUS.OPEN;
    } else {
-      // something is wrong since openTransactionCalls is greater than 1 but currentTransaction is not active
-      assert((currentTransaction != null) && (currentTransaction.isActive()));
+      // something is wrong since openTransactionCalls is greater than 1 but
+      // currentTransaction is not active
+      assert ((currentTransaction != null) && (currentTransaction.isActive()));
    }
    return currentTransaction.isActive();
  }
-
+
  /**
-   * if this is the commit of the first open call then an actual commit is called.
+   * if this is the commit of the first open call then an actual commit is
+   * called.
+   *
   * @return Always returns true
   */
  @SuppressWarnings("nls")
  public boolean commitTransaction() {
-    assert(this.openTrasactionCalls >= 1);
-    if(!currentTransaction.isActive()) {
-      throw new RuntimeException("Commit is called, but transaction is not active. Either there are" +
-          "mismatching open and close calls or rollback was called in the same trasaction");
+    assert (openTrasactionCalls >= 1);
+    if (!currentTransaction.isActive()) {
+      throw new RuntimeException(
+          "Commit is called, but transaction is not active. 
Either there are" + + "mismatching open and close calls or rollback was called in the same trasaction"); } - this.openTrasactionCalls--; - if ((this.openTrasactionCalls == 0) && currentTransaction.isActive()) { + openTrasactionCalls--; + if ((openTrasactionCalls == 0) && currentTransaction.isActive()) { transactionStatus = TXN_STATUS.COMMITED; currentTransaction.commit(); } return true; } - + /** - * @return true if there is an active transaction. If the current transaction is either - * committed or rolled back it returns false + * @return true if there is an active transaction. If the current transaction + * is either committed or rolled back it returns false */ public boolean isActiveTransaction() { - if(currentTransaction == null) + if (currentTransaction == null) { return false; + } return currentTransaction.isActive(); } - + /** * Rolls back the current transaction if it is active */ public void rollbackTransaction() { - if(this.openTrasactionCalls < 1) { + if (openTrasactionCalls < 1) { return; } - this.openTrasactionCalls = 0; - if(currentTransaction.isActive() && transactionStatus != TXN_STATUS.ROLLBACK) { + openTrasactionCalls = 0; + if (currentTransaction.isActive() + && transactionStatus != TXN_STATUS.ROLLBACK) { transactionStatus = TXN_STATUS.ROLLBACK; - // could already be rolled back + // could already be rolled back currentTransaction.rollback(); } } @@ -250,26 +259,27 @@ public boolean createDatabase(Database db) { boolean success = false; boolean commited = false; - MDatabase mdb = new MDatabase(db.getName().toLowerCase(), db.getDescription()); + MDatabase mdb = new MDatabase(db.getName().toLowerCase(), db + .getDescription()); try { openTransaction(); pm.makePersistent(mdb); success = true; commited = commitTransaction(); } finally { - if(!commited) { + if (!commited) { rollbackTransaction(); } } return success; } - + public boolean createDatabase(String name) { // TODO: get default path Database db = new Database(name, "default_path"); return this.createDatabase(db); } - + @SuppressWarnings("nls") private MDatabase getMDatabase(String name) throws NoSuchObjectException { MDatabase db = null; @@ -284,15 +294,16 @@ pm.retrieve(db); commited = commitTransaction(); } finally { - if(!commited) { + if (!commited) { rollbackTransaction(); } } - if(db == null) { + if (db == null) { throw new NoSuchObjectException("There is no database named " + name); } return db; } + public Database getDatabase(String name) throws NoSuchObjectException { MDatabase db = null; boolean commited = false; @@ -301,7 +312,7 @@ db = getMDatabase(name); commited = commitTransaction(); } finally { - if(!commited) { + if (!commited) { rollbackTransaction(); } } @@ -309,12 +320,12 @@ } public boolean dropDatabase(String dbname) { - + boolean success = false; boolean commited = false; try { openTransaction(); - + // first drop tables dbname = dbname.toLowerCase(); LOG.info("Dropping database along with all tables " + dbname); @@ -324,23 +335,24 @@ pm.deletePersistentAll(mtbls); // then drop the database - Query query = pm.newQuery(MDatabase.class, "name == dbName"); - query.declareParameters("java.lang.String dbName"); - query.setUnique(true); - MDatabase db = (MDatabase) query.execute(dbname.trim()); + Query query = pm.newQuery(MDatabase.class, "name == dbName"); + query.declareParameters("java.lang.String dbName"); + query.setUnique(true); + MDatabase db = (MDatabase) query.execute(dbname.trim()); pm.retrieve(db); - - //StringIdentity id = new StringIdentity(MDatabase.class, dbname); - //MDatabase db = 
(MDatabase) pm.getObjectById(id); - if(db != null) + + // StringIdentity id = new StringIdentity(MDatabase.class, dbname); + // MDatabase db = (MDatabase) pm.getObjectById(id); + if (db != null) { pm.deletePersistent(db); + } commited = commitTransaction(); success = true; } catch (JDOObjectNotFoundException e) { - LOG.debug("database not found " + dbname,e); + LOG.debug("database not found " + dbname, e); commited = commitTransaction(); } finally { - if(!commited) { + if (!commited) { rollbackTransaction(); } } @@ -359,18 +371,19 @@ dbs = (List) query.execute(); commited = commitTransaction(); } finally { - if(!commited) { + if (!commited) { rollbackTransaction(); } } return dbs; } - + private MType getMType(Type type) { List fields = new ArrayList(); - if(type.getFields() != null) { + if (type.getFields() != null) { for (FieldSchema field : type.getFields()) { - fields.add(new MFieldSchema(field.getName(), field.getType(), field.getComment())); + fields.add(new MFieldSchema(field.getName(), field.getType(), field + .getComment())); } } return new MType(type.getName(), type.getType1(), type.getType2(), fields); @@ -378,9 +391,10 @@ private Type getType(MType mtype) { List fields = new ArrayList(); - if(mtype.getFields() != null) { + if (mtype.getFields() != null) { for (MFieldSchema field : mtype.getFields()) { - fields.add(new FieldSchema(field.getName(), field.getType(), field.getComment())); + fields.add(new FieldSchema(field.getName(), field.getType(), field + .getComment())); } } return new Type(mtype.getName(), mtype.getType1(), mtype.getType2(), fields); @@ -396,7 +410,7 @@ commited = commitTransaction(); success = true; } finally { - if(!commited) { + if (!commited) { rollbackTransaction(); } } @@ -408,17 +422,17 @@ boolean commited = false; try { openTransaction(); - Query query = pm.newQuery(MType.class, "name == typeName"); - query.declareParameters("java.lang.String typeName"); - query.setUnique(true); - MType mtype = (MType) query.execute(typeName.trim()); + Query query = pm.newQuery(MType.class, "name == typeName"); + query.declareParameters("java.lang.String typeName"); + query.setUnique(true); + MType mtype = (MType) query.execute(typeName.trim()); pm.retrieve(type); - if(mtype != null) { + if (mtype != null) { type = getType(mtype); } commited = commitTransaction(); } finally { - if(!commited) { + if (!commited) { rollbackTransaction(); } } @@ -426,15 +440,15 @@ } public boolean dropType(String typeName) { - + boolean success = false; boolean commited = false; try { openTransaction(); - Query query = pm.newQuery(MType.class, "name == typeName"); - query.declareParameters("java.lang.String typeName"); - query.setUnique(true); - MType type = (MType) query.execute(typeName.trim()); + Query query = pm.newQuery(MType.class, "name == typeName"); + query.declareParameters("java.lang.String typeName"); + query.setUnique(true); + MType type = (MType) query.execute(typeName.trim()); pm.retrieve(type); pm.deletePersistent(type); commited = commitTransaction(); @@ -443,14 +457,15 @@ commited = commitTransaction(); LOG.debug("type not found " + typeName, e); } finally { - if(!commited) { + if (!commited) { rollbackTransaction(); } } return success; } - public void createTable(Table tbl) throws InvalidObjectException, MetaException { + public void createTable(Table tbl) throws InvalidObjectException, + MetaException { boolean commited = false; try { openTransaction(); @@ -458,20 +473,20 @@ pm.makePersistent(mtbl); commited = commitTransaction(); } finally { - if(!commited) { + if 
(!commited) { rollbackTransaction(); } } } - + public boolean dropTable(String dbName, String tableName) { - + boolean success = false; try { openTransaction(); - MTable tbl = getMTable(dbName, tableName); + MTable tbl = getMTable(dbName, tableName); pm.retrieve(tbl); - if(tbl != null) { + if (tbl != null) { // first remove all the partitions pm.deletePersistentAll(listMPartitions(dbName, tableName, -1)); // then remove the table @@ -479,7 +494,7 @@ } success = commitTransaction(); } finally { - if(!success) { + if (!success) { rollbackTransaction(); } } @@ -494,24 +509,26 @@ tbl = convertToTable(getMTable(dbName, tableName)); commited = commitTransaction(); } finally { - if(!commited) { + if (!commited) { rollbackTransaction(); } } return tbl; } - - public List getTables(String dbName, String pattern) throws MetaException { + + public List getTables(String dbName, String pattern) + throws MetaException { boolean commited = false; List tbls = null; try { openTransaction(); dbName = dbName.toLowerCase(); - // Take the pattern and split it on the | to get all the composing patterns - String [] subpatterns = pattern.trim().split("\\|"); + // Take the pattern and split it on the | to get all the composing + // patterns + String[] subpatterns = pattern.trim().split("\\|"); String query = "select tableName from org.apache.hadoop.hive.metastore.model.MTable where database.name == dbName && ("; boolean first = true; - for(String subpattern: subpatterns) { + for (String subpattern : subpatterns) { subpattern = "(?i)" + subpattern.replaceAll("\\*", ".*"); if (!first) { query = query + " || "; @@ -525,19 +542,19 @@ q.declareParameters("java.lang.String dbName"); q.setResult("tableName"); Collection names = (Collection) q.execute(dbName.trim()); - tbls = new ArrayList(); - for (Iterator i = names.iterator (); i.hasNext ();) { - tbls.add((String) i.next ()); + tbls = new ArrayList(); + for (Iterator i = names.iterator(); i.hasNext();) { + tbls.add((String) i.next()); } commited = commitTransaction(); } finally { - if(!commited) { + if (!commited) { rollbackTransaction(); } } return tbls; } - + private MTable getMTable(String db, String table) { MTable mtbl = null; boolean commited = false; @@ -545,14 +562,15 @@ openTransaction(); db = db.toLowerCase(); table = table.toLowerCase(); - Query query = pm.newQuery(MTable.class, "tableName == table && database.name == db"); - query.declareParameters("java.lang.String table, java.lang.String db"); - query.setUnique(true); - mtbl = (MTable) query.execute(table.trim(), db.trim()); + Query query = pm.newQuery(MTable.class, + "tableName == table && database.name == db"); + query.declareParameters("java.lang.String table, java.lang.String db"); + query.setUnique(true); + mtbl = (MTable) query.execute(table.trim(), db.trim()); pm.retrieve(mtbl); commited = commitTransaction(); } finally { - if(!commited) { + if (!commited) { rollbackTransaction(); } } @@ -560,78 +578,74 @@ } private Table convertToTable(MTable mtbl) throws MetaException { - if(mtbl == null) return null; - return new Table(mtbl.getTableName(), - mtbl.getDatabase().getName(), - mtbl.getOwner(), - mtbl.getCreateTime(), - mtbl.getLastAccessTime(), - mtbl.getRetention(), - convertToStorageDescriptor(mtbl.getSd()), - convertToFieldSchemas(mtbl.getPartitionKeys()), - mtbl.getParameters(), - mtbl.getViewOriginalText(), - mtbl.getViewExpandedText()); + if (mtbl == null) { + return null; + } + return new Table(mtbl.getTableName(), mtbl.getDatabase().getName(), mtbl + .getOwner(), mtbl.getCreateTime(), 
mtbl.getLastAccessTime(), mtbl + .getRetention(), convertToStorageDescriptor(mtbl.getSd()), + convertToFieldSchemas(mtbl.getPartitionKeys()), mtbl.getParameters(), + mtbl.getViewOriginalText(), mtbl.getViewExpandedText()); } - - private MTable convertToMTable(Table tbl) throws InvalidObjectException, MetaException { - if(tbl == null) return null; + + private MTable convertToMTable(Table tbl) throws InvalidObjectException, + MetaException { + if (tbl == null) { + return null; + } MDatabase mdb = null; try { - mdb = this.getMDatabase(tbl.getDbName()); + mdb = getMDatabase(tbl.getDbName()); } catch (NoSuchObjectException e) { LOG.error(StringUtils.stringifyException(e)); - throw new InvalidObjectException("Database " + tbl.getDbName() + " doesn't exsit."); + throw new InvalidObjectException("Database " + tbl.getDbName() + + " doesn't exsit."); } - return new MTable(tbl.getTableName().toLowerCase(), - mdb, - convertToMStorageDescriptor(tbl.getSd()), - tbl.getOwner(), - tbl.getCreateTime(), - tbl.getLastAccessTime(), - tbl.getRetention(), - convertToMFieldSchemas(tbl.getPartitionKeys()), - tbl.getParameters(), - tbl.getViewOriginalText(), - tbl.getViewExpandedText()); + return new MTable(tbl.getTableName().toLowerCase(), mdb, + convertToMStorageDescriptor(tbl.getSd()), tbl.getOwner(), tbl + .getCreateTime(), tbl.getLastAccessTime(), tbl.getRetention(), + convertToMFieldSchemas(tbl.getPartitionKeys()), tbl.getParameters(), + tbl.getViewOriginalText(), tbl.getViewExpandedText()); } - + private List convertToMFieldSchemas(List keys) { List mkeys = null; - if(keys != null) { + if (keys != null) { mkeys = new ArrayList(keys.size()); for (FieldSchema part : keys) { - mkeys.add(new MFieldSchema(part.getName().toLowerCase(), part.getType(), part.getComment())); + mkeys.add(new MFieldSchema(part.getName().toLowerCase(), + part.getType(), part.getComment())); } } return mkeys; - } - + } + private List convertToFieldSchemas(List mkeys) { List keys = null; - if(mkeys != null) { + if (mkeys != null) { keys = new ArrayList(mkeys.size()); for (MFieldSchema part : mkeys) { - keys.add(new FieldSchema(part.getName(), part.getType(), part.getComment())); + keys.add(new FieldSchema(part.getName(), part.getType(), part + .getComment())); } } return keys; } - + private List convertToMOrders(List keys) { List mkeys = null; - if(keys != null) { + if (keys != null) { mkeys = new ArrayList(keys.size()); for (Order part : keys) { mkeys.add(new MOrder(part.getCol().toLowerCase(), part.getOrder())); } } return mkeys; - } - + } + private List convertToOrders(List mkeys) { List keys = null; - if(mkeys != null) { + if (mkeys != null) { keys = new ArrayList(); for (MOrder part : mkeys) { keys.add(new Order(part.getCol(), part.getOrder())); @@ -639,54 +653,51 @@ } return keys; } - + private SerDeInfo converToSerDeInfo(MSerDeInfo ms) throws MetaException { - if(ms == null) throw new MetaException("Invalid SerDeInfo object"); - return new SerDeInfo(ms.getName(), - ms.getSerializationLib(), - ms.getParameters()); + if (ms == null) { + throw new MetaException("Invalid SerDeInfo object"); + } + return new SerDeInfo(ms.getName(), ms.getSerializationLib(), ms + .getParameters()); } - + private MSerDeInfo converToMSerDeInfo(SerDeInfo ms) throws MetaException { - if(ms == null) throw new MetaException("Invalid SerDeInfo object"); - return new MSerDeInfo(ms.getName(), - ms.getSerializationLib(), - ms.getParameters()); - } - + if (ms == null) { + throw new MetaException("Invalid SerDeInfo object"); + } + return new 
MSerDeInfo(ms.getName(), ms.getSerializationLib(), ms + .getParameters()); + } + // MSD and SD should be same objects. Not sure how to make then same right now // MSerdeInfo *& SerdeInfo should be same as well - private StorageDescriptor convertToStorageDescriptor(MStorageDescriptor msd) throws MetaException { - if(msd == null) return null; - return new StorageDescriptor( - convertToFieldSchemas(msd.getCols()), - msd.getLocation(), - msd.getInputFormat(), - msd.getOutputFormat(), - msd.isCompressed(), - msd.getNumBuckets(), - converToSerDeInfo(msd.getSerDeInfo()), - msd.getBucketCols(), - convertToOrders(msd.getSortCols()), - msd.getParameters()); + private StorageDescriptor convertToStorageDescriptor(MStorageDescriptor msd) + throws MetaException { + if (msd == null) { + return null; + } + return new StorageDescriptor(convertToFieldSchemas(msd.getCols()), msd + .getLocation(), msd.getInputFormat(), msd.getOutputFormat(), msd + .isCompressed(), msd.getNumBuckets(), converToSerDeInfo(msd + .getSerDeInfo()), msd.getBucketCols(), convertToOrders(msd + .getSortCols()), msd.getParameters()); } - - private MStorageDescriptor convertToMStorageDescriptor(StorageDescriptor sd) throws MetaException { - if(sd == null) return null; - return new MStorageDescriptor( - convertToMFieldSchemas(sd.getCols()), - sd.getLocation(), - sd.getInputFormat(), - sd.getOutputFormat(), - sd.isCompressed(), - sd.getNumBuckets(), - converToMSerDeInfo(sd.getSerdeInfo()), - sd.getBucketCols(), - convertToMOrders(sd.getSortCols()), - sd.getParameters()); + + private MStorageDescriptor convertToMStorageDescriptor(StorageDescriptor sd) + throws MetaException { + if (sd == null) { + return null; + } + return new MStorageDescriptor(convertToMFieldSchemas(sd.getCols()), sd + .getLocation(), sd.getInputFormat(), sd.getOutputFormat(), sd + .isCompressed(), sd.getNumBuckets(), converToMSerDeInfo(sd + .getSerdeInfo()), sd.getBucketCols(), + convertToMOrders(sd.getSortCols()), sd.getParameters()); } - - public boolean addPartition(Partition part) throws InvalidObjectException, MetaException { + + public boolean addPartition(Partition part) throws InvalidObjectException, + MetaException { boolean success = false; boolean commited = false; try { @@ -696,114 +707,120 @@ commited = commitTransaction(); success = true; } finally { - if(!commited) { + if (!commited) { rollbackTransaction(); } } return success; } - - public Partition getPartition(String dbName, String tableName, List part_vals) throws MetaException { - this.openTransaction(); - Partition part = convertToPart(this.getMPartition(dbName, tableName, part_vals)); - this.commitTransaction(); + + public Partition getPartition(String dbName, String tableName, + List part_vals) throws MetaException { + openTransaction(); + Partition part = convertToPart(getMPartition(dbName, tableName, part_vals)); + commitTransaction(); return part; } - - private MPartition getMPartition(String dbName, String tableName, List part_vals) throws MetaException { + + private MPartition getMPartition(String dbName, String tableName, + List part_vals) throws MetaException { MPartition mpart = null; boolean commited = false; try { openTransaction(); dbName = dbName.toLowerCase(); tableName = tableName.toLowerCase(); - MTable mtbl = this.getMTable(dbName, tableName); - if(mtbl == null) { + MTable mtbl = getMTable(dbName, tableName); + if (mtbl == null) { commited = commitTransaction(); return null; } - // Change the query to use part_vals instead of the name which is redundant - String name = 
Warehouse.makePartName(convertToFieldSchemas(mtbl.getPartitionKeys()), part_vals); - Query query = pm.newQuery(MPartition.class, "table.tableName == t1 && table.database.name == t2 && partitionName == t3"); - query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3"); - query.setUnique(true); - mpart = (MPartition) query.execute(tableName.trim(), dbName.trim(), name); + // Change the query to use part_vals instead of the name which is + // redundant + String name = Warehouse.makePartName(convertToFieldSchemas(mtbl + .getPartitionKeys()), part_vals); + Query query = pm + .newQuery(MPartition.class, + "table.tableName == t1 && table.database.name == t2 && partitionName == t3"); + query + .declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3"); + query.setUnique(true); + mpart = (MPartition) query.execute(tableName.trim(), dbName.trim(), name); pm.retrieve(mpart); commited = commitTransaction(); } finally { - if(!commited) { + if (!commited) { rollbackTransaction(); } } return mpart; } - - private MPartition convertToMPart(Partition part) throws InvalidObjectException, MetaException { - if(part == null) { + + private MPartition convertToMPart(Partition part) + throws InvalidObjectException, MetaException { + if (part == null) { return null; } MTable mt = getMTable(part.getDbName(), part.getTableName()); - if(mt == null) { - throw new InvalidObjectException("Partition doesn't have a valid table or database name"); + if (mt == null) { + throw new InvalidObjectException( + "Partition doesn't have a valid table or database name"); } - return new MPartition( - Warehouse.makePartName(convertToFieldSchemas(mt.getPartitionKeys()), part.getValues()), - mt, - part.getValues(), - part.getCreateTime(), - part.getLastAccessTime(), - convertToMStorageDescriptor(part.getSd()), - part.getParameters()); + return new MPartition(Warehouse.makePartName(convertToFieldSchemas(mt + .getPartitionKeys()), part.getValues()), mt, part.getValues(), part + .getCreateTime(), part.getLastAccessTime(), + convertToMStorageDescriptor(part.getSd()), part.getParameters()); } - + private Partition convertToPart(MPartition mpart) throws MetaException { - if(mpart == null) { + if (mpart == null) { return null; } - return new Partition( - mpart.getValues(), - mpart.getTable().getDatabase().getName(), - mpart.getTable().getTableName(), - mpart.getCreateTime(), - mpart.getLastAccessTime(), - convertToStorageDescriptor(mpart.getSd()), + return new Partition(mpart.getValues(), mpart.getTable().getDatabase() + .getName(), mpart.getTable().getTableName(), mpart.getCreateTime(), + mpart.getLastAccessTime(), convertToStorageDescriptor(mpart.getSd()), mpart.getParameters()); } - public boolean dropPartition(String dbName, String tableName, List part_vals) throws MetaException { + public boolean dropPartition(String dbName, String tableName, + List part_vals) throws MetaException { boolean success = false; try { openTransaction(); - MPartition part = this.getMPartition(dbName, tableName, part_vals); - if(part != null) + MPartition part = getMPartition(dbName, tableName, part_vals); + if (part != null) { pm.deletePersistent(part); + } success = commitTransaction(); } finally { - if(!success) { + if (!success) { rollbackTransaction(); } } return success; } - - public List getPartitions(String dbName, String tableName, int max) throws MetaException { - this.openTransaction(); - List parts = convertToParts(this.listMPartitions(dbName, tableName, max)); - this.commitTransaction(); + + 
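As the comment in getMPartition above notes, the lookup is keyed on the derived partitionName rather than on part_vals directly. A sketch of how that key is formed; the partition values are hypothetical:

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.MetaException;

public class PartitionKeySketch {
  // For partition columns (ds, hr) this returns "ds=2009-12-07/hr=14",
  // the exact string the "partitionName == t3" JDOQL filter compares against.
  static String partitionName(List<FieldSchema> partKeys) throws MetaException {
    List<String> vals = Arrays.asList("2009-12-07", "14");
    return Warehouse.makePartName(partKeys, vals);
  }
}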
public List getPartitions(String dbName, String tableName, int max) + throws MetaException { + openTransaction(); + List parts = convertToParts(listMPartitions(dbName, tableName, + max)); + commitTransaction(); return parts; } - - private List convertToParts(List mparts) throws MetaException { + + private List convertToParts(List mparts) + throws MetaException { List parts = new ArrayList(mparts.size()); for (MPartition mp : mparts) { - parts.add(this.convertToPart(mp)); + parts.add(convertToPart(mp)); } return parts; } - - //TODO:pc implement max - public List listPartitionNames(String dbName, String tableName, short max) throws MetaException { + // TODO:pc implement max + public List listPartitionNames(String dbName, String tableName, + short max) throws MetaException { List pns = new ArrayList(); boolean success = false; try { @@ -811,25 +828,28 @@ LOG.debug("Executing getPartitionNames"); dbName = dbName.toLowerCase(); tableName = tableName.toLowerCase(); - Query q = pm.newQuery("select partitionName from org.apache.hadoop.hive.metastore.model.MPartition where table.database.name == t1 && table.tableName == t2 order by partitionName asc"); + Query q = pm + .newQuery("select partitionName from org.apache.hadoop.hive.metastore.model.MPartition where table.database.name == t1 && table.tableName == t2 order by partitionName asc"); q.declareParameters("java.lang.String t1, java.lang.String t2"); q.setResult("partitionName"); - Collection names = (Collection) q.execute(dbName.trim(), tableName.trim()); - pns = new ArrayList(); - for (Iterator i = names.iterator (); i.hasNext ();) { - pns.add((String) i.next ()); + Collection names = (Collection) q + .execute(dbName.trim(), tableName.trim()); + pns = new ArrayList(); + for (Iterator i = names.iterator(); i.hasNext();) { + pns.add((String) i.next()); } success = commitTransaction(); } finally { - if(!success) { + if (!success) { rollbackTransaction(); } } return pns; } - + // TODO:pc implement max - private List listMPartitions(String dbName, String tableName, int max) { + private List listMPartitions(String dbName, String tableName, + int max) { boolean success = false; List mparts = null; try { @@ -837,37 +857,40 @@ LOG.debug("Executing listMPartitions"); dbName = dbName.toLowerCase(); tableName = tableName.toLowerCase(); - Query query = pm.newQuery(MPartition.class, "table.tableName == t1 && table.database.name == t2"); - query.declareParameters("java.lang.String t1, java.lang.String t2"); - mparts = (List) query.execute(tableName.trim(), dbName.trim()); + Query query = pm.newQuery(MPartition.class, + "table.tableName == t1 && table.database.name == t2"); + query.declareParameters("java.lang.String t1, java.lang.String t2"); + mparts = (List) query + .execute(tableName.trim(), dbName.trim()); LOG.debug("Done executing query for listMPartitions"); pm.retrieveAll(mparts); success = commitTransaction(); LOG.debug("Done retrieving all objects for listMPartitions"); } finally { - if(!success) { + if (!success) { rollbackTransaction(); } } return mparts; } - public void alterTable(String dbname, String name, Table newTable) throws InvalidObjectException, MetaException { + public void alterTable(String dbname, String name, Table newTable) + throws InvalidObjectException, MetaException { boolean success = false; try { openTransaction(); name = name.toLowerCase(); dbname = dbname.toLowerCase(); MTable newt = convertToMTable(newTable); - if(newt == null) { + if (newt == null) { throw new InvalidObjectException("new table is invalid"); } - - MTable 
oldt = this.getMTable(dbname, name); - if(oldt == null) { + + MTable oldt = getMTable(dbname, name); + if (oldt == null) { throw new MetaException("table " + name + " doesn't exist"); } - + // For now only alter name, owner, paramters, cols, bucketcols are allowed oldt.setTableName(newt.getTableName().toLowerCase()); oldt.setParameters(newt.getParameters()); @@ -875,19 +898,19 @@ oldt.setSd(newt.getSd()); oldt.setDatabase(newt.getDatabase()); oldt.setRetention(newt.getRetention()); - oldt.setPartitionKeys(newt.getPartitionKeys()); - + oldt.setPartitionKeys(newt.getPartitionKeys()); + // commit the changes success = commitTransaction(); } finally { - if(!success) { + if (!success) { rollbackTransaction(); } } } public void alterPartition(String dbname, String name, Partition newPart) - throws InvalidObjectException, MetaException { + throws InvalidObjectException, MetaException { boolean success = false; try { openTransaction(); @@ -900,14 +923,16 @@ } oldp.setParameters(newPart.getParameters()); copyMSD(newp.getSd(), oldp.getSd()); - if (newp.getCreateTime() != oldp.getCreateTime()) + if (newp.getCreateTime() != oldp.getCreateTime()) { oldp.setCreateTime(newp.getCreateTime()); - if (newp.getLastAccessTime() != oldp.getLastAccessTime()) + } + if (newp.getLastAccessTime() != oldp.getLastAccessTime()) { oldp.setLastAccessTime(newp.getLastAccessTime()); + } // commit the changes success = commitTransaction(); } finally { - if(!success) { + if (!success) { rollbackTransaction(); } } @@ -922,7 +947,8 @@ oldSd.setOutputFormat(newSd.getOutputFormat()); oldSd.setNumBuckets(newSd.getNumBuckets()); oldSd.getSerDeInfo().setName(newSd.getSerDeInfo().getName()); - oldSd.getSerDeInfo().setSerializationLib(newSd.getSerDeInfo().getSerializationLib()); + oldSd.getSerDeInfo().setSerializationLib( + newSd.getSerDeInfo().getSerializationLib()); oldSd.getSerDeInfo().setParameters(newSd.getSerDeInfo().getParameters()); } } Index: metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java =================================================================== --- metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java (revision 901511) +++ metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java (working copy) @@ -62,24 +62,26 @@ /** * printStackTrace - * + * * Helper function to print an exception stack trace to the log and not stderr - * - * @param e the exception - * + * + * @param e + * the exception + * */ static public void printStackTrace(Exception e) { - for(StackTraceElement s: e.getStackTrace()) { + for (StackTraceElement s : e.getStackTrace()) { LOG.error(s); } } - public static Table createColumnsetSchema(String name, List columns, List partCols, Configuration conf) throws MetaException { + public static Table createColumnsetSchema(String name, List columns, + List partCols, Configuration conf) throws MetaException { if (columns == null) { throw new MetaException("columns not specified for table " + name); } - + Table tTable = new Table(); tTable.setTableName(name); tTable.setSd(new StorageDescriptor()); @@ -88,12 +90,14 @@ SerDeInfo serdeInfo = sd.getSerdeInfo(); serdeInfo.setSerializationLib(LazySimpleSerDe.class.getName()); serdeInfo.setParameters(new HashMap()); - serdeInfo.getParameters().put(org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT, "1"); - - List fields = new ArrayList(); + serdeInfo.getParameters().put( + org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT, "1"); + + List fields = new ArrayList(); sd.setCols(fields); - for 
(String col: columns) { - FieldSchema field = new FieldSchema(col, org.apache.hadoop.hive.serde.Constants.STRING_TYPE_NAME, "'default'"); + for (String col : columns) { + FieldSchema field = new FieldSchema(col, + org.apache.hadoop.hive.serde.Constants.STRING_TYPE_NAME, "'default'"); fields.add(field); } @@ -101,57 +105,66 @@ for (String partCol : partCols) { FieldSchema part = new FieldSchema(); part.setName(partCol); - part.setType(org.apache.hadoop.hive.serde.Constants.STRING_TYPE_NAME); // default partition key + part.setType(org.apache.hadoop.hive.serde.Constants.STRING_TYPE_NAME); // default + // partition + // key tTable.getPartitionKeys().add(part); } sd.setNumBuckets(-1); return tTable; } - /** * recursiveDelete - * - * just recursively deletes a dir - you'd think Java would have something to do this?? - * - * @param f - the file/dir to delete - * @exception IOException propogate f.delete() exceptions - * + * + * just recursively deletes a dir - you'd think Java would have something to + * do this?? + * + * @param f + * - the file/dir to delete + * @exception IOException + * propogate f.delete() exceptions + * */ static public void recursiveDelete(File f) throws IOException { - if(f.isDirectory()) { - File fs [] = f.listFiles(); - for(File subf: fs) { + if (f.isDirectory()) { + File fs[] = f.listFiles(); + for (File subf : fs) { recursiveDelete(subf); } } - if(!f.delete()) { + if (!f.delete()) { throw new IOException("could not delete: " + f.getPath()); } } - /** * getDeserializer - * + * * Get the Deserializer for a table given its name and properties. - * - * @param conf hadoop config - * @param schema the properties to use to instantiate the deserializer + * + * @param conf + * hadoop config + * @param schema + * the properties to use to instantiate the deserializer * @return the Deserializer - * @exception MetaException if any problems instantiating the Deserializer - * - * todo - this should move somewhere into serde.jar - * + * @exception MetaException + * if any problems instantiating the Deserializer + * + * todo - this should move somewhere into serde.jar + * */ - static public Deserializer getDeserializer(Configuration conf, Properties schema) throws MetaException { - String lib = schema.getProperty(org.apache.hadoop.hive.serde.Constants.SERIALIZATION_LIB); + static public Deserializer getDeserializer(Configuration conf, + Properties schema) throws MetaException { + String lib = schema + .getProperty(org.apache.hadoop.hive.serde.Constants.SERIALIZATION_LIB); try { Deserializer deserializer = SerDeUtils.lookupDeserializer(lib); - ((Deserializer)deserializer).initialize(conf, schema); + (deserializer).initialize(conf, schema); return deserializer; } catch (Exception e) { - LOG.error("error in initSerDe: " + e.getClass().getName() + " " + e.getMessage()); + LOG.error("error in initSerDe: " + e.getClass().getName() + " " + + e.getMessage()); MetaStoreUtils.printStackTrace(e); throw new MetaException(e.getClass().getName() + " " + e.getMessage()); } @@ -159,18 +172,22 @@ /** * getDeserializer - * + * * Get the Deserializer for a table. 
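A usage sketch for getDeserializer(Configuration, Properties) above; the column names and the choice of LazySimpleSerDe are illustrative, and the properties mirror what getSchema (later in this patch) emits:

import java.util.Properties;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.serde2.Deserializer;
import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;

public class DeserializerSketch {
  static Deserializer open(Configuration conf) throws MetaException {
    Properties schema = new Properties();
    schema.setProperty(
        org.apache.hadoop.hive.serde.Constants.SERIALIZATION_LIB,
        LazySimpleSerDe.class.getName());
    schema.setProperty(
        org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_COLUMNS,
        "a,b");
    schema.setProperty(
        org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_COLUMN_TYPES,
        "string:string");
    // Looks up the serde class by name and calls initialize(conf, schema).
    return MetaStoreUtils.getDeserializer(conf, schema);
  }
}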
- * - * @param conf - hadoop config - * @param table the table + * + * @param conf + * - hadoop config + * @param table + * the table * @return the Deserializer - * @exception MetaException if any problems instantiating the Deserializer - * - * todo - this should move somewhere into serde.jar - * + * @exception MetaException + * if any problems instantiating the Deserializer + * + * todo - this should move somewhere into serde.jar + * */ - static public Deserializer getDeserializer(Configuration conf, org.apache.hadoop.hive.metastore.api.Table table) throws MetaException { + static public Deserializer getDeserializer(Configuration conf, + org.apache.hadoop.hive.metastore.api.Table table) throws MetaException { String lib = table.getSd().getSerdeInfo().getSerializationLib(); try { Deserializer deserializer = SerDeUtils.lookupDeserializer(lib); @@ -179,26 +196,30 @@ } catch (RuntimeException e) { throw e; } catch (Exception e) { - LOG.error("error in initSerDe: " + e.getClass().getName() + " " + e.getMessage()); + LOG.error("error in initSerDe: " + e.getClass().getName() + " " + + e.getMessage()); MetaStoreUtils.printStackTrace(e); throw new MetaException(e.getClass().getName() + " " + e.getMessage()); } } - + /** * getDeserializer - * + * * Get the Deserializer for a partition. - * - * @param conf - hadoop config - * @param partition the partition + * + * @param conf + * - hadoop config + * @param partition + * the partition * @return the Deserializer - * @exception MetaException if any problems instantiating the Deserializer - * + * @exception MetaException + * if any problems instantiating the Deserializer + * */ - static public Deserializer getDeserializer(Configuration conf, - org.apache.hadoop.hive.metastore.api.Partition part, - org.apache.hadoop.hive.metastore.api.Table table) throws MetaException { + static public Deserializer getDeserializer(Configuration conf, + org.apache.hadoop.hive.metastore.api.Partition part, + org.apache.hadoop.hive.metastore.api.Table table) throws MetaException { String lib = part.getSd().getSerdeInfo().getSerializationLib(); try { Deserializer deserializer = SerDeUtils.lookupDeserializer(lib); @@ -207,79 +228,86 @@ } catch (RuntimeException e) { throw e; } catch (Exception e) { - LOG.error("error in initSerDe: " + e.getClass().getName() + " " + e.getMessage()); + LOG.error("error in initSerDe: " + e.getClass().getName() + " " + + e.getMessage()); MetaStoreUtils.printStackTrace(e); throw new MetaException(e.getClass().getName() + " " + e.getMessage()); } } - - static public void deleteWHDirectory(Path path,Configuration conf, boolean use_trash) throws MetaException { + static public void deleteWHDirectory(Path path, Configuration conf, + boolean use_trash) throws MetaException { + try { - if(!path.getFileSystem(conf).exists(path)) { - LOG.warn("drop data called on table/partition with no directory: " + path); + if (!path.getFileSystem(conf).exists(path)) { + LOG.warn("drop data called on table/partition with no directory: " + + path); return; } - if(use_trash) { + if (use_trash) { int count = 0; - Path newPath = new Path("/Trash/Current" + path.getParent().toUri().getPath()); + Path newPath = new Path("/Trash/Current" + + path.getParent().toUri().getPath()); - if(path.getFileSystem(conf).exists(newPath) == false) { + if (path.getFileSystem(conf).exists(newPath) == false) { path.getFileSystem(conf).mkdirs(newPath); } do { - newPath = new Path("/Trash/Current" + path.toUri().getPath() + "." 
+ count); - if(path.getFileSystem(conf).exists(newPath)) { + newPath = new Path("/Trash/Current" + path.toUri().getPath() + "." + + count); + if (path.getFileSystem(conf).exists(newPath)) { count++; continue; } - if(path.getFileSystem(conf).rename(path, newPath)) { + if (path.getFileSystem(conf).rename(path, newPath)) { break; } - } while(++count < 50) ; - if(count >= 50) { + } while (++count < 50); + if (count >= 50) { throw new MetaException("Rename failed due to maxing out retries"); } } else { // directly delete it path.getFileSystem(conf).delete(path, true); } - } catch(IOException e) { + } catch (IOException e) { LOG.error("Got exception trying to delete data dir: " + e); throw new MetaException(e.getMessage()); - } catch(MetaException e) { + } catch (MetaException e) { LOG.error("Got exception trying to delete data dir: " + e); throw e; } } - /** * validateName - * - * Checks the name conforms to our standars which are: "[a-zA-z_0-9]+". - * checks this is just characters and numbers and _ - * - * @param name the name to validate + * + * Checks the name conforms to our standars which are: "[a-zA-z_0-9]+". checks + * this is just characters and numbers and _ + * + * @param name + * the name to validate * @return true or false depending on conformance - * @exception MetaException if it doesn't match the pattern. + * @exception MetaException + * if it doesn't match the pattern. */ static public boolean validateName(String name) { Pattern tpat = Pattern.compile("[\\w_]+"); Matcher m = tpat.matcher(name); - if(m.matches()) { + if (m.matches()) { return true; } return false; } - + static public boolean validateColNames(List cols) { for (FieldSchema fieldSchema : cols) { - if(!validateName(fieldSchema.getName())) + if (!validateName(fieldSchema.getName())) { return false; + } } return true; } @@ -289,78 +317,114 @@ } public static String getMapType(String k, String v) { - return "map<" + k +"," + v + ">"; + return "map<" + k + "," + v + ">"; } - public static Table getTable(Configuration conf, Properties schema) throws MetaException { + public static Table getTable(Configuration conf, Properties schema) + throws MetaException { Table t = new Table(); t.setSd(new StorageDescriptor()); - t.setTableName(schema.getProperty(org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_NAME)); - t.getSd().setLocation(schema.getProperty(org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_LOCATION)); - t.getSd().setInputFormat(schema.getProperty(org.apache.hadoop.hive.metastore.api.Constants.FILE_INPUT_FORMAT, - org.apache.hadoop.mapred.SequenceFileInputFormat.class.getName())); - t.getSd().setOutputFormat(schema.getProperty(org.apache.hadoop.hive.metastore.api.Constants.FILE_OUTPUT_FORMAT, - org.apache.hadoop.mapred.SequenceFileOutputFormat.class.getName())); + t + .setTableName(schema + .getProperty(org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_NAME)); + t + .getSd() + .setLocation( + schema + .getProperty(org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_LOCATION)); + t.getSd().setInputFormat( + schema.getProperty( + org.apache.hadoop.hive.metastore.api.Constants.FILE_INPUT_FORMAT, + org.apache.hadoop.mapred.SequenceFileInputFormat.class.getName())); + t.getSd().setOutputFormat( + schema.getProperty( + org.apache.hadoop.hive.metastore.api.Constants.FILE_OUTPUT_FORMAT, + org.apache.hadoop.mapred.SequenceFileOutputFormat.class.getName())); t.setPartitionKeys(new ArrayList()); t.setDbName(MetaStoreUtils.DEFAULT_DATABASE_NAME); - String part_cols_str = 
schema.getProperty(org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_PARTITION_COLUMNS); + String part_cols_str = schema + .getProperty(org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_PARTITION_COLUMNS); t.setPartitionKeys(new ArrayList()); if (part_cols_str != null && (part_cols_str.trim().length() != 0)) { - String [] part_keys = part_cols_str.trim().split("/"); - for (String key: part_keys) { + String[] part_keys = part_cols_str.trim().split("/"); + for (String key : part_keys) { FieldSchema part = new FieldSchema(); part.setName(key); - part.setType(org.apache.hadoop.hive.serde.Constants.STRING_TYPE_NAME); // default partition key + part.setType(org.apache.hadoop.hive.serde.Constants.STRING_TYPE_NAME); // default + // partition + // key t.getPartitionKeys().add(part); } } - t.getSd().setNumBuckets(Integer.parseInt(schema.getProperty(org.apache.hadoop.hive.metastore.api.Constants.BUCKET_COUNT, "-1"))); - String bucketFieldName = schema.getProperty(org.apache.hadoop.hive.metastore.api.Constants.BUCKET_FIELD_NAME); + t.getSd() + .setNumBuckets( + Integer.parseInt(schema.getProperty( + org.apache.hadoop.hive.metastore.api.Constants.BUCKET_COUNT, + "-1"))); + String bucketFieldName = schema + .getProperty(org.apache.hadoop.hive.metastore.api.Constants.BUCKET_FIELD_NAME); t.getSd().setBucketCols(new ArrayList(1)); if ((bucketFieldName != null) && (bucketFieldName.trim().length() != 0)) { t.getSd().setBucketCols(new ArrayList(1)); t.getSd().getBucketCols().add(bucketFieldName); } - + t.getSd().setSerdeInfo(new SerDeInfo()); t.getSd().getSerdeInfo().setParameters(new HashMap()); t.getSd().getSerdeInfo().setName(t.getTableName()); - t.getSd().getSerdeInfo().setSerializationLib(schema.getProperty(org.apache.hadoop.hive.serde.Constants.SERIALIZATION_LIB)); - setSerdeParam(t.getSd().getSerdeInfo(), schema, org.apache.hadoop.hive.serde.Constants.SERIALIZATION_CLASS); - setSerdeParam(t.getSd().getSerdeInfo(), schema, org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT); - if(org.apache.commons.lang.StringUtils.isNotBlank(schema.getProperty(org.apache.hadoop.hive.serde.Constants.SERIALIZATION_CLASS))) { - setSerdeParam(t.getSd().getSerdeInfo(), schema, org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_SERDE); + t + .getSd() + .getSerdeInfo() + .setSerializationLib( + schema + .getProperty(org.apache.hadoop.hive.serde.Constants.SERIALIZATION_LIB)); + setSerdeParam(t.getSd().getSerdeInfo(), schema, + org.apache.hadoop.hive.serde.Constants.SERIALIZATION_CLASS); + setSerdeParam(t.getSd().getSerdeInfo(), schema, + org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT); + if (org.apache.commons.lang.StringUtils + .isNotBlank(schema + .getProperty(org.apache.hadoop.hive.serde.Constants.SERIALIZATION_CLASS))) { + setSerdeParam(t.getSd().getSerdeInfo(), schema, + org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_SERDE); } // needed for MetadataTypedColumnSetSerDe and LazySimpleSerDe - setSerdeParam(t.getSd().getSerdeInfo(), schema, org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_COLUMNS); + setSerdeParam(t.getSd().getSerdeInfo(), schema, + org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_COLUMNS); // needed for LazySimpleSerDe - setSerdeParam(t.getSd().getSerdeInfo(), schema, org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_COLUMN_TYPES); + setSerdeParam(t.getSd().getSerdeInfo(), schema, + org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_COLUMN_TYPES); // needed for DynamicSerDe - setSerdeParam(t.getSd().getSerdeInfo(), 
schema, org.apache.hadoop.hive.serde.Constants.SERIALIZATION_DDL); - - String colstr = schema.getProperty(org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_COLUMNS); - List fields = new ArrayList(); - if(colstr != null) { - String[] cols = colstr.split(","); + setSerdeParam(t.getSd().getSerdeInfo(), schema, + org.apache.hadoop.hive.serde.Constants.SERIALIZATION_DDL); + + String colstr = schema + .getProperty(org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_COLUMNS); + List fields = new ArrayList(); + if (colstr != null) { + String[] cols = colstr.split(","); for (String colName : cols) { - FieldSchema col = new FieldSchema(colName, org.apache.hadoop.hive.serde.Constants.STRING_TYPE_NAME, "'default'"); + FieldSchema col = new FieldSchema(colName, + org.apache.hadoop.hive.serde.Constants.STRING_TYPE_NAME, + "'default'"); fields.add(col); } - } - - if(fields.size() == 0) { + } + + if (fields.size() == 0) { // get the fields from serde try { - fields = getFieldsFromDeserializer(t.getTableName(), getDeserializer(conf, schema)); + fields = getFieldsFromDeserializer(t.getTableName(), getDeserializer( + conf, schema)); } catch (SerDeException e) { LOG.error(StringUtils.stringifyException(e)); throw new MetaException("Invalid serde or schema. " + e.getMessage()); } } t.getSd().setCols(fields); - + t.setOwner(schema.getProperty("creator")); - + // remove all the used up parameters to find out the remaining parameters schema.remove(Constants.META_TABLE_NAME); schema.remove(Constants.META_TABLE_LOCATION); @@ -375,80 +439,105 @@ schema.remove(Constants.META_TABLE_SERDE); schema.remove(Constants.META_TABLE_COLUMNS); schema.remove(Constants.META_TABLE_COLUMN_TYPES); - + // add the remaining unknown parameters to the table's parameters t.setParameters(new HashMap()); - for(Entry e : schema.entrySet()) { - t.getParameters().put(e.getKey().toString(), e.getValue().toString()); + for (Entry e : schema.entrySet()) { + t.getParameters().put(e.getKey().toString(), e.getValue().toString()); } return t; } - public static void setSerdeParam(SerDeInfo sdi, Properties schema, String param) { + public static void setSerdeParam(SerDeInfo sdi, Properties schema, + String param) { String val = schema.getProperty(param); - if(org.apache.commons.lang.StringUtils.isNotBlank(val)) { + if (org.apache.commons.lang.StringUtils.isNotBlank(val)) { sdi.getParameters().put(param, val); } } - static HashMap typeToThriftTypeMap; + static HashMap typeToThriftTypeMap; static { typeToThriftTypeMap = new HashMap(); - typeToThriftTypeMap.put(org.apache.hadoop.hive.serde.Constants.BOOLEAN_TYPE_NAME, "bool"); - typeToThriftTypeMap.put(org.apache.hadoop.hive.serde.Constants.TINYINT_TYPE_NAME, "byte"); - typeToThriftTypeMap.put(org.apache.hadoop.hive.serde.Constants.SMALLINT_TYPE_NAME, "i16"); - typeToThriftTypeMap.put(org.apache.hadoop.hive.serde.Constants.INT_TYPE_NAME, "i32"); - typeToThriftTypeMap.put(org.apache.hadoop.hive.serde.Constants.BIGINT_TYPE_NAME, "i64"); - typeToThriftTypeMap.put(org.apache.hadoop.hive.serde.Constants.DOUBLE_TYPE_NAME, "double"); - typeToThriftTypeMap.put(org.apache.hadoop.hive.serde.Constants.FLOAT_TYPE_NAME, "float"); - typeToThriftTypeMap.put(org.apache.hadoop.hive.serde.Constants.LIST_TYPE_NAME, "list"); - typeToThriftTypeMap.put(org.apache.hadoop.hive.serde.Constants.MAP_TYPE_NAME, "map"); - typeToThriftTypeMap.put(org.apache.hadoop.hive.serde.Constants.STRING_TYPE_NAME, "string"); - // These 3 types are not supported yet. 
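Worked examples of the rewrite this map drives; typeToThriftType (just below) splits a type name into alternating alphanumeric and non-alphanumeric runs and maps each token, so (illustrative inputs):

// typeToThriftType("int")             -> "i32"
// typeToThriftType("map<string,int>") -> "map<string,i32>"
// typeToThriftType("array<bigint>")   -> "list<i64>"   (LIST_TYPE_NAME is "array")
// Tokens with no entry in the map (names, punctuation) pass through unchanged.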
- // We should define a complex type date in thrift that contains a single int member, and DynamicSerDe + typeToThriftTypeMap.put( + org.apache.hadoop.hive.serde.Constants.BOOLEAN_TYPE_NAME, "bool"); + typeToThriftTypeMap.put( + org.apache.hadoop.hive.serde.Constants.TINYINT_TYPE_NAME, "byte"); + typeToThriftTypeMap.put( + org.apache.hadoop.hive.serde.Constants.SMALLINT_TYPE_NAME, "i16"); + typeToThriftTypeMap.put( + org.apache.hadoop.hive.serde.Constants.INT_TYPE_NAME, "i32"); + typeToThriftTypeMap.put( + org.apache.hadoop.hive.serde.Constants.BIGINT_TYPE_NAME, "i64"); + typeToThriftTypeMap.put( + org.apache.hadoop.hive.serde.Constants.DOUBLE_TYPE_NAME, "double"); + typeToThriftTypeMap.put( + org.apache.hadoop.hive.serde.Constants.FLOAT_TYPE_NAME, "float"); + typeToThriftTypeMap.put( + org.apache.hadoop.hive.serde.Constants.LIST_TYPE_NAME, "list"); + typeToThriftTypeMap.put( + org.apache.hadoop.hive.serde.Constants.MAP_TYPE_NAME, "map"); + typeToThriftTypeMap.put( + org.apache.hadoop.hive.serde.Constants.STRING_TYPE_NAME, "string"); + // These 3 types are not supported yet. + // We should define a complex type date in thrift that contains a single int + // member, and DynamicSerDe // should convert it to date type at runtime. - typeToThriftTypeMap.put(org.apache.hadoop.hive.serde.Constants.DATE_TYPE_NAME, "date"); - typeToThriftTypeMap.put(org.apache.hadoop.hive.serde.Constants.DATETIME_TYPE_NAME, "datetime"); - typeToThriftTypeMap.put(org.apache.hadoop.hive.serde.Constants.TIMESTAMP_TYPE_NAME, "timestamp"); + typeToThriftTypeMap.put( + org.apache.hadoop.hive.serde.Constants.DATE_TYPE_NAME, "date"); + typeToThriftTypeMap.put( + org.apache.hadoop.hive.serde.Constants.DATETIME_TYPE_NAME, "datetime"); + typeToThriftTypeMap + .put(org.apache.hadoop.hive.serde.Constants.TIMESTAMP_TYPE_NAME, + "timestamp"); } - /** Convert type to ThriftType. We do that by tokenizing the type and convert each token. + + /** + * Convert type to ThriftType. We do that by tokenizing the type and convert + * each token. */ public static String typeToThriftType(String type) { StringBuilder thriftType = new StringBuilder(); int last = 0; boolean lastAlphaDigit = Character.isLetterOrDigit(type.charAt(last)); - for(int i=1; i<=type.length(); i++) { - if (i == type.length() || Character.isLetterOrDigit(type.charAt(i)) != lastAlphaDigit) { + for (int i = 1; i <= type.length(); i++) { + if (i == type.length() + || Character.isLetterOrDigit(type.charAt(i)) != lastAlphaDigit) { String token = type.substring(last, i); last = i; String thriftToken = typeToThriftTypeMap.get(token); - thriftType.append(thriftToken == null? token : thriftToken); + thriftType.append(thriftToken == null ? 
token : thriftToken); lastAlphaDigit = !lastAlphaDigit; - } + } } return thriftType.toString(); } - /** + + /** * Convert FieldSchemas to Thrift DDL + column names and column types * - * @param structName The name of the table - * @param fieldSchemas List of fields along with their schemas - * @return String containing "Thrift DDL#comma-separated-column-names#colon-separated-columntypes - * Example: "struct result { a string, map b}#a,b#string:map" + * @param structName + * The name of the table + * @param fieldSchemas + * List of fields along with their schemas + * @return String containing "Thrift + * DDL#comma-separated-column-names#colon-separated-columntypes + * Example: + * "struct result { a string, map b}#a,b#string:map" */ - public static String getFullDDLFromFieldSchema(String structName, List fieldSchemas) { + public static String getFullDDLFromFieldSchema(String structName, + List fieldSchemas) { StringBuilder ddl = new StringBuilder(); ddl.append(getDDLFromFieldSchema(structName, fieldSchemas)); ddl.append('#'); StringBuilder colnames = new StringBuilder(); StringBuilder coltypes = new StringBuilder(); boolean first = true; - for (FieldSchema col: fieldSchemas) { + for (FieldSchema col : fieldSchemas) { if (first) { first = false; - } - else { + } else { colnames.append(','); coltypes.append(':'); } @@ -460,16 +549,18 @@ ddl.append(coltypes); return ddl.toString(); } - - /** Convert FieldSchemas to Thrift DDL. + + /** + * Convert FieldSchemas to Thrift DDL. */ - public static String getDDLFromFieldSchema(String structName, List fieldSchemas) { + public static String getDDLFromFieldSchema(String structName, + List fieldSchemas) { StringBuilder ddl = new StringBuilder(); ddl.append("struct "); ddl.append(structName); ddl.append(" { "); boolean first = true; - for (FieldSchema col: fieldSchemas) { + for (FieldSchema col : fieldSchemas) { if (first) { first = false; } else { @@ -480,19 +571,24 @@ ddl.append(col.getName()); } ddl.append("}"); - + LOG.info("DDL: " + ddl); return ddl.toString(); } - - public static Properties getSchema(org.apache.hadoop.hive.metastore.api.Table table) { - return MetaStoreUtils.getSchema(table.getSd(),table.getSd(), table.getParameters(), table.getTableName(), table.getPartitionKeys()); + + public static Properties getSchema( + org.apache.hadoop.hive.metastore.api.Table table) { + return MetaStoreUtils.getSchema(table.getSd(), table.getSd(), table + .getParameters(), table.getTableName(), table.getPartitionKeys()); } - - public static Properties getSchema(org.apache.hadoop.hive.metastore.api.Partition part, org.apache.hadoop.hive.metastore.api.Table table) { - return MetaStoreUtils.getSchema(part.getSd(), table.getSd(), table.getParameters(), table.getTableName(), table.getPartitionKeys()); + + public static Properties getSchema( + org.apache.hadoop.hive.metastore.api.Partition part, + org.apache.hadoop.hive.metastore.api.Table table) { + return MetaStoreUtils.getSchema(part.getSd(), table.getSd(), table + .getParameters(), table.getTableName(), table.getPartitionKeys()); } - + public static Properties getSchema( org.apache.hadoop.hive.metastore.api.StorageDescriptor sd, org.apache.hadoop.hive.metastore.api.StorageDescriptor tblsd, @@ -500,31 +596,47 @@ List partitionKeys) { Properties schema = new Properties(); String inputFormat = sd.getInputFormat(); - if(inputFormat == null || inputFormat.length() == 0) { - inputFormat = org.apache.hadoop.mapred.SequenceFileInputFormat.class.getName(); + if (inputFormat == null || inputFormat.length() == 0) { + 
inputFormat = org.apache.hadoop.mapred.SequenceFileInputFormat.class + .getName(); } - schema.setProperty(org.apache.hadoop.hive.metastore.api.Constants.FILE_INPUT_FORMAT, inputFormat); + schema.setProperty( + org.apache.hadoop.hive.metastore.api.Constants.FILE_INPUT_FORMAT, + inputFormat); String outputFormat = sd.getOutputFormat(); - if(outputFormat == null || outputFormat.length() == 0) { - outputFormat = org.apache.hadoop.mapred.SequenceFileOutputFormat.class.getName(); + if (outputFormat == null || outputFormat.length() == 0) { + outputFormat = org.apache.hadoop.mapred.SequenceFileOutputFormat.class + .getName(); } - schema.setProperty(org.apache.hadoop.hive.metastore.api.Constants.FILE_OUTPUT_FORMAT, outputFormat); - schema.setProperty(org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_NAME, tableName); - if(sd.getLocation() != null) { - schema.setProperty(org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_LOCATION, sd.getLocation()); + schema.setProperty( + org.apache.hadoop.hive.metastore.api.Constants.FILE_OUTPUT_FORMAT, + outputFormat); + schema.setProperty( + org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_NAME, + tableName); + if (sd.getLocation() != null) { + schema.setProperty( + org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_LOCATION, + sd.getLocation()); } - schema.setProperty(org.apache.hadoop.hive.metastore.api.Constants.BUCKET_COUNT, Integer.toString(sd.getNumBuckets())); + schema.setProperty( + org.apache.hadoop.hive.metastore.api.Constants.BUCKET_COUNT, Integer + .toString(sd.getNumBuckets())); if (sd.getBucketCols().size() > 0) { - schema.setProperty(org.apache.hadoop.hive.metastore.api.Constants.BUCKET_FIELD_NAME, sd.getBucketCols().get(0)); + schema.setProperty( + org.apache.hadoop.hive.metastore.api.Constants.BUCKET_FIELD_NAME, sd + .getBucketCols().get(0)); } schema.putAll(sd.getSerdeInfo().getParameters()); - if(sd.getSerdeInfo().getSerializationLib() != null) { - schema.setProperty(org.apache.hadoop.hive.serde.Constants.SERIALIZATION_LIB, sd.getSerdeInfo().getSerializationLib()); + if (sd.getSerdeInfo().getSerializationLib() != null) { + schema.setProperty( + org.apache.hadoop.hive.serde.Constants.SERIALIZATION_LIB, sd + .getSerdeInfo().getSerializationLib()); } StringBuilder colNameBuf = new StringBuilder(); StringBuilder colTypeBuf = new StringBuilder(); boolean first = true; - for (FieldSchema col: tblsd.getCols()) { + for (FieldSchema col : tblsd.getCols()) { if (!first) { colNameBuf.append(","); colTypeBuf.append(":"); @@ -535,39 +647,49 @@ } String colNames = colNameBuf.toString(); String colTypes = colTypeBuf.toString(); - schema.setProperty(org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_COLUMNS, colNames); - schema.setProperty(org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_COLUMN_TYPES, colTypes); - schema.setProperty(org.apache.hadoop.hive.serde.Constants.SERIALIZATION_DDL, + schema.setProperty( + org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_COLUMNS, + colNames); + schema.setProperty( + org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_COLUMN_TYPES, + colTypes); + schema.setProperty( + org.apache.hadoop.hive.serde.Constants.SERIALIZATION_DDL, getDDLFromFieldSchema(tableName, sd.getCols())); - + String partString = ""; String partStringSep = ""; for (FieldSchema partKey : partitionKeys) { partString = partString.concat(partStringSep); partString = partString.concat(partKey.getName()); - if(partStringSep.length() == 0) { + if (partStringSep.length() == 0) { partStringSep = "/"; 
      }
    }
-    if(partString.length() > 0) {
-      schema.setProperty(org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_PARTITION_COLUMNS, partString);
+    if (partString.length() > 0) {
+      schema
+          .setProperty(
+              org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_PARTITION_COLUMNS,
+              partString);
    }
-
-    if (parameters != null) {
-      for(Entry<String, String> e: parameters.entrySet()) {
-        schema.setProperty(e.getKey(), e.getValue());
-      }
-    }
-
+
+    if (parameters != null) {
+      for (Entry<String, String> e : parameters.entrySet()) {
+        schema.setProperty(e.getKey(), e.getValue());
+      }
+    }
+
    return schema;
  }
-
-  /** Convert FieldSchemas to columnNames.
+
+  /**
+   * Convert FieldSchemas to columnNames.
   */
-  public static String getColumnNamesFromFieldSchema(List<FieldSchema> fieldSchemas) {
+  public static String getColumnNamesFromFieldSchema(
+      List<FieldSchema> fieldSchemas) {
    StringBuilder sb = new StringBuilder();
-    for (int i=0; i<fieldSchemas.size(); i++) {
-      if (i>0) {
+    for (int i = 0; i < fieldSchemas.size(); i++) {
+      if (i > 0) {
        sb.append(",");
      }
      sb.append(fieldSchemas.get(i).getName());
@@ -575,19 +697,21 @@
    return sb.toString();
  }

-  /** Convert FieldSchemas to columnTypes.
+  /**
+   * Convert FieldSchemas to columnTypes.
   */
-  public static String getColumnTypesFromFieldSchema(List<FieldSchema> fieldSchemas) {
+  public static String getColumnTypesFromFieldSchema(
+      List<FieldSchema> fieldSchemas) {
    StringBuilder sb = new StringBuilder();
-    for (int i=0; i<fieldSchemas.size(); i++) {
-      if (i>0) {
+    for (int i = 0; i < fieldSchemas.size(); i++) {
+      if (i > 0) {
        sb.append(",");
      }
      sb.append(fieldSchemas.get(i).getType());
    }
    return sb.toString();
  }
-
+
  public static void makeDir(Path path, HiveConf hiveConf) throws MetaException {
    FileSystem fs;
    try {
@@ -597,19 +721,23 @@
      }
    } catch (IOException e) {
      throw new MetaException("Unable to : " + path);
-    }
+    }
  }

  /**
   * Catches exceptions that can't be handled and bundles them to MetaException
+   *
   * @param e
   * @throws MetaException
   */
  static void logAndThrowMetaException(Exception e) throws MetaException {
-    LOG.error("Got exception: " + e.getClass().getName() + " " + e.getMessage());
+    LOG
+        .error("Got exception: " + e.getClass().getName() + " "
+            + e.getMessage());
    LOG.error(StringUtils.stringifyException(e));
-    throw new MetaException("Got exception: " + e.getClass().getName() + " " + e.getMessage());
+    throw new MetaException("Got exception: " + e.getClass().getName() + " "
+        + e.getMessage());
  }

  /**
@@ -619,60 +747,64 @@
   * @throws SerDeException
   * @throws MetaException
   */
-  public static List<FieldSchema> getFieldsFromDeserializer(String tableName, Deserializer deserializer) throws SerDeException, MetaException {
+  public static List<FieldSchema> getFieldsFromDeserializer(String tableName,
+      Deserializer deserializer) throws SerDeException, MetaException {
    ObjectInspector oi = deserializer.getObjectInspector();
-    String [] names = tableName.split("\\.");
-    String last_name = names[names.length-1];
-    for(int i = 1; i < names.length; i++) {
+    String[] names = tableName.split("\\.");
+    String last_name = names[names.length - 1];
+    for (int i = 1; i < names.length; i++) {
      if (oi instanceof StructObjectInspector) {
-        StructObjectInspector soi = (StructObjectInspector)oi;
+        StructObjectInspector soi = (StructObjectInspector) oi;
        StructField sf = soi.getStructFieldRef(names[i]);
        if (sf == null) {
          throw new MetaException("Invalid Field " + names[i]);
        } else {
          oi = sf.getFieldObjectInspector();
        }
-      }
-      else if (oi instanceof ListObjectInspector && names[i].equalsIgnoreCase("$elem$")) {
-        ListObjectInspector loi = (ListObjectInspector)oi;
+      } else if (oi instanceof ListObjectInspector
+          && names[i].equalsIgnoreCase("$elem$")) {
+        ListObjectInspector loi = (ListObjectInspector) oi;
        oi = loi.getListElementObjectInspector();
-      }
-      else if (oi instanceof MapObjectInspector && names[i].equalsIgnoreCase("$key$")) {
-        MapObjectInspector moi = (MapObjectInspector)oi;
+      } else if (oi instanceof MapObjectInspector
+          && names[i].equalsIgnoreCase("$key$")) {
+        MapObjectInspector moi = (MapObjectInspector) oi;
        oi = moi.getMapKeyObjectInspector();
-      }
-      else if (oi instanceof MapObjectInspector && names[i].equalsIgnoreCase("$value$")) {
-        MapObjectInspector moi = (MapObjectInspector)oi;
+      } else if (oi instanceof MapObjectInspector
+          && names[i].equalsIgnoreCase("$value$")) {
+        MapObjectInspector moi = (MapObjectInspector) oi;
        oi = moi.getMapValueObjectInspector();
-      }
-      else {
+      } else {
        throw new MetaException("Unknown type for " + names[i]);
      }
    }

-    ArrayList<FieldSchema> str_fields = new ArrayList<FieldSchema>();
+    ArrayList<FieldSchema> str_fields = new ArrayList<FieldSchema>();
    // rules on how to recurse the ObjectInspector based on its type
    if (oi.getCategory() != Category.STRUCT) {
-      str_fields.add(new FieldSchema(last_name, oi.getTypeName(), "from deserializer"));
+      str_fields.add(new FieldSchema(last_name, oi.getTypeName(),
+          "from deserializer"));
    } else {
-      List<? extends StructField> fields = ((StructObjectInspector)oi).getAllStructFieldRefs();
-      for(int i=0; i<fields.size(); i++) {
+      List<? extends StructField> fields = ((StructObjectInspector) oi)
+          .getAllStructFieldRefs();
+      for (int i = 0; i < fields.size(); i++) {
        String fieldName = fields.get(i).getFieldName();
-        String fieldTypeName = fields.get(i).getFieldObjectInspector().getTypeName();
-        str_fields.add(new FieldSchema(fieldName, fieldTypeName, "from deserializer"));
+        String fieldTypeName = fields.get(i).getFieldObjectInspector()
+            .getTypeName();
+        str_fields.add(new FieldSchema(fieldName, fieldTypeName,
+            "from deserializer"));
      }
    }
    return str_fields;
  }

  /**
-   * Convert TypeInfo to FieldSchema.
+   * Convert TypeInfo to FieldSchema.
   */
-  public static FieldSchema getFieldSchemaFromTypeInfo(String fieldName, TypeInfo typeInfo) {
-    return new FieldSchema(
-        fieldName, typeInfo.getTypeName(), "generated by TypeInfoUtils.getFieldSchemaFromTypeInfo"
-    );
+  public static FieldSchema getFieldSchemaFromTypeInfo(String fieldName,
+      TypeInfo typeInfo) {
+    return new FieldSchema(fieldName, typeInfo.getTypeName(),
+        "generated by TypeInfoUtils.getFieldSchemaFromTypeInfo");
  }
-
+
}
Index: metastore/src/java/org/apache/hadoop/hive/metastore/AlterHandler.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/AlterHandler.java (revision 901511)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/AlterHandler.java (working copy)
@@ -23,22 +23,30 @@
 import org.apache.hadoop.hive.metastore.api.Table;

 /**
- * Interface for Alter Table and Alter Partition code
+ * Interface for Alter Table and Alter Partition code
 */
 public interface AlterHandler extends Configurable {

  /**
   * handles alter table
-   * @param msdb object to get metadata
-   * @param wh TODO
-   * @param dbname database of the table being altered
-   * @param name original name of the table being altered. same as
-   *   newTable.tableName if alter op is not a rename.
-   * @param newTable new table object
-   * @throws InvalidOperationException thrown if the newTable object is invalid
-   * @throws MetaException thrown if there is any other erro
+   *
+   * @param msdb
+   *          object to get metadata
+   * @param wh
+   *          TODO
+   * @param dbname
+   *          database of the table being altered
+   * @param name
+   *          original name of the table being altered. same as
+   *          newTable.tableName if alter op is not a rename.
+   * @param newTable
+   *          new table object
+   * @throws InvalidOperationException
+   *           thrown if the newTable object is invalid
+   * @throws MetaException
+   *           thrown if there is any other error
   */
-  public abstract void alterTable(RawStore msdb,
-      Warehouse wh, String dbname,
-      String name, Table newTable) throws InvalidOperationException, MetaException;
+  public abstract void alterTable(RawStore msdb, Warehouse wh, String dbname,
+      String name, Table newTable) throws InvalidOperationException,
+      MetaException;
}
Index: metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java (revision 901511)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java (working copy)
@@ -34,15 +34,18 @@
  public abstract void shutdown();

  /**
-   * Opens a new one or the one already created
-   * Every call of this function must have corresponding commit or rollback function call
+   * Opens a new transaction, or returns the one already created. Every call of
+   * this function must have a corresponding commit or rollback function call.
+   *
   * @return an active transaction
   */
  public abstract boolean openTransaction();

  /**
-   * if this is the commit of the first open call then an actual commit is called.
+   * if this is the commit of the first open call then an actual commit is
+   * called.
+   *
   * @return true or false
   */
  public abstract boolean commitTransaction();
@@ -56,7 +59,8 @@

  public abstract boolean createDatabase(String name) throws MetaException;

-  public abstract Database getDatabase(String name) throws NoSuchObjectException;
+  public abstract Database getDatabase(String name)
+      throws NoSuchObjectException;

  public abstract boolean dropDatabase(String dbname);

@@ -68,30 +72,35 @@

  public abstract boolean dropType(String typeName);

-  public abstract void createTable(Table tbl) throws InvalidObjectException, MetaException;
+  public abstract void createTable(Table tbl) throws InvalidObjectException,
+      MetaException;

-  public abstract boolean dropTable(String dbName, String tableName) throws MetaException;
+  public abstract boolean dropTable(String dbName, String tableName)
+      throws MetaException;

-  public abstract Table getTable(String dbName, String tableName) throws MetaException;
+  public abstract Table getTable(String dbName, String tableName)
+      throws MetaException;

-  public abstract boolean addPartition(Partition part) throws InvalidObjectException, MetaException;
+  public abstract boolean addPartition(Partition part)
+      throws InvalidObjectException, MetaException;

-  public abstract Partition getPartition(String dbName, String tableName, List<String> part_vals)
-      throws MetaException;
+  public abstract Partition getPartition(String dbName, String tableName,
+      List<String> part_vals) throws MetaException;

-  public abstract boolean dropPartition(String dbName, String tableName, List<String> part_vals)
-      throws MetaException;
+  public abstract boolean dropPartition(String dbName, String tableName,
+      List<String> part_vals) throws MetaException;

-  public abstract List<Partition> getPartitions(String dbName, String tableName, int max)
-      throws MetaException;
+  public abstract List<Partition> getPartitions(String dbName,
+      String tableName, int max) throws MetaException;

  public abstract void alterTable(String dbname, String name, Table newTable)
      throws InvalidObjectException, MetaException;

-  public List<String> getTables(String dbName, String pattern) throws MetaException;
+  public List<String> getTables(String dbName, String pattern)
+      throws MetaException;

-  public abstract List<String> listPartitionNames(String db_name, String tbl_name, short max_parts)
-      throws MetaException;
+  public abstract List<String> listPartitionNames(String db_name,
+      String tbl_name, short max_parts) throws MetaException;

  public abstract void alterPartition(String db_name, String tbl_name,
      Partition new_part) throws InvalidObjectException, MetaException;
Index: metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java (revision 901511)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java (working copy)
@@ -31,71 +31,99 @@
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.api.UnknownDBException;
 import org.apache.hadoop.hive.metastore.api.UnknownTableException;
-
 import org.apache.thrift.TException;

 /**
- * TODO Unnecessary when the server sides for both dbstore and filestore are merged
+ * TODO Unnecessary when the server sides for both dbstore and filestore are
+ * merged
 */
 public interface IMetaStoreClient {

  public void close();

-  public List<String> getTables(String dbName, String tablePattern) throws MetaException, UnknownTableException, TException,
+  public List<String> getTables(String dbName, String tablePattern)
+      throws MetaException, UnknownTableException, TException,
      UnknownDBException;

  /**
   * Drop the table.
-   * @param tableName The table to drop
-   * @param deleteData Should we delete the underlying data
-   * @throws MetaException Could not drop table properly.
-   * @throws UnknownTableException The table wasn't found.
-   * @throws TException A thrift communication error occurred
-   * @throws NoSuchObjectException The table wasn't found.
+   *
+   * @param tableName
+   *          The table to drop
+   * @param deleteData
+   *          Should we delete the underlying data
+   * @throws MetaException
+   *           Could not drop table properly.
+   * @throws UnknownTableException
+   *           The table wasn't found.
+   * @throws TException
+   *           A thrift communication error occurred
+   * @throws NoSuchObjectException
+   *           The table wasn't found.
   */
-  public void dropTable(String tableName, boolean deleteData)
-      throws MetaException, UnknownTableException, TException, NoSuchObjectException;
+  public void dropTable(String tableName, boolean deleteData)
+      throws MetaException, UnknownTableException, TException,
+      NoSuchObjectException;

  /**
   * Drop the table.
-   * @param dbname The database for this table
-   * @param tableName The table to drop
-   * @throws MetaException Could not drop table properly.
-   * @throws NoSuchObjectException The table wasn't found.
-   * @throws TException A thrift communication error occurred
+   *
+   * @param dbname
+   *          The database for this table
+   * @param tableName
+   *          The table to drop
+   * @throws MetaException
+   *           Could not drop table properly.
+   * @throws NoSuchObjectException
+   *           The table wasn't found.
+ * @throws TException + * A thrift communication error occurred * @throws ExistingDependentsException */ - public void dropTable(String dbname, String tableName, boolean deleteData, - boolean ignoreUknownTab) throws - MetaException, TException, NoSuchObjectException; + public void dropTable(String dbname, String tableName, boolean deleteData, + boolean ignoreUknownTab) throws MetaException, TException, + NoSuchObjectException; - //public void createTable(String tableName, Properties schema) throws MetaException, UnknownTableException, - // TException; + // public void createTable(String tableName, Properties schema) throws + // MetaException, UnknownTableException, + // TException; - public boolean tableExists(String tableName) throws MetaException, TException, UnknownDBException; + public boolean tableExists(String tableName) throws MetaException, + TException, UnknownDBException; /** - * Get a table object. - * @param tableName Name of the table to fetch. + * Get a table object. + * + * @param tableName + * Name of the table to fetch. * @return An object representing the table. - * @throws MetaException Could not fetch the table - * @throws TException A thrift communication error occurred - * @throws NoSuchObjectException In case the table wasn't found. + * @throws MetaException + * Could not fetch the table + * @throws TException + * A thrift communication error occurred + * @throws NoSuchObjectException + * In case the table wasn't found. */ - public Table getTable(String tableName) throws MetaException, - TException, NoSuchObjectException; - + public Table getTable(String tableName) throws MetaException, TException, + NoSuchObjectException; + /** - * Get a table object. - * @param dbName The database the table is located in. - * @param tableName Name of the table to fetch. + * Get a table object. + * + * @param dbName + * The database the table is located in. + * @param tableName + * Name of the table to fetch. * @return An object representing the table. - * @throws MetaException Could not fetch the table - * @throws TException A thrift communication error occurred - * @throws NoSuchObjectException In case the table wasn't found. + * @throws MetaException + * Could not fetch the table + * @throws TException + * A thrift communication error occurred + * @throws NoSuchObjectException + * In case the table wasn't found. */ - public Table getTable(String dbName, String tableName) - throws MetaException, TException, NoSuchObjectException; + public Table getTable(String dbName, String tableName) throws MetaException, + TException, NoSuchObjectException; /** * @param tableName @@ -106,23 +134,31 @@ * @throws AlreadyExistsException * @throws MetaException * @throws TException - * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#append_partition(java.lang.String, java.lang.String, java.util.List) + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#append_partition(java.lang.String, + * java.lang.String, java.util.List) */ - public Partition appendPartition(String tableName, String dbName, List partVals) - throws InvalidObjectException, AlreadyExistsException, MetaException, TException; - + public Partition appendPartition(String tableName, String dbName, + List partVals) throws InvalidObjectException, + AlreadyExistsException, MetaException, TException; + /** * Add a partition to the table. 
-   * @param partition The partition to add
+   *
+   * @param partition
+   *          The partition to add
   * @return The partition added
-   * @throws InvalidObjectException Could not find table to add to
-   * @throws AlreadyExistsException Partition already exists
-   * @throws MetaException Could not add partition
-   * @throws TException Thrift exception
+   * @throws InvalidObjectException
+   *           Could not find table to add to
+   * @throws AlreadyExistsException
+   *           Partition already exists
+   * @throws MetaException
+   *           Could not add partition
+   * @throws TException
+   *           Thrift exception
   */
-  public Partition add_partition(Partition partition)
-      throws InvalidObjectException, AlreadyExistsException,
-      MetaException, TException;
+  public Partition add_partition(Partition partition)
+      throws InvalidObjectException, AlreadyExistsException, MetaException,
+      TException;

  /**
   * @param tblName
   * @param dbName
   * @param partVals
@@ -131,11 +167,12 @@
   * @return the partition object
   * @throws MetaException
   * @throws TException
-   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_partition(java.lang.String, java.lang.String, java.util.List)
+   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_partition(java.lang.String,
+   *      java.lang.String, java.util.List)
   */
-  public Partition getPartition(String tblName, String dbName, List<String> partVals)
-      throws MetaException, TException ;
-
+  public Partition getPartition(String tblName, String dbName,
+      List<String> partVals) throws MetaException, TException;
+
  /**
   * @param tbl_name
   * @param db_name
   * @param max_parts
@@ -145,11 +182,12 @@
   * @return the list of partitions
   * @throws NoSuchObjectException
   * @throws MetaException
   * @throws TException
   */
-  public List<Partition> listPartitions(String db_name, String tbl_name, short max_parts)
-      throws NoSuchObjectException, MetaException, TException;
+  public List<Partition> listPartitions(String db_name, String tbl_name,
+      short max_parts) throws NoSuchObjectException, MetaException, TException;

-  public List<String> listPartitionNames(String db_name, String tbl_name, short max_parts)
-      throws MetaException, TException;
+  public List<String> listPartitionNames(String db_name, String tbl_name,
+      short max_parts) throws MetaException, TException;
+
  /**
   * @param tbl
   * @throws AlreadyExistsException
@@ -159,40 +197,53 @@
   * @throws TException
   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_table(org.apache.hadoop.hive.metastore.api.Table)
   */
-  public void createTable(Table tbl) throws AlreadyExistsException, InvalidObjectException,
-      MetaException, NoSuchObjectException, TException ;
+  public void createTable(Table tbl) throws AlreadyExistsException,
+      InvalidObjectException, MetaException, NoSuchObjectException, TException;

-  public void alter_table(String defaultDatabaseName, String tblName, Table table) throws InvalidOperationException, MetaException, TException;
-  public boolean createDatabase(String name, String location_uri) throws AlreadyExistsException, MetaException, TException;
+  public void alter_table(String defaultDatabaseName, String tblName,
+      Table table) throws InvalidOperationException, MetaException, TException;
+
+  public boolean createDatabase(String name, String location_uri)
+      throws AlreadyExistsException, MetaException, TException;
+
  public boolean dropDatabase(String name) throws MetaException, TException;

  /**
   * @param db_name
   * @param tbl_name
   * @param part_vals
-   * @param deleteData delete the underlying data or just delete the table in metadata
+   * @param deleteData
+   *          delete the underlying data or just delete the table in metadata
   * @return true or false
   * @throws NoSuchObjectException
   * @throws MetaException
   * @throws TException
-   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_partition(java.lang.String, java.lang.String, java.util.List, boolean)
+   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_partition(java.lang.String,
+   *      java.lang.String, java.util.List, boolean)
   */
-  public boolean dropPartition(String db_name, String tbl_name, List<String> part_vals, boolean deleteData)
-      throws NoSuchObjectException, MetaException, TException;
+  public boolean dropPartition(String db_name, String tbl_name,
+      List<String> part_vals, boolean deleteData) throws NoSuchObjectException,
+      MetaException, TException;

  /**
   * updates a partition to new partition
-   * @param dbName database of the old partition
-   * @param tblName table name of the old partition
-   * @param newPart new partition
-   * @throws InvalidOperationException if the old partition does not exist
-   * @throws MetaException if error in updating metadata
-   * @throws TException if error in communicating with metastore server
+   *
+   * @param dbName
+   *          database of the old partition
+   * @param tblName
+   *          table name of the old partition
+   * @param newPart
+   *          new partition
+   * @throws InvalidOperationException
+   *           if the old partition does not exist
+   * @throws MetaException
+   *           if error in updating metadata
+   * @throws TException
+   *           if error in communicating with metastore server
   */
-  public void alter_partition(String dbName, String tblName,
-      Partition newPart) throws InvalidOperationException, MetaException,
-      TException;
-
+  public void alter_partition(String dbName, String tblName, Partition newPart)
+      throws InvalidOperationException, MetaException, TException;
+
  /**
   * @param db
   * @param tableName
@@ -200,10 +251,13 @@
   * @throws UnknownDBException
   * @throws MetaException
   * @throws TException
-   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_fields(java.lang.String, java.lang.String)
+   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_fields(java.lang.String,
+   *      java.lang.String)
   */
-  public List<FieldSchema> getFields(String db, String tableName)
-      throws MetaException, TException, UnknownTableException, UnknownDBException;
+  public List<FieldSchema> getFields(String db, String tableName)
+      throws MetaException, TException, UnknownTableException,
+      UnknownDBException;
+
  /**
   * @param db
   * @param tableName
@@ -211,17 +265,21 @@
   * @throws UnknownDBException
   * @throws MetaException
   * @throws TException
-   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_schema(java.lang.String, java.lang.String)
+   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_schema(java.lang.String,
+   *      java.lang.String)
   */
-  public List<FieldSchema> getSchema(String db, String tableName)
-      throws MetaException, TException, UnknownTableException, UnknownDBException;
-
+  public List<FieldSchema> getSchema(String db, String tableName)
+      throws MetaException, TException, UnknownTableException,
+      UnknownDBException;
+
  /**
-   * @param name name of the configuration property to get the value of
-   * @param defaultValue the value to return if property with the given name doesn't exist
+   * @param name
+   *          name of the configuration property to get the value of
+   * @param defaultValue
+   *          the value to return if property with the given name doesn't exist
   * @return
   * @throws TException
-   * @throws ConfigValSecurityException
+   * @throws ConfigValSecurityException
   */
  public String getConfigValue(String name, String defaultValue)
      throws TException, ConfigValSecurityException;
Index:
metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java =================================================================== --- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java (revision 901511) +++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java (working copy) @@ -40,7 +40,6 @@ import org.apache.hadoop.hive.metastore.api.Type; import org.apache.hadoop.hive.metastore.api.UnknownDBException; import org.apache.hadoop.hive.metastore.api.UnknownTableException; - import org.apache.thrift.TException; import org.apache.thrift.protocol.TBinaryProtocol; import org.apache.thrift.protocol.TProtocol; @@ -56,54 +55,57 @@ private TTransport transport = null; private boolean open = false; private URI metastoreUris[]; - private boolean standAloneClient = false; + private final boolean standAloneClient = false; // for thrift connects private int retries = 5; static final private Log LOG = LogFactory.getLog("hive.metastore"); - public HiveMetaStoreClient(HiveConf conf) throws MetaException { - if(conf == null) { + if (conf == null) { conf = new HiveConf(HiveMetaStoreClient.class); } - + boolean localMetaStore = conf.getBoolean("hive.metastore.local", false); - if(localMetaStore) { - // instantiate the metastore server handler directly instead of connecting through the network + if (localMetaStore) { + // instantiate the metastore server handler directly instead of connecting + // through the network client = new HiveMetaStore.HMSHandler("hive client", conf); - this.open = true; + open = true; return; } - + // get the number retries retries = conf.getInt("hive.metastore.connect.retries", 5); // user wants file store based configuration - if(conf.getVar(HiveConf.ConfVars.METASTOREURIS) != null) { - String metastoreUrisString []= conf.getVar(HiveConf.ConfVars.METASTOREURIS).split(","); - this.metastoreUris = new URI[metastoreUrisString.length]; + if (conf.getVar(HiveConf.ConfVars.METASTOREURIS) != null) { + String metastoreUrisString[] = conf.getVar( + HiveConf.ConfVars.METASTOREURIS).split(","); + metastoreUris = new URI[metastoreUrisString.length]; try { int i = 0; - for(String s: metastoreUrisString) { + for (String s : metastoreUrisString) { URI tmpUri = new URI(s); - if(tmpUri.getScheme() == null) { - throw new IllegalArgumentException("URI: "+s+" does not have a scheme"); + if (tmpUri.getScheme() == null) { + throw new IllegalArgumentException("URI: " + s + + " does not have a scheme"); } - this.metastoreUris[i++]= tmpUri; + metastoreUris[i++] = tmpUri; } } catch (IllegalArgumentException e) { throw (e); - } catch(Exception e) { + } catch (Exception e) { MetaStoreUtils.logAndThrowMetaException(e); } - } else if(conf.getVar(HiveConf.ConfVars.METASTOREDIRECTORY) != null) { - this.metastoreUris = new URI[1]; + } else if (conf.getVar(HiveConf.ConfVars.METASTOREDIRECTORY) != null) { + metastoreUris = new URI[1]; try { - this.metastoreUris[0] = new URI(conf.getVar(HiveConf.ConfVars.METASTOREDIRECTORY)); - } catch(URISyntaxException e) { + metastoreUris[0] = new URI(conf + .getVar(HiveConf.ConfVars.METASTOREDIRECTORY)); + } catch (URISyntaxException e) { MetaStoreUtils.logAndThrowMetaException(e); } } else { @@ -111,9 +113,9 @@ throw new MetaException("MetaStoreURIs not found in conf file"); } // finally open the store - this.open(); + open(); } - + /** * @param dbname * @param tbl_name @@ -121,7 +123,8 @@ * @throws InvalidOperationException * @throws MetaException * @throws TException - * @see 
org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#alter_table(java.lang.String, java.lang.String, org.apache.hadoop.hive.metastore.api.Table) + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#alter_table(java.lang.String, + * java.lang.String, org.apache.hadoop.hive.metastore.api.Table) */ public void alter_table(String dbname, String tbl_name, Table new_tbl) throws InvalidOperationException, MetaException, TException { @@ -129,7 +132,7 @@ } private void open() throws MetaException { - for(URI store: this.metastoreUris) { + for (URI store : metastoreUris) { LOG.info("Trying to connect to metastore with URI " + store); try { openStore(store); @@ -141,63 +144,67 @@ break; } } - if(!open) { - throw new MetaException("Could not connect to meta store using any of the URIs provided"); + if (!open) { + throw new MetaException( + "Could not connect to meta store using any of the URIs provided"); } LOG.info("Connected to metastore."); } - + private void openStore(URI store) throws MetaException { open = false; transport = new TSocket(store.getHost(), store.getPort()); - ((TSocket)transport).setTimeout(20000); + ((TSocket) transport).setTimeout(20000); TProtocol protocol = new TBinaryProtocol(transport); client = new ThriftHiveMetastore.Client(protocol); - for(int i = 0; i < retries && !this.open; ++i) { + for (int i = 0; i < retries && !open; ++i) { try { transport.open(); open = true; - } catch(TTransportException e) { + } catch (TTransportException e) { LOG.warn("failed to connect to MetaStore, re-trying..."); try { Thread.sleep(1000); - } catch(InterruptedException ignore) { } + } catch (InterruptedException ignore) { + } } } - if(!open) { + if (!open) { throw new MetaException("could not connect to meta store"); } } - + public void close() { open = false; - if((transport != null) && transport.isOpen()) { + if ((transport != null) && transport.isOpen()) { transport.close(); } - if(standAloneClient) { + if (standAloneClient) { try { client.shutdown(); } catch (TException e) { - //TODO:pc cleanup the exceptions + // TODO:pc cleanup the exceptions LOG.error("Unable to shutdown local metastore client"); LOG.error(e.getStackTrace()); - //throw new RuntimeException(e.getMessage()); + // throw new RuntimeException(e.getMessage()); } } } - public void dropTable(String tableName, boolean deleteData) throws MetaException, NoSuchObjectException { + public void dropTable(String tableName, boolean deleteData) + throws MetaException, NoSuchObjectException { // assume that it is default database try { - this.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName, deleteData, false); + this.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName, + deleteData, false); } catch (NoSuchObjectException e) { throw e; } catch (Exception e) { MetaStoreUtils.logAndThrowMetaException(e); } } - + /** * @param new_part * @return the added partition @@ -207,8 +214,9 @@ * @throws TException * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#add_partition(org.apache.hadoop.hive.metastore.api.Partition) */ - public Partition add_partition(Partition new_part) throws InvalidObjectException, - AlreadyExistsException, MetaException, TException { + public Partition add_partition(Partition new_part) + throws InvalidObjectException, AlreadyExistsException, MetaException, + TException { return client.add_partition(new_part); } @@ -221,10 +229,12 @@ * @throws AlreadyExistsException * @throws MetaException * @throws TException - * @see 
org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#append_partition(java.lang.String, java.lang.String, java.util.List)
+   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#append_partition(java.lang.String,
+   *      java.lang.String, java.util.List)
   */
-  public Partition appendPartition(String db_name, String table_name, List<String> part_vals)
-      throws InvalidObjectException, AlreadyExistsException, MetaException, TException {
+  public Partition appendPartition(String db_name, String table_name,
+      List<String> part_vals) throws InvalidObjectException,
+      AlreadyExistsException, MetaException, TException {
    return client.append_partition(db_name, table_name, part_vals);
  }

  /**
   * @param name
   * @param location_uri
   * @return true or false
   * @throws AlreadyExistsException
   * @throws MetaException
   * @throws TException
-   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_database(java.lang.String, java.lang.String)
+   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_database(java.lang.String,
+   *      java.lang.String)
   */
-  public boolean createDatabase(String name, String location_uri) throws AlreadyExistsException,
-      MetaException, TException {
+  public boolean createDatabase(String name, String location_uri)
+      throws AlreadyExistsException, MetaException, TException {
    return client.create_database(name, location_uri);
  }

@@ -249,8 +260,8 @@
   * @throws TException
   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_table(org.apache.hadoop.hive.metastore.api.Table)
   */
-  public void createTable(Table tbl) throws AlreadyExistsException, InvalidObjectException,
-      MetaException, NoSuchObjectException, TException {
+  public void createTable(Table tbl) throws AlreadyExistsException,
+      InvalidObjectException, MetaException, NoSuchObjectException, TException {
    client.create_table(tbl);
  }

@@ -263,8 +274,8 @@
   * @throws TException
   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_type(org.apache.hadoop.hive.metastore.api.Type)
   */
-  public boolean createType(Type type) throws AlreadyExistsException, InvalidObjectException,
-      MetaException, TException {
+  public boolean createType(Type type) throws AlreadyExistsException,
+      InvalidObjectException, MetaException, TException {
    return client.create_type(type);
  }

@@ -287,29 +298,34 @@
   * @throws NoSuchObjectException
   * @throws MetaException
   * @throws TException
-   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_partition(java.lang.String, java.lang.String, java.util.List, boolean)
+   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_partition(java.lang.String,
+   *      java.lang.String, java.util.List, boolean)
   */
-  public boolean dropPartition(String db_name, String tbl_name, List<String> part_vals)
-      throws NoSuchObjectException, MetaException, TException {
-    return dropPartition(db_name, tbl_name, part_vals, true);
-  }
+  public boolean dropPartition(String db_name, String tbl_name,
+      List<String> part_vals) throws NoSuchObjectException, MetaException,
+      TException {
+    return dropPartition(db_name, tbl_name, part_vals, true);
+  }

  /**
   * @param db_name
   * @param tbl_name
   * @param part_vals
-   * @param deleteData delete the underlying data or just delete the table in metadata
+   * @param deleteData
+   *          delete the underlying data or just delete the table in metadata
   * @return true or false
   * @throws NoSuchObjectException
   * @throws MetaException
   * @throws TException
-   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_partition(java.lang.String, java.lang.String, java.util.List, boolean)
+   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_partition(java.lang.String,
+   *      java.lang.String, java.util.List, boolean)
   */
-  public boolean dropPartition(String db_name, String tbl_name, List<String> part_vals, boolean deleteData)
-      throws NoSuchObjectException, MetaException, TException {
+  public boolean dropPartition(String db_name, String tbl_name,
+      List<String> part_vals, boolean deleteData) throws NoSuchObjectException,
+      MetaException, TException {
    return client.drop_partition(db_name, tbl_name, part_vals, deleteData);
  }
-
+
  /**
   * @param name
   * @param dbname
   * @throws NoSuchObjectException
   * @throws ExistingDependentsException
   * @throws MetaException
   * @throws TException
-   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_table(java.lang.String, java.lang.String, boolean)
+   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_table(java.lang.String,
+   *      java.lang.String, boolean)
   */
-  public void dropTable(String dbname, String name) throws NoSuchObjectException,
-      MetaException, TException {
-    dropTable(dbname, name, true, true);
-  }
+  public void dropTable(String dbname, String name)
+      throws NoSuchObjectException, MetaException, TException {
+    dropTable(dbname, name, true, true);
+  }

  /**
   * @param dbname
   * @param name
-   * @param deleteData delete the underlying data or just delete the table in metadata
+   * @param deleteData
+   *          delete the underlying data or just delete the table in metadata
   * @throws NoSuchObjectException
   * @throws ExistingDependentsException
   * @throws MetaException
   * @throws TException
-   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_table(java.lang.String, java.lang.String, boolean)
+   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_table(java.lang.String,
+   *      java.lang.String, boolean)
   */
-  public void dropTable(String dbname, String name, boolean deleteData, boolean ignoreUknownTab) throws
-      MetaException, TException, NoSuchObjectException {
+  public void dropTable(String dbname, String name, boolean deleteData,
+      boolean ignoreUknownTab) throws MetaException, TException,
+      NoSuchObjectException {
    try {
      client.drop_table(dbname, name, deleteData);
    } catch (NoSuchObjectException e) {
-      if(!ignoreUknownTab) {
+      if (!ignoreUknownTab) {
        throw e;
      }
    }
@@ -363,7 +383,8 @@
   * @throws TException
   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_type_all(java.lang.String)
   */
-  public Map<String, Type> getTypeAll(String name) throws MetaException, TException {
+  public Map<String, Type> getTypeAll(String name) throws MetaException,
+      TException {
    return client.get_type_all(name);
  }

@@ -386,8 +407,8 @@
   * @throws MetaException
   * @throws TException
   */
-  public List<Partition> listPartitions(String db_name, String tbl_name, short max_parts)
-      throws NoSuchObjectException, MetaException, TException {
+  public List<Partition> listPartitions(String db_name, String tbl_name,
+      short max_parts) throws NoSuchObjectException, MetaException, TException {
    return client.get_partitions(db_name, tbl_name, max_parts);
  }

@@ -399,8 +420,8 @@
   * @throws TException
   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_database(java.lang.String)
   */
-  public Database getDatabase(String name) throws NoSuchObjectException, MetaException,
-      TException {
+  public Database getDatabase(String name) throws NoSuchObjectException,
+      MetaException, TException {
    return client.get_database(name);
  }

@@ -411,13 +432,14 @@
   * @return the partition
   * @throws MetaException
   * @throws TException
-   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_partition(java.lang.String, java.lang.String, java.util.List)
+   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_partition(java.lang.String,
+   *      java.lang.String, java.util.List)
   */
-  public Partition getPartition(String db_name, String tbl_name, List<String> part_vals)
-      throws MetaException, TException {
+  public Partition getPartition(String db_name, String tbl_name,
+      List<String> part_vals) throws MetaException, TException {
    return client.get_partition(db_name, tbl_name, part_vals);
  }
-
+
  /**
   * @param name
   * @param dbname
@@ -425,10 +447,12 @@
   * @throws NoSuchObjectException
   * @throws MetaException
   * @throws TException
-   * @throws NoSuchObjectException
-   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_table(java.lang.String, java.lang.String)
+   * @throws NoSuchObjectException
+   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_table(java.lang.String,
+   *      java.lang.String)
   */
-  public Table getTable(String dbname, String name) throws MetaException, TException, NoSuchObjectException {
+  public Table getTable(String dbname, String name) throws MetaException,
+      TException, NoSuchObjectException {
    return client.get_table(dbname, name);
  }

@@ -442,23 +466,24 @@
  public Type getType(String name) throws MetaException, TException {
    return client.get_type(name);
  }
-
-  public List<String> getTables(String dbname, String tablePattern) throws MetaException {
+
+  public List<String> getTables(String dbname, String tablePattern)
+      throws MetaException {
    try {
      return client.get_tables(dbname, tablePattern);
    } catch (Exception e) {
      MetaStoreUtils.logAndThrowMetaException(e);
    }
-    return null;
+    return null;
  }
-
+
  public List<String> getTables(String tablePattern) throws MetaException {
    String dbname = MetaStoreUtils.DEFAULT_DATABASE_NAME;
-    return this.getTables(dbname, tablePattern);
+    return this.getTables(dbname, tablePattern);
  }

-  public boolean tableExists(String tableName) throws MetaException, TException,
-      UnknownDBException {
+  public boolean tableExists(String tableName) throws MetaException,
+      TException, UnknownDBException {
    try {
      client.get_table(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName);
    } catch (NoSuchObjectException e) {
@@ -467,21 +492,21 @@
    return true;
  }

-  public Table getTable(String tableName) throws MetaException, TException, NoSuchObjectException {
+  public Table getTable(String tableName) throws MetaException, TException,
+      NoSuchObjectException {
    return getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName);
  }

-  public List<String> listPartitionNames(String dbName, String tblName, short max)
-      throws MetaException, TException {
+  public List<String> listPartitionNames(String dbName, String tblName,
+      short max) throws MetaException, TException {
    return client.get_partition_names(dbName, tblName, max);
  }

-  public void alter_partition(String dbName, String tblName,
-      Partition newPart) throws InvalidOperationException, MetaException,
-      TException {
+  public void alter_partition(String dbName, String tblName, Partition newPart)
+      throws InvalidOperationException, MetaException, TException {
    client.alter_partition(dbName, tblName, newPart);
  }
-
+
  /**
   * @param db
   * @param tableName
@@ -489,10 +514,12 @@
   * @throws UnknownDBException
   * @throws MetaException
   * @throws TException
-   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_fields(java.lang.String, java.lang.String)
+   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_fields(java.lang.String,
+   *      java.lang.String)
   */
-  public List<FieldSchema> getFields(String db, String tableName)
-      throws MetaException, TException, UnknownTableException, UnknownDBException {
+  public List<FieldSchema> getFields(String db, String tableName)
+      throws MetaException, TException, UnknownTableException,
+      UnknownDBException {
    return client.get_fields(db, tableName);
  }

@@ -503,14 +530,16 @@
   * @param db
   * @param tableName
   * @throws UnknownDBException
   * @throws MetaException
   * @throws TException
-   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_schema(java.lang.String, java.lang.String)
+   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_schema(java.lang.String,
+   *      java.lang.String)
   */
-  public List<FieldSchema> getSchema(String db, String tableName)
-      throws MetaException, TException, UnknownTableException, UnknownDBException {
+  public List<FieldSchema> getSchema(String db, String tableName)
+      throws MetaException, TException, UnknownTableException,
+      UnknownDBException {
    return client.get_schema(db, tableName);
  }

-  public String getConfigValue(String name, String defaultValue)
+  public String getConfigValue(String name, String defaultValue)
      throws TException, ConfigValSecurityException {
    return client.get_config_value(name, defaultValue);
  }
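Reviewer note: a quick usage sketch of the MetaStoreUtils DDL helpers reformatted in this patch. Illustrative only; the class name DdlSketch is hypothetical, everything else is the API exactly as it appears in the diff.

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.hadoop.hive.metastore.MetaStoreUtils;
    import org.apache.hadoop.hive.metastore.api.FieldSchema;

    public class DdlSketch {
      public static void main(String[] args) {
        // typeToThriftType splits the type string on letter/digit boundaries and
        // maps each token through typeToThriftTypeMap, so "map<string,int>"
        // should come back as "map<string,i32>"; unknown tokens pass through.
        System.out.println(MetaStoreUtils.typeToThriftType("map<string,int>"));

        // getFullDDLFromFieldSchema returns three '#'-separated parts, per its
        // javadoc above: the Thrift struct DDL, the comma-separated column
        // names, and the colon-separated column types.
        List<FieldSchema> cols = new ArrayList<FieldSchema>();
        cols.add(new FieldSchema("a", "string", "from deserializer"));
        cols.add(new FieldSchema("b", "int", "from deserializer"));
        System.out.println(MetaStoreUtils.getFullDDLFromFieldSchema("result", cols));
      }
    }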
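Reviewer note: a minimal sketch of driving the patched HiveMetaStoreClient against an embedded metastore. Setting hive.metastore.local=true makes the constructor instantiate HMSHandler directly instead of opening a Thrift socket, as the constructor code in this diff shows; the ".*" table pattern and the ClientSketch class name are assumptions, not part of the patch.

    import java.util.List;

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.FieldSchema;

    public class ClientSketch {
      public static void main(String[] args) throws Exception {
        HiveConf conf = new HiveConf(ClientSketch.class);
        // Embedded handler: no metastore server process required.
        conf.setBoolean("hive.metastore.local", true);
        HiveMetaStoreClient client = new HiveMetaStoreClient(conf);
        try {
          // List tables in the default database and print each column.
          List<String> tables = client.getTables("default", ".*");
          for (String tbl : tables) {
            for (FieldSchema fs : client.getFields("default", tbl)) {
              System.out.println(tbl + "." + fs.getName() + ": " + fs.getType());
            }
          }
        } finally {
          client.close();
        }
      }
    }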
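Reviewer note: for the remote path, the client constructor splits hive.metastore.uris on commas, rejects any URI without an explicit scheme, and retries each connection up to hive.metastore.connect.retries times (default 5) with a one-second sleep between attempts, throwing MetaException only after every URI has failed. A hedged configuration sketch; the host names and port number are placeholders, not values taken from this patch.

    HiveConf conf = new HiveConf();
    // Tried in order; each store gets up to `retries` connection attempts.
    conf.setVar(HiveConf.ConfVars.METASTOREURIS,
        "thrift://ms-host-1:9083,thrift://ms-host-2:9083");
    conf.setInt("hive.metastore.connect.retries", 3);
    HiveMetaStoreClient client = new HiveMetaStoreClient(conf);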
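Reviewer note: the RawStore.openTransaction() javadoc in this patch requires every open to be paired with a commit or rollback. A sketch of that calling convention, assuming the rollbackTransaction() counterpart that the javadoc implies exists on RawStore:

    // ms is a RawStore and tbl a Table, both obtained elsewhere.
    boolean committed = false;
    ms.openTransaction();
    try {
      ms.createTable(tbl);                // any sequence of store operations
      committed = ms.commitTransaction(); // actual commit only for the outermost open
    } finally {
      if (!committed) {
        ms.rollbackTransaction();         // assumed counterpart; undoes the work
      }
    }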