Index: src/test/org/apache/hcatalog/api/TestHCatClient.java =================================================================== --- src/test/org/apache/hcatalog/api/TestHCatClient.java (revision 0) +++ src/test/org/apache/hcatalog/api/TestHCatClient.java (working copy) @@ -0,0 +1,279 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hcatalog.api; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import junit.framework.TestCase; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.HiveMetaStore; +import org.apache.hadoop.hive.metastore.api.PartitionEventType; +import org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat; +import org.apache.hadoop.hive.ql.io.RCFileInputFormat; +import org.apache.hadoop.hive.ql.io.RCFileOutputFormat; +import org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe; +import org.apache.hadoop.mapred.TextInputFormat; +import org.apache.hcatalog.NoExitSecurityManager; +import org.apache.hcatalog.cli.SemanticAnalysis.HCatSemanticAnalyzer; +import org.apache.hcatalog.common.HCatException; +import org.apache.hcatalog.data.schema.HCatFieldSchema; +import org.apache.hcatalog.data.schema.HCatFieldSchema.Type; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class TestHCatClient extends TestCase { + private static final Logger LOG = LoggerFactory.getLogger(TestHCatClient.class); + private boolean isServerRunning = false; + private static final String msPort = "20101"; + private HiveConf hcatConf; + private Thread t; + private SecurityManager securityManager; + + private static class RunMS implements Runnable { + + @Override + public void run() { + try { + HiveMetaStore.main(new String[] { "-v", "-p", msPort }); + } catch (Throwable t) { + LOG.error("Exiting. 
Got exception from metastore: ", t);
+ }
+ }
+ }
+
+ @Override
+ protected void tearDown() throws Exception {
+ LOG.info("Shutting down metastore.");
+ System.setSecurityManager(securityManager);
+ }
+
+ @Override
+ protected void setUp() throws Exception {
+
+ if (isServerRunning) {
+ return;
+ }
+
+ t = new Thread(new RunMS());
+ t.start();
+ Thread.sleep(40000);
+
+ isServerRunning = true;
+
+ securityManager = System.getSecurityManager();
+ System.setSecurityManager(new NoExitSecurityManager());
+ hcatConf = new HiveConf(this.getClass());
+ hcatConf.set("hive.metastore.local", "false");
+ hcatConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:"
+ + msPort);
+ hcatConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTRETRIES, 3);
+ hcatConf.set(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname,
+ HCatSemanticAnalyzer.class.getName());
+ hcatConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
+ hcatConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
+ hcatConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname,
+ "false");
+ System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " ");
+ System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " ");
+ }
+
+ public void testBasicDDLCommands() throws Exception {
+ String db = "testdb";
+ String tableOne = "testTable1";
+ String tableTwo = "testTable2";
+ HCatClient client = HCatClient.create(new Configuration(hcatConf));
+ client.dropDatabase(db, true, HCatClient.DROP_DB_MODE.CASCADE);
+
+ HCatCreateDBDesc dbDesc = HCatCreateDBDesc.create(db).ifNotExists(false)
+ .build();
+ client.createDatabase(dbDesc);
+ List<String> dbNames = client.listDatabaseNamesByPattern("*");
+ assertTrue(dbNames.contains("default"));
+ assertTrue(dbNames.contains(db));
+
+ HCatDatabase testDb = client.getDatabase(db);
+ assertTrue(testDb.getComment() == null);
+ assertTrue(testDb.getProperties().size() == 0);
+ String warehouseDir = System
+ .getProperty("hive.metastore.warehouse.dir");
+ assertTrue(testDb.getLocation().equals(
+ "file:" + warehouseDir + "/" + db + ".db"));
+ ArrayList<HCatFieldSchema> cols = new ArrayList<HCatFieldSchema>();
+ cols.add(new HCatFieldSchema("id", Type.INT, "id comment"));
+ cols.add(new HCatFieldSchema("value", Type.STRING, "value comment"));
+ HCatCreateTableDesc tableDesc = HCatCreateTableDesc
+ .create(db, tableOne, cols).fileFormat("rcfile").build();
+ client.createTable(tableDesc);
+ HCatTable table1 = client.getTable(db, tableOne);
+ assertTrue(table1.getInputFileFormat().equalsIgnoreCase(
+ RCFileInputFormat.class.getName()));
+ assertTrue(table1.getOutputFileFormat().equalsIgnoreCase(
+ RCFileOutputFormat.class.getName()));
+ assertTrue(table1.getSerdeLib().equalsIgnoreCase(
+ ColumnarSerDe.class.getName()));
+ assertTrue(table1.getCols().equals(cols));
+ // Since "ifNotExists" was not set to true, trying to create the same table
+ // again
+ // will result in an exception.
+ try {
+ client.createTable(tableDesc);
+ } catch (HCatException e) {
+ assertTrue(e.getMessage().contains(
+ "AlreadyExistsException while creating table."));
+ }
+
+ client.dropTable(db, tableOne, true);
+ HCatCreateTableDesc tableDesc2 = HCatCreateTableDesc.create(db,
+ tableTwo, cols).build();
+ client.createTable(tableDesc2);
+ HCatTable table2 = client.getTable(db, tableTwo);
+ assertTrue(table2.getInputFileFormat().equalsIgnoreCase(
+ TextInputFormat.class.getName()));
+ assertTrue(table2.getOutputFileFormat().equalsIgnoreCase(
+ IgnoreKeyTextOutputFormat.class.getName()));
+ assertTrue(table2.getLocation().equalsIgnoreCase(
+ "file:" + warehouseDir + "/" + db + ".db/" + tableTwo));
+ client.close();
+ }
+
+ public void testPartitionsHCatClientImpl() throws Exception {
+ HCatClient client = HCatClient.create(new Configuration(hcatConf));
+ String dbName = "ptnDB";
+ String tableName = "pageView";
+ client.dropDatabase(dbName, true, HCatClient.DROP_DB_MODE.CASCADE);
+
+ HCatCreateDBDesc dbDesc = HCatCreateDBDesc.create(dbName)
+ .ifNotExists(true).build();
+ client.createDatabase(dbDesc);
+ ArrayList<HCatFieldSchema> cols = new ArrayList<HCatFieldSchema>();
+ cols.add(new HCatFieldSchema("userid", Type.INT, "id columns"));
+ cols.add(new HCatFieldSchema("viewtime", Type.BIGINT,
+ "view time columns"));
+ cols.add(new HCatFieldSchema("pageurl", Type.STRING, ""));
+ cols.add(new HCatFieldSchema("ip", Type.STRING,
+ "IP Address of the User"));
+
+ ArrayList<HCatFieldSchema> ptnCols = new ArrayList<HCatFieldSchema>();
+ ptnCols.add(new HCatFieldSchema("dt", Type.STRING, "date column"));
+ ptnCols.add(new HCatFieldSchema("country", Type.STRING,
+ "country column"));
+ HCatCreateTableDesc tableDesc = HCatCreateTableDesc
+ .create(dbName, tableName, cols).fileFormat("sequencefile")
+ .partCols(ptnCols).build();
+ client.createTable(tableDesc);
+
+ Map<String, String> firstPtn = new HashMap<String, String>();
+ firstPtn.put("dt", "04/30/2012");
+ firstPtn.put("country", "usa");
+ HCatAddPartitionDesc addPtn = HCatAddPartitionDesc.create(dbName,
+ tableName, null, firstPtn).build();
+ client.addPartition(addPtn);
+
+ Map<String, String> secondPtn = new HashMap<String, String>();
+ secondPtn.put("dt", "04/12/2012");
+ secondPtn.put("country", "brazil");
+ HCatAddPartitionDesc addPtn2 = HCatAddPartitionDesc.create(dbName,
+ tableName, null, secondPtn).build();
+ client.addPartition(addPtn2);
+
+ Map<String, String> thirdPtn = new HashMap<String, String>();
+ thirdPtn.put("dt", "04/13/2012");
+ thirdPtn.put("country", "argentina");
+ HCatAddPartitionDesc addPtn3 = HCatAddPartitionDesc.create(dbName,
+ tableName, null, thirdPtn).build();
+ client.addPartition(addPtn3);
+
+ List<HCatPartition> ptnList = client.listPartitionsByFilter(dbName,
+ tableName, null);
+ assertTrue(ptnList.size() == 3);
+
+ List<HCatPartition> ptnListTwo = client.listPartitionsByFilter(dbName,
+ tableName, "country = \"argentina\"");
+ assertTrue(ptnListTwo.size() == 1);
+
+ client.markPartitionForEvent(dbName, tableName, thirdPtn,
+ PartitionEventType.LOAD_DONE);
+ boolean isMarked = client.isPartitionMarkedForEvent(dbName, tableName,
+ thirdPtn, PartitionEventType.LOAD_DONE);
+ assertTrue(isMarked);
+ client.close();
+ }
+
+ public void testDatabaseLocation() throws Exception{
+ HCatClient client = HCatClient.create(new Configuration(hcatConf));
+ String dbName = "locationDB";
+ client.dropDatabase(dbName, true, HCatClient.DROP_DB_MODE.CASCADE);
+
+ HCatCreateDBDesc dbDesc = HCatCreateDBDesc.create(dbName)
+ .ifNotExists(true).location("/tmp/"+dbName).build();
+ client.createDatabase(dbDesc);
+ HCatDatabase newDB = client.getDatabase(dbName);
+ assertTrue(newDB.getLocation().equalsIgnoreCase("file:/tmp/" +
dbName)); + client.close(); + } + + public void testCreateTableLike() throws Exception { + HCatClient client = HCatClient.create(new Configuration(hcatConf)); + String tableName = "tableone"; + String cloneTable = "tabletwo"; + client.dropTable(null, tableName, true); + client.dropTable(null, cloneTable, true); + + ArrayList cols = new ArrayList(); + cols.add(new HCatFieldSchema("id", Type.INT, "id columns")); + cols.add(new HCatFieldSchema("value", Type.STRING, "id columns")); + HCatCreateTableDesc tableDesc = HCatCreateTableDesc + .create(null, tableName, cols).fileFormat("rcfile").build(); + client.createTable(tableDesc); + // create a new table similar to previous one. + client.createTableLike(null, tableName, cloneTable, true, false, null); + List tables = client.listTableNamesByPattern(null, "table*"); + assertTrue(tables.size() ==2); + client.close(); + } + + public void testRenameTable() throws Exception { + HCatClient client = HCatClient.create(new Configuration(hcatConf)); + String tableName = "temptable"; + String newName = "mytable"; + client.dropTable(null, tableName, true); + client.dropTable(null, newName, true); + ArrayList cols = new ArrayList(); + cols.add(new HCatFieldSchema("id", Type.INT, "id columns")); + cols.add(new HCatFieldSchema("value", Type.STRING, "id columns")); + HCatCreateTableDesc tableDesc = HCatCreateTableDesc + .create(null, tableName, cols).fileFormat("rcfile").build(); + client.createTable(tableDesc); + client.renameTable(null, tableName,newName); + try { + client.getTable(null, tableName); + } catch(HCatException exp){ + assertTrue(exp.getMessage().contains("NoSuchObjectException while fetching table")); + } + HCatTable newTable = client.getTable(null, newName); + assertTrue(newTable != null); + assertTrue(newTable.getTableName().equals(newName)); + client.close(); + } + +} Index: src/java/org/apache/hcatalog/common/HCatConstants.java =================================================================== --- src/java/org/apache/hcatalog/common/HCatConstants.java (revision 1345356) +++ src/java/org/apache/hcatalog/common/HCatConstants.java (working copy) @@ -108,5 +108,5 @@ // Hadoop Conf Var Names public static final String CONF_MAPREDUCE_JOB_CREDENTIALS_BINARY = "mapreduce.job.credentials.binary"; - + public static final String HCAT_CLIENT_IMPL_CLASS = "hcat.client.impl.class"; } Index: src/java/org/apache/hcatalog/data/schema/HCatSchemaUtils.java =================================================================== --- src/java/org/apache/hcatalog/data/schema/HCatSchemaUtils.java (revision 1345356) +++ src/java/org/apache/hcatalog/data/schema/HCatSchemaUtils.java (working copy) @@ -110,24 +110,24 @@ public static HCatFieldSchema getHCatFieldSchema(FieldSchema fs) throws HCatException { String fieldName = fs.getName(); TypeInfo baseTypeInfo = TypeInfoUtils.getTypeInfoFromTypeString(fs.getType()); - return getHCatFieldSchema(fieldName, baseTypeInfo); + return getHCatFieldSchema(fieldName, baseTypeInfo, fs.getComment()); } - private static HCatFieldSchema getHCatFieldSchema(String fieldName, TypeInfo fieldTypeInfo) throws HCatException { + private static HCatFieldSchema getHCatFieldSchema(String fieldName, TypeInfo fieldTypeInfo, String comment) throws HCatException { Category typeCategory = fieldTypeInfo.getCategory(); HCatFieldSchema hCatFieldSchema; if (Category.PRIMITIVE == typeCategory){ - hCatFieldSchema = new HCatFieldSchema(fieldName,getPrimitiveHType(fieldTypeInfo),null); + hCatFieldSchema = new 
HCatFieldSchema(fieldName,getPrimitiveHType(fieldTypeInfo),comment); } else if (Category.STRUCT == typeCategory) { HCatSchema subSchema = constructHCatSchema((StructTypeInfo)fieldTypeInfo); - hCatFieldSchema = new HCatFieldSchema(fieldName,HCatFieldSchema.Type.STRUCT,subSchema,null); + hCatFieldSchema = new HCatFieldSchema(fieldName,HCatFieldSchema.Type.STRUCT,subSchema,comment); } else if (Category.LIST == typeCategory) { HCatSchema subSchema = getHCatSchema(((ListTypeInfo)fieldTypeInfo).getListElementTypeInfo()); - hCatFieldSchema = new HCatFieldSchema(fieldName,HCatFieldSchema.Type.ARRAY,subSchema,null); + hCatFieldSchema = new HCatFieldSchema(fieldName,HCatFieldSchema.Type.ARRAY,subSchema,comment); } else if (Category.MAP == typeCategory) { HCatFieldSchema.Type mapKeyType = getPrimitiveHType(((MapTypeInfo)fieldTypeInfo).getMapKeyTypeInfo()); HCatSchema subSchema = getHCatSchema(((MapTypeInfo)fieldTypeInfo).getMapValueTypeInfo()); - hCatFieldSchema = new HCatFieldSchema(fieldName,HCatFieldSchema.Type.MAP,mapKeyType,subSchema,null); + hCatFieldSchema = new HCatFieldSchema(fieldName,HCatFieldSchema.Type.MAP,mapKeyType,subSchema,comment); } else{ throw new TypeNotPresentException(fieldTypeInfo.getTypeName(),null); } @@ -174,7 +174,7 @@ private static HCatSchema constructHCatSchema(StructTypeInfo stypeInfo) throws HCatException { CollectionBuilder builder = getStructSchemaBuilder(); for (String fieldName : ((StructTypeInfo)stypeInfo).getAllStructFieldNames()){ - builder.addField(getHCatFieldSchema(fieldName,((StructTypeInfo)stypeInfo).getStructFieldTypeInfo(fieldName))); + builder.addField(getHCatFieldSchema(fieldName,((StructTypeInfo)stypeInfo).getStructFieldTypeInfo(fieldName),null)); } return builder.build(); } @@ -189,7 +189,7 @@ hCatSchema = getStructSchemaBuilder().addField(new HCatFieldSchema(null,Type.STRUCT,subSchema,null)).build(); } else if (Category.LIST == typeCategory) { CollectionBuilder builder = getListSchemaBuilder(); - builder.addField(getHCatFieldSchema(null,((ListTypeInfo)typeInfo).getListElementTypeInfo())); + builder.addField(getHCatFieldSchema(null,((ListTypeInfo)typeInfo).getListElementTypeInfo(), null)); hCatSchema = new HCatSchema(Arrays.asList(new HCatFieldSchema("",Type.ARRAY, builder.build(), ""))); } else if (Category.MAP == typeCategory) { HCatFieldSchema.Type mapKeyType = getPrimitiveHType(((MapTypeInfo)typeInfo).getMapKeyTypeInfo()); Index: src/java/org/apache/hcatalog/data/schema/HCatFieldSchema.java =================================================================== --- src/java/org/apache/hcatalog/data/schema/HCatFieldSchema.java (revision 1345356) +++ src/java/org/apache/hcatalog/data/schema/HCatFieldSchema.java (working copy) @@ -241,4 +241,28 @@ } return (typeString = sb.toString().toLowerCase()); } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof HCatFieldSchema) + return this.equals((HCatFieldSchema) that); + return false; + } + + public boolean equals(HCatFieldSchema that) { + if (that == null) + return false; + if (that.getName().equals(this.fieldName) == false) { + return false; + } + if (that.getCategory().equals(this.category) == false) { + return false; + } + if (that.getTypeString().equals(this.getTypeString()) == false) { + return false; + } + return true; + } } Index: src/java/org/apache/hcatalog/api/HCatDatabase.java =================================================================== --- src/java/org/apache/hcatalog/api/HCatDatabase.java (revision 0) +++ 
src/java/org/apache/hcatalog/api/HCatDatabase.java (working copy)
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hcatalog.api;
+
+import java.util.Map;
+
+import org.apache.hadoop.hive.metastore.api.Database;
+
+
+/**
+ * HCatDatabase is a wrapper class around org.apache.hadoop.hive.metastore.api.Database.
+ */
+public class HCatDatabase {
+
+ private String dbName;
+ private String dbLocation;
+ private String comment;
+ private Map<String, String> props;
+
+ HCatDatabase(Database db) {
+ this.dbName = db.getName();
+ this.props = db.getParameters();
+ this.dbLocation = db.getLocationUri();
+ this.comment = db.getDescription();
+ }
+
+ /**
+ * Gets the database name.
+ *
+ * @return the database name
+ */
+ public String getName(){
+ return dbName;
+ }
+
+ /**
+ * Gets the database location.
+ *
+ * @return the database location
+ */
+ public String getLocation(){
+ return dbLocation;
+ }
+
+ /**
+ * Gets the comment.
+ *
+ * @return the comment
+ */
+ public String getComment(){
+ return comment;
+ }
+
+ /**
+ * Gets the database properties.
+ *
+ * @return the database properties
+ */
+ public Map<String, String> getProperties(){
+ return props;
+ }
+
+ @Override
+ public String toString() {
+ return "HCatDatabase ["
+ + (dbName != null ? "dbName=" + dbName + ", " : "dbName=null")
+ + (dbLocation != null ? "dbLocation=" + dbLocation + ", " : "dbLocation=null")
+ + (comment != null ? "comment=" + comment + ", " : "comment=null")
+ + (props != null ? "props=" + props : "props=null") + "]";
+ }
+
+}
Index: src/java/org/apache/hcatalog/api/HCatCreateDBDesc.java
===================================================================
--- src/java/org/apache/hcatalog/api/HCatCreateDBDesc.java (revision 0)
+++ src/java/org/apache/hcatalog/api/HCatCreateDBDesc.java (working copy)
@@ -0,0 +1,193 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package org.apache.hcatalog.api; + +import java.util.Map; + +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hcatalog.common.HCatException; + +/** + * The Class HCatCreateDBDesc for defining database attributes. + */ +public class HCatCreateDBDesc { + + private String dbName; + private String locationUri; + private String comment; + private Map dbProperties; + private boolean ifNotExits = false; + + /** + * Gets the database properties. + * + * @return the database properties + */ + public Map getDatabaseProperties() { + return this.dbProperties; + } + + /** + * Gets the if not exists. + * + * @return the if not exists + */ + public boolean getIfNotExists(){ + return this.ifNotExits; + } + + /** + * Gets the comments. + * + * @return the comments + */ + public String getComments() { + return this.comment; + } + + /** + * Gets the location. + * + * @return the location + */ + public String getLocation() { + return this.locationUri; + } + + /** + * Gets the database name. + * + * @return the database name + */ + public String getDatabaseName() { + return this.dbName; + } + + private HCatCreateDBDesc(String dbName){ + this.dbName = dbName; + } + + @Override + public String toString() { + return "HCatCreateDBDesc [" + + (dbName != null ? "dbName=" + dbName + ", " : "dbName=null") + + (locationUri != null ? "location=" + locationUri + ", " + : "location=null") + + (comment != null ? "comment=" + comment + ", " : "comment=null") + + (dbProperties != null ? "dbProperties=" + dbProperties + ", " + : "dbProperties=null") + "ifNotExits=" + ifNotExits + "]"; + } + + /** + * Creates the builder for defining attributes. + * + * @param dbName the db name + * @return the builder + */ + public static Builder create(String dbName){ + return new Builder(dbName); + } + + Database toHiveDb(){ + Database hiveDB = new Database(); + hiveDB.setDescription(this.comment); + hiveDB.setLocationUri(this.locationUri); + hiveDB.setName(this.dbName); + hiveDB.setParameters(this.dbProperties); + return hiveDB; + } + + public static class Builder { + + private String innerLoc; + private String innerComment; + private Map innerDBProps; + private String dbName; + private boolean ifNotExists = false; + + private Builder(String dbName){ + this.dbName = dbName; + } + + /** + * Location. + * + * @param value the location of the database. + * @return the builder + */ + public Builder location(String value){ + this.innerLoc = value; + return this; + } + + /** + * Comment. + * + * @param value comments. + * @return the builder + */ + public Builder comment(String value){ + this.innerComment = value; + return this; + } + + /** + * If not exists. + * @param ifNotExists If set to true, hive will not throw exception, if a + * database with the same name already exists. + * @return the builder + */ + public Builder ifNotExists(boolean ifNotExists){ + this.ifNotExists = ifNotExists; + return this; + } + + /** + * Database properties. + * + * @param dbProps the database properties + * @return the builder + */ + public Builder databaseProperties(Map dbProps) { + this.innerDBProps = dbProps; + return this; + } + + + /** + * Builds the create database descriptor. 
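+ *
+ * <p>For illustration (the database name and comment below are example
+ * values, not part of this patch), a typical call chain is:</p>
+ * <pre>
+ * HCatCreateDBDesc dbDesc = HCatCreateDBDesc.create("mydb")
+ * .comment("my test database")
+ * .ifNotExists(true)
+ * .build();
+ * </pre>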
+ * + * @return An instance of HCatCreateDBDesc + * @throws HCatException + */ + public HCatCreateDBDesc build() throws HCatException { + if(this.dbName == null){ + throw new HCatException("Database name cannot be null."); + } + HCatCreateDBDesc desc = new HCatCreateDBDesc(this.dbName); + desc.comment = this.innerComment; + desc.locationUri = this.innerLoc; + desc.dbProperties = this.innerDBProps; + desc.ifNotExits = this.ifNotExists; + return desc; + + } + + } + +} Index: src/java/org/apache/hcatalog/api/HCatClientHMSImpl.java =================================================================== --- src/java/org/apache/hcatalog/api/HCatClientHMSImpl.java (revision 0) +++ src/java/org/apache/hcatalog/api/HCatClientHMSImpl.java (working copy) @@ -0,0 +1,590 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hcatalog.api; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; +import org.apache.hadoop.hive.metastore.MetaStoreUtils; +import org.apache.hadoop.hive.metastore.TableType; +import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.InvalidObjectException; +import org.apache.hadoop.hive.metastore.api.InvalidOperationException; +import org.apache.hadoop.hive.metastore.api.InvalidPartitionException; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.PartitionEventType; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.api.UnknownDBException; +import org.apache.hadoop.hive.metastore.api.UnknownPartitionException; +import org.apache.hadoop.hive.metastore.api.UnknownTableException; +import org.apache.hcatalog.common.HCatException; +import org.apache.hcatalog.common.HCatUtil; +import org.apache.thrift.TException; + +/** + * The HCatClientHMSImpl is the Hive Metastore client based implementation of HCatClient. 
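+ *
+ * <p>Instances are expected to be obtained through
+ * HCatClient.create(Configuration) rather than constructed directly; as a
+ * sketch (the configuration contents are illustrative):</p>
+ * <pre>
+ * HCatClient client = HCatClient.create(new Configuration(hcatConf));
+ * List&lt;String&gt; databases = client.listDatabaseNamesByPattern("*");
+ * client.close();
+ * </pre>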
+ */ +public class HCatClientHMSImpl extends HCatClient { + + private HiveMetaStoreClient hmsClient; + private Configuration config; + private HiveConf hiveConfig; + + @Override + public List listDatabaseNamesByPattern(String pattern) + throws HCatException { + List dbNames = null; + try { + dbNames = hmsClient.getDatabases(pattern); + } catch (MetaException exp) { + throw new HCatException("MetaException while listing db names", exp); + } + return dbNames; + } + + @Override + public HCatDatabase getDatabase(String dbName) throws HCatException { + HCatDatabase db = null; + try { + Database hiveDB = hmsClient.getDatabase(checkDB(dbName)); + if (hiveDB != null) { + db = new HCatDatabase(hiveDB); + } + } catch (NoSuchObjectException exp) { + throw new HCatException( + "NoSuchObjectException while fetching database", exp); + } catch (MetaException exp) { + throw new HCatException("MetaException while fetching database", + exp); + } catch (TException exp) { + throw new HCatException("TException while fetching database", exp); + } + return db; + } + + @Override + public void createDatabase(HCatCreateDBDesc dbInfo) throws HCatException { + + try { + hmsClient.createDatabase(dbInfo.toHiveDb()); + } catch (AlreadyExistsException exp) { + if(!dbInfo.getIfNotExists()){ + throw new HCatException( + "AlreadyExistsException while creating database", exp); + } + } catch (InvalidObjectException exp) { + throw new HCatException( + "InvalidObjectException while creating database", exp); + } catch (MetaException exp) { + throw new HCatException("MetaException while creating database", + exp); + } catch (TException exp) { + throw new HCatException("TException while creating database", exp); + } + } + + @Override + public void dropDatabase(String dbName, boolean ifExists, DROP_DB_MODE mode) + throws HCatException { + boolean isCascade; + if (mode.toString().equalsIgnoreCase("cascade")) { + isCascade = true; + } else { + isCascade = false; + } + try { + hmsClient.dropDatabase(checkDB(dbName), true, ifExists, isCascade); + } catch (NoSuchObjectException e) { + if(!ifExists){ + throw new HCatException("NoSuchObjectException while dropping db.", + e);} + } catch (InvalidOperationException e) { + throw new HCatException( + "InvalidOperationException while dropping db.", e); + } catch (MetaException e) { + throw new HCatException("MetaException while dropping db.", e); + } catch (TException e) { + throw new HCatException("TException while dropping db.", e); + } + } + + @Override + public List listTableNamesByPattern(String dbName, + String tablePattern) throws HCatException { + List tableNames = null; + try { + tableNames = hmsClient.getTables(checkDB(dbName), tablePattern); + } catch (MetaException e) { + throw new HCatException( + "MetaException while fetching table names.", e); + } + return tableNames; + } + + @Override + public HCatTable getTable(String dbName, String tableName) + throws HCatException { + HCatTable table = null; + try { + Table hiveTable = hmsClient.getTable(checkDB(dbName), tableName); + if (hiveTable != null) { + table = new HCatTable(hiveTable); + } + } catch (MetaException e) { + throw new HCatException("MetaException while fetching table.", e); + } catch (TException e) { + throw new HCatException("TException while fetching table.", e); + } catch (NoSuchObjectException e) { + throw new HCatException( + "NoSuchObjectException while fetching table.", e); + } + return table; + } + + @Override + public void createTable(HCatCreateTableDesc createTableDesc) + throws HCatException { + try { + 
hmsClient.createTable(createTableDesc.toHiveTable(hiveConfig)); + } catch (AlreadyExistsException e) { + if (createTableDesc.getIfNotExists() == false) { + throw new HCatException( + "AlreadyExistsException while creating table.", e); + } + } catch (InvalidObjectException e) { + throw new HCatException( + "InvalidObjectException while creating table.", e); + } catch (MetaException e) { + throw new HCatException("MetaException while creating table.", e); + } catch (NoSuchObjectException e) { + throw new HCatException( + "NoSuchObjectException while creating table.", e); + } catch (TException e) { + throw new HCatException("TException while creating table.", e); + } catch (IOException e) { + throw new HCatException("IOException while creating hive conf.", e); + } + + } + + @Override + public void createTableLike(String dbName, String existingTblName, + String newTableName, boolean ifNotExists, boolean isExternal, + String location) throws HCatException { + + Table hiveTable = getHiveTableLike(checkDB(dbName), existingTblName, + newTableName, ifNotExists, location); + if (hiveTable != null) { + try { + hmsClient.createTable(hiveTable); + } catch (AlreadyExistsException e) { + if (!ifNotExists) { + throw new HCatException( + "A table already exists with the name " + + newTableName, e); + } + } catch (InvalidObjectException e) { + throw new HCatException( + "InvalidObjectException in create table like command.", + e); + } catch (MetaException e) { + throw new HCatException( + "MetaException in create table like command.", e); + } catch (NoSuchObjectException e) { + throw new HCatException( + "NoSuchObjectException in create table like command.", + e); + } catch (TException e) { + throw new HCatException( + "TException in create table like command.", e); + } + } + } + + @Override + public void dropTable(String dbName, String tableName, boolean ifExists) + throws HCatException { + try { + hmsClient.dropTable(checkDB(dbName), tableName); + } catch (NoSuchObjectException e) { + if(!ifExists){ + throw new HCatException("NoSuchObjectException while dropping table.", e); + } + } catch (MetaException e) { + throw new HCatException("MetaException while dropping table.", e); + } catch (TException e) { + throw new HCatException("TException while dropping table.", e); + } + } + + @Override + public void renameTable(String dbName, String oldName, String newName) + throws HCatException { + Table tbl; + try { + Table oldtbl = hmsClient.getTable(checkDB(dbName), oldName); + if (oldtbl != null) { + //TODO : Should be moved out. 
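+ // Tables that carry a storage-handler parameter are non-native tables,
+ // and rename is not supported for them; reject the request up front.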
+ if (oldtbl.getParameters() + .get(org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_STORAGE) != null) { + throw new HCatException( + "Cannot use rename command on a non-native table"); + } + tbl = new Table(oldtbl); + tbl.setTableName(newName); + hmsClient.alter_table(checkDB(dbName), oldName, tbl); + } + } catch (MetaException e) { + throw new HCatException("MetaException while renaming table", e); + } catch (TException e) { + throw new HCatException("TException while renaming table", e); + } catch (NoSuchObjectException e) { + throw new HCatException( + "NoSuchObjectException while renaming table", e); + } catch (InvalidOperationException e) { + throw new HCatException( + "InvalidOperationException while renaming table", e); + } + } + + @Override + public List getPartitions(String dbName, String tblName) + throws HCatException { + List hcatPtns = new ArrayList(); + try { + List hivePtns = hmsClient.listPartitions(checkDB(dbName), tblName, (short) -1); + for(Partition ptn : hivePtns){ + hcatPtns.add(new HCatPartition(ptn)); + } + } catch (NoSuchObjectException e) { + throw new HCatException( + "NoSuchObjectException while retrieving partition.", e); + } catch (MetaException e) { + throw new HCatException( + "MetaException while retrieving partition.", e); + } catch (TException e) { + throw new HCatException( + "TException while retrieving partition.", e); + } + return hcatPtns; + } + + @Override + public HCatPartition getPartition(String dbName, String tableName, + String partitionName) throws HCatException { + HCatPartition partition = null; + try { + Partition hivePartition = hmsClient.getPartition(checkDB(dbName), tableName, partitionName); + if(hivePartition != null){ + partition = new HCatPartition(hivePartition); + } + } catch (MetaException e) { + throw new HCatException( + "MetaException while retrieving partition.", e); + } catch (TException e) { + throw new HCatException( + "TException while retrieving partition.", e); + } catch (UnknownTableException e) { + throw new HCatException( + "UnknownTableException while retrieving partition.", e); + } catch (NoSuchObjectException e) { + throw new HCatException( + "NoSuchObjectException while retrieving partition.", e); + } + return partition; + } + + @Override + public void addPartition(HCatAddPartitionDesc partInfo) + throws HCatException { + Table tbl = null; + try { + tbl = hmsClient.getTable(partInfo.getDatabaseName(), + partInfo.getTableName()); + //TODO: Should be moved out. 
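+ // Adding a partition is only meaningful for a partitioned table, so
+ // fail fast with a clear message rather than relying on the metastore
+ // to reject the call.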
+ if (tbl.getPartitionKeysSize() == 0) {
+ throw new HCatException(
+ "The table " + partInfo.getTableName() + " is not partitioned.");
+ }
+
+ hmsClient.add_partition(partInfo.toHivePartition(tbl));
+ } catch (InvalidObjectException e) {
+ throw new HCatException(
+ "InvalidObjectException while adding partition.", e);
+ } catch (AlreadyExistsException e) {
+ throw new HCatException(
+ "AlreadyExistsException while adding partition.", e);
+ } catch (MetaException e) {
+ throw new HCatException(
+ "MetaException while adding partition.", e);
+ } catch (TException e) {
+ throw new HCatException(
+ "TException while adding partition.", e);
+ } catch (NoSuchObjectException e) {
+ throw new HCatException(
+ "The table " + partInfo.getTableName() + " could not be found.", e);
+ }
+ }
+
+ @Override
+ public void dropPartition(String dbName, String tableName,
+ String partitionName, boolean ifExists) throws HCatException {
+ try {
+ hmsClient.dropPartition(checkDB(dbName), tableName, partitionName,
+ ifExists);
+ } catch (NoSuchObjectException e) {
+ if (!ifExists) {
+ throw new HCatException(
+ "NoSuchObjectException while dropping partition.", e);
+ }
+ } catch (MetaException e) {
+ throw new HCatException("MetaException while dropping partition.",
+ e);
+ } catch (TException e) {
+ throw new HCatException("TException while dropping partition.", e);
+ }
+ }
+
+ @Override
+ public List<HCatPartition> listPartitionsByFilter(String dbName,
+ String tblName, String filter) throws HCatException {
+ List<HCatPartition> hcatPtns = new ArrayList<HCatPartition>();
+ try {
+ List<Partition> hivePtns = hmsClient.listPartitionsByFilter(checkDB(dbName),
+ tblName, filter, (short) -1);
+ for (Partition ptn : hivePtns) {
+ hcatPtns.add(new HCatPartition(ptn));
+ }
+ } catch (MetaException e) {
+ throw new HCatException("MetaException while fetching partitions.",
+ e);
+ } catch (NoSuchObjectException e) {
+ throw new HCatException(
+ "NoSuchObjectException while fetching partitions.", e);
+ } catch (TException e) {
+ throw new HCatException("TException while fetching partitions.", e);
+ }
+ return hcatPtns;
+ }
+
+ @Override
+ public void markPartitionForEvent(String dbName, String tblName,
+ Map<String, String> partKVs, PartitionEventType eventType)
+ throws HCatException {
+ try {
+ hmsClient.markPartitionForEvent(checkDB(dbName), tblName, partKVs, eventType);
+ } catch (MetaException e) {
+ throw new HCatException(
+ "MetaException while marking partition for event.", e);
+ } catch (NoSuchObjectException e) {
+ throw new HCatException(
+ "NoSuchObjectException while marking partition for event.",
+ e);
+ } catch (UnknownTableException e) {
+ throw new HCatException(
+ "UnknownTableException while marking partition for event.",
+ e);
+ } catch (UnknownDBException e) {
+ throw new HCatException(
+ "UnknownDBException while marking partition for event.", e);
+ } catch (TException e) {
+ throw new HCatException(
+ "TException while marking partition for event.", e);
+ } catch (InvalidPartitionException e) {
+ throw new HCatException(
+ "InvalidPartitionException while marking partition for event.",
+ e);
+ } catch (UnknownPartitionException e) {
+ throw new HCatException(
+ "UnknownPartitionException while marking partition for event.",
+ e);
+ }
+ }
+
+ @Override
+ public boolean isPartitionMarkedForEvent(String dbName, String tblName,
+ Map<String, String> partKVs, PartitionEventType eventType)
+ throws HCatException {
+ boolean isMarked = false;
+ try {
+ isMarked = hmsClient.isPartitionMarkedForEvent(checkDB(dbName), tblName,
+ partKVs, eventType);
+ } catch
(MetaException e) { + throw new HCatException( + "MetaException while checking partition for event.", e); + } catch (NoSuchObjectException e) { + throw new HCatException( + "NoSuchObjectException while checking partition for event.", + e); + } catch (UnknownTableException e) { + throw new HCatException( + "UnknownTableException while checking partition for event.", + e); + } catch (UnknownDBException e) { + throw new HCatException( + "UnknownDBException while checking partition for event.", e); + } catch (TException e) { + throw new HCatException( + "TException while checking partition for event.", e); + } catch (InvalidPartitionException e) { + throw new HCatException( + "InvalidPartitionException while checking partition for event.", + e); + } catch (UnknownPartitionException e) { + throw new HCatException( + "UnknownPartitionException while checking partition for event.", + e); + } + return isMarked; + } + + @Override + public String getDelegationToken(String owner, String renewerKerberosPrincipalName) + throws HCatException { + String token = null; + try { + token = hmsClient.getDelegationToken(owner, renewerKerberosPrincipalName); + } catch (MetaException e) { + throw new HCatException( + "MetaException while getting delegation token.", e); + } catch (TException e) { + throw new HCatException( + "TException while getting delegation token.", e); + } + + return token; + } + + @Override + public long renewDelegationToken(String tokenStrForm) throws HCatException { + long time = 0; + try { + time = hmsClient.renewDelegationToken(tokenStrForm); + } catch (MetaException e) { + throw new HCatException( + "MetaException while renewing delegation token.", e); + } catch (TException e) { + throw new HCatException( + "TException while renewing delegation token.", e); + } + + return time; + } + + @Override + public void cancelDelegationToken(String tokenStrForm) throws HCatException { + try { + hmsClient.cancelDelegationToken(tokenStrForm); + } catch (MetaException e) { + throw new HCatException( + "MetaException while canceling delegation token.", e); + } catch (TException e) { + throw new HCatException( + "TException while canceling delegation token.", e); + } + } + + /* @param conf + /* @throws HCatException + * @see org.apache.hcatalog.api.HCatClient#initialize(org.apache.hadoop.conf.Configuration) + */ + @Override + void initialize(Configuration conf) throws HCatException { + this.config = conf; + try { + hiveConfig = HCatUtil.getHiveConf(config); + hmsClient = HCatUtil.createHiveClient(hiveConfig); + } catch(MetaException exp) { + throw new HCatException("MetaException while creating HMS client", exp); + } catch (IOException exp) { + throw new HCatException("IOException while creating HMS client", exp); + } + + } + + + private Table getHiveTableLike(String dbName, String existingTblName, + String newTableName, boolean isExternal, + String location) throws HCatException{ + Table oldtbl = null; + Table newTable = null; + try { + oldtbl = hmsClient.getTable(checkDB(dbName), existingTblName); + } catch (MetaException e1) { + throw new HCatException( + "MetaException while retrieving existing table.", e1); + } catch (TException e1) { + throw new HCatException( + "TException while retrieving existing table.", e1); + } catch (NoSuchObjectException e1) { + throw new HCatException( + "NoSuchObjectException while retrieving existing table.", + e1); + } + if (oldtbl != null) { + newTable = new Table(); + newTable.setTableName(newTableName); + newTable.setDbName(dbName); + StorageDescriptor sd = new 
StorageDescriptor(oldtbl.getSd()); + newTable.setSd(sd); + newTable.setParameters(oldtbl.getParameters()); + if (location == null) { + newTable.getSd().setLocation(oldtbl.getSd().getLocation()); + } else { + newTable.getSd().setLocation(location); + } + if (isExternal) { + newTable.putToParameters("EXTERNAL", "TRUE"); + newTable.setTableType(TableType.EXTERNAL_TABLE.toString()); + } else { + newTable.getParameters().remove("EXTERNAL"); + } + // set create time + newTable.setCreateTime((int) (System.currentTimeMillis() / 1000)); + newTable.setLastAccessTimeIsSet(false); + } + return newTable; + } + + /* @throws HCatException + * @see org.apache.hcatalog.api.HCatClient#closeClient() + */ + @Override + public void close() throws HCatException { + hmsClient.close(); + } + + private String checkDB(String name){ + if (StringUtils.isEmpty(name)) { + return MetaStoreUtils.DEFAULT_DATABASE_NAME; + } else { + return name; + } + } + +} Index: src/java/org/apache/hcatalog/api/HCatPartition.java =================================================================== --- src/java/org/apache/hcatalog/api/HCatPartition.java (revision 0) +++ src/java/org/apache/hcatalog/api/HCatPartition.java (working copy) @@ -0,0 +1,203 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hcatalog.api; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.Order; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hcatalog.common.HCatException; +import org.apache.hcatalog.data.schema.HCatFieldSchema; +import org.apache.hcatalog.data.schema.HCatSchemaUtils; + +/** + * The HCatPartition is a wrapper around org.apache.hadoop.hive.metastore.api.Partition. + */ +public class HCatPartition { + + private String tableName; + private String dbName; + private List values; + private List tableCols; + private int createTime; + private int lastAccessTime; + private StorageDescriptor sd; + private Map parameters; + + HCatPartition(Partition partition) throws HCatException { + this.tableName = partition.getTableName(); + this.dbName = partition.getDbName(); + this.createTime = partition.getCreateTime(); + this.lastAccessTime = partition.getLastAccessTime(); + this.parameters = partition.getParameters(); + this.values = partition.getValues(); + this.sd = partition.getSd(); + this.tableCols = new ArrayList(); + for (FieldSchema fs : this.sd.getCols()) { + this.tableCols.add(HCatSchemaUtils.getHCatFieldSchema(fs)); + } + } + + /** + * Gets the table name. 
+ *
+ * @return the table name
+ */
+ public String getTableName(){
+ return this.tableName;
+ }
+
+ /**
+ * Gets the database name.
+ *
+ * @return the database name
+ */
+ public String getDatabaseName(){
+ return this.dbName;
+ }
+
+ /**
+ * Gets the columns of the table.
+ *
+ * @return the columns
+ */
+ public List<HCatFieldSchema> getColumns(){
+ return this.tableCols;
+ }
+
+ /**
+ * Gets the input format.
+ *
+ * @return the input format
+ */
+ public String getInputFormat(){
+ return this.sd.getInputFormat();
+ }
+
+ /**
+ * Gets the output format.
+ *
+ * @return the output format
+ */
+ public String getOutputFormat(){
+ return this.sd.getOutputFormat();
+ }
+
+ /**
+ * Gets the storage handler.
+ *
+ * @return the storage handler
+ */
+ public String getStorageHandler() {
+ return this.sd
+ .getParameters()
+ .get(org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_STORAGE);
+ }
+
+ /**
+ * Gets the location.
+ *
+ * @return the location
+ */
+ public String getLocation(){
+ return this.sd.getLocation();
+ }
+
+ /**
+ * Gets the serde.
+ *
+ * @return the serde
+ */
+ public String getSerDe(){
+ return this.sd.getSerdeInfo().getSerializationLib();
+ }
+
+ public Map<String, String> getParameters() {
+ return this.parameters;
+ }
+
+ /**
+ * Gets the last access time.
+ *
+ * @return the last access time
+ */
+ public int getLastAccessTime(){
+ return this.lastAccessTime;
+ }
+
+ /**
+ * Gets the create time.
+ *
+ * @return the create time
+ */
+ public int getCreateTime() {
+ return this.createTime;
+ }
+
+ /**
+ * Gets the values.
+ *
+ * @return the values
+ */
+ public List<String> getValues(){
+ return this.values;
+ }
+
+ /**
+ * Gets the bucket columns.
+ *
+ * @return the bucket columns
+ */
+ public List<String> getBucketCols(){
+ return this.sd.getBucketCols();
+ }
+
+ /**
+ * Gets the number of buckets.
+ *
+ * @return the number of buckets
+ */
+ public int getNumBuckets(){
+ return this.sd.getNumBuckets();
+ }
+
+ /**
+ * Gets the sort columns.
+ *
+ * @return the sort columns
+ */
+ public List<Order> getSortCols(){
+ return this.sd.getSortCols();
+ }
+
+ @Override
+ public String toString() {
+ return "HCatPartition ["
+ + (tableName != null ? "tableName=" + tableName + ", " : "tableName=null")
+ + (dbName != null ? "dbName=" + dbName + ", " : "dbName=null")
+ + (values != null ? "values=" + values + ", " : "values=null")
+ + "createTime=" + createTime + ", lastAccessTime="
+ + lastAccessTime + ", " + (sd != null ? "sd=" + sd + ", " : "sd=null")
+ + (parameters != null ? "parameters=" + parameters : "parameters=null") + "]";
+ }
+
+}
Index: src/java/org/apache/hcatalog/api/HCatCreateTableDesc.java
===================================================================
--- src/java/org/apache/hcatalog/api/HCatCreateTableDesc.java (revision 0)
+++ src/java/org/apache/hcatalog/api/HCatCreateTableDesc.java (working copy)
@@ -0,0 +1,519 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hcatalog.api; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.MetaStoreUtils; +import org.apache.hadoop.hive.metastore.TableType; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.Order; +import org.apache.hadoop.hive.metastore.api.SerDeInfo; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat; +import org.apache.hadoop.hive.ql.io.RCFileInputFormat; +import org.apache.hadoop.hive.ql.io.RCFileOutputFormat; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler; +import org.apache.hadoop.hive.ql.metadata.HiveUtils; +import org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe; +import org.apache.hadoop.mapred.SequenceFileInputFormat; +import org.apache.hadoop.mapred.SequenceFileOutputFormat; +import org.apache.hadoop.mapred.TextInputFormat; +import org.apache.hcatalog.common.HCatException; +import org.apache.hcatalog.data.schema.HCatFieldSchema; +import org.apache.hcatalog.data.schema.HCatSchemaUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * The Class HCatCreateTableDesc for defining attributes for a new table. + */ +@SuppressWarnings("deprecation") +public class HCatCreateTableDesc{ + + private static final Logger LOG = LoggerFactory.getLogger(HCatCreateTableDesc.class); + + private String tableName; + private String dbName; + private boolean isExternal; + private String comment; + private String location; + private List cols; + private List partCols; + private List bucketCols; + private int numBuckets; + private List sortCols; + private Map tblProps; + private boolean ifNotExists; + private String fileFormat; + private String inputformat; + private String outputformat; + private String serde; + private String storageHandler; + + private HCatCreateTableDesc(String dbName, String tableName, List columns){ + this.dbName = dbName; + this.tableName = tableName; + this.cols = columns; + } + + /** + * Creates a builder for defining attributes. 
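+ *
+ * <p>A sketch of typical usage, mirroring the tests in this patch
+ * (database/table names and column values are examples only):</p>
+ * <pre>
+ * List&lt;HCatFieldSchema&gt; cols = new ArrayList&lt;HCatFieldSchema&gt;();
+ * cols.add(new HCatFieldSchema("id", Type.INT, "id comment"));
+ * HCatCreateTableDesc desc = HCatCreateTableDesc
+ * .create("mydb", "mytable", cols).fileFormat("rcfile").build();
+ * </pre>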
+ *
+ * @param dbName the db name
+ * @param tableName the table name
+ * @param columns the columns
+ * @return the builder
+ */
+ public static Builder create(String dbName, String tableName, List<HCatFieldSchema> columns){
+ return new Builder(dbName, tableName, columns);
+ }
+
+ Table toHiveTable(HiveConf conf) throws HCatException{
+
+ Table newTable = new Table();
+ newTable.setDbName(dbName);
+ newTable.setTableName(tableName);
+ if (tblProps != null) {
+ newTable.setParameters(tblProps);
+ }
+
+ if (isExternal) {
+ newTable.putToParameters("EXTERNAL", "TRUE");
+ newTable.setTableType(TableType.EXTERNAL_TABLE.toString());
+ } else {
+ newTable.setTableType(TableType.MANAGED_TABLE.toString());
+ }
+
+ StorageDescriptor sd = new StorageDescriptor();
+ sd.setSerdeInfo(new SerDeInfo());
+ if (location != null) {
+ sd.setLocation(location);
+ }
+ if (this.comment != null) {
+ newTable.putToParameters("comment", comment);
+ }
+ if (!StringUtils.isEmpty(fileFormat)) {
+ sd.setInputFormat(inputformat);
+ sd.setOutputFormat(outputformat);
+ if (serde != null) {
+ sd.getSerdeInfo().setSerializationLib(serde);
+ } else {
+ LOG.info("Using LazySimpleSerDe for table " + tableName);
+ sd.getSerdeInfo()
+ .setSerializationLib(
+ org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class
+ .getName());
+ }
+ } else {
+ try {
+ LOG.info("Creating instance of storage handler to get input/output, serde info.");
+ HiveStorageHandler sh = HiveUtils.getStorageHandler(conf,
+ storageHandler);
+ sd.setInputFormat(sh.getInputFormatClass().getName());
+ sd.setOutputFormat(sh.getOutputFormatClass().getName());
+ sd.getSerdeInfo().setSerializationLib(
+ sh.getSerDeClass().getName());
+ newTable.putToParameters(
+ org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_STORAGE,
+ storageHandler);
+ } catch (HiveException e) {
+ throw new HCatException(
+ "Exception while creating instance of storage handler",
+ e);
+ }
+ }
+ newTable.setSd(sd);
+ if (this.partCols != null) {
+ ArrayList<FieldSchema> hivePtnCols = new ArrayList<FieldSchema>();
+ for (HCatFieldSchema fs : this.partCols) {
+ hivePtnCols.add(HCatSchemaUtils.getFieldSchema(fs));
+ }
+ newTable.setPartitionKeys(hivePtnCols);
+ }
+
+ if (this.cols != null) {
+ ArrayList<FieldSchema> hiveTblCols = new ArrayList<FieldSchema>();
+ for (HCatFieldSchema fs : this.cols) {
+ hiveTblCols.add(HCatSchemaUtils.getFieldSchema(fs));
+ }
+ newTable.getSd().setCols(hiveTblCols);
+ }
+
+ if (this.bucketCols != null) {
+ newTable.getSd().setBucketCols(bucketCols);
+ newTable.getSd().setNumBuckets(numBuckets);
+ }
+
+ if (this.sortCols != null) {
+ newTable.getSd().setSortCols(sortCols);
+ }
+
+ newTable.setCreateTime((int) (System.currentTimeMillis() / 1000));
+ newTable.setLastAccessTimeIsSet(false);
+ return newTable;
+ }
+
+ /**
+ * Gets the if not exists.
+ *
+ * @return the if not exists
+ */
+ public boolean getIfNotExists() {
+ return this.ifNotExists;
+ }
+
+ /**
+ * Gets the table name.
+ *
+ * @return the table name
+ */
+ public String getTableName() {
+ return this.tableName;
+ }
+
+ /**
+ * Gets the cols.
+ *
+ * @return the cols
+ */
+ public List<HCatFieldSchema> getCols() {
+ return this.cols;
+ }
+
+ /**
+ * Gets the partition cols.
+ *
+ * @return the partition cols
+ */
+ public List<HCatFieldSchema> getPartitionCols() {
+ return this.partCols;
+ }
+
+ /**
+ * Gets the bucket cols.
+ *
+ * @return the bucket cols
+ */
+ public List<String> getBucketCols() {
+ return this.bucketCols;
+ }
+
+ public int getNumBuckets() {
+ return this.numBuckets;
+ }
+
+ /**
+ * Gets the comments.
+ * + * @return the comments + */ + public String getComments() { + return this.comment; + } + + /** + * Gets the storage handler. + * + * @return the storage handler + */ + public String getStorageHandler() { + return this.storageHandler; + } + + /** + * Gets the location. + * + * @return the location + */ + public String getLocation() { + return this.location; + } + + /** + * Gets the external. + * + * @return the external + */ + public boolean getExternal() { + return this.isExternal; + } + + /** + * Gets the sort cols. + * + * @return the sort cols + */ + public List getSortCols() { + return this.sortCols; + } + + /** + * Gets the tbl props. + * + * @return the tbl props + */ + public Map getTblProps() { + return this.tblProps; + } + + /** + * Gets the file format. + * + * @return the file format + */ + public String getFileFormat(){ + return this.fileFormat; + } + + /** + * Gets the database name. + * + * @return the database name + */ + public String getDatabaseName() { + return this.dbName; + } + + @Override + public String toString() { + return "HCatCreateTableDesc [" + + (tableName != null ? "tableName=" + tableName + ", " : "tableName=null") + + (dbName != null ? "dbName=" + dbName + ", " : "dbName=null") + + "isExternal=" + + isExternal + + ", " + + (comment != null ? "comment=" + comment + ", " : "comment=null") + + (location != null ? "location=" + location + ", " : "location=null") + + (cols != null ? "cols=" + cols + ", " : "cols=null") + + (partCols != null ? "partCols=" + partCols + ", " : "partCols=null") + + (bucketCols != null ? "bucketCols=" + bucketCols + ", " : "bucketCols=null") + + "numBuckets=" + + numBuckets + + ", " + + (sortCols != null ? "sortCols=" + sortCols + ", " : "sortCols=null") + + (tblProps != null ? "tblProps=" + tblProps + ", " : "tblProps=null") + + "ifNotExists=" + + ifNotExists + + ", " + + (fileFormat != null ? "fileFormat=" + fileFormat + ", " : "fileFormat=null") + + (inputformat != null ? "inputformat=" + inputformat + ", " + : "inputformat=null") + + (outputformat != null ? "outputformat=" + outputformat + ", " + : "outputformat=null") + + (serde != null ? "serde=" + serde + ", " : "serde=null") + + (storageHandler != null ? "storageHandler=" + storageHandler + : "storageHandler=null") + "]"; + } + + public static class Builder{ + + private String tableName; + private boolean isExternal; + private List cols; + private List partCols; + private List bucketCols; + private List sortCols; + private int numBuckets; + private String comment; + private String fileFormat; + private String location; + private String storageHandler; + private Map tblProps; + private boolean ifNotExists; + private String dbName; + + + private Builder(String dbName, String tableName, List columns) { + this.dbName = dbName; + this.tableName = tableName; + this.cols = columns; + } + + + /** + * If not exists. + * + * @param ifNotExists If set to true, hive will not throw exception, if a + * table with the same name already exists. + * @return the builder + */ + public Builder ifNotExists(boolean ifNotExists) { + this.ifNotExists = ifNotExists; + return this; + } + + + /** + * Partition cols. + * + * @param partCols the partition cols + * @return the builder + */ + public Builder partCols(ArrayList partCols) { + this.partCols = partCols; + return this; + } + + + /** + * Bucket cols. 
+ * + * @param bucketCols the bucket cols + * @return the builder + */ + public Builder bucketCols(ArrayList bucketCols, int buckets) { + this.bucketCols = bucketCols; + this.numBuckets = buckets; + return this; + } + + /** + * Storage handler. + * + * @param storageHandler the storage handler + * @return the builder + */ + public Builder storageHandler(String storageHandler) { + this.storageHandler = storageHandler; + return this; + } + + /** + * Location. + * + * @param location the location + * @return the builder + */ + public Builder location(String location) { + this.location = location; + return this; + } + + /** + * Comments. + * + * @param comment the comment + * @return the builder + */ + public Builder comments(String comment) { + this.comment = comment; + return this; + } + + /** + * Checks if is table external. + * + * @param isExternal the is external + * @return the builder + */ + public Builder isTableExternal(boolean isExternal) { + this.isExternal = isExternal; + return this; + } + + /** + * Sort cols. + * + * @param sortCols the sort cols + * @return the builder + */ + public Builder sortCols(ArrayList sortCols) { + this.sortCols = sortCols; + return this; + } + + /** + * Tbl props. + * + * @param tblProps the tbl props + * @return the builder + */ + public Builder tblProps(Map tblProps) { + this.tblProps = tblProps; + return this; + } + + /** + * File format. + * + * @param format the format + * @return the builder + */ + public Builder fileFormat(String format){ + this.fileFormat = format; + return this; + } + + /** + * Builds the HCatCreateTableDesc. + * + * @return HCatCreateTableDesc + * @throws HCatException + */ + public HCatCreateTableDesc build() throws HCatException { + if(this.dbName == null){ + LOG.info("Database name found null. 
Setting db to :" + + MetaStoreUtils.DEFAULT_DATABASE_NAME); + this.dbName = MetaStoreUtils.DEFAULT_DATABASE_NAME; + } + HCatCreateTableDesc desc = new HCatCreateTableDesc(this.dbName, + this.tableName, this.cols); + desc.ifNotExists = this.ifNotExists; + desc.isExternal = this.isExternal; + desc.comment = this.comment; + desc.partCols = this.partCols; + desc.bucketCols = this.bucketCols; + desc.numBuckets = this.numBuckets; + desc.location = this.location; + desc.tblProps = this.tblProps; + desc.sortCols = this.sortCols; + desc.serde = null; + if (!StringUtils.isEmpty(fileFormat)) { + desc.fileFormat = fileFormat; + if ("SequenceFile".equalsIgnoreCase(fileFormat)) { + desc.inputformat = SequenceFileInputFormat.class.getName(); + desc.outputformat = SequenceFileOutputFormat.class + .getName(); + } else if ("RCFile".equalsIgnoreCase(fileFormat)) { + desc.inputformat = RCFileInputFormat.class.getName(); + desc.outputformat = RCFileOutputFormat.class.getName(); + desc.serde = ColumnarSerDe.class.getName(); + } + desc.storageHandler = StringUtils.EMPTY; + } else if (!StringUtils.isEmpty(storageHandler)) { + desc.storageHandler = storageHandler; + } else { + desc.fileFormat = "TextFile"; + LOG.info("Using text file format for the table."); + desc.inputformat = TextInputFormat.class.getName(); + LOG.info("Table input format:" + desc.inputformat); + desc.outputformat = IgnoreKeyTextOutputFormat.class + .getName(); + LOG.info("Table output format:" + desc.outputformat); + } + return desc; + } + } +} Index: src/java/org/apache/hcatalog/api/HCatClient.java =================================================================== --- src/java/org/apache/hcatalog/api/HCatClient.java (revision 0) +++ src/java/org/apache/hcatalog/api/HCatClient.java (working copy) @@ -0,0 +1,299 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hcatalog.api; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.common.JavaUtils; +import org.apache.hadoop.hive.metastore.api.PartitionEventType; +import org.apache.hcatalog.common.HCatConstants; +import org.apache.hcatalog.common.HCatException; + +/** + * The abstract class HCatClient containing APIs for HCatalog DDL commands. + */ +public abstract class HCatClient { + + public enum DROP_DB_MODE { RESTRICT, CASCADE }; + + /** + * Creates an instance of HCatClient. + * + * @param conf An instance of configuration. + * @return An instance of HCatClient. 
Index: src/java/org/apache/hcatalog/api/HCatClient.java
===================================================================
--- src/java/org/apache/hcatalog/api/HCatClient.java (revision 0)
+++ src/java/org/apache/hcatalog/api/HCatClient.java (working copy)
@@ -0,0 +1,299 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hcatalog.api;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.common.JavaUtils;
+import org.apache.hadoop.hive.metastore.api.PartitionEventType;
+import org.apache.hcatalog.common.HCatConstants;
+import org.apache.hcatalog.common.HCatException;
+
+/**
+ * The abstract class HCatClient containing APIs for HCatalog DDL commands.
+ */
+public abstract class HCatClient {
+
+  public enum DROP_DB_MODE { RESTRICT, CASCADE }
+
+  /**
+   * Creates an instance of HCatClient.
+   *
+   * @param conf An instance of configuration.
+   * @return An instance of HCatClient.
+   * @throws IOException
+   */
+  public static HCatClient create(Configuration conf) throws IOException {
+    HCatClient client = null;
+    String className = conf.get(HCatConstants.HCAT_CLIENT_IMPL_CLASS,
+        HCatClientHMSImpl.class.getName());
+    try {
+      Class<? extends HCatClient> clientClass = Class.forName(className,
+          true, JavaUtils.getClassLoader()).asSubclass(HCatClient.class);
+      client = clientClass.newInstance();
+    } catch (ClassNotFoundException e) {
+      throw new HCatException(
+          "ClassNotFoundException while creating client class.", e);
+    } catch (InstantiationException e) {
+      throw new HCatException(
+          "InstantiationException while creating client class.", e);
+    } catch (IllegalAccessException e) {
+      throw new HCatException(
+          "IllegalAccessException while creating client class.", e);
+    }
+    if (client != null) {
+      client.initialize(conf);
+    }
+    return client;
+  }
+
+  abstract void initialize(Configuration conf) throws HCatException;
+
+  /**
+   * Gets all existing databases that match the given pattern.
+   * The matching occurs as per Java regular expressions.
+   *
+   * @param pattern the Java regex pattern
+   * @return list of database names
+   * @throws HCatException
+   */
+  public abstract List<String> listDatabaseNamesByPattern(String pattern)
+      throws HCatException;
+
+  /**
+   * Gets the database.
+   *
+   * @param dbName The name of the database.
+   * @return An instance of HCatDatabase.
+   * @throws HCatException
+   */
+  public abstract HCatDatabase getDatabase(String dbName) throws HCatException;
+
+  /**
+   * Creates the database.
+   *
+   * @param dbInfo An instance of HCatCreateDBDesc.
+   * @throws HCatException
+   */
+  public abstract void createDatabase(HCatCreateDBDesc dbInfo)
+      throws HCatException;
+
+  /**
+   * Drops a database.
+   *
+   * @param dbName The name of the database to delete.
+   * @param ifExists Hive returns an error if the database specified does not exist,
+   *          unless ifExists is set to true.
+   * @param mode This is set to either "restrict" or "cascade". Restrict drops
+   *          the database only if it contains no tables; cascade removes
+   *          everything, including data and definitions.
+   * @throws HCatException
+   */
+  public abstract void dropDatabase(String dbName, boolean ifExists,
+      DROP_DB_MODE mode) throws HCatException;
+
+  /**
+   * Returns all existing tables from the specified database which match the
+   * given pattern. The matching occurs as per Java regular expressions.
+   *
+   * @param dbName the database name
+   * @param tablePattern the Java regex pattern
+   * @return list of table names
+   * @throws HCatException
+   */
+  public abstract List<String> listTableNamesByPattern(String dbName,
+      String tablePattern) throws HCatException;
+
+  /**
+   * Gets the table.
+   *
+   * @param dbName The name of the database.
+   * @param tableName The name of the table.
+   * @return An instance of HCatTable.
+   * @throws HCatException
+   */
+  public abstract HCatTable getTable(String dbName, String tableName)
+      throws HCatException;
+
+  /**
+   * Creates the table.
+   *
+   * @param createTableDesc An instance of HCatCreateTableDesc class.
+   * @throws HCatException
+   */
+  public abstract void createTable(HCatCreateTableDesc createTableDesc)
+      throws HCatException;
+
+  /**
+   * Creates the table like an existing table.
+   *
+   * @param dbName The name of the database.
+   * @param existingTblName The name of the existing table.
+   * @param newTableName The name of the new table.
+   * @param ifNotExists If true, the error returned when a table of the same
+   *          name already exists is ignored.
+   * @param isExternal Set to true if the table is to be created at a
+   *          location other than the default.
+   * @param location The location for the table.
+   * @throws HCatException
+   */
+  public abstract void createTableLike(String dbName, String existingTblName,
+      String newTableName, boolean ifNotExists, boolean isExternal,
+      String location) throws HCatException;
+
+  /**
+   * Drops a table.
+   *
+   * @param dbName The name of the database.
+   * @param tableName The name of the table.
+   * @param ifExists Hive returns an error if the table specified does not exist,
+   *          unless ifExists is set to true.
+   * @throws HCatException
+   */
+  public abstract void dropTable(String dbName, String tableName,
+      boolean ifExists) throws HCatException;
+
+  /**
+   * Renames a table.
+   *
+   * @param dbName The name of the database.
+   * @param oldName The name of the table to be renamed.
+   * @param newName The new name of the table.
+   * @throws HCatException
+   */
+  public abstract void renameTable(String dbName, String oldName,
+      String newName) throws HCatException;
+
+  /**
+   * Gets all the partitions.
+   *
+   * @param dbName The name of the database.
+   * @param tblName The name of the table.
+   * @return A list of partitions.
+   * @throws HCatException
+   */
+  public abstract List<HCatPartition> getPartitions(String dbName, String tblName)
+      throws HCatException;
+
+  /**
+   * Gets the partition.
+   *
+   * @param dbName The database name.
+   * @param tableName The table name.
+   * @param partitionName The partition name, a comma-separated list of
+   *          col_name='value' pairs.
+   * @return An instance of HCatPartition.
+   * @throws HCatException
+   */
+  public abstract HCatPartition getPartition(String dbName, String tableName,
+      String partitionName) throws HCatException;
+
+  /**
+   * Adds the partition.
+   *
+   * @param partInfo An instance of HCatAddPartitionDesc.
+   * @throws HCatException
+   */
+  public abstract void addPartition(HCatAddPartitionDesc partInfo)
+      throws HCatException;
+
+  /**
+   * Drops a partition.
+   *
+   * @param dbName The database name.
+   * @param tableName The table name.
+   * @param partitionName The partition name, a comma-separated list of
+   *          col_name='value' pairs.
+   * @param ifExists Hive returns an error if the partition specified does not
+   *          exist, unless ifExists is set to true.
+   * @throws HCatException
+   */
+  public abstract void dropPartition(String dbName, String tableName,
+      String partitionName, boolean ifExists) throws HCatException;
+
+  /**
+   * Lists partitions by filter.
+   *
+   * @param dbName The database name.
+   * @param tblName The table name.
+   * @param filter The filter string, for example
+   *          "part1 = \"p1_abc\" and part2 <= \"p2_test\"". Filtering can
+   *          be done only on string partition keys.
+   * @return list of partitions
+   * @throws HCatException
+   */
+  public abstract List<HCatPartition> listPartitionsByFilter(String dbName,
+      String tblName, String filter) throws HCatException;
+
+  /**
+   * Marks a partition for an event.
+   *
+   * @param dbName The database name.
+   * @param tblName The table name.
+   * @param partKVs the key-values associated with the partition.
+   * @param eventType the event type
+   * @throws HCatException
+   */
+  public abstract void markPartitionForEvent(String dbName, String tblName,
+      Map<String, String> partKVs, PartitionEventType eventType)
+      throws HCatException;
+
+  /**
+   * Checks if a partition is marked for an event.
+   *
+   * @param dbName the database name
+   * @param tblName the table name
+   * @param partKVs the key-values associated with the partition.
+   * @param eventType the event type
+   * @return true if the partition is marked for the event
+   * @throws HCatException
+   */
+  public abstract boolean isPartitionMarkedForEvent(String dbName, String tblName,
+      Map<String, String> partKVs, PartitionEventType eventType)
+      throws HCatException;
+
+  /**
+   * Gets the delegation token.
+   *
+   * @param owner the owner
+   * @param renewerKerberosPrincipalName the renewer Kerberos principal name
+   * @return the delegation token
+   * @throws HCatException
+   */
+  public abstract String getDelegationToken(String owner,
+      String renewerKerberosPrincipalName) throws HCatException;
+
+  /**
+   * Renews the delegation token.
+   *
+   * @param tokenStrForm the token string
+   * @return the new expiration time
+   * @throws HCatException
+   */
+  public abstract long renewDelegationToken(String tokenStrForm)
+      throws HCatException;
+
+  /**
+   * Cancels the delegation token.
+   *
+   * @param tokenStrForm the token string
+   * @throws HCatException
+   */
+  public abstract void cancelDelegationToken(String tokenStrForm)
+      throws HCatException;
+
+  /**
+   * Closes the hcatalog client.
+   *
+   * @throws HCatException
+   */
+  public abstract void close() throws HCatException;
+}
\ No newline at end of file
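A sketch of basic usage against the abstract API above, assuming a metastore is reachable at the (illustrative) thrift URI below; the table name is likewise made up:

    import java.util.List;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hcatalog.api.HCatClient;

    public class HCatClientUsageSketch {
        public static void main(String[] args) throws Exception {
            // Point the client at a running metastore; this URI is illustrative.
            HiveConf hiveConf = new HiveConf();
            hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS,
                "thrift://localhost:9083");

            HCatClient client = HCatClient.create(new Configuration(hiveConf));
            try {
                // List every database visible to the client.
                List<String> databases = client.listDatabaseNamesByPattern("*");
                for (String db : databases) {
                    System.out.println("database: " + db);
                }
                // Drop a table; ifExists = true suppresses the missing-table error.
                client.dropTable("default", "mytable", true);
            } finally {
                client.close();
            }
        }
    }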
Index: src/java/org/apache/hcatalog/api/HCatAddPartitionDesc.java
===================================================================
--- src/java/org/apache/hcatalog/api/HCatAddPartitionDesc.java (revision 0)
+++ src/java/org/apache/hcatalog/api/HCatAddPartitionDesc.java (working copy)
@@ -0,0 +1,184 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hcatalog.api;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hcatalog.common.HCatException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * The Class HCatAddPartitionDesc helps users in defining partition attributes.
+ */
+public class HCatAddPartitionDesc {
+
+  private static final Logger LOG = LoggerFactory.getLogger(HCatAddPartitionDesc.class);
+  private String tableName;
+  private String dbName;
+  private String location;
+  private Map<String, String> partSpec;
+
+  private HCatAddPartitionDesc(String dbName, String tbl, String loc,
+      Map<String, String> spec) {
+    this.dbName = dbName;
+    this.tableName = tbl;
+    this.location = loc;
+    this.partSpec = spec;
+  }
+
+  /**
+   * Gets the location.
+   *
+   * @return the location
+   */
+  public String getLocation() {
+    return this.location;
+  }
+
+  /**
+   * Gets the partition spec.
+   *
+   * @return the partition spec
+   */
+  public Map<String, String> getPartitionSpec() {
+    return this.partSpec;
+  }
+
+  /**
+   * Gets the table name.
+   *
+   * @return the table name
+   */
+  public String getTableName() {
+    return this.tableName;
+  }
+
+  /**
+   * Gets the database name.
+   *
+   * @return the database name
+   */
+  public String getDatabaseName() {
+    return this.dbName;
+  }
+
+  @Override
+  public String toString() {
+    return "HCatAddPartitionDesc ["
+        + (tableName != null ? "tableName=" + tableName + ", " : "tableName=null")
+        + (dbName != null ? "dbName=" + dbName + ", " : "dbName=null")
+        + (location != null ? "location=" + location + ", " : "location=null")
+        + (partSpec != null ? "partSpec=" + partSpec : "partSpec=null") + "]";
+  }
+
+  /**
+   * Creates the builder for specifying attributes.
+   *
+   * @param dbName the database name
+   * @param tableName the table name
+   * @param location the location
+   * @param partSpec the partition spec
+   * @return the builder
+   * @throws HCatException
+   */
+  public static Builder create(String dbName, String tableName, String location,
+      Map<String, String> partSpec) throws HCatException {
+    return new Builder(dbName, tableName, location, partSpec);
+  }
+
+  Partition toHivePartition(Table hiveTable) throws HCatException {
+    Partition hivePtn = new Partition();
+    hivePtn.setDbName(this.dbName);
+    hivePtn.setTableName(this.tableName);
+
+    List<String> pvals = new ArrayList<String>();
+    for (FieldSchema field : hiveTable.getPartitionKeys()) {
+      String val = partSpec.get(field.getName());
+      if (val == null || val.length() == 0) {
+        throw new HCatException("create partition: Value for key "
+            + field.getName() + " is null or empty");
+      }
+      pvals.add(val);
+    }
+
+    hivePtn.setValues(pvals);
+    StorageDescriptor sd = new StorageDescriptor(hiveTable.getSd());
+    hivePtn.setSd(sd);
+    hivePtn.setParameters(hiveTable.getParameters());
+    if (this.location != null) {
+      hivePtn.getSd().setLocation(this.location);
+    } else {
+      String partName;
+      try {
+        partName = Warehouse.makePartName(
+            hiveTable.getPartitionKeys(), pvals);
+        LOG.info("Setting partition location to: " + partName);
+      } catch (MetaException e) {
+        throw new HCatException("Exception while creating partition name.", e);
+      }
+      Path partPath = new Path(hiveTable.getSd().getLocation(), partName);
+      hivePtn.getSd().setLocation(partPath.toString());
+    }
+    hivePtn.setCreateTime((int) (System.currentTimeMillis() / 1000));
+    hivePtn.setLastAccessTimeIsSet(false);
+    return hivePtn;
+  }
+
+  public static class Builder {
+
+    private String tableName;
+    private String location;
+    private Map<String, String> values;
+    private String dbName;
+
+    private Builder(String dbName, String tableName, String location,
+        Map<String, String> values) {
+      this.dbName = dbName;
+      this.tableName = tableName;
+      this.location = location;
+      this.values = values;
+    }
+
+    /**
+     * Builds the HCatAddPartitionDesc.
+     *
+     * @return the HCatAddPartitionDesc
+     * @throws HCatException
+     */
+    public HCatAddPartitionDesc build() throws HCatException {
+      if (this.dbName == null) {
+        this.dbName = MetaStoreUtils.DEFAULT_DATABASE_NAME;
+      }
+      HCatAddPartitionDesc desc = new HCatAddPartitionDesc(
+          this.dbName, this.tableName, this.location,
+          this.values);
+      return desc;
+    }
+  }
+
+}
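A sketch of adding a partition with the descriptor above. The partition key "dt", its value, and the table name are assumptions for illustration (the target table must actually be partitioned on the keys supplied), and the null location exercises the default-path branch of toHivePartition():

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hcatalog.api.HCatAddPartitionDesc;
    import org.apache.hcatalog.api.HCatClient;

    public class AddPartitionSketch {
        public static void main(String[] args) throws Exception {
            HiveConf hiveConf = new HiveConf(); // assumes metastore URI is configured
            HCatClient client = HCatClient.create(new Configuration(hiveConf));
            try {
                // One value per partition key of the target table.
                Map<String, String> partSpec = new HashMap<String, String>();
                partSpec.put("dt", "20120101");

                // A null location lets toHivePartition() derive the path from
                // the table location and the generated partition name.
                HCatAddPartitionDesc desc = HCatAddPartitionDesc
                    .create("default", "mytable", null, partSpec)
                    .build();
                client.addPartition(desc);
            } finally {
                client.close();
            }
        }
    }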
Index: src/java/org/apache/hcatalog/api/HCatTable.java
===================================================================
--- src/java/org/apache/hcatalog/api/HCatTable.java (revision 0)
+++ src/java/org/apache/hcatalog/api/HCatTable.java (working copy)
@@ -0,0 +1,226 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hcatalog.api;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.Order;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hcatalog.common.HCatException;
+import org.apache.hcatalog.data.schema.HCatFieldSchema;
+import org.apache.hcatalog.data.schema.HCatSchemaUtils;
+
+/**
+ * The HCatTable is a wrapper around org.apache.hadoop.hive.metastore.api.Table.
+ */
+public class HCatTable {
+
+  private String tableName;
+  private String tabletype;
+  private List<HCatFieldSchema> cols;
+  private List<HCatFieldSchema> partCols;
+  private List<String> bucketCols;
+  private List<Order> sortCols;
+  private int numBuckets;
+  private String inputFileFormat;
+  private String outputFileFormat;
+  private String storageHandler;
+  private Map<String, String> tblProps;
+  private String dbName;
+  private String serde;
+  private String location;
+
+  HCatTable(Table hiveTable) throws HCatException {
+    this.tableName = hiveTable.getTableName();
+    this.dbName = hiveTable.getDbName();
+    this.tabletype = hiveTable.getTableType();
+    cols = new ArrayList<HCatFieldSchema>();
+    for (FieldSchema colFS : hiveTable.getSd().getCols()) {
+      cols.add(HCatSchemaUtils.getHCatFieldSchema(colFS));
+    }
+    partCols = new ArrayList<HCatFieldSchema>();
+    for (FieldSchema colFS : hiveTable.getPartitionKeys()) {
+      partCols.add(HCatSchemaUtils.getHCatFieldSchema(colFS));
+    }
+    bucketCols = hiveTable.getSd().getBucketCols();
+    sortCols = hiveTable.getSd().getSortCols();
+    numBuckets = hiveTable.getSd().getNumBuckets();
+    inputFileFormat = hiveTable.getSd().getInputFormat();
+    outputFileFormat = hiveTable.getSd().getOutputFormat();
+    // META_TABLE_STORAGE is written to the table parameters by
+    // HCatCreateTableDesc.toHiveTable(), so it is read back from there.
+    storageHandler = hiveTable
+        .getParameters()
+        .get(org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_STORAGE);
+    tblProps = hiveTable.getParameters();
+    serde = hiveTable.getSd().getSerdeInfo().getSerializationLib();
+    location = hiveTable.getSd().getLocation();
+  }
+
+  /**
+   * Gets the table name.
+   *
+   * @return the table name
+   */
+  public String getTableName() {
+    return tableName;
+  }
+
+  /**
+   * Gets the db name.
+   *
+   * @return the db name
+   */
+  public String getDbName() {
+    return dbName;
+  }
+
+  /**
+   * Gets the columns.
+   *
+   * @return the columns
+   */
+  public List<HCatFieldSchema> getCols() {
+    return cols;
+  }
+
+  /**
+   * Gets the partition columns.
+   *
+   * @return the partition columns
+   */
+  public List<HCatFieldSchema> getPartCols() {
+    return partCols;
+  }
+
+  /**
+   * Gets the bucket columns.
+   *
+   * @return the bucket columns
+   */
+  public List<String> getBucketCols() {
+    return bucketCols;
+  }
+
+  /**
+   * Gets the sort columns.
+   *
+   * @return the sort columns
+   */
+  public List<Order> getSortCols() {
+    return sortCols;
+  }
+
+  /**
+   * Gets the number of buckets.
+   *
+   * @return the number of buckets
+   */
+  public int getNumBuckets() {
+    return numBuckets;
+  }
+
+  /**
+   * Gets the storage handler.
+   *
+   * @return the storage handler
+   */
+  public String getStorageHandler() {
+    return storageHandler;
+  }
+
+  /**
+   * Gets the table properties.
+   *
+   * @return the table properties
+   */
+  public Map<String, String> getTblProps() {
+    return tblProps;
+  }
+
+  /**
+   * Gets the table type.
+   *
+   * @return the table type
+   */
+  public String getTabletype() {
+    return tabletype;
+  }
+
+  /**
+   * Gets the input file format.
+   *
+   * @return the input file format
+   */
+  public String getInputFileFormat() {
+    return inputFileFormat;
+  }
+
+  /**
+   * Gets the output file format.
+   *
+   * @return the output file format
+   */
+  public String getOutputFileFormat() {
+    return outputFileFormat;
+  }
+
+  /**
+   * Gets the serde lib.
+   *
+   * @return the serde lib
+   */
+  public String getSerdeLib() {
+    return serde;
+  }
+
+  /**
+   * Gets the location.
+   *
+   * @return the location
+   */
+  public String getLocation() {
+    return location;
+  }
+
+  @Override
+  public String toString() {
+    return "HCatTable ["
+        + (tableName != null ? "tableName=" + tableName + ", " : "tableName=null")
+        + (dbName != null ? "dbName=" + dbName + ", " : "dbName=null")
+        + (tabletype != null ? "tabletype=" + tabletype + ", " : "tabletype=null")
+        + (cols != null ? "cols=" + cols + ", " : "cols=null")
+        + (partCols != null ? "partCols=" + partCols + ", " : "partCols=null")
+        + (bucketCols != null ? "bucketCols=" + bucketCols + ", " : "bucketCols=null")
+        + (sortCols != null ? "sortCols=" + sortCols + ", " : "sortCols=null")
+        + "numBuckets=" + numBuckets + ", "
+        + (inputFileFormat != null ? "inputFileFormat=" + inputFileFormat + ", "
+            : "inputFileFormat=null")
+        + (outputFileFormat != null ? "outputFileFormat=" + outputFileFormat + ", "
+            : "outputFileFormat=null")
+        + (storageHandler != null ? "storageHandler=" + storageHandler + ", "
+            : "storageHandler=null")
+        + (tblProps != null ? "tblProps=" + tblProps + ", " : "tblProps=null")
+        + (serde != null ? "serde=" + serde + ", " : "serde=null")
+        + (location != null ? "location=" + location : "location=null") + "]";
+  }
+}
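Finally, a sketch showing how the HCatTable wrapper above surfaces metastore metadata through its getters; the database and table names are illustrative, and the metastore URI is assumed to be configured:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hcatalog.api.HCatClient;
    import org.apache.hcatalog.api.HCatTable;

    public class InspectTableSketch {
        public static void main(String[] args) throws Exception {
            HiveConf hiveConf = new HiveConf(); // assumes metastore URI is configured
            HCatClient client = HCatClient.create(new Configuration(hiveConf));
            try {
                // Fetch the table and print the metadata captured by the wrapper.
                HCatTable table = client.getTable("default", "mytable");
                System.out.println("columns:        " + table.getCols());
                System.out.println("partition keys: " + table.getPartCols());
                System.out.println("input format:   " + table.getInputFileFormat());
                System.out.println("output format:  " + table.getOutputFileFormat());
                System.out.println("serde:          " + table.getSerdeLib());
                System.out.println("location:       " + table.getLocation());
            } finally {
                client.close();
            }
        }
    }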