diff --git hcatalog-pig-adapter/src/main/java/org/apache/hcatalog/pig/HCatLoader.java hcatalog-pig-adapter/src/main/java/org/apache/hcatalog/pig/HCatLoader.java
index 579811a..18efaed 100644
--- hcatalog-pig-adapter/src/main/java/org/apache/hcatalog/pig/HCatLoader.java
+++ hcatalog-pig-adapter/src/main/java/org/apache/hcatalog/pig/HCatLoader.java
@@ -27,7 +27,7 @@ import java.util.Properties;
 
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.mapreduce.InputFormat;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.security.Credentials;
diff --git hcatalog-pig-adapter/src/main/java/org/apache/hcatalog/pig/PigHCatUtil.java hcatalog-pig-adapter/src/main/java/org/apache/hcatalog/pig/PigHCatUtil.java
index 3550488..47fc3c1 100644
--- hcatalog-pig-adapter/src/main/java/org/apache/hcatalog/pig/PigHCatUtil.java
+++ hcatalog-pig-adapter/src/main/java/org/apache/hcatalog/pig/PigHCatUtil.java
@@ -32,7 +32,7 @@ import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hcatalog.common.HCatConstants;
 import org.apache.hcatalog.common.HCatException;
@@ -141,7 +141,7 @@ public class PigHCatUtil {
     HiveMetaStoreClient client = null;
     try {
       client = getHiveMetaClient(hcatServerUri, hcatServerPrincipal, PigHCatUtil.class);
-      table = client.getTable(dbName, tableName);
+      table = HCatUtil.getTable(client, dbName, tableName);
     } catch (NoSuchObjectException nsoe){
       throw new PigException("Table not found : " + nsoe.getMessage(), PIG_EXCEPTION_CODE); // prettier error messages to frontend
     } catch (Exception e) {
diff --git src/java/org/apache/hcatalog/common/HCatUtil.java src/java/org/apache/hcatalog/common/HCatUtil.java
index 6043618..b618c2f 100644
--- src/java/org/apache/hcatalog/common/HCatUtil.java
+++ src/java/org/apache/hcatalog/common/HCatUtil.java
@@ -39,9 +39,11 @@ import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
-import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat;
+import org.apache.hadoop.hive.ql.metadata.Partition;
+import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.plan.TableDesc;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
@@ -159,15 +161,12 @@ public class HCatUtil {
         }
     }
 
-    public static HCatSchema extractSchemaFromStorageDescriptor(
-            StorageDescriptor sd) throws HCatException {
-        if (sd == null) {
-            throw new HCatException(
-                    "Cannot construct partition info from an empty storage descriptor.");
-        }
-        HCatSchema schema = new HCatSchema(HCatUtil.getHCatFieldSchemaList(sd
-                .getCols()));
-        return schema;
+    public static HCatSchema extractSchema(Table table) throws HCatException {
+        return new HCatSchema(HCatUtil.getHCatFieldSchemaList(table.getCols()));
+    }
+
+    public static HCatSchema extractSchema(Partition partition) throws HCatException {
+        return new HCatSchema(HCatUtil.getHCatFieldSchemaList(partition.getCols()));
     }
 
     public static List<FieldSchema> getFieldSchemaList(
@@ -183,14 +182,13 @@ public class HCatUtil {
         }
     }
 
-    public static Table getTable(HiveMetaStoreClient client, String dbName,
-            String tableName) throws Exception {
-        return client.getTable(dbName, tableName);
+    public static Table getTable(HiveMetaStoreClient client, String dbName, String tableName)
+        throws NoSuchObjectException, TException, MetaException {
+        return new Table(client.getTable(dbName, tableName));
     }
 
     public static HCatSchema getTableSchemaWithPtnCols(Table table) throws IOException {
-        HCatSchema tableSchema = new HCatSchema(HCatUtil.getHCatFieldSchemaList(
-            new org.apache.hadoop.hive.ql.metadata.Table(table).getCols()));
+        HCatSchema tableSchema = new HCatSchema(HCatUtil.getHCatFieldSchemaList(table.getCols()));
 
         if (table.getPartitionKeys().size() != 0) {
 
@@ -240,7 +238,7 @@ public class HCatUtil {
            partitionKeyMap.put(field.getName().toLowerCase(), field);
        }
 
-        List<FieldSchema> tableCols = table.getSd().getCols();
+        List<FieldSchema> tableCols = table.getCols();
        List<FieldSchema> newFields = new ArrayList<FieldSchema>();
 
        for (int i = 0; i < partitionSchema.getFields().size(); i++) {
diff --git src/java/org/apache/hcatalog/mapreduce/FileOutputCommitterContainer.java src/java/org/apache/hcatalog/mapreduce/FileOutputCommitterContainer.java
index e9f3fa3..3a3be3f 100644
--- src/java/org/apache/hcatalog/mapreduce/FileOutputCommitterContainer.java
+++ src/java/org/apache/hcatalog/mapreduce/FileOutputCommitterContainer.java
@@ -32,7 +32,7 @@ import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
-import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.mapred.HCatMapRedUtil;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapreduce.JobContext;
@@ -187,9 +187,8 @@ class FileOutputCommitterContainer extends OutputCommitterContainer {
         Path src;
         OutputJobInfo jobInfo = HCatOutputFormat.getJobInfo(jobContext);
         if (dynamicPartitioningUsed){
-            src = new Path(getPartitionRootLocation(
-                jobInfo.getLocation().toString(),jobInfo.getTableInfo().getTable().getPartitionKeysSize()
-            ));
+            src = new Path(getPartitionRootLocation(jobInfo.getLocation(),
+                jobInfo.getTableInfo().getTable().getPartitionKeysSize()));
         }else{
             src = new Path(jobInfo.getLocation());
         }
@@ -243,8 +242,8 @@ class FileOutputCommitterContainer extends OutputCommitterContainer {
         OutputJobInfo jobInfo = HCatOutputFormat.getJobInfo(context);
         Configuration conf = context.getConfiguration();
 
-        Table table = jobInfo.getTableInfo().getTable();
-        Path tblPath = new Path(table.getSd().getLocation());
+        Table table = new Table(jobInfo.getTableInfo().getTable());
+        Path tblPath = new Path(table.getTTable().getSd().getLocation());
         FileSystem fs = tblPath.getFileSystem(conf);
 
         if( table.getPartitionKeys().size() == 0 ) {
@@ -280,7 +279,8 @@ class FileOutputCommitterContainer extends OutputCommitterContainer {
             HiveConf hiveConf = HCatUtil.getHiveConf(conf);
             client = HCatUtil.getHiveClient(hiveConf);
 
-            StorerInfo storer = InternalUtil.extractStorerInfo(table.getSd(),table.getParameters());
+            StorerInfo storer =
+                InternalUtil.extractStorerInfo(table.getTTable().getSd(), table.getParameters());
 
             updateTableSchema(client, table, jobInfo.getOutputSchema());
 
@@ -426,12 +426,10 @@ class FileOutputCommitterContainer extends OutputCommitterContainer {
                                       Table table, FileSystem fs,
                                       String grpName, FsPermission perms) throws IOException {
 
-        StorageDescriptor tblSD = table.getSd();
-
         Partition partition = new Partition();
         partition.setDbName(table.getDbName());
         partition.setTableName(table.getTableName());
-        partition.setSd(new StorageDescriptor(tblSD));
+        partition.setSd(new StorageDescriptor(table.getTTable().getSd()));
 
         List<FieldSchema> fields = new ArrayList<FieldSchema>();
         for(HCatFieldSchema fieldSchema : outputSchema.getFields()) {
@@ -500,7 +498,7 @@ class FileOutputCommitterContainer extends OutputCommitterContainer {
     private String getFinalDynamicPartitionDestination(Table table, Map<String, String> partKVs) {
         // file:///tmp/hcat_junit_warehouse/employee/_DYN0.7770480401313761/emp_country=IN/emp_state=KA ->
         // file:///tmp/hcat_junit_warehouse/employee/emp_country=IN/emp_state=KA
-        Path partPath = new Path(table.getSd().getLocation());
+        Path partPath = new Path(table.getTTable().getSd().getLocation());
         for(FieldSchema partKey : table.getPartitionKeys()){
             partPath = constructPartialPartPath(partPath, partKey.getName().toLowerCase(), partKVs);
         }
@@ -541,12 +539,12 @@ class FileOutputCommitterContainer extends OutputCommitterContainer {
         List<FieldSchema> newColumns = HCatUtil.validatePartitionSchema(table, partitionSchema);
 
         if( newColumns.size() != 0 ) {
-            List<FieldSchema> tableColumns = new ArrayList<FieldSchema>(table.getSd().getCols());
+            List<FieldSchema> tableColumns = new ArrayList<FieldSchema>(table.getTTable().getSd().getCols());
             tableColumns.addAll(newColumns);
 
             //Update table schema to add the newly added columns
-            table.getSd().setCols(tableColumns);
-            client.alter_table(table.getDbName(), table.getTableName(), table);
+            table.getTTable().getSd().setCols(tableColumns);
+            client.alter_table(table.getDbName(), table.getTableName(), table.getTTable());
 
         }
     }
diff --git src/java/org/apache/hcatalog/mapreduce/FileOutputFormatContainer.java src/java/org/apache/hcatalog/mapreduce/FileOutputFormatContainer.java
index 1e8d999..a550c4d 100644
--- src/java/org/apache/hcatalog/mapreduce/FileOutputFormatContainer.java
+++ src/java/org/apache/hcatalog/mapreduce/FileOutputFormatContainer.java
@@ -27,7 +27,7 @@ import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.serde2.SerDe;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.io.Writable;
@@ -120,7 +120,7 @@ class FileOutputFormatContainer extends OutputFormatContainer {
             handleDuplicatePublish(context,
                 jobInfo,
                 client,
-                jobInfo.getTableInfo().getTable());
+                new Table(jobInfo.getTableInfo().getTable()));
         } catch (MetaException e) {
             throw new IOException(e);
         } catch (TException e) {
@@ -190,7 +190,7 @@ class FileOutputFormatContainer extends OutputFormatContainer {
                 table, outputInfo.getPartitionValues());
 
             // non-partitioned table
-            Path tablePath = new Path(table.getSd().getLocation());
+            Path tablePath = new Path(table.getTTable().getSd().getLocation());
             FileSystem fs = tablePath.getFileSystem(context.getConfiguration());
 
             if ( fs.exists(tablePath) ) {
diff --git src/java/org/apache/hcatalog/mapreduce/HCatBaseOutputFormat.java src/java/org/apache/hcatalog/mapreduce/HCatBaseOutputFormat.java
index e76690f..456343f 100644
--- src/java/org/apache/hcatalog/mapreduce/HCatBaseOutputFormat.java
+++ src/java/org/apache/hcatalog/mapreduce/HCatBaseOutputFormat.java
@@ -25,6 +25,7 @@ import java.util.Map;
 
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.mapreduce.JobContext;
 import org.apache.hadoop.mapreduce.OutputFormat;
@@ -216,7 +217,8 @@ public abstract class HCatBaseOutputFormat extends OutputFormat<WritableComparable<?>, HCatRecord>
diff --git src/java/org/apache/hcatalog/mapreduce/HCatOutputFormat.java src/java/org/apache/hcatalog/mapreduce/HCatOutputFormat.java
--- src/java/org/apache/hcatalog/mapreduce/HCatOutputFormat.java
+++ src/java/org/apache/hcatalog/mapreduce/HCatOutputFormat.java
            List<String> indexList = client.listIndexNames(outputJobInfo.getDatabaseName(), outputJobInfo.getTableName(), Short.MAX_VALUE);
@@ -83,7 +84,7 @@ public class HCatOutputFormat extends HCatBaseOutputFormat {
                    throw new HCatException(ErrorType.ERROR_NOT_SUPPORTED, "Store into a table with an automatic index from Pig/Mapreduce is not supported");
                }
            }
-            StorageDescriptor sd = table.getSd();
+            StorageDescriptor sd = table.getTTable().getSd();
 
            if (sd.isCompressed()) {
                throw new HCatException(ErrorType.ERROR_NOT_SUPPORTED, "Store into a compressed partition from Pig/Mapreduce is not supported");
@@ -97,7 +98,7 @@ public class HCatOutputFormat extends HCatBaseOutputFormat {
                throw new HCatException(ErrorType.ERROR_NOT_SUPPORTED, "Store into a partition with sorted column definition from Pig/Mapreduce is not supported");
            }
 
-            if (table.getPartitionKeysSize() == 0 ){
+            if (table.getTTable().getPartitionKeysSize() == 0 ){
                if ((outputJobInfo.getPartitionValues() != null) && (!outputJobInfo.getPartitionValues().isEmpty())){
                    // attempt made to save partition values in non-partitioned table - throw error.
                    throw new HCatException(ErrorType.ERROR_INVALID_PARTITION_VALUES,
@@ -117,7 +118,7 @@ public class HCatOutputFormat extends HCatBaseOutputFormat {
                }
 
                if ((outputJobInfo.getPartitionValues() == null)
-                    || (outputJobInfo.getPartitionValues().size() < table.getPartitionKeysSize())){
+                    || (outputJobInfo.getPartitionValues().size() < table.getTTable().getPartitionKeysSize())){
                    // dynamic partition usecase - partition values were null, or not all were specified
                    // need to figure out which keys are not specified.
                    List<String> dynamicPartitioningKeys = new ArrayList<String>();
@@ -128,7 +129,7 @@ public class HCatOutputFormat extends HCatBaseOutputFormat {
                        }
                    }
 
-                    if (valueMap.size() + dynamicPartitioningKeys.size() != table.getPartitionKeysSize()){
+                    if (valueMap.size() + dynamicPartitioningKeys.size() != table.getTTable().getPartitionKeysSize()){
                        // If this isn't equal, then bogus key values have been inserted, error out.
                        throw new HCatException(ErrorType.ERROR_INVALID_PARTITION_VALUES,"Invalid partition keys specified");
                    }
@@ -148,9 +149,9 @@ public class HCatOutputFormat extends HCatBaseOutputFormat {
                outputJobInfo.setPartitionValues(valueMap);
            }
 
-            StorageDescriptor tblSD = table.getSd();
-            HCatSchema tableSchema = HCatUtil.extractSchemaFromStorageDescriptor(tblSD);
-            StorerInfo storerInfo = InternalUtil.extractStorerInfo(tblSD,table.getParameters());
+            HCatSchema tableSchema = HCatUtil.extractSchema(table);
+            StorerInfo storerInfo =
+                InternalUtil.extractStorerInfo(table.getTTable().getSd(), table.getParameters());
 
            List<String> partitionCols = new ArrayList<String>();
            for(FieldSchema schema : table.getPartitionKeys()) {
@@ -160,7 +161,7 @@ public class HCatOutputFormat extends HCatBaseOutputFormat {
            HCatStorageHandler storageHandler = HCatUtil.getStorageHandler(job.getConfiguration(), storerInfo);
 
            //Serialize the output info into the configuration
-            outputJobInfo.setTableInfo(HCatTableInfo.valueOf(table));
+            outputJobInfo.setTableInfo(HCatTableInfo.valueOf(table.getTTable()));
            outputJobInfo.setOutputSchema(tableSchema);
            harRequested = getHarRequested(hiveConf);
            outputJobInfo.setHarRequested(harRequested);
@@ -169,7 +170,7 @@ public class HCatOutputFormat extends HCatBaseOutputFormat {
 
            HCatUtil.configureOutputStorageHandler(storageHandler,job,outputJobInfo);
 
-            Path tblPath = new Path(table.getSd().getLocation());
+            Path tblPath = new Path(table.getTTable().getSd().getLocation());
 
            /* Set the umask in conf such that files/dirs get created with table-dir
             * permissions. Following three assumptions are made:
diff --git src/java/org/apache/hcatalog/mapreduce/HCatTableInfo.java src/java/org/apache/hcatalog/mapreduce/HCatTableInfo.java
index 16423fa..cf255c5 100644
--- src/java/org/apache/hcatalog/mapreduce/HCatTableInfo.java
+++ src/java/org/apache/hcatalog/mapreduce/HCatTableInfo.java
@@ -137,17 +137,17 @@ public class HCatTableInfo implements Serializable {
   * @throws IOException
   */
  static HCatTableInfo valueOf(Table table) throws IOException {
-    HCatSchema dataColumns =
-        HCatUtil.extractSchemaFromStorageDescriptor(table.getSd());
-    StorerInfo storerInfo =
+    // Explicitly use {@link org.apache.hadoop.hive.ql.metadata.Table} when getting the schema,
+    // but store {@link org.apache.hadoop.hive.metastore.api.Table} as this class is serialized
+    // into the job conf.
+    org.apache.hadoop.hive.ql.metadata.Table mTable =
+        new org.apache.hadoop.hive.ql.metadata.Table(table);
+    HCatSchema schema = HCatUtil.extractSchema(mTable);
+    StorerInfo storerInfo =
        InternalUtil.extractStorerInfo(table.getSd(), table.getParameters());
-    HCatSchema partitionColumns = HCatUtil.getPartitionColumns(table);
-    return new HCatTableInfo(table.getDbName(),
-        table.getTableName(),
-        dataColumns,
-        partitionColumns,
-        storerInfo,
-        table);
+    HCatSchema partitionColumns = HCatUtil.getPartitionColumns(mTable);
+    return new HCatTableInfo(table.getDbName(), table.getTableName(), schema,
+        partitionColumns, storerInfo, table);
  }
 
  @Override
diff --git src/java/org/apache/hcatalog/mapreduce/InitializeInput.java src/java/org/apache/hcatalog/mapreduce/InitializeInput.java
index 1baef53..b19317a 100644
--- src/java/org/apache/hcatalog/mapreduce/InitializeInput.java
+++ src/java/org/apache/hcatalog/mapreduce/InitializeInput.java
@@ -30,7 +30,7 @@ import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
-import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hcatalog.common.ErrorType;
 import org.apache.hcatalog.common.HCatConstants;
@@ -94,12 +94,12 @@ public class InitializeInput {
        hiveConf = new HiveConf(HCatInputFormat.class);
      }
      client = HCatUtil.getHiveClient(hiveConf);
-      Table table = client.getTable(inputJobInfo.getDatabaseName(),
-          inputJobInfo.getTableName());
+      Table table = HCatUtil.getTable(client, inputJobInfo.getDatabaseName(),
+          inputJobInfo.getTableName());
      List<PartInfo> partInfoList = new ArrayList<PartInfo>();
 
-      inputJobInfo.setTableInfo(HCatTableInfo.valueOf(table));
+      inputJobInfo.setTableInfo(HCatTableInfo.valueOf(table.getTTable()));
 
      if( table.getPartitionKeys().size() != 0 ) {
        //Partitioned table
        List<Partition> parts = client.listPartitionsByFilter(inputJobInfo.getDatabaseName(),
@@ -115,18 +115,19 @@ public class InitializeInput {
 
        // populate partition info
        for (Partition ptn : parts){
-          PartInfo partInfo = extractPartInfo(ptn.getSd(),ptn.getParameters(),
-              job.getConfiguration(),
-              inputJobInfo);
+          HCatSchema schema = HCatUtil.extractSchema(
+              new org.apache.hadoop.hive.ql.metadata.Partition(table, ptn));
+          PartInfo partInfo = extractPartInfo(schema, ptn.getSd(),
+              ptn.getParameters(), job.getConfiguration(), inputJobInfo);
          partInfo.setPartitionValues(createPtnKeyValueMap(table, ptn));
          partInfoList.add(partInfo);
        }
 
      }else{
        //Non partitioned table
-        PartInfo partInfo = extractPartInfo(table.getSd(),table.getParameters(),
-            job.getConfiguration(),
-            inputJobInfo);
+        HCatSchema schema = HCatUtil.extractSchema(table);
+        PartInfo partInfo = extractPartInfo(schema, table.getTTable().getSd(),
+            table.getParameters(), job.getConfiguration(), inputJobInfo);
        partInfo.setPartitionValues(new HashMap<String,String>());
        partInfoList.add(partInfo);
      }
@@ -160,29 +161,25 @@ public class InitializeInput {
      return ptnKeyValues;
    }
 
-    static PartInfo extractPartInfo(StorageDescriptor sd,
+    private static PartInfo extractPartInfo(HCatSchema schema, StorageDescriptor sd,
        Map<String, String> parameters, Configuration conf,
        InputJobInfo inputJobInfo) throws IOException{
 
-      HCatSchema schema = HCatUtil.extractSchemaFromStorageDescriptor(sd);
+
      StorerInfo storerInfo = InternalUtil.extractStorerInfo(sd,parameters);
 
      Properties hcatProperties = new Properties();
-      HCatStorageHandler storageHandler = HCatUtil.getStorageHandler(conf,
-                                                     storerInfo);
+      HCatStorageHandler storageHandler = HCatUtil.getStorageHandler(conf, storerInfo);
 
      // copy the properties from storageHandler to jobProperties
-      Map<String, String> jobProperties = HCatUtil.getInputJobProperties(
-          storageHandler,
-          inputJobInfo);
+      Map<String, String> jobProperties = HCatUtil.getInputJobProperties(storageHandler, inputJobInfo);
 
      for (String key : parameters.keySet()){
        hcatProperties.put(key, parameters.get(key));
      }
 
      // FIXME
      // Bloating partinfo with inputJobInfo is not good
-      return new PartInfo(schema, storageHandler,
-          sd.getLocation(), hcatProperties,
-          jobProperties, inputJobInfo.getTableInfo());
+      return new PartInfo(schema, storageHandler, sd.getLocation(),
+          hcatProperties, jobProperties, inputJobInfo.getTableInfo());
    }
 }
diff --git src/test/org/apache/hcatalog/common/TestHCatUtil.java src/test/org/apache/hcatalog/common/TestHCatUtil.java
index 7e658a7..b07b2a5 100644
--- src/test/org/apache/hcatalog/common/TestHCatUtil.java
+++ src/test/org/apache/hcatalog/common/TestHCatUtil.java
@@ -32,7 +32,7 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.Order;
 import org.apache.hadoop.hive.metastore.api.SerDeInfo;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
-import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.serde.Constants;
 import org.apache.hcatalog.data.schema.HCatFieldSchema;
 import org.apache.hcatalog.data.schema.HCatSchema;
@@ -120,9 +120,11 @@ public class TestHCatUtil {
        "location", "org.apache.hadoop.mapred.TextInputFormat",
        "org.apache.hadoop.mapred.TextOutputFormat", false, -1, new SerDeInfo(),
        new ArrayList<String>(), new ArrayList<Order>(), new HashMap<String, String>());
-    Table table = new Table("test_tblname", "test_dbname", "test_owner", 0, 0, 0,
-        sd, new ArrayList<FieldSchema>(), new HashMap<String, String>(),
-        "viewOriginalText", "viewExpandedText", TableType.EXTERNAL_TABLE.name());
+    org.apache.hadoop.hive.metastore.api.Table apiTable =
+        new org.apache.hadoop.hive.metastore.api.Table("test_tblname", "test_dbname", "test_owner",
+            0, 0, 0, sd, new ArrayList<FieldSchema>(), new HashMap<String, String>(),
+            "viewOriginalText", "viewExpandedText", TableType.EXTERNAL_TABLE.name());
+    Table table = new Table(apiTable);
 
    List<HCatFieldSchema> expectedHCatSchema =
        Lists.newArrayList(new HCatFieldSchema("username", HCatFieldSchema.Type.STRING, null));
@@ -133,7 +135,7 @@ public class TestHCatUtil {
    // Add a partition key & ensure its reflected in the schema.
    List<FieldSchema> partitionKeys =
        Lists.newArrayList(new FieldSchema("dt", Constants.STRING_TYPE_NAME, null));
-    table.setPartitionKeys(partitionKeys);
+    table.getTTable().setPartitionKeys(partitionKeys);
    expectedHCatSchema.add(new HCatFieldSchema("dt", HCatFieldSchema.Type.STRING, null));
 
    Assert.assertEquals(new HCatSchema(expectedHCatSchema),
        HCatUtil.getTableSchemaWithPtnCols(table));
@@ -163,9 +165,11 @@ public class TestHCatUtil {
        false, -1, serDeInfo, new ArrayList<String>(), new ArrayList<Order>(),
        new HashMap<String, String>());
-    Table table = new Table("test_tblname", "test_dbname", "test_owner", 0, 0, 0,
-        sd, new ArrayList<FieldSchema>(), new HashMap<String, String>(),
-        "viewOriginalText", "viewExpandedText", TableType.EXTERNAL_TABLE.name());
+    org.apache.hadoop.hive.metastore.api.Table apiTable =
+        new org.apache.hadoop.hive.metastore.api.Table("test_tblname", "test_dbname", "test_owner",
+            0, 0, 0, sd, new ArrayList<FieldSchema>(), new HashMap<String, String>(),
+            "viewOriginalText", "viewExpandedText", TableType.EXTERNAL_TABLE.name());
+    Table table = new Table(apiTable);
 
    List<HCatFieldSchema> expectedHCatSchema = Lists.newArrayList(
        new HCatFieldSchema("myint", HCatFieldSchema.Type.INT, null),
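Note on the pattern applied throughout the patch: the Thrift-generated org.apache.hadoop.hive.metastore.api.Table is wrapped in org.apache.hadoop.hive.ql.metadata.Table for schema access, while the wrapped Thrift object is still reached through getTTable() wherever the metastore client or serialized job state needs it. The following is a minimal standalone sketch of that usage, not part of the patch; it assumes a reachable metastore, and the database/table names are placeholders.

```java
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.ql.metadata.Table;

public class TableWrapperSketch {
  public static void main(String[] args) throws Exception {
    HiveMetaStoreClient client = new HiveMetaStoreClient(new HiveConf());

    // Fetch the Thrift table and wrap it, mirroring what HCatUtil.getTable() does in the patch.
    org.apache.hadoop.hive.metastore.api.Table apiTable =
        client.getTable("default", "some_table");  // placeholder db/table names
    Table table = new Table(apiTable);

    // Schema-level access goes through the ql.metadata wrapper ...
    System.out.println(table.getCols());
    System.out.println(table.getPartitionKeys());

    // ... while Thrift-level details (storage descriptor, alter_table calls,
    // serialization into the job conf) still use the wrapped api.Table.
    System.out.println(table.getTTable().getSd().getLocation());

    client.close();
  }
}
```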