Index: src/test/org/apache/hcatalog/pig/TestPigStorageDriver.java =================================================================== --- src/test/org/apache/hcatalog/pig/TestPigStorageDriver.java (revision 1103878) +++ src/test/org/apache/hcatalog/pig/TestPigStorageDriver.java (working copy) @@ -56,21 +56,21 @@ public class TestPigStorageDriver extends TestCase { - private HiveConf howlConf; - private Driver howlDriver; + private HiveConf hcatConf; + private Driver hcatDriver; private HiveMetaStoreClient msc; @Override protected void setUp() throws Exception { - howlConf = new HiveConf(this.getClass()); - howlConf.set(ConfVars.PREEXECHOOKS.varname, ""); - howlConf.set(ConfVars.POSTEXECHOOKS.varname, ""); - howlConf.set(ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); - howlConf.set(ConfVars.SEMANTIC_ANALYZER_HOOK.varname, HCatSemanticAnalyzer.class.getName()); - howlDriver = new Driver(howlConf); - msc = new HiveMetaStoreClient(howlConf); - SessionState.start(new CliSessionState(howlConf)); + hcatConf = new HiveConf(this.getClass()); + hcatConf.set(ConfVars.PREEXECHOOKS.varname, ""); + hcatConf.set(ConfVars.POSTEXECHOOKS.varname, ""); + hcatConf.set(ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); + hcatConf.set(ConfVars.SEMANTIC_ANALYZER_HOOK.varname, HCatSemanticAnalyzer.class.getName()); + hcatDriver = new Driver(hcatConf); + msc = new HiveMetaStoreClient(hcatConf); + SessionState.start(new CliSessionState(hcatConf)); super.setUp(); } @@ -82,33 +82,33 @@ public void testPigStorageDriver() throws IOException, CommandNeedRetryException{ - String fsLoc = howlConf.get("fs.default.name"); + String fsLoc = hcatConf.get("fs.default.name"); Path tblPath = new Path(fsLoc, "/tmp/test_pig/data"); String anyExistingFileInCurDir = "ivy.xml"; - tblPath.getFileSystem(howlConf).copyFromLocalFile(new Path(anyExistingFileInCurDir),tblPath); + tblPath.getFileSystem(hcatConf).copyFromLocalFile(new Path(anyExistingFileInCurDir),tblPath); - howlDriver.run("drop table junit_pigstorage"); + hcatDriver.run("drop table junit_pigstorage"); CommandProcessorResponse resp; String createTable = "create table junit_pigstorage (a string) partitioned by (b string) stored as RCFILE"; - resp = howlDriver.run(createTable); + resp = hcatDriver.run(createTable); assertEquals(0, resp.getResponseCode()); assertNull(resp.getErrorMessage()); - resp = howlDriver.run("alter table junit_pigstorage add partition (b='2010-10-10') location '"+new Path(fsLoc, "/tmp/test_pig")+"'"); + resp = hcatDriver.run("alter table junit_pigstorage add partition (b='2010-10-10') location '"+new Path(fsLoc, "/tmp/test_pig")+"'"); assertEquals(0, resp.getResponseCode()); assertNull(resp.getErrorMessage()); - resp = howlDriver.run("alter table junit_pigstorage partition (b='2010-10-10') set fileformat inputformat '" + RCFileInputFormat.class.getName() + resp = hcatDriver.run("alter table junit_pigstorage partition (b='2010-10-10') set fileformat inputformat '" + RCFileInputFormat.class.getName() +"' outputformat '"+RCFileOutputFormat.class.getName()+"' inputdriver '"+PigStorageInputDriver.class.getName()+"' outputdriver 'non-existent'"); assertEquals(0, resp.getResponseCode()); assertNull(resp.getErrorMessage()); - resp = howlDriver.run("desc extended junit_pigstorage partition (b='2010-10-10')"); + resp = hcatDriver.run("desc extended junit_pigstorage partition (b='2010-10-10')"); assertEquals(0, resp.getResponseCode()); assertNull(resp.getErrorMessage()); - PigServer server = new PigServer(ExecType.LOCAL, howlConf.getAllProperties()); + 
PigServer server = new PigServer(ExecType.LOCAL, hcatConf.getAllProperties()); UDFContext.getUDFContext().setClientSystemProps(); server.registerQuery(" a = load 'junit_pigstorage' using "+HCatLoader.class.getName()+";"); Iterator itr = server.openIterator("a"); @@ -131,26 +131,26 @@ } assertEquals(0,stream.available()); stream.close(); - howlDriver.run("drop table junit_pigstorage"); + hcatDriver.run("drop table junit_pigstorage"); } public void testDelim() throws MetaException, TException, UnknownTableException, NoSuchObjectException, InvalidOperationException, IOException, CommandNeedRetryException{ - howlDriver.run("drop table junit_pigstorage_delim"); + hcatDriver.run("drop table junit_pigstorage_delim"); CommandProcessorResponse resp; String createTable = "create table junit_pigstorage_delim (a string) partitioned by (b string) stored as RCFILE"; - resp = howlDriver.run(createTable); + resp = hcatDriver.run(createTable); assertEquals(0, resp.getResponseCode()); assertNull(resp.getErrorMessage()); - resp = howlDriver.run("alter table junit_pigstorage_delim add partition (b='2010-10-10')"); + resp = hcatDriver.run("alter table junit_pigstorage_delim add partition (b='2010-10-10')"); assertEquals(0, resp.getResponseCode()); assertNull(resp.getErrorMessage()); - resp = howlDriver.run("alter table junit_pigstorage_delim partition (b='2010-10-10') set fileformat inputformat '" + RCFileInputFormat.class.getName() + resp = hcatDriver.run("alter table junit_pigstorage_delim partition (b='2010-10-10') set fileformat inputformat '" + RCFileInputFormat.class.getName() +"' outputformat '"+RCFileOutputFormat.class.getName()+"' inputdriver '"+MyPigStorageDriver.class.getName()+"' outputdriver 'non-existent'"); Partition part = msc.getPartition(MetaStoreUtils.DEFAULT_DATABASE_NAME, "junit_pigstorage_delim", "b=2010-10-10"); @@ -159,7 +159,7 @@ msc.alter_partition(MetaStoreUtils.DEFAULT_DATABASE_NAME, "junit_pigstorage_delim", part); - PigServer server = new PigServer(ExecType.LOCAL, howlConf.getAllProperties()); + PigServer server = new PigServer(ExecType.LOCAL, hcatConf.getAllProperties()); UDFContext.getUDFContext().setClientSystemProps(); server.registerQuery(" a = load 'junit_pigstorage_delim' using "+HCatLoader.class.getName()+";"); try{ Index: src/test/org/apache/hcatalog/pig/TestHCatStorer.java =================================================================== --- src/test/org/apache/hcatalog/pig/TestHCatStorer.java (revision 1103878) +++ src/test/org/apache/hcatalog/pig/TestHCatStorer.java (working copy) @@ -78,7 +78,7 @@ // } // // MiniCluster.deleteFile(cluster, fileName); -// MiniCluster.createInputFile(cluster, fileName, new String[]{"test\t{([a#haddop,b#pig])}","data\t{([b#hive,a#howl])}"}); +// MiniCluster.createInputFile(cluster, fileName, new String[]{"test\t{([a#haddop,b#pig])}","data\t{([b#hive,a#hcat])}"}); // // PigServer server = new PigServer(ExecType.LOCAL, props); // UDFContext.getUDFContext().setClientSystemProps(); @@ -469,8 +469,8 @@ } MiniCluster.deleteFile(cluster, fileName); - MiniCluster.createInputFile(cluster, fileName, new String[]{"zookeeper\t(2)\t{(pig)}\t{(pnuts,hdfs)}\t{(hadoop),(howl)}", - "chubby\t(2)\t{(sawzall)}\t{(bigtable,gfs)}\t{(mapreduce),(howl)}"}); + MiniCluster.createInputFile(cluster, fileName, new String[]{"zookeeper\t(2)\t{(pig)}\t{(pnuts,hdfs)}\t{(hadoop),(hcat)}", + "chubby\t(2)\t{(sawzall)}\t{(bigtable,gfs)}\t{(mapreduce),(hcat)}"}); PigServer server = new PigServer(ExecType.LOCAL, props); UDFContext.getUDFContext().setClientSystemProps(); @@ 
-489,8 +489,8 @@ driver.getResults(res); driver.run("drop table junit_unparted"); Iterator itr = res.iterator(); - assertEquals("zookeeper\t{\"a1\":2}\t[\"pig\"]\t[{\"s1\":\"pnuts\",\"s2\":\"hdfs\"}]\t[{\"s3\":\"hadoop\"},{\"s3\":\"howl\"}]", itr.next()); - assertEquals("chubby\t{\"a1\":2}\t[\"sawzall\"]\t[{\"s1\":\"bigtable\",\"s2\":\"gfs\"}]\t[{\"s3\":\"mapreduce\"},{\"s3\":\"howl\"}]",itr.next()); + assertEquals("zookeeper\t{\"a1\":2}\t[\"pig\"]\t[{\"s1\":\"pnuts\",\"s2\":\"hdfs\"}]\t[{\"s3\":\"hadoop\"},{\"s3\":\"hcat\"}]", itr.next()); + assertEquals("chubby\t{\"a1\":2}\t[\"sawzall\"]\t[{\"s1\":\"bigtable\",\"s2\":\"gfs\"}]\t[{\"s3\":\"mapreduce\"},{\"s3\":\"hcat\"}]",itr.next()); assertFalse(itr.hasNext()); } @@ -509,7 +509,7 @@ int LOOP_SIZE = 3; String[] input = new String[LOOP_SIZE*LOOP_SIZE]; for(int i = 0; i < LOOP_SIZE*LOOP_SIZE; i++) { - input[i] = i + "\t" + i * 2.1f +"\t"+ i*1.1d + "\t" + i * 2L +"\t"+"lets howl"; + input[i] = i + "\t" + i * 2.1f +"\t"+ i*1.1d + "\t" + i * 2L +"\t"+"lets hcat"; } MiniCluster.createInputFile(cluster, fileName, input); Index: src/test/org/apache/hcatalog/rcfile/TestRCFileOutputStorageDriver.java =================================================================== --- src/test/org/apache/hcatalog/rcfile/TestRCFileOutputStorageDriver.java (revision 1103878) +++ src/test/org/apache/hcatalog/rcfile/TestRCFileOutputStorageDriver.java (working copy) @@ -60,7 +60,7 @@ bytesWritable.set(i, cu); } - //Convert byte array to HowlRecord using isd, convert howlrecord back to byte array + //Convert byte array to HCatRecord using isd, convert hcatrecord back to byte array //using osd, compare the two arrays HCatRecord record = isd.convertToHCatRecord(null, bytesWritable); @@ -77,7 +77,7 @@ private byte[][] buildBytesArray() throws UnsupportedEncodingException { byte[][] bytes = {"123".getBytes("UTF-8"), "456".getBytes("UTF-8"), "789".getBytes("UTF-8"), "1000".getBytes("UTF-8"), - "5.3".getBytes("UTF-8"), "howl and hadoop".getBytes("UTF-8"), + "5.3".getBytes("UTF-8"), "hcat and hadoop".getBytes("UTF-8"), new byte[0], "\\N".getBytes("UTF-8") }; return bytes; } Index: src/test/org/apache/hcatalog/mapreduce/TestHCatPartitioned.java =================================================================== --- src/test/org/apache/hcatalog/mapreduce/TestHCatPartitioned.java (revision 1103878) +++ src/test/org/apache/hcatalog/mapreduce/TestHCatPartitioned.java (working copy) @@ -42,7 +42,7 @@ @Override protected void initialize() throws Exception { - tableName = "testHowlPartitionedTable"; + tableName = "testHCatPartitionedTable"; writeRecords = new ArrayList(); for(int i = 0;i < 20;i++) { @@ -75,7 +75,7 @@ } - public void testHowlPartitionedTable() throws Exception { + public void testHCatPartitionedTable() throws Exception { Map partitionMap = new HashMap(); partitionMap.put("part1", "p1value1"); @@ -297,7 +297,7 @@ runMRRead(70); } - //Test that data inserted through howloutputformat is readable from hive + //Test that data inserted through hcatoutputformat is readable from hive private void hiveReadTest() throws Exception { String query = "select * from " + tableName; Index: src/test/org/apache/hcatalog/mapreduce/HCatMapReduceTest.java =================================================================== --- src/test/org/apache/hcatalog/mapreduce/HCatMapReduceTest.java (revision 1103878) +++ src/test/org/apache/hcatalog/mapreduce/HCatMapReduceTest.java (working copy) @@ -66,13 +66,13 @@ import org.apache.hcatalog.rcfile.RCFileOutputDriver; /** - * Test for 
HowlOutputFormat. Writes a partition using HowlOutputFormat and reads + * Test for HCatOutputFormat. Writes a partition using HCatOutputFormat and reads * it back using HCatInputFormat, checks the column values and counts. */ public abstract class HCatMapReduceTest extends TestCase { protected String dbName = "default"; - protected String tableName = "testHowlMapReduceTable"; + protected String tableName = "testHCatMapReduceTable"; protected String inputFormat = RCFileInputFormat.class.getName(); protected String outputFormat = RCFileOutputFormat.class.getName(); @@ -249,14 +249,14 @@ MapCreate.writeCount = 0; Configuration conf = new Configuration(); - Job job = new Job(conf, "howl mapreduce write test"); + Job job = new Job(conf, "hcat mapreduce write test"); job.setJarByClass(this.getClass()); job.setMapperClass(HCatMapReduceTest.MapCreate.class); // input/output settings job.setInputFormatClass(TextInputFormat.class); - Path path = new Path(fs.getWorkingDirectory(), "mapred/testHowlMapReduceInput"); + Path path = new Path(fs.getWorkingDirectory(), "mapred/testHCatMapReduceInput"); createInputFile(path, writeCount); TextInputFormat.setInputPaths(job, path); @@ -273,7 +273,7 @@ HCatOutputFormat.setSchema(job, new HCatSchema(partitionColumns)); - //new HowlOutputCommitter(null).setupJob(job); + //new HCatOutputCommitter(null).setupJob(job); job.waitForCompletion(true); new HCatOutputCommitter(null).cleanupJob(job); Assert.assertEquals(writeCount, MapCreate.writeCount); @@ -289,7 +289,7 @@ readRecords.clear(); Configuration conf = new Configuration(); - Job job = new Job(conf, "howl mapreduce read test"); + Job job = new Job(conf, "hcat mapreduce read test"); job.setJarByClass(this.getClass()); job.setMapperClass(HCatMapReduceTest.MapRead.class); @@ -306,7 +306,7 @@ job.setNumReduceTasks(0); - Path path = new Path(fs.getWorkingDirectory(), "mapred/testHowlMapReduceOutput"); + Path path = new Path(fs.getWorkingDirectory(), "mapred/testHCatMapReduceOutput"); if( fs.exists(path) ) { fs.delete(path, true); } @@ -323,7 +323,7 @@ protected HCatSchema getTableSchema() throws Exception { Configuration conf = new Configuration(); - Job job = new Job(conf, "howl mapreduce read schema test"); + Job job = new Job(conf, "hcat mapreduce read schema test"); job.setJarByClass(this.getClass()); // input/output settings Index: src/test/org/apache/hcatalog/mapreduce/TestHCatHiveCompatibility.java =================================================================== --- src/test/org/apache/hcatalog/mapreduce/TestHCatHiveCompatibility.java (revision 1103878) +++ src/test/org/apache/hcatalog/mapreduce/TestHCatHiveCompatibility.java (working copy) @@ -89,7 +89,7 @@ throw new IOException("Failed to create table."); } - // assert that the table created has no howl instrumentation, and that we're still able to read it. + // assert that the table created has no hcat instrumentation, and that we're still able to read it. 
Table table = client.getTable("default", "junit_unparted_noisd"); assertFalse(table.getParameters().containsKey(HCatConstants.HCAT_ISD_CLASS)); assertTrue(table.getSd().getInputFormat().equals(HCatConstants.HIVE_RCFILE_IF_CLASS)); @@ -113,7 +113,7 @@ assertFalse(itr.hasNext()); assertEquals(11, i); - // assert that the table created still has no howl instrumentation + // assert that the table created still has no hcat instrumentation Table table2 = client.getTable("default", "junit_unparted_noisd"); assertFalse(table2.getParameters().containsKey(HCatConstants.HCAT_ISD_CLASS)); assertTrue(table2.getSd().getInputFormat().equals(HCatConstants.HIVE_RCFILE_IF_CLASS)); @@ -130,7 +130,7 @@ throw new IOException("Failed to create table."); } - // assert that the table created has no howl instrumentation, and that we're still able to read it. + // assert that the table created has no hcat instrumentation, and that we're still able to read it. Table table = client.getTable("default", "junit_parted_noisd"); assertFalse(table.getParameters().containsKey(HCatConstants.HCAT_ISD_CLASS)); @@ -156,12 +156,12 @@ assertFalse(itr.hasNext()); assertEquals(11, i); - // assert that the table created still has no howl instrumentation + // assert that the table created still has no hcat instrumentation Table table2 = client.getTable("default", "junit_parted_noisd"); assertFalse(table2.getParameters().containsKey(HCatConstants.HCAT_ISD_CLASS)); assertTrue(table2.getSd().getInputFormat().equals(HCatConstants.HIVE_RCFILE_IF_CLASS)); - // assert that there is one partition present, and it had howl instrumentation inserted when it was created. + // assert that there is one partition present, and it had hcat instrumentation inserted when it was created. Partition ptn = client.getPartition("default", "junit_parted_noisd", Arrays.asList("42")); assertNotNull(ptn); Index: src/test/org/apache/hcatalog/mapreduce/TestHCatOutputFormat.java =================================================================== --- src/test/org/apache/hcatalog/mapreduce/TestHCatOutputFormat.java (revision 1103878) +++ src/test/org/apache/hcatalog/mapreduce/TestHCatOutputFormat.java (working copy) @@ -53,8 +53,8 @@ private HiveMetaStoreClient client; private HiveConf hiveConf; - private static final String dbName = "howlOutputFormatTestDB"; - private static final String tblName = "howlOutputFormatTestTable"; + private static final String dbName = "hcatOutputFormatTestDB"; + private static final String tblName = "hcatOutputFormatTestTable"; @Override protected void setUp() throws Exception { Index: src/test/org/apache/hcatalog/mapreduce/TestHCatNonPartitioned.java =================================================================== --- src/test/org/apache/hcatalog/mapreduce/TestHCatNonPartitioned.java (revision 1103878) +++ src/test/org/apache/hcatalog/mapreduce/TestHCatNonPartitioned.java (working copy) @@ -42,7 +42,7 @@ protected void initialize() throws HCatException { dbName = null; //test if null dbName works ("default" is used) - tableName = "testHowlNonPartitionedTable"; + tableName = "testHCatNonPartitionedTable"; writeRecords = new ArrayList(); @@ -75,7 +75,7 @@ } - public void testHowlNonPartitionedTable() throws Exception { + public void testHCatNonPartitionedTable() throws Exception { Map partitionMap = new HashMap(); runMRCreate(null, partitionColumns, writeRecords, 10); @@ -113,7 +113,7 @@ hiveReadTest(); } - //Test that data inserted through howloutputformat is readable from hive + //Test that data inserted through hcatoutputformat is 
readable from hive private void hiveReadTest() throws Exception { String query = "select * from " + tableName; Index: src/test/org/apache/hcatalog/cli/TestPermsGrp.java =================================================================== --- src/test/org/apache/hcatalog/cli/TestPermsGrp.java (revision 1103878) +++ src/test/org/apache/hcatalog/cli/TestPermsGrp.java (working copy) @@ -52,7 +52,7 @@ private boolean isServerRunning = false; private static final String msPort = "20101"; - private HiveConf howlConf; + private HiveConf hcatConf; private Warehouse clientWH; private Thread t; private HiveMetaStoreClient msc; @@ -87,17 +87,17 @@ securityManager = System.getSecurityManager(); System.setSecurityManager(new NoExitSecurityManager()); - howlConf = new HiveConf(this.getClass()); - howlConf.set("hive.metastore.local", "false"); - howlConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + msPort); - howlConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTRETRIES, 3); + hcatConf = new HiveConf(this.getClass()); + hcatConf.set("hive.metastore.local", "false"); + hcatConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + msPort); + hcatConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTRETRIES, 3); - howlConf.set(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname, HCatSemanticAnalyzer.class.getName()); - howlConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - howlConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); - howlConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); - clientWH = new Warehouse(howlConf); - msc = new HiveMetaStoreClient(howlConf,null); + hcatConf.set(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname, HCatSemanticAnalyzer.class.getName()); + hcatConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); + hcatConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); + hcatConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); + clientWH = new Warehouse(hcatConf); + msc = new HiveMetaStoreClient(hcatConf,null); System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " "); System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " "); } @@ -126,12 +126,12 @@ assertEquals(((ExitException)e).getStatus(), 0); } dfsPath = clientWH.getDefaultTablePath(dbName, tblName); - assertTrue(dfsPath.getFileSystem(howlConf).getFileStatus(dfsPath).getPermission().equals(FsPermission.valueOf("drwx-wx---"))); + assertTrue(dfsPath.getFileSystem(hcatConf).getFileStatus(dfsPath).getPermission().equals(FsPermission.valueOf("drwx-wx---"))); cleanupTbl(dbName, tblName, typeName); // User specified perms in invalid format. - howlConf.set(HCatConstants.HCAT_PERMS, "rwx"); + hcatConf.set(HCatConstants.HCAT_PERMS, "rwx"); // make sure create table fails. try{ HCatCli.main(new String[]{"-e","create table simptbl (name string) stored as RCFILE", "-p","rwx"}); @@ -142,7 +142,7 @@ // No physical dir gets created. 
dfsPath = clientWH.getDefaultTablePath(MetaStoreUtils.DEFAULT_DATABASE_NAME,tblName); try{ - dfsPath.getFileSystem(howlConf).getFileStatus(dfsPath); + dfsPath.getFileSystem(hcatConf).getFileStatus(dfsPath); assert false; } catch(Exception fnfe){ assertTrue(fnfe instanceof FileNotFoundException); @@ -158,8 +158,8 @@ } // test for invalid group name - howlConf.set(HCatConstants.HCAT_PERMS, "drw-rw-rw-"); - howlConf.set(HCatConstants.HCAT_GROUP, "THIS_CANNOT_BE_A_VALID_GRP_NAME_EVER"); + hcatConf.set(HCatConstants.HCAT_PERMS, "drw-rw-rw-"); + hcatConf.set(HCatConstants.HCAT_GROUP, "THIS_CANNOT_BE_A_VALID_GRP_NAME_EVER"); try{ // create table must fail. @@ -179,7 +179,7 @@ } try{ // neither dir should get created. - dfsPath.getFileSystem(howlConf).getFileStatus(dfsPath); + dfsPath.getFileSystem(hcatConf).getFileStatus(dfsPath); assert false; } catch(Exception e){ assertTrue(e instanceof FileNotFoundException); Index: src/test/org/apache/hcatalog/cli/TestUseDatabase.java =================================================================== --- src/test/org/apache/hcatalog/cli/TestUseDatabase.java (revision 1103878) +++ src/test/org/apache/hcatalog/cli/TestUseDatabase.java (working copy) @@ -33,19 +33,19 @@ /* Unit test for GitHub Howl issue #3 */ public class TestUseDatabase extends TestCase{ - private Driver howlDriver; + private Driver hcatDriver; @Override protected void setUp() throws Exception { - HiveConf howlConf = new HiveConf(this.getClass()); - howlConf.set(ConfVars.PREEXECHOOKS.varname, ""); - howlConf.set(ConfVars.POSTEXECHOOKS.varname, ""); - howlConf.set(ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); + HiveConf hcatConf = new HiveConf(this.getClass()); + hcatConf.set(ConfVars.PREEXECHOOKS.varname, ""); + hcatConf.set(ConfVars.POSTEXECHOOKS.varname, ""); + hcatConf.set(ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); - howlConf.set(ConfVars.SEMANTIC_ANALYZER_HOOK.varname, HCatSemanticAnalyzer.class.getName()); - howlDriver = new Driver(howlConf); - SessionState.start(new CliSessionState(howlConf)); + hcatConf.set(ConfVars.SEMANTIC_ANALYZER_HOOK.varname, HCatSemanticAnalyzer.class.getName()); + hcatDriver = new Driver(hcatConf); + SessionState.start(new CliSessionState(hcatConf)); } String query; @@ -54,23 +54,23 @@ public void testAlterTablePass() throws IOException, CommandNeedRetryException{ - howlDriver.run("create database " + dbName); - howlDriver.run("use " + dbName); - howlDriver.run("create table " + tblName + " (a int) partitioned by (b string) stored as RCFILE"); + hcatDriver.run("create database " + dbName); + hcatDriver.run("use " + dbName); + hcatDriver.run("create table " + tblName + " (a int) partitioned by (b string) stored as RCFILE"); CommandProcessorResponse response; - response = howlDriver.run("alter table " + tblName + " add partition (b='2') location '/tmp'"); + response = hcatDriver.run("alter table " + tblName + " add partition (b='2') location '/tmp'"); assertEquals(0, response.getResponseCode()); assertNull(response.getErrorMessage()); - response = howlDriver.run("alter table " + tblName + " set fileformat INPUTFORMAT 'org.apache.hadoop.hive.ql.io.RCFileInputFormat' OUTPUTFORMAT " + + response = hcatDriver.run("alter table " + tblName + " set fileformat INPUTFORMAT 'org.apache.hadoop.hive.ql.io.RCFileInputFormat' OUTPUTFORMAT " + "'org.apache.hadoop.hive.ql.io.RCFileOutputFormat' inputdriver 'mydriver' outputdriver 'yourdriver'"); assertEquals(0, response.getResponseCode()); assertNull(response.getErrorMessage()); - howlDriver.run("drop table " + 
tblName); - howlDriver.run("drop database " + dbName); + hcatDriver.run("drop table " + tblName); + hcatDriver.run("drop database " + dbName); } } Index: src/test/org/apache/hcatalog/cli/TestSemanticAnalysis.java =================================================================== --- src/test/org/apache/hcatalog/cli/TestSemanticAnalysis.java (revision 1103878) +++ src/test/org/apache/hcatalog/cli/TestSemanticAnalysis.java (working copy) @@ -50,26 +50,26 @@ public class TestSemanticAnalysis extends TestCase{ - private Driver howlDriver; + private Driver hcatDriver; private Driver hiveDriver; private HiveMetaStoreClient msc; @Override protected void setUp() throws Exception { - HiveConf howlConf = new HiveConf(this.getClass()); - howlConf.set(ConfVars.PREEXECHOOKS.varname, ""); - howlConf.set(ConfVars.POSTEXECHOOKS.varname, ""); - howlConf.set(ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); + HiveConf hcatConf = new HiveConf(this.getClass()); + hcatConf.set(ConfVars.PREEXECHOOKS.varname, ""); + hcatConf.set(ConfVars.POSTEXECHOOKS.varname, ""); + hcatConf.set(ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); - HiveConf hiveConf = new HiveConf(howlConf,this.getClass()); + HiveConf hiveConf = new HiveConf(hcatConf,this.getClass()); hiveDriver = new Driver(hiveConf); - howlConf.set(ConfVars.SEMANTIC_ANALYZER_HOOK.varname, HCatSemanticAnalyzer.class.getName()); - howlDriver = new Driver(howlConf); + hcatConf.set(ConfVars.SEMANTIC_ANALYZER_HOOK.varname, HCatSemanticAnalyzer.class.getName()); + hcatDriver = new Driver(hcatConf); - msc = new HiveMetaStoreClient(howlConf); - SessionState.start(new CliSessionState(howlConf)); + msc = new HiveMetaStoreClient(hcatConf); + SessionState.start(new CliSessionState(hcatConf)); } String query; @@ -80,7 +80,7 @@ hiveDriver.run("drop table junit_sem_analysis"); hiveDriver.run("create table junit_sem_analysis (a int) partitioned by (b string) stored as TEXTFILE"); hiveDriver.run("alter table junit_sem_analysis add partition (b='2010-10-10')"); - howlDriver.run("alter table junit_sem_analysis partition (b='2010-10-10') set fileformat RCFILE"); + hcatDriver.run("alter table junit_sem_analysis partition (b='2010-10-10') set fileformat RCFILE"); Table tbl = msc.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tblName); assertEquals(TextInputFormat.class.getName(),tbl.getSd().getInputFormat()); @@ -100,7 +100,7 @@ assertEquals(RCFileInputDriver.class.getName(), partParams.get(HCatConstants.HCAT_ISD_CLASS)); assertEquals(RCFileOutputDriver.class.getName(), partParams.get(HCatConstants.HCAT_OSD_CLASS)); - howlDriver.run("drop table junit_sem_analysis"); + hcatDriver.run("drop table junit_sem_analysis"); } public void testDatabaseOperations() throws MetaException, CommandNeedRetryException { @@ -110,30 +110,30 @@ String testDb2 = "testdatabaseoperatons2"; if (dbs.contains(testDb1.toLowerCase())){ - assertEquals(0,howlDriver.run("drop database "+testDb1).getResponseCode()); + assertEquals(0,hcatDriver.run("drop database "+testDb1).getResponseCode()); } if (dbs.contains(testDb2.toLowerCase())){ - assertEquals(0,howlDriver.run("drop database "+testDb2).getResponseCode()); + assertEquals(0,hcatDriver.run("drop database "+testDb2).getResponseCode()); } - assertEquals(0,howlDriver.run("create database "+testDb1).getResponseCode()); + assertEquals(0,hcatDriver.run("create database "+testDb1).getResponseCode()); assertTrue(msc.getAllDatabases().contains(testDb1)); - assertEquals(0,howlDriver.run("create database if not exists "+testDb1).getResponseCode()); + 
assertEquals(0,hcatDriver.run("create database if not exists "+testDb1).getResponseCode()); assertTrue(msc.getAllDatabases().contains(testDb1)); - assertEquals(0,howlDriver.run("create database if not exists "+testDb2).getResponseCode()); + assertEquals(0,hcatDriver.run("create database if not exists "+testDb2).getResponseCode()); assertTrue(msc.getAllDatabases().contains(testDb2)); - assertEquals(0,howlDriver.run("drop database "+testDb1).getResponseCode()); - assertEquals(0,howlDriver.run("drop database "+testDb2).getResponseCode()); + assertEquals(0,hcatDriver.run("drop database "+testDb1).getResponseCode()); + assertEquals(0,hcatDriver.run("drop database "+testDb2).getResponseCode()); assertFalse(msc.getAllDatabases().contains(testDb1)); assertFalse(msc.getAllDatabases().contains(testDb2)); } public void testCreateTableIfNotExists() throws MetaException, TException, NoSuchObjectException, CommandNeedRetryException{ - howlDriver.run("drop table "+tblName); - howlDriver.run("create table junit_sem_analysis (a int) stored as RCFILE"); + hcatDriver.run("drop table "+tblName); + hcatDriver.run("create table junit_sem_analysis (a int) stored as RCFILE"); Table tbl = msc.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tblName); List cols = tbl.getSd().getCols(); assertEquals(1, cols.size()); @@ -144,7 +144,7 @@ assertEquals(RCFileInputDriver.class.getName(), tblParams.get(HCatConstants.HCAT_ISD_CLASS)); assertEquals(RCFileOutputDriver.class.getName(), tblParams.get(HCatConstants.HCAT_OSD_CLASS)); - CommandProcessorResponse resp = howlDriver.run("create table if not exists junit_sem_analysis (a int) stored as RCFILE"); + CommandProcessorResponse resp = hcatDriver.run("create table if not exists junit_sem_analysis (a int) stored as RCFILE"); assertEquals(0, resp.getResponseCode()); assertNull(resp.getErrorMessage()); tbl = msc.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tblName); @@ -157,50 +157,50 @@ tblParams = tbl.getParameters(); assertEquals(RCFileInputDriver.class.getName(), tblParams.get(HCatConstants.HCAT_ISD_CLASS)); assertEquals(RCFileOutputDriver.class.getName(), tblParams.get(HCatConstants.HCAT_OSD_CLASS)); - howlDriver.run("drop table junit_sem_analysis"); + hcatDriver.run("drop table junit_sem_analysis"); } public void testAlterTblTouch() throws CommandNeedRetryException{ - howlDriver.run("drop table junit_sem_analysis"); - howlDriver.run("create table junit_sem_analysis (a int) partitioned by (b string) stored as RCFILE"); - CommandProcessorResponse response = howlDriver.run("alter table junit_sem_analysis touch"); + hcatDriver.run("drop table junit_sem_analysis"); + hcatDriver.run("create table junit_sem_analysis (a int) partitioned by (b string) stored as RCFILE"); + CommandProcessorResponse response = hcatDriver.run("alter table junit_sem_analysis touch"); assertEquals(10, response.getResponseCode()); assertTrue(response.getErrorMessage().contains("Operation not supported.")); - howlDriver.run("alter table junit_sem_analysis touch partition (b='12')"); + hcatDriver.run("alter table junit_sem_analysis touch partition (b='12')"); assertEquals(10, response.getResponseCode()); assertTrue(response.getErrorMessage().contains("Operation not supported.")); - howlDriver.run("drop table junit_sem_analysis"); + hcatDriver.run("drop table junit_sem_analysis"); } public void testChangeColumns() throws CommandNeedRetryException{ - howlDriver.run("drop table junit_sem_analysis"); - howlDriver.run("create table junit_sem_analysis (a int, c string) partitioned by (b string) stored as 
RCFILE"); - CommandProcessorResponse response = howlDriver.run("alter table junit_sem_analysis change a a1 int"); + hcatDriver.run("drop table junit_sem_analysis"); + hcatDriver.run("create table junit_sem_analysis (a int, c string) partitioned by (b string) stored as RCFILE"); + CommandProcessorResponse response = hcatDriver.run("alter table junit_sem_analysis change a a1 int"); assertEquals(10, response.getResponseCode()); assertTrue(response.getErrorMessage().contains("Operation not supported.")); - response = howlDriver.run("alter table junit_sem_analysis change a a string"); + response = hcatDriver.run("alter table junit_sem_analysis change a a string"); assertEquals(10, response.getResponseCode()); assertTrue(response.getErrorMessage().contains("Operation not supported.")); - response = howlDriver.run("alter table junit_sem_analysis change a a int after c"); + response = hcatDriver.run("alter table junit_sem_analysis change a a int after c"); assertEquals(10, response.getResponseCode()); assertTrue(response.getErrorMessage().contains("Operation not supported.")); - howlDriver.run("drop table junit_sem_analysis"); + hcatDriver.run("drop table junit_sem_analysis"); } public void testAddReplaceCols() throws IOException, MetaException, TException, NoSuchObjectException, CommandNeedRetryException{ - howlDriver.run("drop table junit_sem_analysis"); - howlDriver.run("create table junit_sem_analysis (a int, c string) partitioned by (b string) stored as RCFILE"); - CommandProcessorResponse response = howlDriver.run("alter table junit_sem_analysis replace columns (a1 tinyint)"); + hcatDriver.run("drop table junit_sem_analysis"); + hcatDriver.run("create table junit_sem_analysis (a int, c string) partitioned by (b string) stored as RCFILE"); + CommandProcessorResponse response = hcatDriver.run("alter table junit_sem_analysis replace columns (a1 tinyint)"); assertEquals(10, response.getResponseCode()); assertTrue(response.getErrorMessage().contains("Operation not supported.")); - response = howlDriver.run("alter table junit_sem_analysis add columns (d tinyint)"); + response = hcatDriver.run("alter table junit_sem_analysis add columns (d tinyint)"); assertEquals(0, response.getResponseCode()); assertNull(response.getErrorMessage()); Table tbl = msc.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tblName); @@ -209,23 +209,23 @@ assertTrue(cols.get(0).equals(new FieldSchema("a", "int", "from deserializer"))); assertTrue(cols.get(1).equals(new FieldSchema("c", "string", "from deserializer"))); assertTrue(cols.get(2).equals(new FieldSchema("d", "tinyint", null))); - howlDriver.run("drop table junit_sem_analysis"); + hcatDriver.run("drop table junit_sem_analysis"); } public void testAlterTblClusteredBy() throws CommandNeedRetryException{ - howlDriver.run("drop table junit_sem_analysis"); - howlDriver.run("create table junit_sem_analysis (a int) partitioned by (b string) stored as RCFILE"); - CommandProcessorResponse response = howlDriver.run("alter table junit_sem_analysis clustered by (a) into 7 buckets"); + hcatDriver.run("drop table junit_sem_analysis"); + hcatDriver.run("create table junit_sem_analysis (a int) partitioned by (b string) stored as RCFILE"); + CommandProcessorResponse response = hcatDriver.run("alter table junit_sem_analysis clustered by (a) into 7 buckets"); assertEquals(10, response.getResponseCode()); assertTrue(response.getErrorMessage().contains("Operation not supported.")); - howlDriver.run("drop table junit_sem_analysis"); + hcatDriver.run("drop table junit_sem_analysis"); } 
public void testAlterTableSetFF() throws IOException, MetaException, TException, NoSuchObjectException, CommandNeedRetryException{ - howlDriver.run("drop table junit_sem_analysis"); - howlDriver.run("create table junit_sem_analysis (a int) partitioned by (b string) stored as RCFILE"); + hcatDriver.run("drop table junit_sem_analysis"); + hcatDriver.run("create table junit_sem_analysis (a int) partitioned by (b string) stored as RCFILE"); Table tbl = msc.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tblName); assertEquals(RCFileInputFormat.class.getName(),tbl.getSd().getInputFormat()); @@ -235,9 +235,9 @@ assertEquals(RCFileInputDriver.class.getName(), tblParams.get(HCatConstants.HCAT_ISD_CLASS)); assertEquals(RCFileOutputDriver.class.getName(), tblParams.get(HCatConstants.HCAT_OSD_CLASS)); - howlDriver.run("alter table junit_sem_analysis set fileformat INPUTFORMAT 'org.apache.hadoop.hive.ql.io.RCFileInputFormat' OUTPUTFORMAT " + + hcatDriver.run("alter table junit_sem_analysis set fileformat INPUTFORMAT 'org.apache.hadoop.hive.ql.io.RCFileInputFormat' OUTPUTFORMAT " + "'org.apache.hadoop.hive.ql.io.RCFileOutputFormat' inputdriver 'mydriver' outputdriver 'yourdriver'"); - howlDriver.run("desc extended junit_sem_analysis"); + hcatDriver.run("desc extended junit_sem_analysis"); tbl = msc.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tblName); assertEquals(RCFileInputFormat.class.getName(),tbl.getSd().getInputFormat()); @@ -246,14 +246,14 @@ assertEquals("mydriver", tblParams.get(HCatConstants.HCAT_ISD_CLASS)); assertEquals("yourdriver", tblParams.get(HCatConstants.HCAT_OSD_CLASS)); - howlDriver.run("drop table junit_sem_analysis"); + hcatDriver.run("drop table junit_sem_analysis"); } public void testAddPartFail() throws CommandNeedRetryException{ hiveDriver.run("drop table junit_sem_analysis"); hiveDriver.run("create table junit_sem_analysis (a int) partitioned by (b string) stored as RCFILE"); - CommandProcessorResponse response = howlDriver.run("alter table junit_sem_analysis add partition (b='2') location '/some/path'"); + CommandProcessorResponse response = hcatDriver.run("alter table junit_sem_analysis add partition (b='2') location '/some/path'"); assertEquals(10, response.getResponseCode()); assertTrue(response.getErrorMessage().contains("FAILED: Error in semantic analysis: Operation not supported. Partitions can be added only in a table created through HCatalog. 
" + "It seems table junit_sem_analysis was not created through HCatalog.")); @@ -262,39 +262,39 @@ public void testAddPartPass() throws IOException, CommandNeedRetryException{ - howlDriver.run("drop table junit_sem_analysis"); - howlDriver.run("create table junit_sem_analysis (a int) partitioned by (b string) stored as RCFILE"); - CommandProcessorResponse response = howlDriver.run("alter table junit_sem_analysis add partition (b='2') location '/tmp'"); + hcatDriver.run("drop table junit_sem_analysis"); + hcatDriver.run("create table junit_sem_analysis (a int) partitioned by (b string) stored as RCFILE"); + CommandProcessorResponse response = hcatDriver.run("alter table junit_sem_analysis add partition (b='2') location '/tmp'"); assertEquals(0, response.getResponseCode()); assertNull(response.getErrorMessage()); - howlDriver.run("drop table junit_sem_analysis"); + hcatDriver.run("drop table junit_sem_analysis"); } public void testCTAS() throws CommandNeedRetryException{ - howlDriver.run("drop table junit_sem_analysis"); + hcatDriver.run("drop table junit_sem_analysis"); query = "create table junit_sem_analysis (a int) as select * from tbl2"; - CommandProcessorResponse response = howlDriver.run(query); + CommandProcessorResponse response = hcatDriver.run(query); assertEquals(10, response.getResponseCode()); assertTrue(response.getErrorMessage().contains("FAILED: Error in semantic analysis: Operation not supported. Create table as Select is not a valid operation.")); - howlDriver.run("drop table junit_sem_analysis"); + hcatDriver.run("drop table junit_sem_analysis"); } public void testStoredAs() throws CommandNeedRetryException{ - howlDriver.run("drop table junit_sem_analysis"); + hcatDriver.run("drop table junit_sem_analysis"); query = "create table junit_sem_analysis (a int)"; - CommandProcessorResponse response = howlDriver.run(query); + CommandProcessorResponse response = hcatDriver.run(query); assertEquals(10, response.getResponseCode()); assertTrue(response.getErrorMessage().contains("FAILED: Error in semantic analysis: STORED AS specification is either incomplete or incorrect.")); - howlDriver.run("drop table junit_sem_analysis"); + hcatDriver.run("drop table junit_sem_analysis"); } public void testAddDriverInfo() throws IOException, MetaException, TException, NoSuchObjectException, CommandNeedRetryException{ - howlDriver.run("drop table junit_sem_analysis"); + hcatDriver.run("drop table junit_sem_analysis"); query = "create table junit_sem_analysis (a int) partitioned by (b string) stored as " + "INPUTFORMAT 'org.apache.hadoop.hive.ql.io.RCFileInputFormat' OUTPUTFORMAT " + "'org.apache.hadoop.hive.ql.io.RCFileOutputFormat' inputdriver 'mydriver' outputdriver 'yourdriver' "; - assertEquals(0,howlDriver.run(query).getResponseCode()); + assertEquals(0,hcatDriver.run(query).getResponseCode()); Table tbl = msc.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tblName); assertEquals(RCFileInputFormat.class.getName(),tbl.getSd().getInputFormat()); @@ -303,15 +303,15 @@ assertEquals("mydriver", tblParams.get(HCatConstants.HCAT_ISD_CLASS)); assertEquals("yourdriver", tblParams.get(HCatConstants.HCAT_OSD_CLASS)); - howlDriver.run("drop table junit_sem_analysis"); + hcatDriver.run("drop table junit_sem_analysis"); } public void testInvalidateNonStringPartition() throws IOException, CommandNeedRetryException{ - howlDriver.run("drop table junit_sem_analysis"); + hcatDriver.run("drop table junit_sem_analysis"); query = "create table junit_sem_analysis (a int) partitioned by (b int) stored as RCFILE"; 
- CommandProcessorResponse response = howlDriver.run(query); + CommandProcessorResponse response = hcatDriver.run(query); assertEquals(10,response.getResponseCode()); assertEquals("FAILED: Error in semantic analysis: Operation not supported. HCatalog only supports partition columns of type string. For column: b Found type: int", response.getErrorMessage()); @@ -320,10 +320,10 @@ public void testInvalidateSeqFileStoredAs() throws IOException, CommandNeedRetryException{ - howlDriver.run("drop table junit_sem_analysis"); + hcatDriver.run("drop table junit_sem_analysis"); query = "create table junit_sem_analysis (a int) partitioned by (b string) stored as SEQUENCEFILE"; - CommandProcessorResponse response = howlDriver.run(query); + CommandProcessorResponse response = hcatDriver.run(query); assertEquals(10,response.getResponseCode()); assertEquals("FAILED: Error in semantic analysis: Operation not supported. HCatalog doesn't support Sequence File by default yet. You may specify it through INPUT/OUTPUT storage drivers.", response.getErrorMessage()); @@ -332,10 +332,10 @@ public void testInvalidateTextFileStoredAs() throws IOException, CommandNeedRetryException{ - howlDriver.run("drop table junit_sem_analysis"); + hcatDriver.run("drop table junit_sem_analysis"); query = "create table junit_sem_analysis (a int) partitioned by (b string) stored as TEXTFILE"; - CommandProcessorResponse response = howlDriver.run(query); + CommandProcessorResponse response = hcatDriver.run(query); assertEquals(10,response.getResponseCode()); assertEquals("FAILED: Error in semantic analysis: Operation not supported. HCatalog doesn't support Text File by default yet. You may specify it through INPUT/OUTPUT storage drivers.", response.getErrorMessage()); @@ -344,10 +344,10 @@ public void testInvalidateClusteredBy() throws IOException, CommandNeedRetryException{ - howlDriver.run("drop table junit_sem_analysis"); + hcatDriver.run("drop table junit_sem_analysis"); query = "create table junit_sem_analysis (a int) partitioned by (b string) clustered by (a) into 10 buckets stored as TEXTFILE"; - CommandProcessorResponse response = howlDriver.run(query); + CommandProcessorResponse response = hcatDriver.run(query); assertEquals(10,response.getResponseCode()); assertEquals("FAILED: Error in semantic analysis: Operation not supported. HCatalog doesn't allow Clustered By in create table.", response.getErrorMessage()); @@ -360,7 +360,7 @@ hiveDriver.run(query); query = "create table like_table like junit_sem_analysis"; - CommandProcessorResponse response = howlDriver.run(query); + CommandProcessorResponse response = hcatDriver.run(query); assertEquals(10,response.getResponseCode()); assertEquals("FAILED: Error in semantic analysis: Operation not supported. 
CREATE TABLE LIKE is not supported.", response.getErrorMessage()); } @@ -368,18 +368,18 @@ public void testCTLPass() throws IOException, MetaException, TException, NoSuchObjectException, CommandNeedRetryException{ try{ - howlDriver.run("drop table junit_sem_analysis"); + hcatDriver.run("drop table junit_sem_analysis"); } catch( Exception e){ System.err.println(e.getMessage()); } query = "create table junit_sem_analysis (a int) partitioned by (b string) stored as RCFILE"; - howlDriver.run(query); + hcatDriver.run(query); String likeTbl = "like_table"; - howlDriver.run("drop table "+likeTbl); + hcatDriver.run("drop table "+likeTbl); query = "create table like_table like junit_sem_analysis"; - CommandProcessorResponse resp = howlDriver.run(query); + CommandProcessorResponse resp = hcatDriver.run(query); assertEquals(10, resp.getResponseCode()); assertEquals("FAILED: Error in semantic analysis: Operation not supported. CREATE TABLE LIKE is not supported.", resp.getErrorMessage()); // Table tbl = msc.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, likeTbl); @@ -390,20 +390,20 @@ // assertEquals("org.apache.hadoop.hive.ql.io.RCFileInputFormat",tbl.getSd().getInputFormat()); // assertEquals("org.apache.hadoop.hive.ql.io.RCFileOutputFormat",tbl.getSd().getOutputFormat()); // Map tblParams = tbl.getParameters(); -// assertEquals("org.apache.hadoop.hive.howl.rcfile.RCFileInputStorageDriver", tblParams.get("howl.isd")); -// assertEquals("org.apache.hadoop.hive.howl.rcfile.RCFileOutputStorageDriver", tblParams.get("howl.osd")); +// assertEquals("org.apache.hadoop.hive.hcat.rcfile.RCFileInputStorageDriver", tblParams.get("hcat.isd")); +// assertEquals("org.apache.hadoop.hive.hcat.rcfile.RCFileOutputStorageDriver", tblParams.get("hcat.osd")); // -// howlDriver.run("drop table junit_sem_analysis"); -// howlDriver.run("drop table "+likeTbl); +// hcatDriver.run("drop table junit_sem_analysis"); +// hcatDriver.run("drop table "+likeTbl); } // This test case currently fails, since add partitions don't inherit anything from tables. 
// public void testAddPartInheritDrivers() throws MetaException, TException, NoSuchObjectException{ // -// howlDriver.run("drop table "+tblName); -// howlDriver.run("create table junit_sem_analysis (a int) partitioned by (b string) stored as RCFILE"); -// howlDriver.run("alter table "+tblName+" add partition (b='2010-10-10')"); +// hcatDriver.run("drop table "+tblName); +// hcatDriver.run("create table junit_sem_analysis (a int) partitioned by (b string) stored as RCFILE"); +// hcatDriver.run("alter table "+tblName+" add partition (b='2010-10-10')"); // // List partVals = new ArrayList(1); // partVals.add("2010-10-10"); Index: src/test/org/apache/hcatalog/data/TestDefaultHCatRecord.java =================================================================== --- src/test/org/apache/hcatalog/data/TestDefaultHCatRecord.java (revision 1103878) +++ src/test/org/apache/hcatalog/data/TestDefaultHCatRecord.java (working copy) @@ -90,7 +90,7 @@ rec_1.add( new Integer(789)); rec_1.add( new Long(1000L)); rec_1.add( new Double(5.3D)); - rec_1.add( new String("howl and hadoop")); + rec_1.add( new String("hcat and hadoop")); rec_1.add( null); rec_1.add( "null"); @@ -102,7 +102,7 @@ rec_2.add( new Integer(789)); rec_2.add( new Long(1000L)); rec_2.add( new Double(5.3D)); - rec_2.add( new String("howl and hadoop")); + rec_2.add( new String("hcat and hadoop")); rec_2.add( null); rec_2.add( "null"); HCatRecord tup_2 = new DefaultHCatRecord(rec_2); @@ -113,14 +113,14 @@ rec_3.add( new Integer(789)); rec_3.add( new Long(1000L)); rec_3.add( new Double(5.3D)); - rec_3.add( new String("howl and hadoop")); + rec_3.add( new String("hcat and hadoop")); rec_3.add( null); List innerList = new ArrayList(); innerList.add(314); innerList.add(007); rec_3.add( innerList); Map map = new HashMap(3); - map.put(new Short("2"), "howl is cool"); + map.put(new Short("2"), "hcat is cool"); map.put(new Short("3"), "is it?"); map.put(new Short("4"), "or is it not?"); rec_3.add(map); @@ -133,12 +133,12 @@ rec_4.add( new Integer(789)); rec_4.add( new Long(1000L)); rec_4.add( new Double(5.3D)); - rec_4.add( new String("howl and hadoop")); + rec_4.add( new String("hcat and hadoop")); rec_4.add( null); rec_4.add( "null"); Map map2 = new HashMap(3); - map2.put(new Short("2"), "howl is cool"); + map2.put(new Short("2"), "hcat is cool"); map2.put(new Short("3"), "is it?"); map2.put(new Short("4"), "or is it not?"); rec_4.add(map2); Index: src/java/org/apache/hcatalog/pig/HCatBaseLoader.java =================================================================== --- src/java/org/apache/hcatalog/pig/HCatBaseLoader.java (revision 1103878) +++ src/java/org/apache/hcatalog/pig/HCatBaseLoader.java (working copy) @@ -95,7 +95,7 @@ // can retrieve it later. 
storeInUDFContext(signature, PRUNE_PROJECTION_INFO, requiredFieldsInfo); - // Howl will always prune columns based on what we ask of it - so the + // HCat will always prune columns based on what we ask of it - so the // response is true return new RequiredFieldResponse(true); } Index: src/java/org/apache/hcatalog/pig/HCatLoader.java =================================================================== --- src/java/org/apache/hcatalog/pig/HCatLoader.java (revision 1103878) +++ src/java/org/apache/hcatalog/pig/HCatLoader.java (working copy) @@ -40,26 +40,26 @@ import org.apache.pig.impl.util.UDFContext; /** - * Pig {@link LoadFunc} to read data from Howl + * Pig {@link LoadFunc} to read data from HCat */ public class HCatLoader extends HCatBaseLoader { private static final String PARTITION_FILTER = "partition.filter"; // for future use - private HCatInputFormat howlInputFormat = null; + private HCatInputFormat hcatInputFormat = null; private String dbName; private String tableName; - private String howlServerUri; + private String hcatServerUri; private String partitionFilterString; private final PigHCatUtil phutil = new PigHCatUtil(); @Override public InputFormat getInputFormat() throws IOException { - if(howlInputFormat == null) { - howlInputFormat = new HCatInputFormat(); + if(hcatInputFormat == null) { + hcatInputFormat = new HCatInputFormat(); } - return howlInputFormat; + return hcatInputFormat; } @Override @@ -76,30 +76,30 @@ // get partitionFilterString stored in the UDFContext - it would have // been stored there by an earlier call to setPartitionFilter - // call setInput on OwlInputFormat only in the frontend because internally - // it makes calls to the owl server - we don't want these to happen in + // call setInput on HCatInputFormat only in the frontend because internally + // it makes calls to the hcat server - we don't want these to happen in // the backend // in the hadoop front end mapred.task.id property will not be set in // the Configuration if (!HCatUtil.checkJobContextIfRunningFromBackend(job)){ HCatInputFormat.setInput(job, HCatTableInfo.getInputTableInfo( - howlServerUri!=null ? howlServerUri : - (howlServerUri = PigHCatUtil.getHowlServerUri(job)), - PigHCatUtil.getHowlServerPrincipal(job), + hcatServerUri!=null ? 
hcatServerUri : + (hcatServerUri = PigHCatUtil.getHCatServerUri(job)), + PigHCatUtil.getHCatServerPrincipal(job), dbName, tableName, getPartitionFilterString())); } // Need to also push projections by calling setOutputSchema on - // OwlInputFormat - we have to get the RequiredFields information + // HCatInputFormat - we have to get the RequiredFields information // from the UdfContext, translate it to an Schema and then pass it // The reason we do this here is because setLocation() is called by // Pig runtime at InputFormat.getSplits() and // InputFormat.createRecordReader() time - we are not sure when - // OwlInputFormat needs to know about pruned projections - so doing it - // here will ensure we communicate to OwlInputFormat about pruned + // HCatInputFormat needs to know about pruned projections - so doing it + // here will ensure we communicate to HCatInputFormat about pruned // projections at getSplits() and createRecordReader() time UDFContext udfContext = UDFContext.getUDFContext(); @@ -108,7 +108,7 @@ RequiredFieldList requiredFieldsInfo = (RequiredFieldList)props.get(PRUNE_PROJECTION_INFO); if(requiredFieldsInfo != null) { - // convert to owlschema and pass to OwlInputFormat + // convert to hcatschema and pass to HCatInputFormat try { outputSchema = phutil.getHCatSchema(requiredFieldsInfo.getFields(),signature,this.getClass()); HCatInputFormat.setOutputSchema(job, outputSchema); @@ -118,11 +118,11 @@ } else{ // else - this means pig's optimizer never invoked the pushProjection // method - so we need all fields and hence we should not call the - // setOutputSchema on OwlInputFormat + // setOutputSchema on HCatInputFormat if (HCatUtil.checkJobContextIfRunningFromBackend(job)){ try { - HCatSchema howlTableSchema = (HCatSchema) props.get(HCatConstants.HCAT_TABLE_SCHEMA); - outputSchema = howlTableSchema; + HCatSchema hcatTableSchema = (HCatSchema) props.get(HCatConstants.HCAT_TABLE_SCHEMA); + outputSchema = hcatTableSchema; } catch (Exception e) { throw new IOException(e); } @@ -134,8 +134,8 @@ public String[] getPartitionKeys(String location, Job job) throws IOException { Table table = phutil.getTable(location, - howlServerUri!=null?howlServerUri:PigHCatUtil.getHowlServerUri(job), - PigHCatUtil.getHowlServerPrincipal(job)); + hcatServerUri!=null?hcatServerUri:PigHCatUtil.getHCatServerUri(job), + PigHCatUtil.getHCatServerPrincipal(job)); List tablePartitionKeys = table.getPartitionKeys(); String[] partitionKeys = new String[tablePartitionKeys.size()]; for(int i = 0; i < tablePartitionKeys.size(); i++) { @@ -147,28 +147,28 @@ @Override public ResourceSchema getSchema(String location, Job job) throws IOException { Table table = phutil.getTable(location, - howlServerUri!=null?howlServerUri:PigHCatUtil.getHowlServerUri(job), - PigHCatUtil.getHowlServerPrincipal(job)); - HCatSchema howlTableSchema = HCatUtil.getTableSchemaWithPtnCols(table); + hcatServerUri!=null?hcatServerUri:PigHCatUtil.getHCatServerUri(job), + PigHCatUtil.getHCatServerPrincipal(job)); + HCatSchema hcatTableSchema = HCatUtil.getTableSchemaWithPtnCols(table); try { - PigHCatUtil.validateHowlTableSchemaFollowsPigRules(howlTableSchema); + PigHCatUtil.validateHCatTableSchemaFollowsPigRules(hcatTableSchema); } catch (IOException e){ throw new PigException( - "Table schema incompatible for reading through HowlLoader :" + e.getMessage() - + ";[Table schema was "+ howlTableSchema.toString() +"]" + "Table schema incompatible for reading through HCatLoader :" + e.getMessage() + + ";[Table schema was "+ hcatTableSchema.toString() 
+"]" ,PigHCatUtil.PIG_EXCEPTION_CODE, e); } - storeInUDFContext(signature, HCatConstants.HCAT_TABLE_SCHEMA, howlTableSchema); - outputSchema = howlTableSchema; - return PigHCatUtil.getResourceSchema(howlTableSchema); + storeInUDFContext(signature, HCatConstants.HCAT_TABLE_SCHEMA, hcatTableSchema); + outputSchema = hcatTableSchema; + return PigHCatUtil.getResourceSchema(hcatTableSchema); } @Override public void setPartitionFilter(Expression partitionFilter) throws IOException { // convert the partition filter expression into a string expected by - // howl and pass it in setLocation() + // hcat and pass it in setLocation() - partitionFilterString = getHowlComparisonString(partitionFilter); + partitionFilterString = getHCatComparisonString(partitionFilter); // store this in the udf context so we can get it later storeInUDFContext(signature, @@ -184,9 +184,9 @@ return partitionFilterString; } - private String getHowlComparisonString(Expression expr) { + private String getHCatComparisonString(Expression expr) { if(expr instanceof BinaryExpression){ - // call getOwlComparisonString on lhs and rhs, and and join the + // call getHCatComparisonString on lhs and rhs, and and join the // results with OpType string // we can just use OpType.toString() on all Expression types except @@ -201,9 +201,9 @@ opStr = expr.getOpType().toString(); } BinaryExpression be = (BinaryExpression)expr; - return "(" + getHowlComparisonString(be.getLhs()) + + return "(" + getHCatComparisonString(be.getLhs()) + opStr + - getHowlComparisonString(be.getRhs()) + ")"; + getHCatComparisonString(be.getRhs()) + ")"; } else { // should be a constant or column return expr.toString(); Index: src/java/org/apache/hcatalog/pig/HCatBaseStorer.java =================================================================== --- src/java/org/apache/hcatalog/pig/HCatBaseStorer.java (revision 1103878) +++ src/java/org/apache/hcatalog/pig/HCatBaseStorer.java (working copy) @@ -62,12 +62,12 @@ /** * */ - protected static final String COMPUTED_OUTPUT_SCHEMA = "howl.output.schema"; + protected static final String COMPUTED_OUTPUT_SCHEMA = "hcat.output.schema"; protected final Map partitions; protected Schema pigSchema; private RecordWriter, HCatRecord> writer; protected HCatSchema computedSchema; - protected static final String PIG_SCHEMA = "howl.pig.store.schema"; + protected static final String PIG_SCHEMA = "hcat.pig.store.schema"; protected String sign; public HCatBaseStorer(String partSpecs, String schema) throws ParseException, FrontendException { @@ -101,7 +101,7 @@ if(pigSchema != null){ if(! Schema.equals(runtimeSchema, pigSchema, false, true) ){ throw new FrontendException("Schema provided in store statement doesn't match with the Schema" + - "returned by Pig run-time. Schema provided in HowlStorer: "+pigSchema.toString()+ " Schema received from Pig runtime: "+runtimeSchema.toString(), PigHCatUtil.PIG_EXCEPTION_CODE); + "returned by Pig run-time. Schema provided in HCatStorer: "+pigSchema.toString()+ " Schema received from Pig runtime: "+runtimeSchema.toString(), PigHCatUtil.PIG_EXCEPTION_CODE); } } else { pigSchema = runtimeSchema; @@ -117,20 +117,20 @@ List fieldSchemas = new ArrayList(pigSchema.size()); for(FieldSchema fSchema : pigSchema.getFields()){ byte type = fSchema.type; - HCatFieldSchema howlFSchema; + HCatFieldSchema hcatFSchema; try { // Find out if we need to throw away the tuple or not. 
if(type == DataType.BAG && removeTupleFromBag(tableSchema, fSchema)){ List arrFields = new ArrayList(1); - arrFields.add(getHowlFSFromPigFS(fSchema.schema.getField(0).schema.getField(0), tableSchema)); - howlFSchema = new HCatFieldSchema(fSchema.alias, Type.ARRAY, new HCatSchema(arrFields), null); + arrFields.add(getHCatFSFromPigFS(fSchema.schema.getField(0).schema.getField(0), tableSchema)); + hcatFSchema = new HCatFieldSchema(fSchema.alias, Type.ARRAY, new HCatSchema(arrFields), null); } else{ - howlFSchema = getHowlFSFromPigFS(fSchema, tableSchema); + hcatFSchema = getHCatFSFromPigFS(fSchema, tableSchema); } - fieldSchemas.add(howlFSchema); + fieldSchemas.add(hcatFSchema); } catch (HCatException he){ throw new FrontendException(he.getMessage(),PigHCatUtil.PIG_EXCEPTION_CODE,he); } @@ -163,7 +163,7 @@ } - private HCatFieldSchema getHowlFSFromPigFS(FieldSchema fSchema, HCatSchema hcatTblSchema) throws FrontendException, HCatException{ + private HCatFieldSchema getHCatFSFromPigFS(FieldSchema fSchema, HCatSchema hcatTblSchema) throws FrontendException, HCatException{ byte type = fSchema.type; switch(type){ @@ -187,17 +187,17 @@ case DataType.BAG: Schema bagSchema = fSchema.schema; List arrFields = new ArrayList(1); - arrFields.add(getHowlFSFromPigFS(bagSchema.getField(0), hcatTblSchema)); + arrFields.add(getHCatFSFromPigFS(bagSchema.getField(0), hcatTblSchema)); return new HCatFieldSchema(fSchema.alias, Type.ARRAY, new HCatSchema(arrFields), ""); case DataType.TUPLE: List fieldNames = new ArrayList(); - List howlFSs = new ArrayList(); + List hcatFSs = new ArrayList(); for( FieldSchema fieldSchema : fSchema.schema.getFields()){ fieldNames.add( fieldSchema.alias); - howlFSs.add(getHowlFSFromPigFS(fieldSchema, hcatTblSchema)); + hcatFSs.add(getHCatFSFromPigFS(fieldSchema, hcatTblSchema)); } - return new HCatFieldSchema(fSchema.alias, Type.STRUCT, new HCatSchema(howlFSs), ""); + return new HCatFieldSchema(fSchema.alias, Type.STRUCT, new HCatSchema(hcatFSs), ""); case DataType.MAP:{ // Pig's schema contain no type information about map's keys and @@ -258,11 +258,11 @@ } } - private Object getJavaObj(Object pigObj, HCatFieldSchema howlFS) throws ExecException, HCatException{ + private Object getJavaObj(Object pigObj, HCatFieldSchema hcatFS) throws ExecException, HCatException{ // The real work-horse. Spend time and energy in this method if there is - // need to keep HowlStorer lean and go fast. - Type type = howlFS.getType(); + // need to keep HCatStorer lean and go fast. + Type type = hcatFS.getType(); switch(type){ @@ -273,14 +273,14 @@ // // List innerList = new ArrayList(innerTup.size()); // int i = 0; - // for(HowlTypeInfo structFieldTypeInfo : typeInfo.getAllStructFieldTypeInfos()){ + // for(HCatTypeInfo structFieldTypeInfo : typeInfo.getAllStructFieldTypeInfos()){ // innerList.add(getJavaObj(innerTup.get(i++), structFieldTypeInfo)); // } // return innerList; case ARRAY: // Unwrap the bag. 
DataBag pigBag = (DataBag)pigObj; - HCatFieldSchema tupFS = howlFS.getArrayElementSchema().get(0); + HCatFieldSchema tupFS = hcatFS.getArrayElementSchema().get(0); boolean needTuple = tupFS.getType() == Type.STRUCT; List bagContents = new ArrayList((int)pigBag.size()); Iterator bagItr = pigBag.iterator(); @@ -327,18 +327,18 @@ byte type = pigField.type; String alias = pigField.alias; validateAlias(alias); - HCatFieldSchema howlField = getTableCol(alias, tblSchema); + HCatFieldSchema hcatField = getTableCol(alias, tblSchema); if(DataType.isComplex(type)){ switch(type){ case DataType.MAP: - if(howlField != null){ - if(howlField.getMapKeyType() != Type.STRING){ - throw new FrontendException("Key Type of map must be String "+howlField, PigHCatUtil.PIG_EXCEPTION_CODE); + if(hcatField != null){ + if(hcatField.getMapKeyType() != Type.STRING){ + throw new FrontendException("Key Type of map must be String "+hcatField, PigHCatUtil.PIG_EXCEPTION_CODE); } - if(howlField.getMapValueSchema().get(0).isComplex()){ - throw new FrontendException("Value type of map cannot be complex" + howlField, PigHCatUtil.PIG_EXCEPTION_CODE); + if(hcatField.getMapValueSchema().get(0).isComplex()){ + throw new FrontendException("Value type of map cannot be complex" + hcatField, PigHCatUtil.PIG_EXCEPTION_CODE); } } break; @@ -351,37 +351,37 @@ } validateAlias(innerField.alias); } - if(howlField != null){ + if(hcatField != null){ // Do the same validation for HCatSchema. - HCatFieldSchema arrayFieldScehma = howlField.getArrayElementSchema().get(0); + HCatFieldSchema arrayFieldScehma = hcatField.getArrayElementSchema().get(0); Type hType = arrayFieldScehma.getType(); if(hType == Type.STRUCT){ for(HCatFieldSchema structFieldInBag : arrayFieldScehma.getStructSubSchema().getFields()){ if(structFieldInBag.getType() == Type.STRUCT || structFieldInBag.getType() == Type.ARRAY){ - throw new FrontendException("Nested Complex types not allowed "+ howlField, PigHCatUtil.PIG_EXCEPTION_CODE); + throw new FrontendException("Nested Complex types not allowed "+ hcatField, PigHCatUtil.PIG_EXCEPTION_CODE); } } } if(hType == Type.MAP){ if(arrayFieldScehma.getMapKeyType() != Type.STRING){ - throw new FrontendException("Key Type of map must be String "+howlField, PigHCatUtil.PIG_EXCEPTION_CODE); + throw new FrontendException("Key Type of map must be String "+hcatField, PigHCatUtil.PIG_EXCEPTION_CODE); } if(arrayFieldScehma.getMapValueSchema().get(0).isComplex()){ - throw new FrontendException("Value type of map cannot be complex "+howlField, PigHCatUtil.PIG_EXCEPTION_CODE); + throw new FrontendException("Value type of map cannot be complex "+hcatField, PigHCatUtil.PIG_EXCEPTION_CODE); } } if(hType == Type.ARRAY) { - throw new FrontendException("Arrays cannot contain array within it. "+howlField, PigHCatUtil.PIG_EXCEPTION_CODE); + throw new FrontendException("Arrays cannot contain array within it. 
"+hcatField, PigHCatUtil.PIG_EXCEPTION_CODE); } } break; case DataType.TUPLE: validateUnNested(pigField.schema); - if(howlField != null){ - for(HCatFieldSchema structFieldSchema : howlField.getStructSubSchema().getFields()){ + if(hcatField != null){ + for(HCatFieldSchema structFieldSchema : hcatField.getStructSubSchema().getFields()){ if(structFieldSchema.isComplex()){ - throw new FrontendException("Nested Complex types are not allowed."+howlField, PigHCatUtil.PIG_EXCEPTION_CODE); + throw new FrontendException("Nested Complex types are not allowed."+hcatField, PigHCatUtil.PIG_EXCEPTION_CODE); } } } @@ -393,15 +393,15 @@ } } - for(HCatFieldSchema howlField : tblSchema.getFields()){ + for(HCatFieldSchema hcatField : tblSchema.getFields()){ // We dont do type promotion/demotion. - Type hType = howlField.getType(); + Type hType = hcatField.getType(); switch(hType){ case SMALLINT: case TINYINT: case BOOLEAN: - throw new FrontendException("Incompatible type found in howl table schema: "+howlField, PigHCatUtil.PIG_EXCEPTION_CODE); + throw new FrontendException("Incompatible type found in hcat table schema: "+hcatField, PigHCatUtil.PIG_EXCEPTION_CODE); } } } @@ -418,9 +418,9 @@ // Finds column by name in HCatSchema, if not found returns null. private HCatFieldSchema getTableCol(String alias, HCatSchema tblSchema){ - for(HCatFieldSchema howlField : tblSchema.getFields()){ - if(howlField.getName().equalsIgnoreCase(alias)){ - return howlField; + for(HCatFieldSchema hcatField : tblSchema.getFields()){ + if(hcatField.getName().equalsIgnoreCase(alias)){ + return hcatField; } } // Its a new column Index: src/java/org/apache/hcatalog/pig/PigHCatUtil.java =================================================================== --- src/java/org/apache/hcatalog/pig/PigHCatUtil.java (revision 1103878) +++ src/java/org/apache/hcatalog/pig/PigHCatUtil.java (working copy) @@ -54,7 +54,7 @@ static final int PIG_EXCEPTION_CODE = 1115; // http://wiki.apache.org/pig/PigErrorHandlingFunctionalSpecification#Error_codes private static final String DEFAULT_DB = MetaStoreUtils.DEFAULT_DATABASE_NAME; - private final Map, Table> howlTableCache = + private final Map, Table> hcatTableCache = new HashMap, Table>(); private static final TupleFactory tupFac = TupleFactory.getInstance(); @@ -62,7 +62,7 @@ static public Pair getDBTableNames(String location) throws IOException { // the location string will be of the form: // . 
- parse it and - // communicate the information to HowlInputFormat + // communicate the information to HCatInputFormat String[] dbTableNametokens = location.split("\\."); if(dbTableNametokens.length == 1) { @@ -77,12 +77,12 @@ } } - static public String getHowlServerUri(Job job) { + static public String getHCatServerUri(Job job) { return job.getConfiguration().get(HCatConstants.HCAT_METASTORE_URI); } - static public String getHowlServerPrincipal(Job job) { + static public String getHCatServerPrincipal(Job job) { return job.getConfiguration().get(HCatConstants.HCAT_METASTORE_PRINCIPAL); } @@ -120,20 +120,20 @@ Properties props = UDFContext.getUDFContext().getUDFProperties( classForUDFCLookup, new String[] {signature}); - HCatSchema howlTableSchema = (HCatSchema) props.get(HCatConstants.HCAT_TABLE_SCHEMA); + HCatSchema hcatTableSchema = (HCatSchema) props.get(HCatConstants.HCAT_TABLE_SCHEMA); ArrayList fcols = new ArrayList(); for(RequiredField rf: fields) { - fcols.add(howlTableSchema.getFields().get(rf.getIndex())); + fcols.add(hcatTableSchema.getFields().get(rf.getIndex())); } return new HCatSchema(fcols); } - public Table getTable(String location, String howlServerUri, String howlServerPrincipal) throws IOException{ - Pair loc_server = new Pair(location, howlServerUri); - Table howlTable = howlTableCache.get(loc_server); - if(howlTable != null){ - return howlTable; + public Table getTable(String location, String hcatServerUri, String hcatServerPrincipal) throws IOException{ + Pair loc_server = new Pair(location, hcatServerUri); + Table hcatTable = hcatTableCache.get(loc_server); + if(hcatTable != null){ + return hcatTable; } Pair dbTablePair = PigHCatUtil.getDBTableNames(location); @@ -141,21 +141,21 @@ String tableName = dbTablePair.second; Table table = null; try { - client = createHiveMetaClient(howlServerUri, howlServerPrincipal, PigHCatUtil.class); + client = createHiveMetaClient(hcatServerUri, hcatServerPrincipal, PigHCatUtil.class); table = client.getTable(dbName, tableName); } catch (NoSuchObjectException nsoe){ throw new PigException("Table not found : " + nsoe.getMessage(), PIG_EXCEPTION_CODE); // prettier error messages to frontend } catch (Exception e) { throw new IOException(e); } - howlTableCache.put(loc_server, table); + hcatTableCache.put(loc_server, table); return table; } - public static ResourceSchema getResourceSchema(HCatSchema howlSchema) throws IOException { + public static ResourceSchema getResourceSchema(HCatSchema hcatSchema) throws IOException { List rfSchemaList = new ArrayList(); - for (HCatFieldSchema hfs : howlSchema.getFields()){ + for (HCatFieldSchema hfs : hcatSchema.getFields()){ ResourceFieldSchema rfSchema; rfSchema = getResourceSchemaFromFieldSchema(hfs); rfSchemaList.add(rfSchema); @@ -229,7 +229,7 @@ } /** - * @param type owl column type + * @param type hcat column type * @return corresponding pig type * @throws IOException */ @@ -351,8 +351,8 @@ } - public static void validateHowlTableSchemaFollowsPigRules(HCatSchema howlTableSchema) throws IOException { - for (HCatFieldSchema hfs : howlTableSchema.getFields()){ + public static void validateHCatTableSchemaFollowsPigRules(HCatSchema hcatTableSchema) throws IOException { + for (HCatFieldSchema hfs : hcatTableSchema.getFields()){ Type htype = hfs.getType(); if (htype == Type.ARRAY){ validateIsPigCompatibleArrayWithPrimitivesOrSimpleComplexTypes(hfs); Index: src/java/org/apache/hcatalog/pig/drivers/LoadFuncBasedInputDriver.java =================================================================== --- 
src/java/org/apache/hcatalog/pig/drivers/LoadFuncBasedInputDriver.java (revision 1103878) +++ src/java/org/apache/hcatalog/pig/drivers/LoadFuncBasedInputDriver.java (working copy) @@ -38,9 +38,9 @@ /** - * This is a base class which wraps a Load func in HowlInputStorageDriver. + * This is a base class which wraps a Load func in HCatInputStorageDriver. * If you already have a LoadFunc, then this class along with LoadFuncBasedInputFormat - * is doing all the heavy lifting. For a new Howl Input Storage Driver just extend it + * is doing all the heavy lifting. For a new HCat Input Storage Driver just extend it * and override the initialize(). {@link PigStorageInputDriver} illustrates * that well. */ @@ -57,7 +57,7 @@ throws IOException { List data = ((Tuple)baseValue).getAll(); - List howlRecord = new ArrayList(desiredColNames.size()); + List hcatRecord = new ArrayList(desiredColNames.size()); /* Iterate through columns asked for in output schema, look them up in * original data schema. If found, put it. Else look up in partition columns @@ -66,28 +66,28 @@ */ for(String colName : desiredColNames){ Integer idx = dataSchema.getPosition(colName); - howlRecord.add( idx != null ? data.get(idx) : partVals.get(colName)); + hcatRecord.add( idx != null ? data.get(idx) : partVals.get(colName)); } - return new DefaultHCatRecord(howlRecord); + return new DefaultHCatRecord(hcatRecord); } @Override public InputFormat getInputFormat( - Properties howlProperties) { + Properties hcatProperties) { return inputFormat; } @Override - public void setOriginalSchema(JobContext jobContext, HCatSchema howlSchema) throws IOException { + public void setOriginalSchema(JobContext jobContext, HCatSchema hcatSchema) throws IOException { - dataSchema = howlSchema; + dataSchema = hcatSchema; } @Override - public void setOutputSchema(JobContext jobContext, HCatSchema howlSchema) throws IOException { + public void setOutputSchema(JobContext jobContext, HCatSchema hcatSchema) throws IOException { - desiredColNames = howlSchema.getFieldNames(); + desiredColNames = hcatSchema.getFieldNames(); } @Override Index: src/java/org/apache/hcatalog/pig/HCatStorer.java =================================================================== --- src/java/org/apache/hcatalog/pig/HCatStorer.java (revision 1103878) +++ src/java/org/apache/hcatalog/pig/HCatStorer.java (working copy) @@ -40,7 +40,7 @@ import org.apache.pig.impl.util.UDFContext; /** - * HowlStorer. + * HCatStorer. 
* */ @@ -75,11 +75,11 @@ String[] userStr = location.split("\\."); HCatTableInfo tblInfo; if(userStr.length == 2) { - tblInfo = HCatTableInfo.getOutputTableInfo(PigHCatUtil.getHowlServerUri(job), - PigHCatUtil.getHowlServerPrincipal(job), userStr[0],userStr[1],partitions); + tblInfo = HCatTableInfo.getOutputTableInfo(PigHCatUtil.getHCatServerUri(job), + PigHCatUtil.getHCatServerPrincipal(job), userStr[0],userStr[1],partitions); } else { - tblInfo = HCatTableInfo.getOutputTableInfo(PigHCatUtil.getHowlServerUri(job), - PigHCatUtil.getHowlServerPrincipal(job), null,userStr[0],partitions); + tblInfo = HCatTableInfo.getOutputTableInfo(PigHCatUtil.getHCatServerUri(job), + PigHCatUtil.getHCatServerPrincipal(job), null,userStr[0],partitions); } @@ -101,13 +101,13 @@ // information passed to HCatOutputFormat was not right throw new PigException(he.getMessage(), PigHCatUtil.PIG_EXCEPTION_CODE, he); } - HCatSchema howlTblSchema = HCatOutputFormat.getTableSchema(job); + HCatSchema hcatTblSchema = HCatOutputFormat.getTableSchema(job); try{ - doSchemaValidations(pigSchema, howlTblSchema); + doSchemaValidations(pigSchema, hcatTblSchema); } catch(HCatException he){ throw new FrontendException(he.getMessage(), PigHCatUtil.PIG_EXCEPTION_CODE, he); } - computedSchema = convertPigSchemaToHCatSchema(pigSchema,howlTblSchema); + computedSchema = convertPigSchemaToHCatSchema(pigSchema,hcatTblSchema); HCatOutputFormat.setSchema(job, computedSchema); p.setProperty(HCatConstants.HCAT_KEY_OUTPUT_INFO, config.get(HCatConstants.HCAT_KEY_OUTPUT_INFO)); if(config.get(HCatConstants.HCAT_KEY_HIVE_CONF) != null){ @@ -134,7 +134,7 @@ @Override public void storeSchema(ResourceSchema schema, String arg1, Job job) throws IOException { if( job.getConfiguration().get("mapred.job.tracker", "").equalsIgnoreCase("local") ) { - //In local mode, mapreduce will not call HowlOutputCommitter.cleanupJob. + //In local mode, mapreduce will not call HCatOutputCommitter.cleanupJob. //Calling it from here so that the partition publish happens. //This call needs to be removed after MAPREDUCE-1447 is fixed. new HCatOutputCommitter(null).cleanupJob(job); Index: src/java/org/apache/hcatalog/rcfile/RCFileInputDriver.java =================================================================== --- src/java/org/apache/hcatalog/rcfile/RCFileInputDriver.java (revision 1103878) +++ src/java/org/apache/hcatalog/rcfile/RCFileInputDriver.java (working copy) @@ -69,7 +69,7 @@ private Map namePosMapping; @Override - public InputFormat getInputFormat(Properties howlProperties) { + public InputFormat getInputFormat(Properties hcatProperties) { return new RCFileMapReduceInputFormat(); } @@ -117,7 +117,7 @@ public HCatRecord convertToHCatRecord(WritableComparable ignored, Writable bytesRefArray) throws IOException { // Deserialize bytesRefArray into struct and then convert that struct to - // HowlRecord. + // HCatRecord. ColumnarStruct struct; try { struct = (ColumnarStruct)serde.deserialize(bytesRefArray); @@ -152,7 +152,7 @@ private Object getTypedObj(Object data, ObjectInspector oi) throws IOException{ // The real work-horse method. We are gobbling up all the laziness benefits - // of Hive-RCFile by deserializing everything and creating crisp HowlRecord + // of Hive-RCFile by deserializing everything and creating crisp HCatRecord // with crisp Java objects inside it. We have to do it because higher layer // may not know how to do it. 
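A minimal sketch of the eager conversion described in the comment above, assuming the standard Hive SerDe ObjectInspector API (PrimitiveObjectInspector, ListObjectInspector); the class and method names here are hypothetical, only the primitive and list cases are shown, and this is an illustrative example rather than the driver's actual code.

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hive.serde2.objectinspector.ListObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;

public class EagerDeserializationSketch {
    // Hypothetical helper: walk a lazily-deserialized Hive value with its
    // ObjectInspector and materialize it into plain Java objects.
    public static Object toJavaObject(Object data, ObjectInspector oi) {
        if (data == null) {
            return null;
        }
        switch (oi.getCategory()) {
        case PRIMITIVE:
            // Copies the lazy value into a standard Java type (String, Integer, ...).
            return ((PrimitiveObjectInspector) oi).getPrimitiveJavaObject(data);
        case LIST: {
            ListObjectInspector loi = (ListObjectInspector) oi;
            List<Object> result = new ArrayList<Object>();
            for (Object element : loi.getList(data)) {
                result.add(toJavaObject(element, loi.getListElementObjectInspector()));
            }
            return result;
        }
        default:
            // Struct and map cases are omitted from this sketch.
            return data;
        }
    }
}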
@@ -200,26 +200,26 @@ } @Override - public void initialize(JobContext context,Properties howlProperties) + public void initialize(JobContext context,Properties hcatProperties) throws IOException { - super.initialize(context, howlProperties); + super.initialize(context, hcatProperties); // Columnar Serde needs to know names and types of columns it needs to read. List fields = HCatUtil.getFieldSchemaList(colsInData); - howlProperties.setProperty(Constants.LIST_COLUMNS,MetaStoreUtils. + hcatProperties.setProperty(Constants.LIST_COLUMNS,MetaStoreUtils. getColumnNamesFromFieldSchema(fields)); - howlProperties.setProperty(Constants.LIST_COLUMN_TYPES, MetaStoreUtils. + hcatProperties.setProperty(Constants.LIST_COLUMN_TYPES, MetaStoreUtils. getColumnTypesFromFieldSchema(fields)); // It seems RCFIle reads and writes nulls differently as compared to default hive. // setting these props to match LazySimpleSerde - howlProperties.setProperty(Constants.SERIALIZATION_NULL_FORMAT, "\\N"); - howlProperties.setProperty(Constants.SERIALIZATION_FORMAT, "1"); + hcatProperties.setProperty(Constants.SERIALIZATION_NULL_FORMAT, "\\N"); + hcatProperties.setProperty(Constants.SERIALIZATION_FORMAT, "1"); try { serde = new ColumnarSerDe(); - serde.initialize(context.getConfiguration(), howlProperties); + serde.initialize(context.getConfiguration(), hcatProperties); oi = (StructObjectInspector) serde.getObjectInspector(); structFields = oi.getAllStructFieldRefs(); Index: src/java/org/apache/hcatalog/mapreduce/HCatInputStorageDriver.java =================================================================== --- src/java/org/apache/hcatalog/mapreduce/HCatInputStorageDriver.java (revision 1103878) +++ src/java/org/apache/hcatalog/mapreduce/HCatInputStorageDriver.java (working copy) @@ -34,8 +34,8 @@ import org.apache.hcatalog.data.HCatRecord; import org.apache.hcatalog.data.schema.HCatSchema; -/** The abstract class to be implemented by underlying storage drivers to enable data access from Howl through - * HowlInputFormat. +/** The abstract class to be implemented by underlying storage drivers to enable data access from HCat through + * HCatInputFormat. */ public abstract class HCatInputStorageDriver { @@ -48,15 +48,15 @@ * @param properties the properties containing parameters required for initialization of InputFormat * @return the InputFormat instance */ - public abstract InputFormat getInputFormat(Properties howlProperties); + public abstract InputFormat getInputFormat(Properties hcatProperties); /** - * Converts to HowlRecord format usable by HowlInputFormat to convert to required valuetype. + * Converts to HCatRecord format usable by HCatInputFormat to convert to required valuetype. * Implementers of StorageDriver should look to overwriting this function so as to convert their - * value type to HowlRecord. Default implementation is provided for StorageDriver implementations - * on top of an underlying InputFormat that already uses HowlRecord as a tuple - * @param value the underlying value to convert to HowlRecord + * value type to HCatRecord. Default implementation is provided for StorageDriver implementations + * on top of an underlying InputFormat that already uses HCatRecord as a tuple + * @param value the underlying value to convert to HCatRecord */ public abstract HCatRecord convertToHCatRecord(WritableComparable baseKey, Writable baseValue) throws IOException; @@ -126,29 +126,29 @@ } /** - * Set the schema of the data as originally published in Howl. 
The storage driver might validate that this matches with - * the schema it has (like Zebra) or it will use this to create a HowlRecord matching the output schema. + * Set the schema of the data as originally published in HCat. The storage driver might validate that this matches with + * the schema it has (like Zebra) or it will use this to create a HCatRecord matching the output schema. * @param jobContext the job context object - * @param howlSchema the schema published in Howl for this data + * @param hcatSchema the schema published in HCat for this data * @param instantiationState * @throws IOException Signals that an I/O exception has occurred. */ - public abstract void setOriginalSchema(JobContext jobContext, HCatSchema howlSchema) throws IOException; + public abstract void setOriginalSchema(JobContext jobContext, HCatSchema hcatSchema) throws IOException; /** - * Set the consolidated schema for the HowlRecord data returned by the storage driver. All tuples returned by the RecordReader should + * Set the consolidated schema for the HCatRecord data returned by the storage driver. All tuples returned by the RecordReader should * have this schema. Nulls should be inserted for columns not present in the data. * @param jobContext the job context object - * @param howlSchema the schema to use as the consolidated schema + * @param hcatSchema the schema to use as the consolidated schema * @throws IOException Signals that an I/O exception has occurred. */ - public abstract void setOutputSchema(JobContext jobContext, HCatSchema howlSchema) throws IOException; + public abstract void setOutputSchema(JobContext jobContext, HCatSchema hcatSchema) throws IOException; /** * Sets the partition key values for the current partition. The storage driver is passed this so that the storage - * driver can add the partition key values to the output HowlRecord if the partition key values are not present on disk. + * driver can add the partition key values to the output HCatRecord if the partition key values are not present on disk. * @param jobContext the job context object - * @param partitionValues the partition values having a map with partition key name as key and the HowlKeyValue as value + * @param partitionValues the partition values having a map with partition key name as key and the HCatKeyValue as value * @param instantiationState * @throws IOException Signals that an I/O exception has occurred. */ Index: src/java/org/apache/hcatalog/mapreduce/HCatOutputStorageDriver.java =================================================================== --- src/java/org/apache/hcatalog/mapreduce/HCatOutputStorageDriver.java (revision 1103878) +++ src/java/org/apache/hcatalog/mapreduce/HCatOutputStorageDriver.java (working copy) @@ -35,18 +35,18 @@ import org.apache.hcatalog.data.schema.HCatSchema; -/** The abstract class to be implemented by underlying storage drivers to enable data access from Howl through - * HowlOutputFormat. +/** The abstract class to be implemented by underlying storage drivers to enable data access from HCat through + * HCatOutputFormat. */ public abstract class HCatOutputStorageDriver { /** * Initialize the storage driver with specified properties, default implementation does nothing. * @param context the job context object - * @param howlProperties the properties for the storage driver + * @param hcatProperties the properties for the storage driver * @throws IOException Signals that an I/O exception has occurred. 
*/ - public void initialize(JobContext context, Properties howlProperties) throws IOException { + public void initialize(JobContext context, Properties hcatProperties) throws IOException { } /** @@ -81,17 +81,17 @@ public abstract void setPartitionValues(JobContext jobContext, Map partitionValues) throws IOException; /** - * Generate the key for the underlying outputformat. The value given to HowlOutputFormat is passed as the - * argument. The key given to HowlOutputFormat is ignored.. - * @param value the value given to HowlOutputFormat + * Generate the key for the underlying outputformat. The value given to HCatOutputFormat is passed as the + * argument. The key given to HCatOutputFormat is ignored.. + * @param value the value given to HCatOutputFormat * @return a key instance * @throws IOException Signals that an I/O exception has occurred. */ public abstract WritableComparable generateKey(HCatRecord value) throws IOException; /** - * Convert the given HowlRecord value to the actual value type. - * @param value the HowlRecord value to convert + * Convert the given HCatRecord value to the actual value type. + * @param value the HCatRecord value to convert * @return a value instance * @throws IOException Signals that an I/O exception has occurred. */ Index: src/java/org/apache/hcatalog/mapreduce/HCatRecordReader.java =================================================================== --- src/java/org/apache/hcatalog/mapreduce/HCatRecordReader.java (revision 1103878) +++ src/java/org/apache/hcatalog/mapreduce/HCatRecordReader.java (working copy) @@ -26,8 +26,8 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext; import org.apache.hcatalog.data.HCatRecord; -/** The Howl wrapper for the underlying RecordReader, this ensures that the initialize on - * the underlying record reader is done with the underlying split, not with HowlSplit. +/** The HCat wrapper for the underlying RecordReader, this ensures that the initialize on + * the underlying record reader is done with the underlying split, not with HCatSplit. */ class HCatRecordReader extends RecordReader { @@ -38,7 +38,7 @@ private final HCatInputStorageDriver storageDriver; /** - * Instantiates a new howl record reader. + * Instantiates a new hcat record reader. * @param baseRecordReader the base record reader */ public HCatRecordReader(HCatInputStorageDriver storageDriver, RecordReader baseRecordReader) { Index: src/java/org/apache/hcatalog/mapreduce/HCatBaseInputFormat.java =================================================================== --- src/java/org/apache/hcatalog/mapreduce/HCatBaseInputFormat.java (revision 1103878) +++ src/java/org/apache/hcatalog/mapreduce/HCatBaseInputFormat.java (working copy) @@ -37,11 +37,11 @@ public abstract class HCatBaseInputFormat extends InputFormat { /** - * get the schema for the HowlRecord data returned by HowlInputFormat. + * get the schema for the HCatRecord data returned by HCatInputFormat. * * @param job * the job object - * @param howlSchema + * @param hcatSchema * the schema to use as the consolidated schema * @throws IllegalArgumentException */ @@ -55,7 +55,7 @@ } /** - * Set the schema for the HowlRecord data returned by HowlInputFormat. + * Set the schema for the HCatRecord data returned by HCatInputFormat. * @param job the job object * @param hcatSchema the schema to use as the consolidated schema */ @@ -68,7 +68,7 @@ * Logically split the set of input files for the job. 
Returns the * underlying InputFormat's splits * @param jobContext the job context object - * @return the splits, an HowlInputSplit wrapper over the storage + * @return the splits, an HCatInputSplit wrapper over the storage * driver InputSplits * @throws IOException or InterruptedException */ @@ -127,11 +127,11 @@ /** * Create the RecordReader for the given InputSplit. Returns the underlying * RecordReader if the required operations are supported and schema matches - * with HowlTable schema. Returns an HowlRecordReader if operations need to - * be implemented in Howl. + * with HCatTable schema. Returns an HCatRecordReader if operations need to + * be implemented in HCat. * @param split the split * @param taskContext the task attempt context - * @return the record reader instance, either an HowlRecordReader(later) or + * @return the record reader instance, either an HCatRecordReader(later) or * the underlying storage driver's RecordReader * @throws IOException or InterruptedException */ @@ -139,12 +139,12 @@ public RecordReader createRecordReader(InputSplit split, TaskAttemptContext taskContext) throws IOException, InterruptedException { - HCatSplit howlSplit = (HCatSplit) split; - PartInfo partitionInfo = howlSplit.getPartitionInfo(); + HCatSplit hcatSplit = (HCatSplit) split; + PartInfo partitionInfo = hcatSplit.getPartitionInfo(); //If running through a Pig job, the JobInfo will not be available in the - //backend process context (since HowlLoader works on a copy of the JobContext and does - //not call HowlInputFormat.setInput in the backend process). + //backend process context (since HCatLoader works on a copy of the JobContext and does + //not call HCatInputFormat.setInput in the backend process). //So this function should NOT attempt to read the JobInfo. HCatInputStorageDriver storageDriver; @@ -155,26 +155,26 @@ } //Pass all required information to the storage driver - initStorageDriver(storageDriver, taskContext, partitionInfo, howlSplit.getTableSchema()); + initStorageDriver(storageDriver, taskContext, partitionInfo, hcatSplit.getTableSchema()); //Get the input format for the storage driver InputFormat inputFormat = storageDriver.getInputFormat(partitionInfo.getInputStorageDriverProperties()); - //Create the underlying input formats record record and an Howl wrapper + //Create the underlying input format's record reader and an HCat wrapper RecordReader recordReader = - inputFormat.createRecordReader(howlSplit.getBaseSplit(), taskContext); + inputFormat.createRecordReader(hcatSplit.getBaseSplit(), taskContext); return new HCatRecordReader(storageDriver,recordReader); } /** - * Gets the HowlTable schema for the table specified in the HowlInputFormat.setInput call - * on the specified job context. This information is available only after HowlInputFormat.setInput + * Gets the HCatTable schema for the table specified in the HCatInputFormat.setInput call + * on the specified job context. This information is available only after HCatInputFormat.setInput * has been called for a JobContext. * @param context the context * @return the table schema - * @throws Exception if HowlInputFromat.setInput has not been called for the current context + * @throws Exception if HCatInputFormat.setInput has not been called for the current context */ public static HCatSchema getTableSchema(JobContext context) throws Exception { JobInfo jobInfo = getJobInfo(context); @@ -184,7 +184,7 @@ /** * Gets the JobInfo object by reading the Configuration and deserializing * the string.
If JobInfo is not present in the configuration, throws an - * exception since that means HowlInputFormat.setInput has not been called. + * exception since that means HCatInputFormat.setInput has not been called. * @param jobContext the job context * @return the JobInfo object * @throws Exception the exception @@ -192,7 +192,7 @@ private static JobInfo getJobInfo(JobContext jobContext) throws Exception { String jobString = jobContext.getConfiguration().get(HCatConstants.HCAT_KEY_JOB_INFO); if( jobString == null ) { - throw new Exception("job information not found in JobContext. HowlInputFormat.setInput() not called?"); + throw new Exception("job information not found in JobContext. HCatInputFormat.setInput() not called?"); } return (JobInfo) HCatUtil.deserialize(jobString); Index: src/java/org/apache/hcatalog/mapreduce/HCatTableInfo.java =================================================================== --- src/java/org/apache/hcatalog/mapreduce/HCatTableInfo.java (revision 1103878) +++ src/java/org/apache/hcatalog/mapreduce/HCatTableInfo.java (working copy) @@ -25,8 +25,8 @@ /** * - * HCatTableInfo - class to communicate table information to {@link HowlInputFormat} - * and {@link HowlOutputFormat} + * HCatTableInfo - class to communicate table information to {@link HCatInputFormat} + * and {@link HCatOutputFormat} * */ public class HCatTableInfo implements Serializable { @@ -44,9 +44,9 @@ /** The Metadata server uri */ private final String serverUri; - /** If the howl server is configured to work with hadoop security, this + /** If the hcat server is configured to work with hadoop security, this * variable will hold the principal name of the server - this will be used - * in the authentication to the howl server using kerberos + * in the authentication to the hcat server using kerberos */ private final String serverKerberosPrincipal; @@ -67,13 +67,13 @@ private Map partitionValues; /** - * Initializes a new HCatTableInfo instance to be used with {@link HowlInputFormat} + * Initializes a new HCatTableInfo instance to be used with {@link HCatInputFormat} * for reading data from a table. * @param serverUri the Metadata server uri - * @param serverKerberosPrincipal If the howl server is configured to + * @param serverKerberosPrincipal If the hcat server is configured to * work with hadoop security, the kerberos principal name of the server - else null * The principal name should be of the form: - * /_HOST@ like "howl/_HOST@myrealm.com" + * /_HOST@ like "hcat/_HOST@myrealm.com" * The special string _HOST will be replaced automatically with the correct host name * @param dbName the db name * @param tableName the table name @@ -86,13 +86,13 @@ } /** - * Initializes a new HCatTableInfo instance to be used with {@link HowlInputFormat} + * Initializes a new HCatTableInfo instance to be used with {@link HCatInputFormat} * for reading data from a table. 
* @param serverUri the Metadata server uri - * @param serverKerberosPrincipal If the howl server is configured to + * @param serverKerberosPrincipal If the hcat server is configured to * work with hadoop security, the kerberos principal name of the server - else null * The principal name should be of the form: - * /_HOST@ like "howl/_HOST@myrealm.com" + * /_HOST@ like "hcat/_HOST@myrealm.com" * The special string _HOST will be replaced automatically with the correct host name * @param dbName the db name * @param tableName the table name @@ -115,13 +115,13 @@ this.filter = filter; } /** - * Initializes a new HCatTableInfo instance to be used with {@link HowlOutputFormat} + * Initializes a new HCatTableInfo instance to be used with {@link HCatOutputFormat} * for writing data from a table. * @param serverUri the Metadata server uri - * @param serverKerberosPrincipal If the howl server is configured to + * @param serverKerberosPrincipal If the hcat server is configured to * work with hadoop security, the kerberos principal name of the server - else null * The principal name should be of the form: - * /_HOST@ like "howl/_HOST@myrealm.com" + * /_HOST@ like "hcat/_HOST@myrealm.com" * The special string _HOST will be replaced automatically with the correct host name * @param dbName the db name * @param tableName the table name Index: src/java/org/apache/hcatalog/mapreduce/HCatBaseOutputFormat.java =================================================================== --- src/java/org/apache/hcatalog/mapreduce/HCatBaseOutputFormat.java (revision 1103878) +++ src/java/org/apache/hcatalog/mapreduce/HCatBaseOutputFormat.java (working copy) @@ -37,11 +37,11 @@ public abstract class HCatBaseOutputFormat extends OutputFormat, HCatRecord> { /** - * Gets the table schema for the table specified in the HowlOutputFormat.setOutput call + * Gets the table schema for the table specified in the HCatOutputFormat.setOutput call * on the specified job context. * @param context the context * @return the table schema - * @throws IOException if HowlOutputFromat.setOutput has not been called for the passed context + * @throws IOException if HCatOutputFormat.setOutput has not been called for the passed context */ public static HCatSchema getTableSchema(JobContext context) throws IOException { OutputJobInfo jobInfo = getJobInfo(context); @@ -76,9 +76,9 @@ } /** - * Gets the HowlOuputJobInfo object by reading the Configuration and deserializing + * Gets the OutputJobInfo object by reading the Configuration and deserializing * the string. If JobInfo is not present in the configuration, throws an - * exception since that means HowlOutputFormat.setOutput has not been called. + * exception since that means HCatOutputFormat.setOutput has not been called.
* @param jobContext the job context * @return the OutputJobInfo object * @throws IOException the IO exception Index: src/java/org/apache/hcatalog/mapreduce/HCatEximOutputCommitter.java =================================================================== --- src/java/org/apache/hcatalog/mapreduce/HCatEximOutputCommitter.java (revision 1103878) +++ src/java/org/apache/hcatalog/mapreduce/HCatEximOutputCommitter.java (working copy) @@ -53,7 +53,7 @@ @Override public void cleanupJob(JobContext jobContext) throws IOException { - LOG.info("HowlEximOutputCommitter.cleanup invoked; m.o.d : " + + LOG.info("HCatEximOutputCommitter.cleanup invoked; m.o.d : " + jobContext.getConfiguration().get("mapred.output.dir")); if (baseCommitter != null) { LOG.info("baseCommitter.class = " + baseCommitter.getClass().getName()); Index: src/java/org/apache/hcatalog/mapreduce/HCatInputFormat.java =================================================================== --- src/java/org/apache/hcatalog/mapreduce/HCatInputFormat.java (revision 1103878) +++ src/java/org/apache/hcatalog/mapreduce/HCatInputFormat.java (working copy) @@ -22,7 +22,7 @@ import org.apache.hadoop.mapreduce.Job; -/** The InputFormat to use to read data from Howl */ +/** The InputFormat to use to read data from HCat */ public class HCatInputFormat extends HCatBaseInputFormat { /** Index: src/java/org/apache/hcatalog/mapreduce/HCatOutputFormat.java =================================================================== --- src/java/org/apache/hcatalog/mapreduce/HCatOutputFormat.java (revision 1103878) +++ src/java/org/apache/hcatalog/mapreduce/HCatOutputFormat.java (working copy) @@ -62,8 +62,8 @@ import org.apache.hcatalog.data.schema.HCatSchema; import org.apache.thrift.TException; -/** The OutputFormat to use to write data to Howl. The key value is ignored and - * and should be given as null. The value is the HowlRecord to write.*/ +/** The OutputFormat to use to write data to HCat. The key value is ignored and + * and should be given as null. The value is the HCatRecord to write.*/ public class HCatOutputFormat extends HCatBaseOutputFormat { /** The directory under which data is initially written for a non partitioned table */ @@ -147,13 +147,13 @@ if(UserGroupInformation.isSecurityEnabled()){ UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); - // check if oozie has set up a howl deleg. token - if so use it + // check if oozie has set up a hcat deleg. token - if so use it TokenSelector tokenSelector = new DelegationTokenSelector(); // TODO: will oozie use a "service" called "oozie" - then instead of // new Text() do new Text("oozie") below - if this change is made also // remember to do: // job.getConfiguration().set(HCAT_KEY_TOKEN_SIGNATURE, "oozie"); - // Also change code in HowlOutputCommitter.cleanupJob() to cancel the + // Also change code in HCatOutputCommitter.cleanupJob() to cancel the // token only if token.service is not "oozie" - remove the condition of // HCAT_KEY_TOKEN_SIGNATURE != null in that code. Token token = tokenSelector.selectToken( @@ -165,9 +165,9 @@ } else { // we did not get token set up by oozie, let's get them ourselves here. 
- // we essentially get a token per unique Output HowlTableInfo - this is + // we essentially get a token per unique Output HCatTableInfo - this is // done because through Pig, setOutput() method is called multiple times - // We want to only get the token once per unique output HowlTableInfo - + // We want to only get the token once per unique output HCatTableInfo - // we cannot just get one token since in multi-query case (> 1 store in 1 job) // or the case when a single pig script results in > 1 jobs, the single // token will get cancelled by the output committer and the subsequent @@ -178,9 +178,9 @@ // cancel. String tokenSignature = getTokenSignature(outputInfo); if(tokenMap.get(tokenSignature) == null) { - // get delegation tokens from howl server and store them into the "job" - // These will be used in the HowlOutputCommitter to publish partitions to - // howl + // get delegation tokens from hcat server and store them into the "job" + // These will be used in the HCatOutputCommitter to publish partitions to + // hcat // when the JobTracker in Hadoop MapReduce starts supporting renewal of // arbitrary tokens, the renewer should be the principal of the JobTracker String tokenStrForm = client.getDelegationToken(ugi.getUserName()); @@ -211,7 +211,7 @@ } - // a signature string to associate with a HowlTableInfo - essentially + // a signature string to associate with a HCatTableInfo - essentially // a concatenation of dbname, tablename and partition keyvalues. private static String getTokenSignature(HCatTableInfo outputInfo) { StringBuilder result = new StringBuilder(""); @@ -312,7 +312,7 @@ try{ fs.setOwner(workFile, null, tblPathStat.getGroup()); } catch(AccessControlException ace){ - // log the messages before ignoring. Currently, logging is not built in Howl. + // log the messages before ignoring. Currently, logging is not built in HCat. } return rw; } Index: src/java/org/apache/hcatalog/mapreduce/HCatRecordWriter.java =================================================================== --- src/java/org/apache/hcatalog/mapreduce/HCatRecordWriter.java (revision 1103878) +++ src/java/org/apache/hcatalog/mapreduce/HCatRecordWriter.java (working copy) @@ -49,7 +49,7 @@ if(partColsToDel == null){ throw new HCatException("It seems that setSchema() is not called on " + - "HowlOutputFormat. Please make sure that method is called."); + "HCatOutputFormat. Please make sure that method is called."); } this.storageDriver = HCatOutputFormat.getOutputDriverInstance(context, jobInfo); Index: src/java/org/apache/hcatalog/mapreduce/InitializeInput.java =================================================================== --- src/java/org/apache/hcatalog/mapreduce/InitializeInput.java (revision 1103878) +++ src/java/org/apache/hcatalog/mapreduce/InitializeInput.java (working copy) @@ -68,7 +68,7 @@ * Set the input to use for the Job. This queries the metadata server with the specified partition predicates, * gets the matching partitions, puts the information in the configuration object. 
* @param job the job object - * @param inputInfo the howl table input info + * @param inputInfo the hcat table input info * @throws Exception */ public static void setInput(Job job, HCatTableInfo inputInfo) throws Exception { @@ -111,12 +111,12 @@ partInfoList.add(partInfo); } - JobInfo howlJobInfo = new JobInfo(inputInfo, tableSchema, partInfoList); - inputInfo.setJobInfo(howlJobInfo); + JobInfo hcatJobInfo = new JobInfo(inputInfo, tableSchema, partInfoList); + inputInfo.setJobInfo(hcatJobInfo); job.getConfiguration().set( HCatConstants.HCAT_KEY_JOB_INFO, - HCatUtil.serialize(howlJobInfo) + HCatUtil.serialize(hcatJobInfo) ); } finally { if (client != null ) { @@ -149,7 +149,7 @@ static PartInfo extractPartInfo(StorageDescriptor sd, Map parameters) throws IOException{ HCatSchema schema = HCatUtil.extractSchemaFromStorageDescriptor(sd); String inputStorageDriverClass = null; - Properties howlProperties = new Properties(); + Properties hcatProperties = new Properties(); if (parameters.containsKey(HCatConstants.HCAT_ISD_CLASS)){ inputStorageDriverClass = parameters.get(HCatConstants.HCAT_ISD_CLASS); }else{ @@ -162,10 +162,10 @@ } for (String key : parameters.keySet()){ if (key.startsWith(HCAT_KEY_PREFIX)){ - howlProperties.put(key, parameters.get(key)); + hcatProperties.put(key, parameters.get(key)); } } - return new PartInfo(schema,inputStorageDriverClass, sd.getLocation(), howlProperties); + return new PartInfo(schema,inputStorageDriverClass, sd.getLocation(), hcatProperties); } @@ -195,14 +195,14 @@ } } - Properties howlProperties = new Properties(); + Properties hcatProperties = new Properties(); for (String key : properties.keySet()){ if (key.startsWith(HCAT_KEY_PREFIX)){ - howlProperties.put(key, properties.get(key)); + hcatProperties.put(key, properties.get(key)); } } - return new StorerInfo(inputSDClass, outputSDClass, howlProperties); + return new StorerInfo(inputSDClass, outputSDClass, hcatProperties); } } Index: src/java/org/apache/hcatalog/mapreduce/PartInfo.java =================================================================== --- src/java/org/apache/hcatalog/mapreduce/PartInfo.java (revision 1103878) +++ src/java/org/apache/hcatalog/mapreduce/PartInfo.java (working copy) @@ -35,8 +35,8 @@ /** The information about which input storage driver to use */ private final String inputStorageDriverClass; - /** Howl-specific properties set at the partition */ - private final Properties howlProperties; + /** HCat-specific properties set at the partition */ + private final Properties hcatProperties; /** The data location. */ private final String location; @@ -45,17 +45,17 @@ private Map partitionValues; /** - * Instantiates a new howl partition info. + * Instantiates a new hcat partition info. * @param partitionSchema the partition schema * @param inputStorageDriverClass the input storage driver class name * @param location the location - * @param howlProperties howl-specific properties at the partition + * @param hcatProperties hcat-specific properties at the partition */ - public PartInfo(HCatSchema partitionSchema, String inputStorageDriverClass, String location, Properties howlProperties){ + public PartInfo(HCatSchema partitionSchema, String inputStorageDriverClass, String location, Properties hcatProperties){ this.partitionSchema = partitionSchema; this.inputStorageDriverClass = inputStorageDriverClass; this.location = location; - this.howlProperties = howlProperties; + this.hcatProperties = hcatProperties; } /** @@ -77,11 +77,11 @@ /** - * Gets the value of howlProperties. 
- * @return the howlProperties + * Gets the value of hcatProperties. + * @return the hcatProperties */ public Properties getInputStorageDriverProperties() { - return howlProperties; + return hcatProperties; } /** Index: src/java/org/apache/hcatalog/mapreduce/JobInfo.java =================================================================== --- src/java/org/apache/hcatalog/mapreduce/JobInfo.java (revision 1103878) +++ src/java/org/apache/hcatalog/mapreduce/JobInfo.java (working copy) @@ -39,15 +39,15 @@ private final List partitions; /** - * Instantiates a new howl job info. + * Instantiates a new hcat job info. * @param tableName the table name * @param tableSchema the table schema * @param partitions the partitions */ - public JobInfo(HCatTableInfo howlTableInfo, HCatSchema tableSchema, + public JobInfo(HCatTableInfo hcatTableInfo, HCatSchema tableSchema, List partitions) { - this.tableName = howlTableInfo.getTableName(); - this.dbName = howlTableInfo.getDatabaseName(); + this.tableName = hcatTableInfo.getTableName(); + this.dbName = hcatTableInfo.getDatabaseName(); this.tableSchema = tableSchema; this.partitions = partitions; } Index: src/java/org/apache/hcatalog/mapreduce/HCatSplit.java =================================================================== --- src/java/org/apache/hcatalog/mapreduce/HCatSplit.java (revision 1103878) +++ src/java/org/apache/hcatalog/mapreduce/HCatSplit.java (working copy) @@ -28,7 +28,7 @@ import org.apache.hcatalog.common.HCatUtil; import org.apache.hcatalog.data.schema.HCatSchema; -/** The HowlSplit wrapper around the InputSplit returned by the underlying InputFormat */ +/** The HCatSplit wrapper around the InputSplit returned by the underlying InputFormat */ class HCatSplit extends InputSplit implements Writable { /** The partition info for the split. */ @@ -37,16 +37,16 @@ /** The split returned by the underlying InputFormat split. */ private InputSplit baseSplit; - /** The schema for the HowlTable */ + /** The schema for the HCatTable */ private HCatSchema tableSchema; /** - * Instantiates a new howl split. + * Instantiates a new hcat split. */ public HCatSplit() { } /** - * Instantiates a new howl split. + * Instantiates a new hcat split. 
* * @param partitionInfo the partition info * @param baseSplit the base split Index: src/java/org/apache/hcatalog/mapreduce/HCatEximInputFormat.java =================================================================== --- src/java/org/apache/hcatalog/mapreduce/HCatEximInputFormat.java (revision 1103878) +++ src/java/org/apache/hcatalog/mapreduce/HCatEximInputFormat.java (working copy) @@ -39,7 +39,7 @@ import org.apache.hcatalog.data.schema.HCatSchema; import org.apache.hcatalog.data.schema.HCatSchemaUtils; -/** The InputFormat to use to read data from Howl */ +/** The InputFormat to use to read data from HCat */ public class HCatEximInputFormat extends HCatBaseInputFormat { /** @@ -52,7 +52,7 @@ * the job object * @param inputInfo * the table input info - * @return two howl schemas, for the table columns and the partition keys + * @return two hcat schemas, for the table columns and the partition keys * @throws IOException * the exception in communicating with the metadata server */ @@ -92,20 +92,20 @@ }else{ throw new IOException("No input storage driver classname found, cannot read partition"); } - Properties howlProperties = new Properties(); + Properties hcatProperties = new Properties(); for (String key : parameters.keySet()){ if (key.startsWith(InitializeInput.HCAT_KEY_PREFIX)){ - howlProperties.put(key, parameters.get(key)); + hcatProperties.put(key, parameters.get(key)); } } - PartInfo partInfo = new PartInfo(schema, inputStorageDriverClass, location + "/data", howlProperties); + PartInfo partInfo = new PartInfo(schema, inputStorageDriverClass, location + "/data", hcatProperties); partInfoList.add(partInfo); } - JobInfo howlJobInfo = new JobInfo(inputInfo, + JobInfo hcatJobInfo = new JobInfo(inputInfo, HCatUtil.getTableSchemaWithPtnCols(table), partInfoList); job.getConfiguration().set( HCatConstants.HCAT_KEY_JOB_INFO, - HCatUtil.serialize(howlJobInfo)); + HCatUtil.serialize(hcatJobInfo)); List rv = new ArrayList(2); rv.add(HCatSchemaUtils.getHCatSchema(table.getSd().getCols())); rv.add(HCatSchemaUtils.getHCatSchema(partCols)); Index: src/java/org/apache/hcatalog/mapreduce/HCatEximOutputFormat.java =================================================================== --- src/java/org/apache/hcatalog/mapreduce/HCatEximOutputFormat.java (revision 1103878) +++ src/java/org/apache/hcatalog/mapreduce/HCatEximOutputFormat.java (working copy) @@ -54,10 +54,10 @@ import org.apache.hcatalog.rcfile.RCFileOutputDriver; /** - * The OutputFormat to use to write data to Howl without a howl server. This can then - * be imported into a howl instance, or used with a HowlEximInputFormat. As in - * HowlOutputFormat, the key value is ignored and - * and should be given as null. The value is the HowlRecord to write. + * The OutputFormat to use to write data to HCat without a hcat server. This can then + * be imported into a hcat instance, or used with a HCatEximInputFormat. As in + * HCatOutputFormat, the key value is ignored and + * and should be given as null. The value is the HCatRecord to write. 
*/ public class HCatEximOutputFormat extends HCatBaseOutputFormat { Index: src/java/org/apache/hcatalog/cli/SemanticAnalysis/AlterTableFileFormatHook.java =================================================================== --- src/java/org/apache/hcatalog/cli/SemanticAnalysis/AlterTableFileFormatHook.java (revision 1103878) +++ src/java/org/apache/hcatalog/cli/SemanticAnalysis/AlterTableFileFormatHook.java (working copy) @@ -87,16 +87,16 @@ List> rootTasks) throws SemanticException { Map partSpec = ((DDLWork)rootTasks.get(rootTasks.size()-1).getWork()).getAlterTblDesc().getPartSpec(); - Map howlProps = new HashMap(2); - howlProps.put(HCatConstants.HCAT_ISD_CLASS, inDriver); - howlProps.put(HCatConstants.HCAT_OSD_CLASS, outDriver); + Map hcatProps = new HashMap(2); + hcatProps.put(HCatConstants.HCAT_ISD_CLASS, inDriver); + hcatProps.put(HCatConstants.HCAT_OSD_CLASS, outDriver); try { Hive db = context.getHive(); Table tbl = db.getTable(tableName); if(partSpec == null){ // File format is for table; not for partition. - tbl.getTTable().getParameters().putAll(howlProps); + tbl.getTTable().getParameters().putAll(hcatProps); db.alterTable(tableName, tbl); }else{ Partition part = db.getPartition(tbl,partSpec,false); @@ -104,7 +104,7 @@ if(partParams == null){ partParams = new HashMap(); } - partParams.putAll(howlProps); + partParams.putAll(hcatProps); part.getTPartition().setParameters(partParams); db.alterPartition(tableName, part); } Index: src/java/org/apache/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java =================================================================== --- src/java/org/apache/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java (revision 1103878) +++ src/java/org/apache/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java (working copy) @@ -54,7 +54,7 @@ this.ast = ast; switch (ast.getToken().getType()) { - // Howl wants to intercept following tokens and special-handle them. + // HCat wants to intercept following tokens and special-handle them. case HiveParser.TOK_CREATETABLE: hook = new CreateTableHook(); return hook.preAnalyze(context, ast); @@ -63,13 +63,13 @@ hook = new CreateDatabaseHook(); return hook.preAnalyze(context, ast); - // DML commands used in Howl where we use the same implementation as default Hive. + // DML commands used in HCat where we use the same implementation as default Hive. case HiveParser.TOK_SHOWDATABASES: case HiveParser.TOK_DROPDATABASE: case HiveParser.TOK_SWITCHDATABASE: return ast; - // Howl will allow these operations to be performed since they are DDL statements. + // HCat will allow these operations to be performed since they are DDL statements. case HiveParser.TOK_DROPTABLE: case HiveParser.TOK_DESCTABLE: case HiveParser.TOK_ALTERTABLE_ADDCOLS: Index: src/java/org/apache/hcatalog/cli/SemanticAnalysis/CreateTableHook.java =================================================================== --- src/java/org/apache/hcatalog/cli/SemanticAnalysis/CreateTableHook.java (revision 1103878) +++ src/java/org/apache/hcatalog/cli/SemanticAnalysis/CreateTableHook.java (working copy) @@ -100,7 +100,7 @@ // throw new SemanticException(he); // } // if(!(tblProps.containsKey(InitializeInput.HOWL_ISD_CLASS) && tblProps.containsKey(InitializeInput.HOWL_OSD_CLASS))){ -// throw new SemanticException("Operation not supported. Table "+likeTableName+" should have been created through Howl. Seems like its not."); +// throw new SemanticException("Operation not supported. Table "+likeTableName+" should have been created through HCat. 
Seems like its not."); // } // return ast; } @@ -168,7 +168,7 @@ if(desc == null){ // Desc will be null if its CREATE TABLE LIKE. Desc will be contained - // in CreateTableLikeDesc. Currently, Howl disallows CTLT in pre-hook. + // in CreateTableLikeDesc. Currently, HCat disallows CTLT in pre-hook. // So, desc can never be null. return; } Index: src/java/org/apache/hcatalog/common/ErrorType.java =================================================================== --- src/java/org/apache/hcatalog/common/ErrorType.java (revision 1103878) +++ src/java/org/apache/hcatalog/common/ErrorType.java (working copy) @@ -18,21 +18,21 @@ package org.apache.hcatalog.common; /** - * Enum type representing the various errors throws by Howl. + * Enum type representing the various errors thrown by HCat. */ public enum ErrorType { - /* Howl Input Format related errors 1000 - 1999 */ + /* HCat Input Format related errors 1000 - 1999 */ ERROR_DB_INIT (1000, "Error initializing database session"), ERROR_EXCEED_MAXPART (1001, "Query result exceeded maximum number of partitions allowed"), - /* Howl Output Format related errors 2000 - 2999 */ + /* HCat Output Format related errors 2000 - 2999 */ ERROR_INVALID_TABLE (2000, "Table specified does not exist"), ERROR_SET_OUTPUT (2001, "Error setting output information"), ERROR_DUPLICATE_PARTITION (2002, "Partition already present with given partition key values"), ERROR_NON_EMPTY_TABLE (2003, "Non-partitioned table already contains data"), - ERROR_NOT_INITIALIZED (2004, "HowlOutputFormat not initialized, setOutput has to be called"), + ERROR_NOT_INITIALIZED (2004, "HCatOutputFormat not initialized, setOutput has to be called"), ERROR_INIT_STORAGE_DRIVER (2005, "Error initializing output storage driver instance"), ERROR_PUBLISHING_PARTITION (2006, "Error adding partition to metastore"), ERROR_SCHEMA_COLUMN_MISMATCH (2007, "Invalid column position in partition schema"), @@ -48,7 +48,7 @@ /* Miscellaneous errors, range 9000 - 9998 */ ERROR_UNIMPLEMENTED (9000, "Functionality currently unimplemented"), - ERROR_INTERNAL_EXCEPTION (9001, "Exception occurred while processing Howl request"); + ERROR_INTERNAL_EXCEPTION (9001, "Exception occurred while processing HCat request"); /** The error code. */ private int errorCode; Index: src/java/org/apache/hcatalog/common/HCatConstants.java =================================================================== --- src/java/org/apache/hcatalog/common/HCatConstants.java (revision 1103878) +++ src/java/org/apache/hcatalog/common/HCatConstants.java (working copy) @@ -56,7 +56,7 @@ // IMPORTANT IMPORTANT IMPORTANT!!!!! //The keys used to store info into the job Configuration. - //If any new keys are added, the HowlStorer needs to be updated. The HowlStorer + //If any new keys are added, the HCatStorer needs to be updated.
The HCatStorer //updates the job configuration in the backend to insert these keys to avoid //having to call setOutput from the backend (which would cause a metastore call //from the map jobs) Index: src/java/org/apache/hcatalog/common/HCatUtil.java =================================================================== --- src/java/org/apache/hcatalog/common/HCatUtil.java (revision 1103878) +++ src/java/org/apache/hcatalog/common/HCatUtil.java (working copy) @@ -131,12 +131,12 @@ return schema; } - public static List getFieldSchemaList(List howlFields) { - if(howlFields == null) { + public static List getFieldSchemaList(List hcatFields) { + if(hcatFields == null) { return null; } else { List result = new ArrayList(); - for(HCatFieldSchema f: howlFields) { + for(HCatFieldSchema f: hcatFields) { result.add(HCatSchemaUtils.getFieldSchema(f)); } return result; Index: src/java/org/apache/hcatalog/common/HCatException.java =================================================================== --- src/java/org/apache/hcatalog/common/HCatException.java (revision 1103878) +++ src/java/org/apache/hcatalog/common/HCatException.java (working copy) @@ -20,7 +20,7 @@ import java.io.IOException; /** - * Class representing exceptions thrown by Howl. + * Class representing exceptions thrown by HCat. */ public class HCatException extends IOException { @@ -30,7 +30,7 @@ private final ErrorType errorType; /** - * Instantiates a new howl exception. + * Instantiates a new hcat exception. * @param errorType the error type */ public HCatException(ErrorType errorType) { @@ -39,7 +39,7 @@ /** - * Instantiates a new howl exception. + * Instantiates a new hcat exception. * @param errorType the error type * @param cause the cause */ @@ -48,7 +48,7 @@ } /** - * Instantiates a new howl exception. + * Instantiates a new hcat exception. * @param errorType the error type * @param extraMessage extra messages to add to the message string */ @@ -57,7 +57,7 @@ } /** - * Instantiates a new howl exception. + * Instantiates a new hcat exception. * @param errorType the error type * @param extraMessage extra messages to add to the message string * @param cause the cause @@ -74,7 +74,7 @@ //TODO : remove default error type constructors after all exceptions //are changed to use error types /** - * Instantiates a new howl exception. + * Instantiates a new hcat exception. * @param message the error message */ public HCatException(String message) { @@ -82,7 +82,7 @@ } /** - * Instantiates a new howl exception. + * Instantiates a new hcat exception. 
* @param message the error message * @param cause the cause */ Index: src/java/org/apache/hcatalog/data/schema/HCatSchema.java =================================================================== --- src/java/org/apache/hcatalog/data/schema/HCatSchema.java (revision 1103878) +++ src/java/org/apache/hcatalog/data/schema/HCatSchema.java (working copy) @@ -110,15 +110,15 @@ return fieldSchemas.size(); } - public void remove(final HCatFieldSchema howlFieldSchema) throws HCatException { + public void remove(final HCatFieldSchema hcatFieldSchema) throws HCatException { - if(!fieldSchemas.contains(howlFieldSchema)){ - throw new HCatException("Attempt to delete a non-existent column from Howl Schema: "+ howlFieldSchema); + if(!fieldSchemas.contains(hcatFieldSchema)){ + throw new HCatException("Attempt to delete a non-existent column from HCat Schema: "+ hcatFieldSchema); } - fieldSchemas.remove(howlFieldSchema); - fieldPositionMap.remove(howlFieldSchema); - fieldNames.remove(howlFieldSchema.getName()); + fieldSchemas.remove(hcatFieldSchema); + fieldPositionMap.remove(hcatFieldSchema); + fieldNames.remove(hcatFieldSchema.getName()); } @Override Index: src/java/org/apache/hcatalog/data/schema/HCatSchemaUtils.java =================================================================== --- src/java/org/apache/hcatalog/data/schema/HCatSchemaUtils.java (revision 1103878) +++ src/java/org/apache/hcatalog/data/schema/HCatSchemaUtils.java (working copy) @@ -209,13 +209,13 @@ return outerSchema.get(0).getStructSubSchema(); } - public static FieldSchema getFieldSchema(HCatFieldSchema howlFieldSchema){ - return new FieldSchema(howlFieldSchema.getName(),howlFieldSchema.getTypeString(),howlFieldSchema.getComment()); + public static FieldSchema getFieldSchema(HCatFieldSchema hcatFieldSchema){ + return new FieldSchema(hcatFieldSchema.getName(),hcatFieldSchema.getTypeString(),hcatFieldSchema.getComment()); } - public static List getFieldSchemas(List howlFieldSchemas){ + public static List getFieldSchemas(List hcatFieldSchemas){ List lfs = new ArrayList(); - for (HCatFieldSchema hfs : howlFieldSchemas){ + for (HCatFieldSchema hfs : hcatFieldSchemas){ lfs.add(getFieldSchema(hfs)); } return lfs; Index: src/java/org/apache/hcatalog/data/HCatRecordable.java =================================================================== --- src/java/org/apache/hcatalog/data/HCatRecordable.java (revision 1103878) +++ src/java/org/apache/hcatalog/data/HCatRecordable.java (working copy) @@ -22,7 +22,7 @@ import org.apache.hadoop.io.WritableComparable; /** - * Interface that determines whether we can implement a HowlRecord on top of it + * Interface that determines whether we can implement a HCatRecord on top of it */ public interface HCatRecordable extends WritableComparable { @@ -34,7 +34,7 @@ Object get(int fieldNum); /** - * Gets all the fields of the howl record. + * Gets all the fields of the hcat record. * @return the list of fields */ List getAll(); @@ -47,7 +47,7 @@ void set(int fieldNum, Object value); /** - * Gets the size of the howl record. + * Gets the size of the hcat record. 
* @return the size */ int size(); Index: src/java/org/apache/hcatalog/data/HCatArrayBag.java =================================================================== --- src/java/org/apache/hcatalog/data/HCatArrayBag.java (revision 1103878) +++ src/java/org/apache/hcatalog/data/HCatArrayBag.java (working copy) @@ -38,11 +38,11 @@ DataBag convertedBag = null; // List tupleList = null; - public class HowlArrayBagIterator implements Iterator { + public class HCatArrayBagIterator implements Iterator { Iterator iter = null; - public HowlArrayBagIterator(List rawItemList) { + public HCatArrayBagIterator(List rawItemList) { iter = rawItemList.iterator(); } @@ -123,7 +123,7 @@ if (convertedBag != null){ return convertedBag.iterator(); }else{ - return new HowlArrayBagIterator(rawItemList); + return new HCatArrayBagIterator(rawItemList); } } Index: src/java/org/apache/hcatalog/data/HCatRecord.java =================================================================== --- src/java/org/apache/hcatalog/data/HCatRecord.java (revision 1103878) +++ src/java/org/apache/hcatalog/data/HCatRecord.java (working copy) @@ -27,7 +27,7 @@ /** * Abstract class exposing get and set semantics for basic record usage. * Note : - * HowlRecord is designed only to be used as in-memory representation only. + * HCatRecord is designed to be used as an in-memory representation only. * Don't use it to store data on the physical device. */ public abstract class HCatRecord implements HCatRecordable { Index: bin/hcat.sh =================================================================== --- bin/hcat.sh (revision 1103878) +++ bin/hcat.sh (working copy) @@ -16,11 +16,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -HOWL_DIR=`dirname "$0"` +HCAT_DIR=`dirname "$0"` -HOWL_JAR_LOC=`find . -name "hcatalog*.jar"` +HCAT_JAR_LOC=`find . -name "hcatalog*.jar"` -HADOOP_CLASSPATH=$HADOOP_CLASSPATH:${HOWL_JAR_LOC}:../lib/commons-cli-2.0-SNAPSHOT.jar:../build/cli/hive-cli-0.7.0.jar:../ql/lib/antlr-runtime-3.0.1.jar +HADOOP_CLASSPATH=$HADOOP_CLASSPATH:${HCAT_JAR_LOC}:../lib/commons-cli-2.0-SNAPSHOT.jar:../build/cli/hive-cli-0.7.0.jar:../ql/lib/antlr-runtime-3.0.1.jar export HADOOP_CLASSPATH=$HADOOP_CLASSPATH @@ -32,7 +32,7 @@ export HADOOP_OPTS=$HADOOP_OPTS -exec $HADOOP_HOME/bin/hadoop jar ${HOWL_JAR_LOC} org.apache.hcatalog.cli.HCatCli "$@" +exec $HADOOP_HOME/bin/hadoop jar ${HCAT_JAR_LOC} org.apache.hcatalog.cli.HCatCli "$@" # Above is the recommended way to launch hcatalog cli. If it doesnt work, you can try the following: -# java -Dhive.metastore.uris=thrift://localhost:9083 -cp ../lib/commons-logging-1.0.4.jar:../build/hadoopcore/hadoop-0.20.0/hadoop-0.20.0-core.jar:../lib/commons-cli-2.0-SNAPSHOT.jar:../build/cli/hive-cli-0.7.0.jar:../ql/lib/antlr-runtime-3.0.1.jar:$HOWL_JAR org.apache.hcatalog.cli.HCatCli "$@" +# java -Dhive.metastore.uris=thrift://localhost:9083 -cp ../lib/commons-logging-1.0.4.jar:../build/hadoopcore/hadoop-0.20.0/hadoop-0.20.0-core.jar:../lib/commons-cli-2.0-SNAPSHOT.jar:../build/cli/hive-cli-0.7.0.jar:../ql/lib/antlr-runtime-3.0.1.jar:$HCAT_JAR_LOC org.apache.hcatalog.cli.HCatCli "$@"