Index: shims/src/20/java/org/apache/hadoop/mapred/TempletonJobTracker.java
===================================================================
--- shims/src/20/java/org/apache/hadoop/mapred/TempletonJobTracker.java	(revision 0)
+++ shims/src/20/java/org/apache/hadoop/mapred/TempletonJobTracker.java	(revision 0)
@@ -0,0 +1,95 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.mapred;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.UserGroupInformation;
+
+/*
+ * Communicate with the JobTracker as a specific user.
+ */
+public class TempletonJobTracker {
+    private JobSubmissionProtocol cnx;
+
+    /**
+     * Create a connection to the Job Tracker.
+     */
+    public TempletonJobTracker(InetSocketAddress addr,
+                               Configuration conf)
+        throws IOException {
+        UserGroupInformation ugi = UserGroupInformation.getLoginUser();
+        cnx = (JobSubmissionProtocol)
+            RPC.getProxy(JobSubmissionProtocol.class,
+                         JobSubmissionProtocol.versionID,
+                         addr,
+                         ugi,
+                         conf,
+                         NetUtils.getSocketFactory(conf,
+                                                   JobSubmissionProtocol.class));
+    }
+
+    /**
+     * Grab a handle to a job that is already known to the JobTracker.
+     *
+     * @return Profile of the job, or null if not found.
+     */
+    public JobProfile getJobProfile(JobID jobid)
+        throws IOException {
+        return cnx.getJobProfile(jobid);
+    }
+
+    /**
+     * Grab a handle to a job that is already known to the JobTracker.
+     *
+     * @return Status of the job, or null if not found.
+     */
+    public JobStatus getJobStatus(JobID jobid)
+        throws IOException {
+        return cnx.getJobStatus(jobid);
+    }
+
+
+    /**
+     * Kill a job.
+     */
+    public void killJob(JobID jobid)
+        throws IOException {
+        cnx.killJob(jobid);
+    }
+
+    /**
+     * Get all the jobs submitted.
+     */
+    public JobStatus[] getAllJobs()
+        throws IOException {
+        return cnx.getAllJobs();
+    }
+
+    /**
+     * Close the connection to the Job Tracker.
+     */
+    public void close() {
+        RPC.stopProxy(cnx);
+    }
+}
Index: shims/src/23/java/org/apache/hadoop/mapred/TempletonJobTracker.java
===================================================================
--- shims/src/23/java/org/apache/hadoop/mapred/TempletonJobTracker.java	(revision 0)
+++ shims/src/23/java/org/apache/hadoop/mapred/TempletonJobTracker.java	(revision 0)
@@ -0,0 +1,96 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.mapred;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+
+/*
+ * Communicate with the JobTracker as a specific user.
+ */
+public class TempletonJobTracker {
+    private JobClient jc;
+
+    /**
+     * Create a connection to the Job Tracker.
+     */
+    public TempletonJobTracker(InetSocketAddress addr,
+                               Configuration conf)
+        throws IOException {
+
+        jc = new JobClient(conf);
+    }
+
+    /**
+     * Grab a handle to a job that is already known to the JobTracker.
+     *
+     * @return Profile of the job, or null if not found.
+     */
+    public JobProfile getJobProfile(JobID jobid)
+        throws IOException {
+        RunningJob rj = jc.getJob(jobid);
+        JobStatus jobStatus = rj.getJobStatus();
+        JobProfile jobProfile = new JobProfile(jobStatus.getUsername(), jobStatus.getJobID(),
+            jobStatus.getJobFile(), jobStatus.getTrackingUrl(), jobStatus.getJobName());
+        return jobProfile;
+    }
+
+    /**
+     * Grab a handle to a job that is already known to the JobTracker.
+     *
+     * @return Status of the job, or null if not found.
+     */
+    public JobStatus getJobStatus(JobID jobid)
+        throws IOException {
+        RunningJob rj = jc.getJob(jobid);
+        JobStatus jobStatus = rj.getJobStatus();
+        return jobStatus;
+    }
+
+
+    /**
+     * Kill a job.
+     */
+    public void killJob(JobID jobid)
+        throws IOException {
+        RunningJob rj = jc.getJob(jobid);
+        rj.killJob();
+    }
+
+    /**
+     * Get all the jobs submitted.
+     */
+    public JobStatus[] getAllJobs()
+        throws IOException {
+        return jc.getAllJobs();
+    }
+
+    /**
+     * Close the connection to the Job Tracker.
+     */
+    public void close() {
+        try {
+            jc.close();
+        } catch (IOException e) {
+        }
+    }
+}
Index: core/src/test/java/org/apache/hcatalog/HcatTestUtils.java
===================================================================
--- core/src/test/java/org/apache/hcatalog/HcatTestUtils.java	(revision 1458071)
+++ core/src/test/java/org/apache/hcatalog/HcatTestUtils.java	(working copy)
@@ -97,11 +97,4 @@
         }
     }
-
-    public static boolean isHadoop23() {
-        String version = org.apache.hadoop.util.VersionInfo.getVersion();
-        if (version.matches("\\b0\\.23\\..+\\b"))
-            return true;
-        return false;
-    }
 }
Index: core/src/test/java/org/apache/hcatalog/mapreduce/TestHCatPartitionPublish.java
===================================================================
--- core/src/test/java/org/apache/hcatalog/mapreduce/TestHCatPartitionPublish.java	(revision 1458071)
+++ core/src/test/java/org/apache/hcatalog/mapreduce/TestHCatPartitionPublish.java	(working copy)
@@ -75,6 +75,9 @@
     @BeforeClass
     public static void setup() throws Exception {
        Configuration conf = new Configuration(true);
+        conf.set("yarn.scheduler.capacity.root.queues", "default");
+        conf.set("yarn.scheduler.capacity.root.default.capacity", "100");
+
        fs = FileSystem.get(conf);
        System.setProperty("hadoop.log.dir", new File(fs.getWorkingDirectory()
            .toString(), "/logs").getAbsolutePath());
Index: core/src/test/java/org/apache/hcatalog/mapreduce/HCatMapReduceTest.java
===================================================================
--- core/src/test/java/org/apache/hcatalog/mapreduce/HCatMapReduceTest.java	(revision 1458071)
+++ core/src/test/java/org/apache/hcatalog/mapreduce/HCatMapReduceTest.java	(working copy)
@@ -51,7 +51,6 @@
 import org.apache.hadoop.mapreduce.Mapper;
 import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
 import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
-import org.apache.hcatalog.HcatTestUtils;
 import org.apache.hcatalog.common.HCatConstants;
 import org.apache.hcatalog.common.HCatUtil;
 import org.apache.hcatalog.data.DefaultHCatRecord;
@@ -281,7 +280,7 @@
                 .findCounter("FILE_BYTES_READ").getValue() > 0);
         }
 
-        if (!HcatTestUtils.isHadoop23()) {
+        if (!HCatUtil.isHadoop23()) {
             // Local mode outputcommitter hook is not invoked in Hadoop 1.x
             if (success) {
                 new FileOutputCommitterContainer(job, null).commitJob(job);
Index: core/src/test/java/org/apache/hcatalog/mapreduce/TestSequenceFileReadWrite.java
===================================================================
--- core/src/test/java/org/apache/hcatalog/mapreduce/TestSequenceFileReadWrite.java	(revision 1458071)
+++ core/src/test/java/org/apache/hcatalog/mapreduce/TestSequenceFileReadWrite.java	(working copy)
@@ -170,7 +170,7 @@
         HCatOutputFormat.setSchema(job, getSchema());
         job.setNumReduceTasks(0);
         assertTrue(job.waitForCompletion(true));
-        if (!HcatTestUtils.isHadoop23()) {
+        if (!HCatUtil.isHadoop23()) {
             new FileOutputCommitterContainer(job, null).commitJob(job);
         }
         assertTrue(job.isSuccessful());
@@ -217,7 +217,7 @@
         job.setOutputFormatClass(HCatOutputFormat.class);
         HCatOutputFormat.setSchema(job, getSchema());
         assertTrue(job.waitForCompletion(true));
-        if (!HcatTestUtils.isHadoop23()) {
+        if (!HCatUtil.isHadoop23()) {
             new FileOutputCommitterContainer(job, null).commitJob(job);
         }
         assertTrue(job.isSuccessful());
Index: core/src/test/java/org/apache/hcatalog/mapreduce/TestMultiOutputFormat.java
===================================================================
--- core/src/test/java/org/apache/hcatalog/mapreduce/TestMultiOutputFormat.java	(revision 1458071)
+++ core/src/test/java/org/apache/hcatalog/mapreduce/TestMultiOutputFormat.java	(working copy)
@@ -23,6 +23,8 @@
 import java.io.FileWriter;
 import java.io.IOException;
 import java.net.URI;
+import java.util.Arrays;
+import java.util.List;
 import java.util.Random;
 import java.util.StringTokenizer;
 
@@ -70,6 +72,9 @@
     public static void setup() throws IOException {
         createWorkDir();
         Configuration conf = new Configuration(true);
+        conf.set("yarn.scheduler.capacity.root.queues", "default");
+        conf.set("yarn.scheduler.capacity.root.default.capacity", "100");
+
         fs = FileSystem.get(conf);
         System.setProperty("hadoop.log.dir", new File(workDir, "/logs").getAbsolutePath());
         // LocalJobRunner does not work with mapreduce OutputCommitter. So need
@@ -134,12 +139,14 @@
         // Verify if the configs are merged
         Path[] fileClassPaths = DistributedCache.getFileClassPaths(job.getConfiguration());
-        Assert.assertArrayEquals(new Path[]{new Path(inputFile), new Path(dummyFile)},
-            fileClassPaths);
-        URI[] expectedCacheFiles = new URI[]{new Path(inputFile).makeQualified(fs).toUri(),
-            new Path(dummyFile).makeQualified(fs).toUri()};
+        List<Path> fileClassPathsList = Arrays.asList(fileClassPaths);
+        Assert.assertTrue(fileClassPathsList.contains(new Path(inputFile)));
+        Assert.assertTrue(fileClassPathsList.contains(new Path(dummyFile)));
+
         URI[] cacheFiles = DistributedCache.getCacheFiles(job.getConfiguration());
-        Assert.assertArrayEquals(expectedCacheFiles, cacheFiles);
+        List<URI> cacheFilesList = Arrays.asList(cacheFiles);
+        Assert.assertTrue(cacheFilesList.contains(new Path(inputFile).makeQualified(fs).toUri()));
+        Assert.assertTrue(cacheFilesList.contains(new Path(dummyFile).makeQualified(fs).toUri()));
 
         Assert.assertTrue(job.waitForCompletion(true));
Index: core/src/test/java/org/apache/hcatalog/mapreduce/TestHCatMultiOutputFormat.java
===================================================================
--- core/src/test/java/org/apache/hcatalog/mapreduce/TestHCatMultiOutputFormat.java	(revision 1458071)
+++ core/src/test/java/org/apache/hcatalog/mapreduce/TestHCatMultiOutputFormat.java	(working copy)
@@ -175,6 +175,9 @@
         // LocalJobRunner does not work with mapreduce OutputCommitter. So need
         // to use MiniMRCluster. MAPREDUCE-2350
         Configuration conf = new Configuration(true);
+        conf.set("yarn.scheduler.capacity.root.queues", "default");
+        conf.set("yarn.scheduler.capacity.root.default.capacity", "100");
+
         FileSystem fs = FileSystem.get(conf);
         System.setProperty("hadoop.log.dir", new File(workDir, "/logs").getAbsolutePath());
         mrCluster = new MiniMRCluster(1, fs.getUri().toString(), 1, null, null,
Index: core/src/test/java/org/apache/hcatalog/mapreduce/TestHCatDynamicPartitioned.java
===================================================================
--- core/src/test/java/org/apache/hcatalog/mapreduce/TestHCatDynamicPartitioned.java	(revision 1458071)
+++ core/src/test/java/org/apache/hcatalog/mapreduce/TestHCatDynamicPartitioned.java	(working copy)
@@ -23,11 +23,13 @@
 import java.util.ArrayList;
 import java.util.List;
 
+import junit.framework.Assert;
+
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.mapreduce.Job;
-import org.apache.hcatalog.HcatTestUtils;
+import org.apache.hcatalog.common.HCatUtil;
 import org.apache.hcatalog.common.ErrorType;
 import org.apache.hcatalog.common.HCatConstants;
 import org.apache.hcatalog.common.HCatException;
@@ -141,20 +143,23 @@
         try {
             generateWriteRecords(NUM_RECORDS, NUM_PARTITIONS, 0);
             Job job = runMRCreate(null, dataColumns, writeRecords, NUM_RECORDS, false);
-            if (HcatTestUtils.isHadoop23()) {
-                new FileOutputCommitterContainer(job, null).cleanupJob(job);
+
+            if (HCatUtil.isHadoop23()) {
+                Assert.assertTrue(job.isSuccessful()==false);
             }
         } catch (IOException e) {
             exc = e;
         }
-        assertTrue(exc != null);
-        assertTrue(exc instanceof HCatException);
-        assertTrue("Got exception of type [" + ((HCatException) exc).getErrorType().toString()
-            + "] Expected ERROR_PUBLISHING_PARTITION or ERROR_MOVE_FAILED",
-            (ErrorType.ERROR_PUBLISHING_PARTITION == ((HCatException) exc).getErrorType())
-                || (ErrorType.ERROR_MOVE_FAILED == ((HCatException) exc).getErrorType())
-        );
+
+        if (!HCatUtil.isHadoop23()) {
+            assertTrue(exc != null);
+            assertTrue(exc instanceof HCatException);
+            assertTrue("Got exception of type [" + ((HCatException) exc).getErrorType().toString()
+                + "] Expected ERROR_PUBLISHING_PARTITION or ERROR_MOVE_FAILED",
+                (ErrorType.ERROR_PUBLISHING_PARTITION == ((HCatException) exc).getErrorType())
+                    || (ErrorType.ERROR_MOVE_FAILED == ((HCatException) exc).getErrorType())
+            );
+        }
     }
 
     //TODO 1.0 miniCluster is slow this test times out, make it work
Index: core/src/main/java/org/apache/hcatalog/mapreduce/MultiOutputFormat.java
===================================================================
--- core/src/main/java/org/apache/hcatalog/mapreduce/MultiOutputFormat.java	(revision 1458071)
+++ core/src/main/java/org/apache/hcatalog/mapreduce/MultiOutputFormat.java	(working copy)
@@ -47,6 +47,7 @@
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.hadoop.mapreduce.TaskInputOutputContext;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hcatalog.common.HCatUtil;
 import org.apache.hcatalog.shims.HCatHadoopShims;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -152,8 +153,14 @@
         configsToMerge.put("tmparchives", COMMA_DELIM);
         configsToMerge.put(HCatHadoopShims.Instance.get().getPropertyName(HCatHadoopShims.PropertyName.CACHE_ARCHIVES), COMMA_DELIM);
         configsToMerge.put(HCatHadoopShims.Instance.get().getPropertyName(HCatHadoopShims.PropertyName.CACHE_FILES), COMMA_DELIM);
configsToMerge.put("mapred.job.classpath.archives", System.getProperty("path.separator")); - configsToMerge.put("mapred.job.classpath.files", System.getProperty("path.separator")); + String fileSep; + if (HCatUtil.isHadoop23()) { + fileSep = ","; + } else { + fileSep = System.getProperty("path.separator"); + } + configsToMerge.put("mapred.job.classpath.archives", fileSep); + configsToMerge.put("mapred.job.classpath.files", fileSep); } /** Index: core/src/main/java/org/apache/hcatalog/common/HCatUtil.java =================================================================== --- core/src/main/java/org/apache/hcatalog/common/HCatUtil.java (revision 1458071) +++ core/src/main/java/org/apache/hcatalog/common/HCatUtil.java (working copy) @@ -615,4 +615,12 @@ jobConf.set(entry.getKey(), entry.getValue()); } } + + + public static boolean isHadoop23() { + String version = org.apache.hadoop.util.VersionInfo.getVersion(); + if (version.matches("\\b0\\.23\\..+\\b")||version.matches("\\b2\\..*")) + return true; + return false; + } } Index: core/pom.xml =================================================================== --- core/pom.xml (revision 1458071) +++ core/pom.xml (working copy) @@ -89,13 +89,5 @@ ${hive.version} compile - - - - org.apache.pig - pig - ${pig.version} - test - Index: pom.xml =================================================================== --- pom.xml (revision 1458071) +++ pom.xml (working copy) @@ -27,18 +27,19 @@ 2.4 11.0.2 1.0.3 - 0.23.3 - 0.92.0 + 2.0.3-alpha + 0.94.5 ${project.version} - 0.10.0 + 0.11.0-SNAPSHOT 1.8.3 1.14 7.6.0.v20120127 1.1 - 0.8.0 + 0.10.1 1.6.1 1.4 3.4.3 + 2.1.2 @@ -83,6 +84,12 @@ ${hadoop20.version} test + + org.apache.pig + pig + ${pig.version} + compile + @@ -108,10 +115,63 @@ org.apache.hadoop + hadoop-hdfs + ${hadoop23.version} + tests + compile + + + org.apache.hadoop hadoop-mapreduce-client-core ${hadoop23.version} compile + + org.apache.hadoop + hadoop-mapreduce-client-jobclient + ${hadoop23.version} + tests + compile + + + org.apache.hadoop + hadoop-yarn-server-tests + ${hadoop23.version} + tests + compile + + + org.apache.hadoop + hadoop-mapreduce-client-app + ${hadoop23.version} + compile + + + org.apache.hadoop + hadoop-common + ${hadoop23.version} + tests + compile + + + org.apache.hadoop + hadoop-mapreduce-client-hs + ${hadoop23.version} + compile + + + org.apache.hadoop + hadoop-minicluster + ${hadoop23.version} + compile + + + org.apache.pig + pig + ${pig.version} + h2 + compile + Index: build.xml =================================================================== --- build.xml (revision 1458071) +++ build.xml (working copy) @@ -69,13 +69,13 @@ - + - + @@ -192,7 +192,7 @@ - + Index: storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/ManyMiniCluster.java =================================================================== --- storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/ManyMiniCluster.java (revision 1458071) +++ storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/ManyMiniCluster.java (working copy) @@ -210,6 +210,8 @@ jobConf = new JobConf(); jobConf.setInt("mapred.submit.replication", 1); + jobConf.set("yarn.scheduler.capacity.root.queues", "default"); + jobConf.set("yarn.scheduler.capacity.root.default.capacity", "100"); //conf.set("hadoop.job.history.location",new File(workDir).getAbsolutePath()+"/history"); System.setProperty("hadoop.log.dir", new File(workDir, "/logs").getAbsolutePath()); Index: storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/SkeletonHBaseTest.java 
===================================================================
--- storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/SkeletonHBaseTest.java	(revision 1458071)
+++ storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/SkeletonHBaseTest.java	(working copy)
@@ -30,6 +30,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
@@ -203,10 +204,12 @@
             } finally {
                 System.out.println("Trying to cleanup: " + testDir);
                 try {
-                    FileUtil.fullyDelete(new File(testDir));
+                    FileSystem fs = FileSystem.get(jobConf);
+                    fs.delete(new Path(testDir), true);
                 } catch (IOException e) {
                     throw new IllegalStateException("Failed to cleanup test dir", e);
                 }
+
             }
         }
     }
Index: storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/TestHBaseBulkOutputFormat.java
===================================================================
--- storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/TestHBaseBulkOutputFormat.java	(revision 1458071)
+++ storage-handlers/hbase/src/test/org/apache/hcatalog/hbase/TestHBaseBulkOutputFormat.java	(working copy)
@@ -183,6 +183,8 @@
         //create table
         conf.set(HBaseConstants.PROPERTY_OUTPUT_TABLE_NAME_KEY, tableName);
+        conf.set("yarn.scheduler.capacity.root.queues", "default");
+        conf.set("yarn.scheduler.capacity.root.default.capacity", "100");
         createTable(tableName, new String[]{familyName});
 
         String data[] = {"1,english:one,spanish:uno",
Index: storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/HBaseBulkOutputFormat.java
===================================================================
--- storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/HBaseBulkOutputFormat.java	(revision 1458071)
+++ storage-handlers/hbase/src/java/org/apache/hcatalog/hbase/HBaseBulkOutputFormat.java	(working copy)
@@ -144,7 +144,7 @@
         @Override
         public void commitTask(TaskAttemptContext taskContext) throws IOException {
-            baseOutputCommitter.commitTask(taskContext);
+            // baseOutputCommitter.commitTask(taskContext);
         }
 
         @Override
Index: storage-handlers/hbase/pom.xml
===================================================================
--- storage-handlers/hbase/pom.xml	(revision 1458071)
+++ storage-handlers/hbase/pom.xml	(working copy)
@@ -61,6 +61,12 @@
       ${zookeeper.version}
       compile
+
+      org.apache.hbase
+      hbase
+      ${hbase.version}
+      compile
+
@@ -89,5 +95,11 @@
       tests
       test
+
+      com.yammer.metrics
+      metrics-core
+      ${metrics-core.version}
+      test
+
Index: webhcat/svr/src/main/java/org/apache/hcatalog/templeton/ListDelegator.java
===================================================================
--- webhcat/svr/src/main/java/org/apache/hcatalog/templeton/ListDelegator.java	(revision 1458071)
+++ webhcat/svr/src/main/java/org/apache/hcatalog/templeton/ListDelegator.java	(working copy)
@@ -23,7 +23,6 @@
 import java.util.ArrayList;
 
 import org.apache.hadoop.mapred.JobStatus;
-import org.apache.hadoop.mapred.JobTracker;
 import org.apache.hadoop.mapred.TempletonJobTracker;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hcatalog.templeton.tool.JobState;
@@ -42,7 +41,7 @@
         UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user);
         TempletonJobTracker tracker = null;
         try {
-            tracker = new TempletonJobTracker(JobTracker.getAddress(appConf),
+            tracker = new TempletonJobTracker(getAddress(appConf),
                                               appConf);
             ArrayList<String> ids = new ArrayList<String>();
Index: webhcat/svr/src/main/java/org/apache/hcatalog/templeton/DeleteDelegator.java
===================================================================
--- webhcat/svr/src/main/java/org/apache/hcatalog/templeton/DeleteDelegator.java	(revision 1458071)
+++ webhcat/svr/src/main/java/org/apache/hcatalog/templeton/DeleteDelegator.java	(working copy)
@@ -20,7 +20,6 @@
 import java.io.IOException;
 
 import org.apache.hadoop.mapred.JobID;
-import org.apache.hadoop.mapred.JobTracker;
 import org.apache.hadoop.mapred.TempletonJobTracker;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hcatalog.templeton.tool.JobState;
@@ -40,7 +39,7 @@
         TempletonJobTracker tracker = null;
         JobState state = null;
         try {
-            tracker = new TempletonJobTracker(JobTracker.getAddress(appConf),
+            tracker = new TempletonJobTracker(getAddress(appConf),
                                               appConf);
             JobID jobid = StatusDelegator.StringToJobID(id);
             if (jobid == null)
Index: webhcat/svr/src/main/java/org/apache/hcatalog/templeton/TempletonDelegator.java
===================================================================
--- webhcat/svr/src/main/java/org/apache/hcatalog/templeton/TempletonDelegator.java	(revision 1458071)
+++ webhcat/svr/src/main/java/org/apache/hcatalog/templeton/TempletonDelegator.java	(working copy)
@@ -18,6 +18,11 @@
  */
 package org.apache.hcatalog.templeton;
 
+import java.net.InetSocketAddress;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.net.NetUtils;
+
 /**
  * The helper class for all the Templeton delegator classes. A
  * delegator will call the underlying Templeton service such as hcat
@@ -29,4 +34,10 @@
     public TempletonDelegator(AppConfig appConf) {
         this.appConf = appConf;
     }
+
+    public static InetSocketAddress getAddress(Configuration conf) {
+        String jobTrackerStr =
+            conf.get("mapred.job.tracker", "localhost:8012");
+        return NetUtils.createSocketAddr(jobTrackerStr);
+    }
 }
Index: webhcat/svr/src/main/java/org/apache/hcatalog/templeton/StatusDelegator.java
===================================================================
--- webhcat/svr/src/main/java/org/apache/hcatalog/templeton/StatusDelegator.java	(revision 1458071)
+++ webhcat/svr/src/main/java/org/apache/hcatalog/templeton/StatusDelegator.java	(working copy)
@@ -25,7 +25,6 @@
 import org.apache.hadoop.mapred.JobID;
 import org.apache.hadoop.mapred.JobProfile;
 import org.apache.hadoop.mapred.JobStatus;
-import org.apache.hadoop.mapred.JobTracker;
 import org.apache.hadoop.mapred.TempletonJobTracker;
 import org.apache.hcatalog.templeton.tool.JobState;
 
@@ -45,7 +44,7 @@
         TempletonJobTracker tracker = null;
         JobState state = null;
         try {
-            tracker = new TempletonJobTracker(JobTracker.getAddress(appConf),
+            tracker = new TempletonJobTracker(getAddress(appConf),
                                               appConf);
             JobID jobid = StatusDelegator.StringToJobID(id);
             if (jobid == null)
Index: webhcat/svr/src/main/java/org/apache/hadoop/mapred/TempletonJobTracker.java
===================================================================
--- webhcat/svr/src/main/java/org/apache/hadoop/mapred/TempletonJobTracker.java	(revision 1458071)
+++ webhcat/svr/src/main/java/org/apache/hadoop/mapred/TempletonJobTracker.java	(working copy)
@@ -1,102 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.hadoop.mapred;
-
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.security.PrivilegedExceptionAction;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.UserGroupInformation;
-
-/*
- * Communicate with the JobTracker as a specific user.
- */
-public class TempletonJobTracker {
-    private final JobSubmissionProtocol cnx;
-
-    /**
-     * Create a connection to the Job Tracker.
-     */
-    public TempletonJobTracker(final InetSocketAddress addr,
-                               final Configuration conf)
-        throws IOException, InterruptedException {
-
-        UserGroupInformation ugi = UserGroupInformation.getLoginUser();
-        cnx =
-            ugi.doAs(new PrivilegedExceptionAction<JobSubmissionProtocol>() {
-                public JobSubmissionProtocol run()
-                    throws IOException, InterruptedException {
-                    return (JobSubmissionProtocol)
-                        RPC.getProxy(JobSubmissionProtocol.class,
-                                     JobSubmissionProtocol.versionID,
-                                     addr,
-                                     conf,
-                                     NetUtils.getSocketFactory(conf,
-                                                               JobSubmissionProtocol.class));
-                }
-            });
-    }
-
-    /**
-     * Grab a handle to a job that is already known to the JobTracker.
-     *
-     * @return Profile of the job, or null if not found.
-     */
-    public JobProfile getJobProfile(JobID jobid)
-        throws IOException {
-        return cnx.getJobProfile(jobid);
-    }
-
-    /**
-     * Grab a handle to a job that is already known to the JobTracker.
-     *
-     * @return Status of the job, or null if not found.
-     */
-    public JobStatus getJobStatus(JobID jobid)
-        throws IOException {
-        return cnx.getJobStatus(jobid);
-    }
-
-
-    /**
-     * Kill a job.
-     */
-    public void killJob(JobID jobid)
-        throws IOException {
-        cnx.killJob(jobid);
-    }
-
-    /**
-     * Get all the jobs submitted.
-     */
-    public JobStatus[] getAllJobs()
-        throws IOException {
-        return cnx.getAllJobs();
-    }
-
-    /**
-     * Close the connection to the Job Tracker.
-     */
-    public void close() {
-        RPC.stopProxy(cnx);
-    }
-}
Index: webhcat/svr/pom.xml
===================================================================
--- webhcat/svr/pom.xml	(revision 1458071)
+++ webhcat/svr/pom.xml	(working copy)
@@ -48,12 +48,6 @@
       ${hcatalog.version}
       provided
-
-      org.apache.hadoop
-      hadoop-core
-      ${hadoop20.version}
-      provided
-
Index: hcatalog-pig-adapter/src/test/java/org/apache/hcatalog/pig/TestHCatLoaderComplexSchema.java
===================================================================
--- hcatalog-pig-adapter/src/test/java/org/apache/hcatalog/pig/TestHCatLoaderComplexSchema.java	(revision 1458071)
+++ hcatalog-pig-adapter/src/test/java/org/apache/hcatalog/pig/TestHCatLoaderComplexSchema.java	(working copy)
@@ -112,7 +112,6 @@
     @Test
     public void testSyntheticComplexSchema() throws Exception {
         String pigSchema =
-            "(" +
                 "a: " +
                 "(" +
                 "aa: chararray, " +
@@ -124,8 +123,7 @@
                 ")," +
                 "b: chararray, " +
                 "c: long, " +
-                "d: { t: (da:long, db: ( dba: chararray, dbb: long), dc: { t: (dca: long) } ) } " +
-                ")";
+                "d: { t: (da:long, db: ( dba: chararray, dbb: long), dc: { t: (dca: long) } ) } ";
 
         // with extra structs
         String tableSchema =
@@ -191,7 +189,7 @@
         createTable(tablename, tableSchema);
         PigServer server = new PigServer(ExecType.LOCAL);
         server.setBatchOn();
-        server.registerQuery("A = load '" + tablename + "Input' using org.apache.hcatalog.pig.MockLoader() AS " + pigSchema + ";");
+        server.registerQuery("A = load '" + tablename + "Input' using org.apache.hcatalog.pig.MockLoader() AS (" + pigSchema + ");");
         Schema dumpedASchema = server.dumpSchema("A");
         server.registerQuery("STORE A into '" + tablename + "' using org.apache.hcatalog.pig.HCatStorer("
             + (provideSchemaToStorer ? "'', '" + pigSchema + "'" : "")
@@ -255,7 +253,7 @@
      */
    @Test
    public void testTupleInBagInTupleInBag() throws Exception {
-        String pigSchema = "(a: { b : ( c: { d: (i : long) } ) })";
+        String pigSchema = "a: { b : ( c: { d: (i : long) } ) }";
 
        String tableSchema = "a array< array< bigint > >";
 
@@ -279,7 +277,7 @@
    @Test
    public void testMapWithComplexData() throws Exception {
-        String pigSchema = "(a: long, b: map[])";
+        String pigSchema = "a: long, b: map[]";
        String tableSchema = "a bigint, b map>";
 
        List data = new ArrayList();
Index: hcatalog-pig-adapter/src/test/java/org/apache/hcatalog/pig/TestPigHCatUtil.java
===================================================================
--- hcatalog-pig-adapter/src/test/java/org/apache/hcatalog/pig/TestPigHCatUtil.java	(revision 1458071)
+++ hcatalog-pig-adapter/src/test/java/org/apache/hcatalog/pig/TestPigHCatUtil.java	(working copy)
@@ -65,7 +65,7 @@
     // When updating our pig dependency this will need updated.
     System.setProperty(HCatConstants.HCAT_PIG_INNER_TUPLE_NAME, "t");
     System.setProperty(HCatConstants.HCAT_PIG_INNER_FIELD_NAME, "FIELDNAME_tuple");
-    UDFContext.getUDFContext().setClientSystemProps();
+    UDFContext.getUDFContext().setClientSystemProps(System.getProperties());
 
     // Define the expected schema.
     ResourceFieldSchema[] bagSubFieldSchemas = new ResourceFieldSchema[1];
Index: hcatalog-pig-adapter/pom.xml
===================================================================
--- hcatalog-pig-adapter/pom.xml	(revision 1458071)
+++ hcatalog-pig-adapter/pom.xml	(working copy)
@@ -43,11 +43,5 @@
       ${hcatalog.version}
       compile
-
-      org.apache.pig
-      pig
-      ${pig.version}
-      compile
-