Index: shims/src/0.21/java/org/apache/hadoop/fs/ProxyLocalFileSystem.java
===================================================================
--- shims/src/0.21/java/org/apache/hadoop/fs/ProxyLocalFileSystem.java (revision 0)
+++ shims/src/0.21/java/org/apache/hadoop/fs/ProxyLocalFileSystem.java (revision 0)
@@ -0,0 +1,61 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs;
+
+import java.io.*;
+import java.net.URI;
+import java.net.URISyntaxException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.util.Progressable;
+
+/****************************************************************
+ * A proxy for LocalFileSystem.
+ *
+ * Serves URIs in the 'pfile:///' namespace using a
+ * LocalFileSystem.
+ *****************************************************************/
+
+public class ProxyLocalFileSystem extends FilterFileSystem {
+
+  protected LocalFileSystem localFs;
+
+  public ProxyLocalFileSystem() {
+    localFs = new LocalFileSystem();
+  }
+
+  public ProxyLocalFileSystem(FileSystem fs) {
+    throw new RuntimeException("Unsupported Constructor");
+  }
+
+  @Override
+  public void initialize(URI name, Configuration conf) throws IOException {
+    // create a proxy for the local filesystem
+    // the scheme/authority serving as the proxy is derived
+    // from the supplied URI
+
+    String scheme = name.getScheme();
+    String authority = name.getAuthority() != null ? name.getAuthority() : "";
+    String proxyUriString = scheme + "://" + authority + "/";
+    fs = new ProxyFileSystem(localFs, URI.create(proxyUriString));
+
+    fs.initialize(name, conf);
+  }
+}
Index: shims/src/0.21/java/org/apache/hadoop/fs/ProxyFileSystem.java
===================================================================
--- shims/src/0.21/java/org/apache/hadoop/fs/ProxyFileSystem.java (revision 0)
+++ shims/src/0.21/java/org/apache/hadoop/fs/ProxyFileSystem.java (revision 0)
@@ -0,0 +1,268 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs; + +import java.io.*; +import java.net.URI; +import java.net.URISyntaxException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.util.Progressable; + +/**************************************************************** + * A FileSystem that can serve a given scheme/authority using some + * other file system. In that sense, it serves as a proxy for the + * real/underlying file system + *****************************************************************/ + +public class ProxyFileSystem extends FilterFileSystem { + + protected String myScheme; + protected String myAuthority; + protected URI myUri; + + protected String realScheme; + protected String realAuthority; + protected URI realUri; + + + + private Path swizzleParamPath(Path p) { + return new Path (realScheme, realAuthority, p.toUri().getPath()); + } + + private Path swizzleReturnPath(Path p) { + return new Path (myScheme, myAuthority, p.toUri().getPath()); + } + + private FileStatus swizzleFileStatus(FileStatus orig, boolean isParam) { + FileStatus ret = + new FileStatus(orig.getLen(), orig.isDir(), orig.getReplication(), + orig.getBlockSize(), orig.getModificationTime(), + orig.getAccessTime(), orig.getPermission(), + orig.getOwner(), orig.getGroup(), + isParam ? swizzleParamPath(orig.getPath()) : + swizzleReturnPath(orig.getPath())); + return ret; + } + + public ProxyFileSystem() { + throw new RuntimeException ("Unsupported constructor"); + } + + public ProxyFileSystem(FileSystem fs) { + throw new RuntimeException ("Unsupported constructor"); + } + + /** + * Create a proxy file system for fs. + * + * @param fs FileSystem to create proxy for + * @param myUri URI to use as proxy. 
Only the scheme and authority from + * this are used right now + */ + public ProxyFileSystem(FileSystem fs, URI myUri) { + super(fs); + + URI realUri = fs.getUri(); + this.realScheme = realUri.getScheme(); + this.realAuthority=realUri.getAuthority(); + this.realUri = realUri; + + this.myScheme = myUri.getScheme(); + this.myAuthority=myUri.getAuthority(); + this.myUri = myUri; + } + + public void initialize(URI name, Configuration conf) throws IOException { + try { + URI realUri = new URI (realScheme, realAuthority, + name.getPath(), name.getQuery(), name.getFragment()); + super.initialize(realUri, conf); + } catch (URISyntaxException e) { + throw new RuntimeException(e); + } + } + + public URI getUri() { + return myUri; + } + + public String getName() { + return getUri().toString(); + } + + public Path makeQualified(Path path) { + return swizzleReturnPath(super.makeQualified(swizzleParamPath(path))); + } + + + protected void checkPath(Path path) { + super.checkPath(swizzleParamPath(path)); + } + + public BlockLocation[] getFileBlockLocations(FileStatus file, long start, + long len) throws IOException { + return super.getFileBlockLocations(swizzleFileStatus(file, true), + start, len); + } + + public FSDataInputStream open(Path f, int bufferSize) throws IOException { + return super.open(swizzleParamPath(f), bufferSize); + } + + public FSDataOutputStream append(Path f, int bufferSize, + Progressable progress) throws IOException { + return super.append(swizzleParamPath(f), bufferSize, progress); + } + + public FSDataOutputStream create(Path f, FsPermission permission, + boolean overwrite, int bufferSize, short replication, long blockSize, + Progressable progress) throws IOException { + return super.create(swizzleParamPath(f), permission, + overwrite, bufferSize, replication, blockSize, progress); + } + + public boolean setReplication(Path src, short replication) throws IOException { + return super.setReplication(swizzleParamPath(src), replication); + } + + public boolean rename(Path src, Path dst) throws IOException { + return super.rename(swizzleParamPath(src), swizzleParamPath(dst)); + } + + public boolean delete(Path f, boolean recursive) throws IOException { + return super.delete(swizzleParamPath(f), recursive); + } + + public boolean deleteOnExit(Path f) throws IOException { + return super.deleteOnExit(swizzleParamPath(f)); + } + + public FileStatus[] listStatus(Path f) throws IOException { + FileStatus[] orig = super.listStatus(swizzleParamPath(f)); + FileStatus[] ret = new FileStatus [orig.length]; + for (int i=0; i implements RecordReader { + + static final Class[] constructorSignature = new Class[] { + InputSplit.class, + Configuration.class, + Reporter.class, + Integer.class + }; + + protected CombineFileSplit split; + protected JobConf jc; + protected Reporter reporter; + protected Class> rrClass; + protected Constructor> rrConstructor; + protected FileSystem fs; + + protected int idx; + protected long progress; + protected RecordReader curReader; + + public boolean next(K key, V value) throws IOException { + + while ((curReader == null) || !curReader.next((K)((CombineHiveKey)key).getKey(), value)) { + if (!initNextRecordReader(key)) { + return false; + } + } + return true; + } + + public K createKey() { + K newKey = curReader.createKey(); + return (K)(new CombineHiveKey(newKey)); + } + + public V createValue() { + return curReader.createValue(); + } + + /** + * Return the amount of data processed. 
+ */ + public long getPos() throws IOException { + return progress; + } + + public void close() throws IOException { + if (curReader != null) { + curReader.close(); + curReader = null; + } + } + + /** + * Return progress based on the amount of data processed so far. + */ + public float getProgress() throws IOException { + return Math.min(1.0f, progress / (float) (split.getLength())); + } + + /** + * A generic RecordReader that can hand out different recordReaders + * for each chunk in the CombineFileSplit. + */ + public CombineFileRecordReader(JobConf job, CombineFileSplit split, + Reporter reporter, + Class> rrClass) + throws IOException { + this.split = split; + this.jc = job; + this.rrClass = rrClass; + this.reporter = reporter; + this.idx = 0; + this.curReader = null; + this.progress = 0; + + try { + rrConstructor = rrClass.getDeclaredConstructor(constructorSignature); + rrConstructor.setAccessible(true); + } catch (Exception e) { + throw new RuntimeException(rrClass.getName() + + " does not have valid constructor", e); + } + initNextRecordReader(null); + } + + /** + * Get the record reader for the next chunk in this CombineFileSplit. + */ + protected boolean initNextRecordReader(K key) throws IOException { + + if (curReader != null) { + curReader.close(); + curReader = null; + if (idx > 0) { + progress += split.getLength(idx - 1); // done processing so far + } + } + + // if all chunks have been processed, nothing more to do. + if (idx == split.getNumPaths()) { + return false; + } + + // get a record reader for the idx-th chunk + try { + curReader = rrConstructor.newInstance(new Object[] + {split, jc, reporter, Integer.valueOf(idx)}); + + // change the key if need be + if (key != null) { + K newKey = curReader.createKey(); + ((CombineHiveKey)key).setKey(newKey); + } + + // setup some helper config variables. + jc.set("map.input.file", split.getPath(idx).toString()); + jc.setLong("map.input.start", split.getOffset(idx)); + jc.setLong("map.input.length", split.getLength(idx)); + } catch (Exception e) { + throw new RuntimeException(e); + } + idx++; + return true; + } + } + + public abstract static class CombineFileInputFormatShim extends + CombineFileInputFormat + implements HadoopShims.CombineFileInputFormatShim { + + public Path[] getInputPathsShim(JobConf conf) { + try { + return FileInputFormat.getInputPaths(conf); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + @Override + public void createPool(JobConf conf, PathFilter... 
filters) { + super.createPool(conf, filters); + } + + @Override + public InputSplitShim[] getSplits(JobConf job, int numSplits) throws IOException { + long minSize = job.getLong("mapred.min.split.size", 0); + + // For backward compatibility, let the above parameter be used + if (job.getLong("mapred.min.split.size.per.node", 0) == 0) { + super.setMinSplitSizeNode(minSize); + } + + if (job.getLong("mapred.min.split.size.per.rack", 0) == 0) { + super.setMinSplitSizeRack(minSize); + } + + if (job.getLong("mapred.max.split.size", 0) == 0) { + super.setMaxSplitSize(minSize); + } + + CombineFileSplit[] splits = (CombineFileSplit[]) super.getSplits(job, numSplits); + + InputSplitShim[] isplits = new InputSplitShim[splits.length]; + for (int pos = 0; pos < splits.length; pos++) { + isplits[pos] = new InputSplitShim(splits[pos]); + } + + return isplits; + } + + public InputSplitShim getInputSplitShim() throws IOException { + return new InputSplitShim(); + } + + public RecordReader getRecordReader(JobConf job, HadoopShims.InputSplitShim split, + Reporter reporter, + Class> rrClass) + throws IOException { + CombineFileSplit cfSplit = (CombineFileSplit) split; + return new CombineFileRecordReader(job, cfSplit, reporter, rrClass); + } + + } + + public String getInputFormatClassName() { + return "org.apache.hadoop.hive.ql.io.CombineHiveInputFormat"; + } + + String[] ret = new String[2]; + + @Override + public String[] getTaskJobIDs(TaskCompletionEvent t) { + TaskID tid = t.getTaskAttemptId().getTaskID(); + ret[0] = tid.toString(); + ret[1] = tid.getJobID().toString(); + return ret; + } + + public void setFloatConf(Configuration conf, String varName, float val) { + conf.setFloat(varName, val); + } + + @Override + public int createHadoopArchive(Configuration conf, Path sourceDir, Path destDir, + String archiveName) throws Exception { + + HadoopArchives har = new HadoopArchives(conf); + List args = new ArrayList(); + + if (conf.get("hive.archive.har.parentdir.settable") == null) { + throw new RuntimeException("hive.archive.har.parentdir.settable is not set"); + } + boolean parentSettable = + conf.getBoolean("hive.archive.har.parentdir.settable", false); + + if (parentSettable) { + args.add("-archiveName"); + args.add(archiveName); + args.add("-p"); + args.add(sourceDir.toString()); + args.add(destDir.toString()); + } else { + args.add("-archiveName"); + args.add(archiveName); + args.add(sourceDir.toString()); + args.add(destDir.toString()); + } + + return ToolRunner.run(har, args.toArray(new String[0])); + } + + public static class NullOutputCommitter extends OutputCommitter { + @Override + public void setupJob(JobContext jobContext) { } + @Override + public void cleanupJob(JobContext jobContext) { } + + @Override + public void setupTask(TaskAttemptContext taskContext) { } + @Override + public boolean needsTaskCommit(TaskAttemptContext taskContext) { + return false; + } + @Override + public void commitTask(TaskAttemptContext taskContext) { } + @Override + public void abortTask(TaskAttemptContext taskContext) { } + } + + public void setNullOutputFormat(JobConf conf) { + conf.setOutputFormat(NullOutputFormat.class); + conf.setOutputCommitter(Hadoop21Shims.NullOutputCommitter.class); + + // option to bypass job setup and cleanup was introduced in hadoop-21 (MAPREDUCE-463) + // but can be backported. 
So we disable setup/cleanup in all versions >= 0.19 + conf.setBoolean("mapred.committer.job.setup.cleanup.needed", false); + } +} Index: shims/src/0.21/java/org/apache/hadoop/hive/shims/Jetty21Shims.java =================================================================== --- shims/src/0.21/java/org/apache/hadoop/hive/shims/Jetty21Shims.java (revision 0) +++ shims/src/0.21/java/org/apache/hadoop/hive/shims/Jetty21Shims.java (revision 0) @@ -0,0 +1,56 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.shims; + +import java.io.IOException; + +import org.mortbay.jetty.bio.SocketConnector; +import org.mortbay.jetty.handler.RequestLogHandler; +import org.mortbay.jetty.webapp.WebAppContext; + +/** + * Jetty20Shims. + * + */ +public class Jetty21Shims implements JettyShims { + public Server startServer(String listen, int port) throws IOException { + Server s = new Server(); + s.setupListenerHostPort(listen, port); + return s; + } + + private static class Server extends org.mortbay.jetty.Server implements JettyShims.Server { + public void addWar(String war, String contextPath) { + WebAppContext wac = new WebAppContext(); + wac.setContextPath(contextPath); + wac.setWar(war); + RequestLogHandler rlh = new RequestLogHandler(); + rlh.setHandler(wac); + this.addHandler(rlh); + } + + public void setupListenerHostPort(String listen, int port) + throws IOException { + + SocketConnector connector = new SocketConnector(); + connector.setPort(port); + connector.setHost(listen); + this.addConnector(connector); + } + } +} Index: shims/src/0.21/java/org/apache/hadoop/hive/shims/HiveHarFileSystem.java =================================================================== --- shims/src/0.21/java/org/apache/hadoop/hive/shims/HiveHarFileSystem.java (revision 0) +++ shims/src/0.21/java/org/apache/hadoop/hive/shims/HiveHarFileSystem.java (revision 0) @@ -0,0 +1,66 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.shims; + +import java.io.IOException; + +import org.apache.hadoop.fs.BlockLocation; +import org.apache.hadoop.fs.ContentSummary; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.HarFileSystem; +import org.apache.hadoop.fs.Path; + +/** + * HiveHarFileSystem - fixes issues with Hadoop's HarFileSystem + * + */ +public class HiveHarFileSystem extends HarFileSystem { + + @Override + public BlockLocation[] getFileBlockLocations(FileStatus file, long start, + long len) throws IOException { + + // In some places (e.g. FileInputFormat) this BlockLocation is used to + // figure out sizes/offsets and so a completely blank one will not work. + String [] hosts = {"DUMMY_HOST"}; + return new BlockLocation[]{new BlockLocation(null, hosts, 0, file.getLen())}; + } + + @Override + public ContentSummary getContentSummary(Path f) throws IOException { + // HarFileSystem has a bug where this method does not work properly + // if the underlying FS is HDFS. See MAPREDUCE-1877 for more + // information. This method is from FileSystem. + FileStatus status = getFileStatus(f); + if (!status.isDir()) { + // f is a file + return new ContentSummary(status.getLen(), 1, 0); + } + // f is a directory + long[] summary = {0, 0, 1}; + for(FileStatus s : listStatus(f)) { + ContentSummary c = s.isDir() ? getContentSummary(s.getPath()) : + new ContentSummary(s.getLen(), 1, 0); + summary[0] += c.getLength(); + summary[1] += c.getFileCount(); + summary[2] += c.getDirectoryCount(); + } + return new ContentSummary(summary[0], summary[1], summary[2]); + } +} Index: shims/src/common/java/org/apache/hadoop/hive/shims/ShimLoader.java =================================================================== --- shims/src/common/java/org/apache/hadoop/hive/shims/ShimLoader.java (revision 1000628) +++ shims/src/common/java/org/apache/hadoop/hive/shims/ShimLoader.java (working copy) @@ -41,6 +41,7 @@ HADOOP_SHIM_CLASSES.put("0.18", "org.apache.hadoop.hive.shims.Hadoop18Shims"); HADOOP_SHIM_CLASSES.put("0.19", "org.apache.hadoop.hive.shims.Hadoop19Shims"); HADOOP_SHIM_CLASSES.put("0.20", "org.apache.hadoop.hive.shims.Hadoop20Shims"); + HADOOP_SHIM_CLASSES.put("0.21", "org.apache.hadoop.hive.shims.Hadoop21Shims"); } /** @@ -55,6 +56,7 @@ JETTY_SHIM_CLASSES.put("0.18", "org.apache.hadoop.hive.shims.Jetty18Shims"); JETTY_SHIM_CLASSES.put("0.19", "org.apache.hadoop.hive.shims.Jetty19Shims"); JETTY_SHIM_CLASSES.put("0.20", "org.apache.hadoop.hive.shims.Jetty20Shims"); + JETTY_SHIM_CLASSES.put("0.21", "org.apache.hadoop.hive.shims.Jetty20Shims"); } /** Index: shims/build.xml =================================================================== --- shims/build.xml (revision 1000628) +++ shims/build.xml (working copy) @@ -34,6 +34,20 @@ + + + + + + + + + + + + + + @@ -53,6 +67,24 @@ + + + + + + + + + + + @@ -66,6 +98,9 @@ + + + - + Index: build.properties =================================================================== --- build.properties (revision 1000628) +++ build.properties (working copy) @@ -23,6 +23,15 @@ hadoop.jar=${hadoop.root}/hadoop-${hadoop.version.ant-internal}-core.jar hadoop.tools.jar=${hadoop.root}/hadoop-${hadoop.version.ant-internal}-tools.jar hadoop.test.jar=${hadoop.root}/hadoop-${hadoop.version.ant-internal}-test.jar + +hadoop-common-0.21.0.jar=${hadoop.root}/hadoop-common-${hadoop.version.ant-internal}.jar +hadoop-mapred-0.21.0.jar=${hadoop.root}/hadoop-mapred-${hadoop.version.ant-internal}.jar 
+hadoop-hdfs-0.21.0.jar=${hadoop.root}/hadoop-hdfs-${hadoop.version.ant-internal}.jar
+hadoop-common-test-0.21.0.jar=${hadoop.root}/hadoop-common-test-${hadoop.version.ant-internal}.jar
+hadoop-hdfs-test-0.21.0.jar=${hadoop.root}/hadoop-hdfs-test-${hadoop.version.ant-internal}.jar
+hadoop-mapred-test-0.21.0.jar=${hadoop.root}/hadoop-mapred-test-${hadoop.version.ant-internal}.jar
+hadoop-mapred-tools-0.21.0.jar=${hadoop.root}/hadoop-mapred-tools-${hadoop.version.ant-internal}.jar
+
 jetty.test.jar=${hadoop.root}/lib/jetty-5.1.4.jar
 servlet.test.jar=${hadoop.root}/lib/servlet-api.jar
 jasper.test.jar=${hadoop.root}/lib/jetty-ext/jasper-runtime.jar
Index: build-common.xml
===================================================================
--- build-common.xml (revision 1000628)
+++ build-common.xml (working copy)
@@ -217,6 +217,7 @@
+ + + + + + + + + + + + + + +
Index: common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
===================================================================
--- common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (revision 1000628)
+++ common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (working copy)
@@ -575,15 +575,17 @@
    * @throws IOException
    */
   public String getUser() throws IOException {
-    try {
-      UserGroupInformation ugi = UserGroupInformation.readFrom(this);
+    // try {
+    //UserGroupInformation ugi = UserGroupInformation.readFrom(this);
+    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
       if (ugi == null) {
-        ugi = UserGroupInformation.login(this);
+        //ugi = UserGroupInformation.login(this);
+        ugi = UserGroupInformation.getLoginUser();
       }
       return ugi.getUserName();
-    } catch (LoginException e) {
-      throw (IOException) new IOException().initCause(e);
-    }
+    //} catch (LoginException e) {
+    //throw (IOException) new IOException().initCause(e);
+    //}
   }
 
   public static String getColumnInternalName(int pos) {
Index: ql/src/java/org/apache/hadoop/hive/ql/Driver.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/Driver.java (revision 1000628)
+++ ql/src/java/org/apache/hadoop/hive/ql/Driver.java (working copy)
@@ -81,7 +81,7 @@
 import org.apache.hadoop.mapred.ClusterStatus;
 import org.apache.hadoop.mapred.JobClient;
 import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.security.UnixUserGroupInformation;
+//import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.hive.ql.metadata.Partition;
 import org.apache.hadoop.hive.ql.metadata.DummyPartition;
@@ -281,21 +281,25 @@
    */
   public Driver(HiveConf conf) {
     this.conf = conf;
+    /*
     try {
-      UnixUserGroupInformation.login(conf, true);
+      //UnixUserGroupInformation.login(conf, true);
     } catch (Exception e) {
       LOG.warn("Ignoring " + e.getMessage());
     }
+    */
   }
 
   public Driver() {
     if (SessionState.get() != null) {
       conf = SessionState.get().getConf();
+      /*
       try {
         UnixUserGroupInformation.login(conf, true);
       } catch (Exception e) {
         LOG.warn("Ignoring " + e.getMessage());
       }
+      */
     }
   }
 
@@ -721,9 +725,9 @@
 
       // Get all the pre execution hooks and execute them.
       for (PreExecute peh : getPreExecHooks()) {
-        peh.run(SessionState.get(), plan.getInputs(), plan.getOutputs(),
-            UnixUserGroupInformation.readFromConf(conf,
-                UnixUserGroupInformation.UGI_PROPERTY_NAME));
+        peh.run(SessionState.get(), plan.getInputs(), plan.getOutputs(), null);
+        // UnixUserGroupInformation.readFromConf(conf,
+        //     UnixUserGroupInformation.UGI_PROPERTY_NAME));
       }
 
       int jobs = Utilities.getMRTasks(plan.getRootTasks()).size();
@@ -804,9 +808,9 @@
       // Get all the post execution hooks and execute them.
       for (PostExecute peh : getPostExecHooks()) {
         peh.run(SessionState.get(), plan.getInputs(), plan.getOutputs(),
-            (SessionState.get() != null ? SessionState.get().getLineageState().getLineageInfo() : null),
-            UnixUserGroupInformation.readFromConf(conf,
-                UnixUserGroupInformation.UGI_PROPERTY_NAME));
+            (SessionState.get() != null ? SessionState.get().getLineageState().getLineageInfo() : null), null);
+        // UnixUserGroupInformation.readFromConf(conf,
+        //     UnixUserGroupInformation.UGI_PROPERTY_NAME));
       }
 
       if (SessionState.get() != null) {
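
For reference, the ProxyLocalFileSystem/ProxyFileSystem classes added at the top of this patch implement a simple path-swizzling scheme: incoming paths are rewritten from the proxy scheme/authority to the real one before delegating, and returned paths are rewritten back. The following is a minimal usage sketch; it assumes the proxy is registered under an fs.pfile.impl property (the registration itself is not part of this patch, so treat the property name, class name of the demo, and paths as illustrative assumptions).

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PFileUsageSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Assumed registration key: map the pfile scheme to the proxy filesystem.
    conf.set("fs.pfile.impl", "org.apache.hadoop.fs.ProxyLocalFileSystem");

    Path p = new Path("pfile:///tmp/pfile-demo/part-00000");
    FileSystem fs = p.getFileSystem(conf);   // resolves to ProxyLocalFileSystem

    FSDataOutputStream out = fs.create(p);   // swizzled to file:///tmp/... internally
    out.writeBytes("hello");
    out.close();

    // Results come back swizzled to the pfile scheme again.
    FileStatus stat = fs.getFileStatus(p);
    System.out.println(stat.getPath() + " : " + stat.getLen() + " bytes");
  }
}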
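
The ShimLoader hunk only adds "0.21" entries to the version maps; the lookup code that consumes them is unchanged and not included in this patch. The sketch below illustrates that pattern (a version string from VersionInfo reduced to a major key, then reflective instantiation) so the intent of the two new put() calls is clear. The method names and error handling here are illustrative, not the actual ShimLoader internals.

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.util.VersionInfo;

public class ShimLookupSketch {

  private static final Map<String, String> HADOOP_SHIM_CLASSES =
      new HashMap<String, String>();

  static {
    HADOOP_SHIM_CLASSES.put("0.20", "org.apache.hadoop.hive.shims.Hadoop20Shims");
    // New in this patch: route 0.21.x clusters to the 0.21 shim.
    HADOOP_SHIM_CLASSES.put("0.21", "org.apache.hadoop.hive.shims.Hadoop21Shims");
  }

  // Reduce e.g. "0.21.0" to "0.21" and instantiate the mapped shim class.
  public static Object loadHadoopShims() throws Exception {
    String[] parts = VersionInfo.getVersion().split("\\.");
    String majorVersion = parts[0] + "." + parts[1];
    String className = HADOOP_SHIM_CLASSES.get(majorVersion);
    if (className == null) {
      throw new RuntimeException("Could not load shims for Hadoop version "
          + VersionInfo.getVersion());
    }
    return Class.forName(className).newInstance();
  }
}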
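
The HiveConf and Driver hunks track the removal of UnixUserGroupInformation and of UserGroupInformation.readFrom()/login() in the 0.21 security API: the current or login user is now obtained from static accessors that throw IOException directly, so the LoginException handling disappears. Condensed, the patched getUser() logic amounts to the sketch below (the same calls as in the diff, shown without the commented-out 0.20 code).

import java.io.IOException;

import org.apache.hadoop.security.UserGroupInformation;

public class CurrentUserSketch {

  // Equivalent of the patched HiveConf.getUser() against the 0.21 security API.
  public static String getUser() throws IOException {
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    if (ugi == null) {
      ugi = UserGroupInformation.getLoginUser();
    }
    return ugi.getUserName();
  }
}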