diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
index 39866a3..473dbd6 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
@@ -754,7 +754,7 @@ private LocalResource createLocalResource(FileSystem remoteFs, Path file,
     URL resourceURL = ConverterUtils.getYarnUrlFromPath(file);
     long resourceSize = fstat.getLen();
     long resourceModificationTime = fstat.getModificationTime();
-    LOG.info("Resource modification time: " + resourceModificationTime);
+    LOG.info("Resource modification time: " + resourceModificationTime + " for " + file);
 
     LocalResource lr = Records.newRecord(LocalResource.class);
     lr.setResource(resourceURL);
@@ -912,7 +912,7 @@ public FileStatus getHiveJarDirectory(Configuration conf) throws IOException, Lo
 
     if (fstatus == null) {
       Path destDir = getDefaultDestDir(conf);
-      LOG.info("Jar dir is null/directory doesn't exist. Choosing HIVE_INSTALL_DIR - " + destDir);
+      LOG.info("Jar dir is null / directory doesn't exist. Choosing HIVE_INSTALL_DIR - " + destDir);
       fstatus = validateTargetDir(destDir, conf);
     }
 
@@ -1275,5 +1275,4 @@ public static String getUserSpecifiedDagName(Configuration conf) {
   private DagUtils() {
     // don't instantiate
   }
-
 }
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java
index 6675f0d..3ea5ef9 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java
@@ -548,6 +548,8 @@ private Path createTezDir(String sessionId) throws IOException {
 
   private LocalResource createJarLocalResource(String localJarPath)
       throws IOException, LoginException, IllegalArgumentException, FileNotFoundException {
+    // TODO Reduce the number of lookups that happen here. This shouldn't go to HDFS for each call.
+    // The hiveJarDir can be determined once per client.
    FileStatus destDirStatus = utils.getHiveJarDirectory(conf);
     assert destDirStatus != null;
     Path destDirPath = destDirStatus.getPath();
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java
index 83defea..3789ce9 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java
@@ -173,9 +173,6 @@ public int execute(DriverContext driverContext) {
 
       // next we translate the TezWork to a Tez DAG
       DAG dag = build(jobConf, work, scratchDir, appJarLr, additionalLr, ctx);
-      if (driverContext.getCtx() == null) {
-        boolean a = false;
-      }
       CallerContext callerContext = CallerContext.create(
           "HIVE", queryPlan.getQueryId(),
           "HIVE_QUERY_ID", queryPlan.getQueryStr());
@@ -460,10 +457,10 @@ DAGClient submit(JobConf conf, DAG dag, Path scratchDir,
     } catch (Exception e) {
       // In case of any other exception, retry. If this also fails, report original error and exit.
       try {
-        TezSessionPoolManager.getInstance().closeAndOpen(sessionState, this.conf, inputOutputJars,
-            true);
         console.printInfo("Dag submit failed due to " + e.getMessage() + " stack trace: "
             + Arrays.toString(e.getStackTrace()) + " retrying...");
+        TezSessionPoolManager.getInstance().closeAndOpen(sessionState, this.conf, inputOutputJars,
+            true);
         dagClient = sessionState.getSession().submitDAG(dag);
       } catch (Exception retryException) {
         // we failed to submit after retrying. Destroy session and bail.
diff --git ql/src/java/org/apache/hadoop/hive/ql/io/NullScanFileSystem.java ql/src/java/org/apache/hadoop/hive/ql/io/NullScanFileSystem.java
index 88e9445..5bfde18 100644
--- ql/src/java/org/apache/hadoop/hive/ql/io/NullScanFileSystem.java
+++ ql/src/java/org/apache/hadoop/hive/ql/io/NullScanFileSystem.java
@@ -29,6 +29,8 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Progressable;
 
 /**
@@ -45,6 +47,8 @@ public static String getBaseScheme() {
     return "nullscan";
   }
 
+  private final Token[] DEFAULT_EMPTY_TOKEN_ARRAY = new Token[0];
+
   public NullScanFileSystem() {
   }
 
@@ -113,4 +117,15 @@ public boolean mkdirs(Path f, FsPermission permission) throws IOException {
   public FileStatus getFileStatus(Path f) throws IOException {
     return new FileStatus(0, false, 0, 0, 0, f);
   }
+
+  @Override
+  public Token[] addDelegationTokens(String renewer, Credentials credentials) throws
+      IOException {
+    return DEFAULT_EMPTY_TOKEN_ARRAY;
+  }
+
+  @Override
+  public Token getDelegationToken(String renewer) throws IOException {
+    return null;
+  }
 }
\ No newline at end of file