diff --git ql/src/java/org/apache/hadoop/hive/ql/Driver.java ql/src/java/org/apache/hadoop/hive/ql/Driver.java
index fa40082..0226f28 100644
--- ql/src/java/org/apache/hadoop/hive/ql/Driver.java
+++ ql/src/java/org/apache/hadoop/hive/ql/Driver.java
@@ -1342,9 +1342,9 @@ public int execute() throws CommandNeedRetryException {
       }
     }

-    int jobs = Utilities.getMRTasks(plan.getRootTasks()).size() +
-        Utilities.getTezTasks(plan.getRootTasks()).size() +
-        Utilities.getSparkTasks(plan.getRootTasks()).size();
+    int jobs = Utilities.getMRTasks(plan.getRootTasks()).size()
+        + Utilities.getTezTasks(plan.getRootTasks()).size()
+        + Utilities.getSparkTasks(plan.getRootTasks()).size();
     if (jobs > 0) {
       console.printInfo("Query ID = " + plan.getQueryId());
       console.printInfo("Total jobs = " + jobs);
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java
index b25a639..a562aa1 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java
@@ -424,12 +424,12 @@ else if (ent.getValue() instanceof List) {
            json.accumulate(ent.getKey().toString(), jsonDep);
          }
        }
-      } else if (ent.getValue() != null && !((List)ent.getValue()).isEmpty()
-          && ((List)ent.getValue()).get(0) != null &&
-          ((List)ent.getValue()).get(0) instanceof SparkWork.Dependency) {
+      } else if (ent.getValue() != null && !((List) ent.getValue()).isEmpty()
+          && ((List) ent.getValue()).get(0) != null &&
+          ((List) ent.getValue()).get(0) instanceof SparkWork.Dependency) {
        if (out != null) {
          boolean isFirst = true;
-          for (SparkWork.Dependency dep: (List<SparkWork.Dependency>)ent.getValue()) {
+          for (SparkWork.Dependency dep: (List<SparkWork.Dependency>) ent.getValue()) {
            if (!isFirst) {
              out.print(", ");
            } else {
@@ -446,7 +446,7 @@ else if (ent.getValue() instanceof List) {
            out.println();
          }
          if (jsonOutput) {
-            for (SparkWork.Dependency dep: (List<SparkWork.Dependency>)ent.getValue()) {
+            for (SparkWork.Dependency dep: (List<SparkWork.Dependency>) ent.getValue()) {
              JSONObject jsonDep = new JSONObject();
              jsonDep.put("parent", dep.getName());
              jsonDep.put("type", dep.getShuffleType());
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
index ee42f4c..2e771ec 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
@@ -363,8 +363,8 @@ private static BaseWork getBaseWork(Configuration conf, String name) {
     path = getPlanPath(conf, name);
     LOG.info("PLAN PATH = " + path);
     assert path != null;
-    if (!gWorkMap.containsKey(path) ||
-        HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("spark")) {
+    if (!gWorkMap.containsKey(path)
+        || HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("spark")) {
       Path localPath;
       if (conf.getBoolean("mapreduce.task.uberized", false) && name.equals(REDUCE_PLAN_NAME)) {
         localPath = new Path(name);
@@ -2673,7 +2673,8 @@ private static void getTezTasks(List<Task<? extends Serializable>> tasks, List<

-  private static void getSparkTasks(List<Task<? extends Serializable>> tasks, List<SparkTask> sparkTasks) {
+  private static void getSparkTasks(List<Task<? extends Serializable>> tasks,
+      List<SparkTask> sparkTasks) {
     for (Task<? extends Serializable> task : tasks) {
       if (task instanceof SparkTask && !sparkTasks.contains(task)) {
         sparkTasks.add((SparkTask) task);
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinTableContainerSerDe.java ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinTableContainerSerDe.java
index abdb6af..adb50f0 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinTableContainerSerDe.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinTableContainerSerDe.java
@@ -154,7 +154,7 @@ public MapJoinPersistableTableContainer load(
       return tableContainer;
     } catch (IOException e) {
       throw new HiveException("IO error while trying to create table container", e);
-    } catch(Exception e) {
+    } catch (Exception e) {
       throw new HiveException("Error while trying to create table container", e);
     }
   }
diff --git ql/src/java/org/apache/hadoop/hive/ql/io/HiveKey.java ql/src/java/org/apache/hadoop/hive/ql/io/HiveKey.java
index 33aeda4..f9cf2bd 100644
--- ql/src/java/org/apache/hadoop/hive/ql/io/HiveKey.java
+++ ql/src/java/org/apache/hadoop/hive/ql/io/HiveKey.java
@@ -24,7 +24,6 @@

 /**
  * HiveKey is a simple wrapper on Text which allows us to set the hashCode
  * easily. hashCode is used for hadoop partitioner.
- *
  */
 public class HiveKey extends BytesWritable {
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
index 6f216c9..ae0addc 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
@@ -861,10 +861,10 @@ public static void setKeyAndValueDescForTaskTree(Task<? extends Serializable> ta
         }
       }
     } else if (task instanceof SparkTask) {
-      SparkWork work = (SparkWork)task.getWork();
+      SparkWork work = (SparkWork) task.getWork();
       for (BaseWork w : work.getAllWorkUnsorted()) {
         if (w instanceof MapWork) {
-          ((MapWork)w).deriveExplainAttributes();
+          ((MapWork) w).deriveExplainAttributes();
         }
       }
     }
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
index a6d5c62..3482a47 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
@@ -102,10 +102,12 @@ public void initialize(HiveConf hiveConf) {
     }
     transformations.add(new SamplePruner());

-    MapJoinProcessor mapJoinProcessor = isSparkExecEngine ? new SparkMapJoinProcessor() : new MapJoinProcessor();
+    MapJoinProcessor mapJoinProcessor = isSparkExecEngine ? new SparkMapJoinProcessor()
+        : new MapJoinProcessor();
     transformations.add(mapJoinProcessor);

-    if ((HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTBUCKETMAPJOIN)) && !isTezExecEngine && !isSparkExecEngine) {
+    if ((HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTBUCKETMAPJOIN))
+        && !isTezExecEngine && !isSparkExecEngine) {
       transformations.add(new BucketMapJoinOptimizer());
       bucketMapJoinOptimizer = true;
     }
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/unionproc/UnionProcessor.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/unionproc/UnionProcessor.java
index fec6822..9937343 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/unionproc/UnionProcessor.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/unionproc/UnionProcessor.java
@@ -96,8 +96,8 @@ public ParseContext transform(ParseContext pCtx) throws SemanticException {
     // Walk the tree again to see if the union can be removed completely
     HiveConf conf = pCtx.getConf();
     opRules.clear();
-    if (conf.getBoolVar(HiveConf.ConfVars.HIVE_OPTIMIZE_UNION_REMOVE) &&
-        !conf.getVar(HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("spark")) {
+    if (conf.getBoolVar(HiveConf.ConfVars.HIVE_OPTIMIZE_UNION_REMOVE)
+        && !conf.getVar(HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("spark")) {

       if (!conf.getBoolVar(HiveConf.ConfVars.HIVE_HADOOP_SUPPORTS_SUBDIRECTORIES)) {
         throw new
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/MapReduceCompiler.java ql/src/java/org/apache/hadoop/hive/ql/parse/MapReduceCompiler.java
index 1b6de64..1d75160 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/MapReduceCompiler.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/MapReduceCompiler.java
@@ -147,9 +147,7 @@ private void breakTaskTree(Task<? extends Serializable> task) {
   }

   // loop over all the operators recursively
-  // TODO: changed from private to protected for SparkCompiler to use. It will be changed back onece SparkCompiler
-  // stands alone.
-  protected void breakOperatorTree(Operator<? extends OperatorDesc> topOp) {
+  private void breakOperatorTree(Operator<? extends OperatorDesc> topOp) {
     if (topOp instanceof ReduceSinkOperator) {
       topOp.setChildOperators(null);
     }
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/BaseWork.java ql/src/java/org/apache/hadoop/hive/ql/plan/BaseWork.java
index 1efbb12..1737a34 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/BaseWork.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/BaseWork.java
@@ -129,7 +129,7 @@ public void addDummyOp(HashTableDummyOperator dummyOp) {
     // add all children
     opStack.addAll(opSet);

-    while(!opStack.empty()) {
+    while (!opStack.empty()) {
       Operator<?> op = opStack.pop();
       if (op.getNumChild() == 0) {
         returnSet.add(op);
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java
index 4582678..3f07ea7 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java
@@ -149,7 +149,7 @@ public void setLbCtx(ListBucketingCtx lbCtx) {
       work = ((MapredWork) mrTask.getWork()).getMapWork();
     } else if (mrTask.getWork() instanceof TezWork){
       work = (MapWork) ((TezWork) mrTask.getWork()).getAllWork().get(0);
-    } else if (mrTask.getWork() instanceof SparkWork){
+    } else if (mrTask.getWork() instanceof SparkWork) {
       work = (MapWork) ((SparkWork) mrTask.getWork()).getAllWork().get(0);
     } else {
       work = (MapWork) mrTask.getWork();
diff --git ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
index 076d2fa..f45b20a 100644
--- ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
+++ ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
@@ -1271,7 +1271,7 @@ public void close() throws IOException {
     if (sparkSession != null) {
       try {
         SparkSessionManagerImpl.getInstance().closeSession(sparkSession);
-      } catch(Exception ex) {
+      } catch (Exception ex) {
         LOG.error("Error closing spark session.", ex);
       } finally {
         sparkSession = null;
diff --git shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java
index f1743ae..ea4ee3a 100644
--- shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java
+++ shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java
@@ -518,14 +518,14 @@ public void checkFileAccess(FileSystem fs, FileStatus status, FsAction action)
   Path getCurrentTrashPath(Configuration conf, FileSystem fs);

   /**
-   * Returns a shim to wrap KerberosName
+   * Check whether file is directory.
    */
-  public KerberosNameShim getKerberosNameShim(String name) throws IOException;
+  boolean isDirectory(FileStatus fileStatus);

   /**
-   * Check whether file is directory.
+   * Returns a shim to wrap KerberosName
    */
-  boolean isDirectory(FileStatus fileStatus);
+  public KerberosNameShim getKerberosNameShim(String name) throws IOException;

   /**
    * Shim for KerberosName