diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateDatabaseHook.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateDatabaseHook.java
index 34157d91ba..369c663ca3 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateDatabaseHook.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateDatabaseHook.java
@@ -79,7 +79,7 @@ public ASTNode preAnalyze(HiveSemanticAnalyzerHookContext context, ASTNode ast)
 
   @Override
   public void postAnalyze(HiveSemanticAnalyzerHookContext context,
-      List<Task<? extends Serializable>> rootTasks) throws SemanticException {
+      List<Task<?>> rootTasks) throws SemanticException {
     context.getConf().set(HCatConstants.HCAT_CREATE_DB_NAME, databaseName);
     super.postAnalyze(context, rootTasks);
   }
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateTableHook.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateTableHook.java
index 3dad6d2033..540ecd1546 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateTableHook.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateTableHook.java
@@ -132,7 +132,7 @@ public ASTNode preAnalyze(HiveSemanticAnalyzerHookContext context,
 
   @Override
   public void postAnalyze(HiveSemanticAnalyzerHookContext context,
-      List<Task<? extends Serializable>> rootTasks)
+      List<Task<?>> rootTasks)
     throws SemanticException {
 
     if (rootTasks.size() == 0) {
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java
index 2a96e0594c..b86a65f7e5 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java
@@ -152,7 +152,7 @@ public ASTNode preAnalyze(HiveSemanticAnalyzerHookContext context, ASTNode ast)
 
   @Override
   public void postAnalyze(HiveSemanticAnalyzerHookContext context,
-      List<Task<? extends Serializable>> rootTasks) throws SemanticException {
+      List<Task<?>> rootTasks) throws SemanticException {
     try {
 
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzerBase.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzerBase.java
index 970733c107..fffabe355f 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzerBase.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzerBase.java
@@ -55,7 +55,7 @@ public HiveAuthorizationProvider getAuthProvider() {
 
   @Override
   public void postAnalyze(HiveSemanticAnalyzerHookContext context,
-      List<Task<? extends Serializable>> rootTasks) throws SemanticException {
+      List<Task<?>> rootTasks) throws SemanticException {
     super.postAnalyze(context, rootTasks);
 
     //Authorize the operation.
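The four HCatalog hooks above only narrow the generic bound on postAnalyze's rootTasks parameter. A minimal sketch of a custom hook written against the narrowed signature follows; it assumes the plus side of these hunks is List<Task<?>> (reconstructed from the garbled generics), and LoggingSemanticAnalyzerHook is an illustrative class name, not part of the patch.

import java.util.List;

import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.parse.ASTNode;
import org.apache.hadoop.hive.ql.parse.AbstractSemanticAnalyzerHook;
import org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContext;
import org.apache.hadoop.hive.ql.parse.SemanticException;

public class LoggingSemanticAnalyzerHook extends AbstractSemanticAnalyzerHook {

  @Override
  public ASTNode preAnalyze(HiveSemanticAnalyzerHookContext context, ASTNode ast)
      throws SemanticException {
    return ast;                                            // leave the AST untouched
  }

  @Override
  public void postAnalyze(HiveSemanticAnalyzerHookContext context,
      List<Task<?>> rootTasks) throws SemanticException {  // was List<Task<? extends Serializable>>
    for (Task<?> task : rootTasks) {
      System.out.println("root task: " + task.getId());    // inspect the compiled root tasks
    }
  }
}

Because the parameter type of an overridden method must match exactly, any hook still declaring the old List<Task<? extends Serializable>> signature stops compiling, which is why every override in this patch changes in lock step.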
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java index 64375c159c..8f8c186331 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java @@ -1602,10 +1602,10 @@ private boolean requiresLock() { if (!HiveConf.getBoolVar(conf, ConfVars.HIVE_LOCK_MAPRED_ONLY)) { return true; } - Queue> taskQueue = new LinkedList>(); + Queue> taskQueue = new LinkedList>(); taskQueue.addAll(plan.getRootTasks()); while (taskQueue.peek() != null) { - Task tsk = taskQueue.remove(); + Task tsk = taskQueue.remove(); if (tsk.requireLock()) { return true; } @@ -1818,7 +1818,7 @@ private void execute() throws CommandProcessorResponse { SessionState.get().setLocalMapRedErrors(new HashMap<>()); // Add root Tasks to runnable - for (Task tsk : plan.getRootTasks()) { + for (Task tsk : plan.getRootTasks()) { // This should never happen, if it does, it's a bug with the potential to produce // incorrect results. assert tsk.getParentTasks() == null || tsk.getParentTasks().isEmpty(); @@ -1835,7 +1835,7 @@ private void execute() throws CommandProcessorResponse { // Loop while you either have tasks running, or tasks queued up while (driverCxt.isRunning()) { // Launch upto maxthreads tasks - Task task; + Task task; while ((task = driverCxt.getRunnable(maxthreads)) != null) { TaskRunner runner = launchTask(task, queryId, noName, jobname, jobs, driverCxt); if (!runner.isRunning()) { @@ -1863,14 +1863,14 @@ private void execute() throws CommandProcessorResponse { queryDisplay.setTaskResult(tskRun.getTask().getId(), tskRun.getTaskResult()); - Task tsk = tskRun.getTask(); + Task tsk = tskRun.getTask(); TaskResult result = tskRun.getTaskResult(); int exitVal = result.getExitVal(); checkInterrupted("when checking the execution result.", hookContext, perfLogger); if (exitVal != 0) { - Task backupTask = tsk.getAndInitBackupTask(); + Task backupTask = tsk.getAndInitBackupTask(); if (backupTask != null) { setErrorMsgAndDetail(exitVal, result.getTaskError(), tsk); console.printError(errorMessage); @@ -1920,7 +1920,7 @@ private void execute() throws CommandProcessorResponse { } if (tsk.getChildTasks() != null) { - for (Task child : tsk.getChildTasks()) { + for (Task child : tsk.getChildTasks()) { if (DriverContext.isLaunchable(child)) { driverCxt.addToRunnable(child); } @@ -2077,20 +2077,20 @@ private void releasePlan(QueryPlan plan) { } } - private void setQueryDisplays(List> tasks) { + private void setQueryDisplays(List> tasks) { if (tasks != null) { - Set> visited = new HashSet>(); + Set> visited = new HashSet>(); while (!tasks.isEmpty()) { tasks = setQueryDisplays(tasks, visited); } } } - private List> setQueryDisplays( - List> tasks, - Set> visited) { - List> childTasks = new ArrayList<>(); - for (Task task : tasks) { + private List> setQueryDisplays( + List> tasks, + Set> visited) { + List> childTasks = new ArrayList<>(); + for (Task task : tasks) { if (visited.contains(task)) { continue; } @@ -2156,7 +2156,7 @@ private void invokeFailureHooks(PerfLogger perfLogger, * @param cxt * the driver context */ - private TaskRunner launchTask(Task tsk, String queryId, boolean noName, + private TaskRunner launchTask(Task tsk, String queryId, boolean noName, String jobname, int jobs, DriverContext cxt) throws HiveException { if (SessionState.get() != null) { SessionState.get().getHiveHistory().startTask(queryId, tsk, tsk.getClass().getName()); @@ -2495,7 +2495,7 @@ public StatsSource getStatsSource() { public boolean 
hasResultSet() { // TODO explain should use a FetchTask for reading - for (Task task : plan.getRootTasks()) { + for (Task task : plan.getRootTasks()) { if (task.getClass() == ExplainTask.class) { return true; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/DriverContext.java b/ql/src/java/org/apache/hadoop/hive/ql/DriverContext.java index d5392ab804..1b8260aa68 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/DriverContext.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/DriverContext.java @@ -54,7 +54,7 @@ private static final int SLEEP_TIME = 2000; - private Queue> runnable; + private Queue> runnable; private Queue running; // how many jobs have been started @@ -69,7 +69,7 @@ public DriverContext() { } public DriverContext(Context ctx) { - this.runnable = new ConcurrentLinkedQueue>(); + this.runnable = new ConcurrentLinkedQueue>(); this.running = new LinkedBlockingQueue(); this.ctx = ctx; } @@ -82,7 +82,7 @@ public synchronized boolean isRunning() { return !shutdown && (!running.isEmpty() || !runnable.isEmpty()); } - public synchronized void remove(Task task) { + public synchronized void remove(Task task) { runnable.remove(task); } @@ -91,7 +91,7 @@ public synchronized void launching(TaskRunner runner) throws HiveException { running.add(runner); } - public synchronized Task getRunnable(int maxthreads) throws HiveException { + public synchronized Task getRunnable(int maxthreads) throws HiveException { checkShutdown(); if (runnable.peek() != null && running.size() < maxthreads) { return runnable.remove(); @@ -161,13 +161,13 @@ public synchronized void shutdown() { * @return true if the task is launchable, false otherwise */ - public static boolean isLaunchable(Task tsk) { + public static boolean isLaunchable(Task tsk) { // A launchable task is one that hasn't been queued, hasn't been // initialized, and is runnable. 
return tsk.isNotInitialized() && tsk.isRunnable(); } - public synchronized boolean addToRunnable(Task tsk) throws HiveException { + public synchronized boolean addToRunnable(Task tsk) throws HiveException { if (runnable.contains(tsk)) { return false; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/HookRunner.java b/ql/src/java/org/apache/hadoop/hive/ql/HookRunner.java index a3105b631d..2ba170b949 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/HookRunner.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/HookRunner.java @@ -228,7 +228,7 @@ public boolean hasPreAnalyzeHooks() { } public void runPostAnalyzeHooks(HiveSemanticAnalyzerHookContext hookCtx, - List> allRootTasks) throws HiveException { + List> allRootTasks) throws HiveException { initialize(); try { for (HiveSemanticAnalyzerHook hook : saHooks) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java b/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java index 7e49b6c883..1d01509c5f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java @@ -76,7 +76,7 @@ private String optimizedCBOPlan; private String optimizedQueryString; - private ArrayList> rootTasks; + private ArrayList> rootTasks; private FetchTask fetchTask; private final List reducerTimeStatsPerJobList; @@ -134,7 +134,7 @@ public QueryPlan(String queryString, BaseSemanticAnalyzer sem, Long startTime, S HiveOperation operation, Schema resultSchema) { this.queryString = queryString; - rootTasks = new ArrayList>(sem.getAllRootTasks()); + rootTasks = new ArrayList>(sem.getAllRootTasks()); reducerTimeStatsPerJobList = new ArrayList(); fetchTask = sem.getFetchTask(); // Note that inputs and outputs can be changed when the query gets executed @@ -264,12 +264,12 @@ private void populateQueryPlan() throws IOException { query.setStageGraph(new org.apache.hadoop.hive.ql.plan.api.Graph()); query.getStageGraph().setNodeType(NodeType.STAGE); - Queue> tasksToVisit = - new LinkedList>(); - Set> tasksVisited = new HashSet>(); + Queue> tasksToVisit = + new LinkedList>(); + Set> tasksVisited = new HashSet>(); tasksToVisit.addAll(rootTasks); while (tasksToVisit.size() != 0) { - Task task = tasksToVisit.remove(); + Task task = tasksToVisit.remove(); tasksVisited.add(task); // populate stage org.apache.hadoop.hive.ql.plan.api.Stage stage = @@ -315,14 +315,14 @@ private void populateQueryPlan() throws IOException { listEntry.setNode(task.getId()); ConditionalTask t = (ConditionalTask) task; - for (Task listTask : t.getListTasks()) { + for (Task listTask : t.getListTasks()) { if (t.getChildTasks() != null) { org.apache.hadoop.hive.ql.plan.api.Adjacency childEntry = new org.apache.hadoop.hive.ql.plan.api.Adjacency(); childEntry.setAdjacencyType(AdjacencyType.DISJUNCTIVE); childEntry.setNode(listTask.getId()); // done processing the task - for (Task childTask : t.getChildTasks()) { + for (Task childTask : t.getChildTasks()) { childEntry.addToChildren(childTask.getId()); if (!tasksVisited.contains(childTask)) { tasksToVisit.add(childTask); @@ -343,7 +343,7 @@ private void populateQueryPlan() throws IOException { entry.setAdjacencyType(AdjacencyType.CONJUNCTIVE); entry.setNode(task.getId()); // done processing the task - for (Task childTask : task.getChildTasks()) { + for (Task childTask : task.getChildTasks()) { entry.addToChildren(childTask.getId()); if (!tasksVisited.contains(childTask)) { tasksToVisit.add(childTask); @@ -399,17 +399,17 @@ private void updateCountersInQueryPlan() { * Extract all the counters from 
tasks and operators. */ private void extractCounters() throws IOException { - Queue> tasksToVisit = - new LinkedList>(); - Set> tasksVisited = - new HashSet>(); + Queue> tasksToVisit = + new LinkedList>(); + Set> tasksVisited = + new HashSet>(); tasksToVisit.addAll(rootTasks); while (tasksToVisit.peek() != null) { - Task task = tasksToVisit.remove(); + Task task = tasksToVisit.remove(); tasksVisited.add(task); // add children to tasksToVisit if (task.getChildTasks() != null) { - for (Task childTask : task.getChildTasks()) { + for (Task childTask : task.getChildTasks()) { if (!tasksVisited.contains(childTask)) { tasksToVisit.add(childTask); } @@ -450,7 +450,7 @@ private void extractCounters() throws IOException { } } else if (task instanceof ConditionalTask) { ConditionalTask cTask = (ConditionalTask) task; - for (Task listTask : cTask.getListTasks()) { + for (Task listTask : cTask.getListTasks()) { if (!tasksVisited.contains(listTask)) { tasksToVisit.add(listTask); } @@ -696,11 +696,11 @@ public void setDone() { return done; } - public ArrayList> getRootTasks() { + public ArrayList> getRootTasks() { return rootTasks; } - public void setRootTasks(ArrayList> rootTasks) { + public void setRootTasks(ArrayList> rootTasks) { this.rootTasks = rootTasks; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ConditionalTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ConditionalTask.java index c2b058ae5f..ecdf368b52 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ConditionalTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ConditionalTask.java @@ -33,11 +33,11 @@ public class ConditionalTask extends Task implements Serializable { private static final long serialVersionUID = 1L; - private List> listTasks; + private List> listTasks; private boolean resolved = false; - private List> resTasks; + private List> resTasks; private ConditionalResolver resolver; private Object resolverCtx; @@ -49,7 +49,7 @@ public ConditionalTask() { @Override public boolean isMapRedTask() { - for (Task task : listTasks) { + for (Task task : listTasks) { if (task.isMapRedTask()) { return true; } @@ -65,7 +65,7 @@ public boolean canExecuteInParallel() { @Override public boolean hasReduce() { - for (Task task : listTasks) { + for (Task task : listTasks) { if (task.hasReduce()) { return true; } @@ -89,7 +89,7 @@ public int execute(DriverContext driverContext) { } private void resolveTask(DriverContext driverContext) throws HiveException { - for (Task tsk : getListTasks()) { + for (Task tsk : getListTasks()) { if (!resTasks.contains(tsk)) { driverContext.remove(tsk); console.printInfo(tsk.getId() + " is filtered out by condition resolver."); @@ -101,7 +101,7 @@ private void resolveTask(DriverContext driverContext) throws HiveException { } else { if (getParentTasks() != null) { // This makes it so that we can go back up the tree later - for (Task task : getParentTasks()) { + for (Task task : getParentTasks()) { task.addDependentTask(tsk); } } @@ -140,20 +140,20 @@ public Object getResolverCtx() { @Override public boolean done() { boolean ret = true; - List> parentTasks = getParentTasks(); + List> parentTasks = getParentTasks(); if (parentTasks != null) { - for (Task par : parentTasks) { + for (Task par : parentTasks) { ret = ret && par.done(); } } - List> retTasks; + List> retTasks; if (resolved) { retTasks = resTasks; } else { retTasks = getListTasks(); } if (ret && retTasks != null) { - for (Task tsk : retTasks) { + for (Task tsk : retTasks) { ret = ret && tsk.done(); } } @@ -171,7 +171,7 @@ public 
void setResolverCtx(Object resolverCtx) { /** * @return the listTasks */ - public List> getListTasks() { + public List> getListTasks() { return listTasks; } @@ -179,7 +179,7 @@ public void setResolverCtx(Object resolverCtx) { * @param listTasks * the listTasks to set */ - public void setListTasks(List> listTasks) { + public void setListTasks(List> listTasks) { this.listTasks = listTasks; } @@ -200,11 +200,11 @@ public String getName() { * @return true if the task got added false if it already existed */ @Override - public boolean addDependentTask(Task dependent) { + public boolean addDependentTask(Task dependent) { boolean ret = false; if (getListTasks() != null) { ret = true; - for (Task tsk : getListTasks()) { + for (Task tsk : getListTasks()) { ret = ret & tsk.addDependentTask(dependent); } } @@ -212,7 +212,7 @@ public boolean addDependentTask(Task dependent) { } @Override - public List> getDependentTasks() { + public List> getDependentTasks() { return listTasks; } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/NodeUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/NodeUtils.java index f76bfddc1e..fc058374c4 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/NodeUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/NodeUtils.java @@ -34,7 +34,7 @@ public class NodeUtils { - public static void iterateTask(Collection> tasks, Class clazz, Function function) { + public static void iterateTask(Collection> tasks, Class clazz, Function function) { // Does a breadth first traversal of the tasks Set visited = new HashSet(); while (!tasks.isEmpty()) { @@ -43,7 +43,7 @@ return; } - private static Collection> iterateTask(Collection> tasks, + private static Collection> iterateTask(Collection> tasks, Class clazz, Function function, Set visited) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskRunner.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskRunner.java index 13010aedb6..a5554c3004 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskRunner.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskRunner.java @@ -33,7 +33,7 @@ **/ public class TaskRunner extends Thread { - protected Task tsk; + protected Task tsk; protected TaskResult result; protected SessionState ss; private static AtomicLong taskCounter = new AtomicLong(0); @@ -50,14 +50,14 @@ protected Long initialValue() { private final DriverContext driverCtx; - public TaskRunner(Task tsk, DriverContext ctx) { + public TaskRunner(Task tsk, DriverContext ctx) { this.tsk = tsk; this.result = new TaskResult(); ss = SessionState.get(); driverCtx = ctx; } - public Task getTask() { + public Task getTask() { return tsk; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java index 3d6859c1fb..e08191bbb0 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java @@ -2597,24 +2597,24 @@ public static boolean isEmptyPath(Configuration job, Path dirPath) throws IOExce return true; } - public static List getTezTasks(List> tasks) { + public static List getTezTasks(List> tasks) { return getTasks(tasks, new TaskFilterFunction<>(TezTask.class)); } - public static List getSparkTasks(List> tasks) { + public static List getSparkTasks(List> tasks) { return getTasks(tasks, new TaskFilterFunction<>(SparkTask.class)); } - public static List getMRTasks(List> tasks) { + public static List getMRTasks(List> tasks) { return getTasks(tasks, new 
TaskFilterFunction<>(ExecDriver.class)); } - public static int getNumClusterJobs(List> tasks) { + public static int getNumClusterJobs(List> tasks) { return getMRTasks(tasks).size() + getTezTasks(tasks).size() + getSparkTasks(tasks).size(); } static class TaskFilterFunction implements DAGTraversal.Function { - private Set> visited = new HashSet<>(); + private Set> visited = new HashSet<>(); private Class requiredType; private List typeSpecificTasks = new ArrayList<>(); @@ -2623,7 +2623,7 @@ public static int getNumClusterJobs(List> tasks) { } @Override - public void process(Task task) { + public void process(Task task) { if (requiredType.isInstance(task) && !typeSpecificTasks.contains(task)) { typeSpecificTasks.add((T) task); } @@ -2635,12 +2635,12 @@ public void process(Task task) { } @Override - public boolean skipProcessing(Task task) { + public boolean skipProcessing(Task task) { return visited.contains(task); } } - private static List getTasks(List> tasks, + private static List getTasks(List> tasks, TaskFilterFunction function) { DAGTraversal.traverse(tasks, function); return function.getTasks(); @@ -2829,7 +2829,7 @@ public static double showTime(long time) { * @param conf * @throws SemanticException */ - public static void reworkMapRedWork(Task task, + public static void reworkMapRedWork(Task task, boolean reworkMapredWork, HiveConf conf) throws SemanticException { if (reworkMapredWork && (task instanceof MapRedTask)) { try { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistory.java b/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistory.java index 327628f8a0..33bf6a969c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistory.java @@ -172,7 +172,7 @@ public void setTaskProperty(String queryId, String taskId, Keys propName, * * @param task */ - public void startTask(String queryId, Task task, + public void startTask(String queryId, Task task, String taskName); /** @@ -180,7 +180,7 @@ public void startTask(String queryId, Task task, * * @param task */ - public void endTask(String queryId, Task task); + public void endTask(String queryId, Task task); /** * Logs progress of a task if ConfVars.HIVE_LOG_INCREMENTAL_PLAN_PROGRESS is @@ -188,7 +188,7 @@ public void startTask(String queryId, Task task, * * @param task */ - public void progressTask(String queryId, Task task); + public void progressTask(String queryId, Task task); /** diff --git a/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistoryImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistoryImpl.java index 0bfa78dda4..80eaf001b5 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistoryImpl.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistoryImpl.java @@ -263,7 +263,7 @@ public void endQuery(String queryId) { } @Override - public void startTask(String queryId, Task task, + public void startTask(String queryId, Task task, String taskName) { TaskInfo ti = new TaskInfo(); @@ -279,7 +279,7 @@ public void startTask(String queryId, Task task, } @Override - public void endTask(String queryId, Task task) { + public void endTask(String queryId, Task task) { String id = queryId + ":" + task.getId(); TaskInfo ti = taskInfoMap.get(id); @@ -291,7 +291,7 @@ public void endTask(String queryId, Task task) { } @Override - public void progressTask(String queryId, Task task) { + public void progressTask(String queryId, Task task) { String id = queryId + ":" + task.getId(); TaskInfo ti = 
taskInfoMap.get(id); if (ti == null) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/NoOperatorReuseCheckerHook.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/NoOperatorReuseCheckerHook.java index 494459abd7..445c970885 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/NoOperatorReuseCheckerHook.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/NoOperatorReuseCheckerHook.java @@ -74,8 +74,8 @@ public void run(HookContext hookContext) throws Exception { List rootOps = Lists.newArrayList(); - ArrayList> roots = hookContext.getQueryPlan().getRootTasks(); - for (Task task : roots) { + ArrayList> roots = hookContext.getQueryPlan().getRootTasks(); + for (Task task : roots) { Object work = task.getWork(); if (work instanceof MapredWork) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lib/TaskGraphWalker.java b/ql/src/java/org/apache/hadoop/hive/ql/lib/TaskGraphWalker.java index edb7716b02..23cdb625c0 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/lib/TaskGraphWalker.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/lib/TaskGraphWalker.java @@ -148,31 +148,31 @@ public void walk(Node nd) throws SemanticException { opStack.push(nd); } - List> nextTaskList = null; - Set> nextTaskSet = new HashSet>(); - List> taskListInConditionalTask = null; + List> nextTaskList = null; + Set> nextTaskSet = new HashSet>(); + List> taskListInConditionalTask = null; if(nd instanceof ConditionalTask ){ //for conditional task, next task list should return the children tasks of each task, which //is contained in the conditional task. taskListInConditionalTask = ((ConditionalTask) nd).getListTasks(); - for(Task tsk: taskListInConditionalTask){ - List> childTask = tsk.getChildTasks(); + for(Task tsk: taskListInConditionalTask){ + List> childTask = tsk.getChildTasks(); if(childTask != null){ nextTaskSet.addAll(tsk.getChildTasks()); } } //convert the set into list if(nextTaskSet.size()>0){ - nextTaskList = new ArrayList>(); - for(Task tsk:nextTaskSet ){ + nextTaskList = new ArrayList>(); + for(Task tsk:nextTaskSet ){ nextTaskList.add(tsk); } } }else{ //for other tasks, just return its children tasks - nextTaskList = ((Task)nd).getChildTasks(); + nextTaskList = ((Task)nd).getChildTasks(); } if ((nextTaskList == null) diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java index 25c6b24f46..10a0405eee 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java @@ -74,7 +74,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx opProcCtx, Map, GenMapRedCtx> mapCurrCtx = ctx .getMapCurrCtx(); GenMapRedCtx mapredCtx = mapCurrCtx.get(fsOp.getParentOperators().get(0)); - Task currTask = mapredCtx.getCurrTask(); + Task currTask = mapredCtx.getCurrTask(); ctx.setCurrTask(currTask); ctx.addRootIfPossible(currTask); @@ -88,9 +88,9 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx opProcCtx, // If this file sink desc has been processed due to a linked file sink desc, // use that task - Map> fileSinkDescs = ctx.getLinkedFileDescTasks(); + Map> fileSinkDescs = ctx.getLinkedFileDescTasks(); if (fileSinkDescs != null) { - Task childTask = fileSinkDescs.get(fsOp.getConf()); + Task childTask = fileSinkDescs.get(fsOp.getConf()); processLinkedFileDesc(ctx, childTask); return true; } @@ -119,10 +119,10 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx opProcCtx, // There are linked file 
sink operators and child tasks are present if (fileSinkDesc.isLinkedFileSink() && (currTask.getChildTasks() != null) && (currTask.getChildTasks().size() == 1)) { - Map> linkedFileDescTasks = + Map> linkedFileDescTasks = ctx.getLinkedFileDescTasks(); if (linkedFileDescTasks == null) { - linkedFileDescTasks = new HashMap>(); + linkedFileDescTasks = new HashMap>(); ctx.setLinkedFileDescTasks(linkedFileDescTasks); } @@ -145,8 +145,8 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx opProcCtx, * Use the task created by the first linked file descriptor */ private void processLinkedFileDesc(GenMRProcContext ctx, - Task childTask) throws SemanticException { - Task currTask = ctx.getCurrTask(); + Task childTask) throws SemanticException { + Task currTask = ctx.getCurrTask(); TableScanOperator currTopOp = ctx.getCurrTopOp(); if (currTopOp != null && !ctx.isSeenOp(currTask, currTopOp)) { String currAliasId = ctx.getCurrAliasId(); @@ -176,7 +176,7 @@ private Path processFS(FileSinkOperator fsOp, Stack stack, NodeProcessorCtx opProcCtx, boolean chDir) throws SemanticException { GenMRProcContext ctx = (GenMRProcContext) opProcCtx; - Task currTask = ctx.getCurrTask(); + Task currTask = ctx.getCurrTask(); // If the directory needs to be changed, send the new directory Path dest = null; @@ -195,7 +195,7 @@ private Path processFS(FileSinkOperator fsOp, Stack stack, TableScanOperator currTopOp = ctx.getCurrTopOp(); String currAliasId = ctx.getCurrAliasId(); - HashMap, Task> opTaskMap = + HashMap, Task> opTaskMap = ctx.getOpTaskMap(); // In case of multi-table insert, the path to alias mapping is needed for @@ -203,7 +203,7 @@ private Path processFS(FileSinkOperator fsOp, Stack stack, // reducer, treat it as a plan with null reducer // If it is a map-only job, the task needs to be processed if (currTopOp != null) { - Task mapTask = opTaskMap.get(null); + Task mapTask = opTaskMap.get(null); if (mapTask == null) { if (!ctx.isSeenOp(currTask, currTopOp)) { GenMapRedUtils.setTaskPlan(currAliasId, currTopOp, currTask, false, ctx); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRProcContext.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRProcContext.java index a6e2f53b48..bbda668c08 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRProcContext.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRProcContext.java @@ -56,7 +56,7 @@ * GenMapRedCtx is used to keep track of the current state. 
*/ public static class GenMapRedCtx { - Task currTask; + Task currTask; String currAliasId; public GenMapRedCtx() { @@ -67,7 +67,7 @@ public GenMapRedCtx() { * the current task * @param currAliasId */ - public GenMapRedCtx(Task currTask, String currAliasId) { + public GenMapRedCtx(Task currTask, String currAliasId) { this.currTask = currTask; this.currAliasId = currAliasId; } @@ -75,7 +75,7 @@ public GenMapRedCtx(Task currTask, String currAliasId) { /** * @return current task */ - public Task getCurrTask() { + public Task getCurrTask() { return currTask; } @@ -92,19 +92,19 @@ public String getCurrAliasId() { * */ public static class GenMRUnionCtx { - final Task uTask; + final Task uTask; List taskTmpDir; List tt_desc; List listTopOperators; - public GenMRUnionCtx(Task uTask) { + public GenMRUnionCtx(Task uTask) { this.uTask = uTask; taskTmpDir = new ArrayList(); tt_desc = new ArrayList(); listTopOperators = new ArrayList<>(); } - public Task getUTask() { + public Task getUTask() { return uTask; } @@ -135,19 +135,19 @@ public void addListTopOperators(TableScanOperator topOperator) { private HiveConf conf; private - HashMap, Task> opTaskMap; + HashMap, Task> opTaskMap; private - HashMap, List>> taskToSeenOps; + HashMap, List>> taskToSeenOps; private HashMap unionTaskMap; private List seenFileSinkOps; private ParseContext parseCtx; private List> mvTask; - private List> rootTasks; + private List> rootTasks; private LinkedHashMap, GenMapRedCtx> mapCurrCtx; - private Task currTask; + private Task currTask; private TableScanOperator currTopOp; private UnionOperator currUnionOp; private String currAliasId; @@ -155,7 +155,7 @@ public void addListTopOperators(TableScanOperator topOperator) { // If many fileSinkDescs are linked to each other, it is a good idea to keep track of // tasks for first fileSinkDesc. others can use it - private Map> linkedFileDescTasks; + private Map> linkedFileDescTasks; /** * Set of read entities. 
This list is generated by the walker and is passed to @@ -191,10 +191,10 @@ public GenMRProcContext() { */ public GenMRProcContext( HiveConf conf, - HashMap, Task> opTaskMap, + HashMap, Task> opTaskMap, ParseContext parseCtx, List> mvTask, - List> rootTasks, + List> rootTasks, LinkedHashMap, GenMapRedCtx> mapCurrCtx, Set inputs, Set outputs) { this.conf = conf; @@ -210,7 +210,7 @@ public GenMRProcContext( currUnionOp = null; currAliasId = null; unionTaskMap = new HashMap(); - taskToSeenOps = new HashMap, + taskToSeenOps = new HashMap, List>>(); dependencyTaskForMultiInsert = null; linkedFileDescTasks = null; @@ -231,7 +231,7 @@ public void reset() { * @return reducer to task mapping */ public HashMap, - Task> getOpTaskMap() { + Task> getOpTaskMap() { return opTaskMap; } @@ -240,7 +240,7 @@ public void reset() { * reducer to task mapping */ public void setOpTaskMap( - HashMap, Task> opTaskMap) { + HashMap, Task> opTaskMap) { this.opTaskMap = opTaskMap; } @@ -305,7 +305,7 @@ public void setMvTask(List> mvTask) { /** * @return root tasks for the plan */ - public List> getRootTasks() { + public List> getRootTasks() { return rootTasks; } @@ -313,11 +313,11 @@ public void setMvTask(List> mvTask) { * @param rootTasks * root tasks for the plan */ - public void setRootTasks(List> rootTasks) { + public void setRootTasks(List> rootTasks) { this.rootTasks = rootTasks; } - public boolean addRootIfPossible(Task task) { + public boolean addRootIfPossible(Task task) { if (task.getParentTasks() == null || task.getParentTasks().isEmpty()) { if (!rootTasks.contains(task)) { return rootTasks.add(task); @@ -345,7 +345,7 @@ public void setMapCurrCtx( /** * @return current task */ - public Task getCurrTask() { + public Task getCurrTask() { return currTask; } @@ -353,7 +353,7 @@ public void setMapCurrCtx( * @param currTask * current task */ - public void setCurrTask(Task currTask) { + public void setCurrTask(Task currTask) { this.currTask = currTask; } @@ -454,12 +454,12 @@ public DependencyCollectionTask getDependencyTaskForMultiInsert() { return dependencyTaskForMultiInsert; } - public Map> getLinkedFileDescTasks() { + public Map> getLinkedFileDescTasks() { return linkedFileDescTasks; } public void setLinkedFileDescTasks( - Map> linkedFileDescTasks) { + Map> linkedFileDescTasks) { this.linkedFileDescTasks = linkedFileDescTasks; } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink1.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink1.java index 8b30c8226c..033cbdc807 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink1.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink1.java @@ -61,7 +61,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx opProcCtx, Map, GenMapRedCtx> mapCurrCtx = ctx .getMapCurrCtx(); GenMapRedCtx mapredCtx = mapCurrCtx.get(stack.get(stack.size() - 2)); - Task currTask = mapredCtx.getCurrTask(); + Task currTask = mapredCtx.getCurrTask(); MapredWork currPlan = (MapredWork) currTask.getWork(); String currAliasId = mapredCtx.getCurrAliasId(); @@ -70,7 +70,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx opProcCtx, "But found multiple children : " + op.getChildOperators()); } Operator reducer = op.getChildOperators().get(0); - Task oldTask = ctx.getOpTaskMap().get(reducer); + Task oldTask = ctx.getOpTaskMap().get(reducer); ctx.setCurrAliasId(currAliasId); ctx.setCurrTask(currTask); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink2.java 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink2.java index 35cdc0402a..8c997c7754 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink2.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink2.java @@ -56,12 +56,12 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx opProcCtx, Map, GenMapRedCtx> mapCurrCtx = ctx .getMapCurrCtx(); GenMapRedCtx mapredCtx = mapCurrCtx.get(op.getParentOperators().get(0)); - Task currTask = mapredCtx.getCurrTask(); + Task currTask = mapredCtx.getCurrTask(); String currAliasId = mapredCtx.getCurrAliasId(); Operator reducer = op.getChildOperators().get(0); - Map, Task> opTaskMap = ctx + Map, Task> opTaskMap = ctx .getOpTaskMap(); - Task oldTask = opTaskMap.get(reducer); + Task oldTask = opTaskMap.get(reducer); ctx.setCurrAliasId(currAliasId); ctx.setCurrTask(currTask); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink3.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink3.java index fbab075a47..ed4bb30c08 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink3.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink3.java @@ -67,7 +67,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx opProcCtx, .getMapCurrCtx(); GenMapRedCtx mapredCtx = mapCurrCtx.get(union); - Task unionTask = null; + Task unionTask = null; if(mapredCtx != null) { unionTask = mapredCtx.getCurrTask(); } else { @@ -76,9 +76,9 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx opProcCtx, MapredWork plan = (MapredWork) unionTask.getWork(); - HashMap, Task> opTaskMap = ctx + HashMap, Task> opTaskMap = ctx .getOpTaskMap(); - Task reducerTask = opTaskMap.get(reducer); + Task reducerTask = opTaskMap.get(reducer); ctx.setCurrTask(unionTask); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRUnion1.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRUnion1.java index 8f01507a2f..abf363a348 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRUnion1.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRUnion1.java @@ -65,7 +65,7 @@ private Object processMapOnlyUnion(UnionOperator union, Stack stack, GenMRUnionCtx uCtxTask = ctx.getUnionTask(union); if (uCtxTask != null) { // get task associated with this union - Task uTask = ctx.getUnionTask(union).getUTask(); + Task uTask = ctx.getUnionTask(union).getUTask(); if (uTask != null) { if (ctx.getCurrTask() != null && ctx.getCurrTask() != uTask) { // if ctx.getCurrTask() is in rootTasks, should be removed @@ -88,7 +88,7 @@ private Object processMapOnlyUnion(UnionOperator union, Stack stack, ctx.setUnionTask(union, uCtxTask); } - Task uTask = ctx.getCurrTask(); + Task uTask = ctx.getCurrTask(); if (uTask.getParentTasks() == null || uTask.getParentTasks().isEmpty()) { if (!ctx.getRootTasks().contains(uTask)) { @@ -115,7 +115,7 @@ private Object processMapOnlyUnion(UnionOperator union, Stack stack, private void processSubQueryUnionCreateIntermediate( Operator parent, Operator child, - Task uTask, GenMRProcContext ctx, + Task uTask, GenMRProcContext ctx, GenMRUnionCtx uCtxTask) { ParseContext parseCtx = ctx.getParseCtx(); @@ -141,7 +141,7 @@ private void processSubQueryUnionCreateIntermediate( // assembled in the union context and later used to initialize the union // plan - Task currTask = ctx.getCurrTask(); + Task currTask = ctx.getCurrTask(); currTask.addDependentTask(uTask); if (ctx.getRootTasks().contains(uTask)) { ctx.getRootTasks().remove(uTask); 
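GenMRUnion1.processSubQueryUnionCreateIntermediate above shows the standard re-rooting move in plan generation: once a task gains a parent it must be dropped from the root-task list. A small sketch of that invariant, assuming the post-patch addDependentTask accepts Task<?>; TaskWiring and wireAsChild are illustrative names only.

import java.util.List;

import org.apache.hadoop.hive.ql.exec.Task;

final class TaskWiring {
  private TaskWiring() {}

  /** Makes 'child' depend on 'parent' and drops it from the plan's root list. */
  static void wireAsChild(Task<?> parent, Task<?> child, List<Task<?>> rootTasks) {
    parent.addDependentTask(child);   // returns false (and changes nothing) if the edge already exists
    rootTasks.remove(child);          // a task with a parent can no longer be a root task
  }
}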
@@ -168,7 +168,7 @@ private void processSubQueryUnionMerge(GenMRProcContext ctx, throws SemanticException { // The current plan can be thrown away after being merged with the union // plan - Task uTask = uCtxTask.getUTask(); + Task uTask = uCtxTask.getUTask(); ctx.setCurrTask(uTask); TableScanOperator topOp = ctx.getCurrTopOp(); if (topOp != null && !ctx.isSeenOp(uTask, topOp)) { @@ -220,10 +220,10 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx opProcCtx, assert uPrsCtx != null; - Task currTask = ctx.getCurrTask(); + Task currTask = ctx.getCurrTask(); int pos = UnionProcFactory.getPositionParent(union, stack); - Task uTask = null; + Task uTask = null; MapredWork uPlan = null; // union is encountered for the first time @@ -272,7 +272,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx opProcCtx, } private boolean shouldBeRootTask( - Task currTask) { + Task currTask) { return currTask.getParentTasks() == null || (currTask.getParentTasks().size() == 0); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java index 5d6143d6a4..afbf80c02c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java @@ -152,9 +152,9 @@ public static void initPlan(ReduceSinkOperator op, GenMRProcContext opProcCtx) Map, GenMapRedCtx> mapCurrCtx = opProcCtx.getMapCurrCtx(); GenMapRedCtx mapredCtx = mapCurrCtx.get(op.getParentOperators().get(0)); - Task currTask = mapredCtx.getCurrTask(); + Task currTask = mapredCtx.getCurrTask(); MapredWork plan = (MapredWork) currTask.getWork(); - HashMap, Task> opTaskMap = + HashMap, Task> opTaskMap = opProcCtx.getOpTaskMap(); TableScanOperator currTopOp = opProcCtx.getCurrTopOp(); @@ -195,11 +195,11 @@ public static void initPlan(ReduceSinkOperator op, GenMRProcContext opProcCtx) */ public static void initUnionPlan(ReduceSinkOperator op, UnionOperator currUnionOp, GenMRProcContext opProcCtx, - Task unionTask) throws SemanticException { + Task unionTask) throws SemanticException { Operator reducer = op.getChildOperators().get(0); MapredWork plan = (MapredWork) unionTask.getWork(); - HashMap, Task> opTaskMap = + HashMap, Task> opTaskMap = opProcCtx.getOpTaskMap(); opTaskMap.put(reducer, unionTask); @@ -219,7 +219,7 @@ public static void initUnionPlan(ReduceSinkOperator op, UnionOperator currUnionO } private static void setUnionPlan(GenMRProcContext opProcCtx, - boolean local, Task currTask, GenMRUnionCtx uCtx, + boolean local, Task currTask, GenMRUnionCtx uCtx, boolean mergeTask) throws SemanticException { TableScanOperator currTopOp = opProcCtx.getCurrTopOp(); @@ -265,7 +265,7 @@ private static void setUnionPlan(GenMRProcContext opProcCtx, * for the union. The plan has already been created. 
*/ public static void initUnionPlan(GenMRProcContext opProcCtx, UnionOperator currUnionOp, - Task currTask, boolean local) + Task currTask, boolean local) throws SemanticException { // In case of lateral views followed by a join, the same tree // can be traversed more than one @@ -281,8 +281,8 @@ public static void initUnionPlan(GenMRProcContext opProcCtx, UnionOperator currU */ public static void joinUnionPlan(GenMRProcContext opProcCtx, UnionOperator currUnionOp, - Task currentUnionTask, - Task existingTask, boolean local) + Task currentUnionTask, + Task existingTask, boolean local) throws SemanticException { assert currUnionOp != null; GenMRUnionCtx uCtx = opProcCtx.getUnionTask(currUnionOp); @@ -290,7 +290,7 @@ public static void joinUnionPlan(GenMRProcContext opProcCtx, setUnionPlan(opProcCtx, local, existingTask, uCtx, true); - List> parTasks = null; + List> parTasks = null; if (opProcCtx.getRootTasks().contains(currentUnionTask)) { opProcCtx.getRootTasks().remove(currentUnionTask); if (!opProcCtx.getRootTasks().contains(existingTask) && @@ -301,17 +301,17 @@ public static void joinUnionPlan(GenMRProcContext opProcCtx, if ((currentUnionTask != null) && (currentUnionTask.getParentTasks() != null) && !currentUnionTask.getParentTasks().isEmpty()) { - parTasks = new ArrayList>(); + parTasks = new ArrayList>(); parTasks.addAll(currentUnionTask.getParentTasks()); Object[] parTaskArr = parTasks.toArray(); for (Object parTask : parTaskArr) { - ((Task) parTask) + ((Task) parTask) .removeDependentTask(currentUnionTask); } } if ((currentUnionTask != null) && (parTasks != null)) { - for (Task parTask : parTasks) { + for (Task parTask : parTasks) { parTask.addDependentTask(existingTask); if (opProcCtx.getRootTasks().contains(existingTask)) { opProcCtx.getRootTasks().remove(existingTask); @@ -332,22 +332,22 @@ public static void joinUnionPlan(GenMRProcContext opProcCtx, * @param opProcCtx * processing context */ - public static void joinPlan(Task currTask, - Task oldTask, GenMRProcContext opProcCtx) + public static void joinPlan(Task currTask, + Task oldTask, GenMRProcContext opProcCtx) throws SemanticException { assert currTask != null && oldTask != null; TableScanOperator currTopOp = opProcCtx.getCurrTopOp(); - List> parTasks = null; + List> parTasks = null; // terminate the old task and make current task dependent on it if (currTask.getParentTasks() != null && !currTask.getParentTasks().isEmpty()) { - parTasks = new ArrayList>(); + parTasks = new ArrayList>(); parTasks.addAll(currTask.getParentTasks()); Object[] parTaskArr = parTasks.toArray(); for (Object element : parTaskArr) { - ((Task) element).removeDependentTask(currTask); + ((Task) element).removeDependentTask(currTask); } } @@ -356,7 +356,7 @@ public static void joinPlan(Task currTask, } if (parTasks != null) { - for (Task parTask : parTasks) { + for (Task parTask : parTasks) { parTask.addDependentTask(oldTask); } } @@ -374,7 +374,7 @@ public static void joinPlan(Task currTask, * If currTopOp is not set for input of the task, add input for to the task */ static boolean mergeInput(TableScanOperator currTopOp, - GenMRProcContext opProcCtx, Task task, boolean local) + GenMRProcContext opProcCtx, Task task, boolean local) throws SemanticException { if (!opProcCtx.isSeenOp(task, currTopOp)) { String currAliasId = opProcCtx.getCurrAliasId(); @@ -389,7 +389,7 @@ static boolean mergeInput(TableScanOperator currTopOp, * Split and link two tasks by temporary file : pRS-FS / TS-cRS-OP */ static void splitPlan(ReduceSinkOperator cRS, - Task 
parentTask, Task childTask, + Task parentTask, Task childTask, GenMRProcContext opProcCtx) throws SemanticException { assert parentTask != null && childTask != null; splitTasks(cRS, parentTask, childTask, opProcCtx); @@ -408,10 +408,10 @@ static void splitPlan(ReduceSinkOperator cRS, GenMRProcContext opProcCtx) throws SemanticException { // Generate a new task ParseContext parseCtx = opProcCtx.getParseCtx(); - Task parentTask = opProcCtx.getCurrTask(); + Task parentTask = opProcCtx.getCurrTask(); MapredWork childPlan = getMapRedWork(parseCtx); - Task childTask = TaskFactory.get(childPlan); + Task childTask = TaskFactory.get(childPlan); Operator reducer = cRS.getChildOperators().get(0); // Add the reducer @@ -850,12 +850,12 @@ public static void setKeyAndValueDesc(ReduceWork plan, * * @param task */ - public static void setKeyAndValueDescForTaskTree(Task task) { + public static void setKeyAndValueDescForTaskTree(Task task) { if (task instanceof ConditionalTask) { - List> listTasks = ((ConditionalTask) task) + List> listTasks = ((ConditionalTask) task) .getListTasks(); - for (Task tsk : listTasks) { + for (Task tsk : listTasks) { setKeyAndValueDescForTaskTree(tsk); } } else if (task instanceof ExecDriver) { @@ -887,7 +887,7 @@ public static void setKeyAndValueDescForTaskTree(Task ta return; } - for (Task childTask : task.getChildTasks()) { + for (Task childTask : task.getChildTasks()) { setKeyAndValueDescForTaskTree(childTask); } } @@ -919,7 +919,7 @@ public static void setKeyAndValueDescForTaskTree(Task ta * for an older release will also require picking HIVE-17195 at the least. */ public static void finalMapWorkChores( - List> tasks, Configuration conf, + List> tasks, Configuration conf, Interner interner) { List mrTasks = Utilities.getMRTasks(tasks); if (!mrTasks.isEmpty()) { @@ -1062,7 +1062,7 @@ public static TableScanOperator createTemporaryFile( * @param opProcCtx context **/ private static void splitTasks(ReduceSinkOperator op, - Task parentTask, Task childTask, + Task parentTask, Task childTask, GenMRProcContext opProcCtx) throws SemanticException { if (op.getNumParent() != 1) { throw new IllegalStateException("Expecting operator " + op + " to have one parent. 
" + @@ -1074,7 +1074,7 @@ private static void splitTasks(ReduceSinkOperator op, // Root Task cannot depend on any other task, therefore childTask cannot be // a root Task - List> rootTasks = opProcCtx.getRootTasks(); + List> rootTasks = opProcCtx.getRootTasks(); if (rootTasks.contains(childTask)) { rootTasks.remove(childTask); } @@ -1262,7 +1262,7 @@ public static void replaceMapWork(String sourceAlias, String targetAlias, public static void createMRWorkForMergingFiles(FileSinkOperator fsInput, Path finalName, DependencyCollectionTask dependencyTask, List> mvTasks, HiveConf conf, - Task currTask, LineageState lineageState) + Task currTask, LineageState lineageState) throws SemanticException { // @@ -1404,7 +1404,7 @@ public static void createMRWorkForMergingFiles(FileSinkOperator fsInput, * @param dependencyTask */ private static void linkMoveTask(Task mvTask, - Task task, HiveConf hconf, + Task task, HiveConf hconf, DependencyCollectionTask dependencyTask) { if (task.getDependentTasks() == null || task.getDependentTasks().isEmpty()) { @@ -1412,7 +1412,7 @@ private static void linkMoveTask(Task mvTask, addDependentMoveTasks(mvTask, hconf, task, dependencyTask); } else { // Otherwise, for each child run this method recursively - for (Task childTask : task.getDependentTasks()) { + for (Task childTask : task.getDependentTasks()) { linkMoveTask(mvTask, childTask, hconf, dependencyTask); } } @@ -1430,7 +1430,7 @@ private static void linkMoveTask(Task mvTask, * @param dependencyTask */ public static void addDependentMoveTasks(Task mvTask, HiveConf hconf, - Task parentTask, DependencyCollectionTask dependencyTask) { + Task parentTask, DependencyCollectionTask dependencyTask) { if (mvTask != null) { if (dependencyTask != null) { @@ -1483,7 +1483,7 @@ private static Path getTableLocationPath(final HiveConf hconf, final TableDesc t * HiveConf */ public static void addStatsTask(FileSinkOperator nd, MoveTask mvTask, - Task currTask, HiveConf hconf) { + Task currTask, HiveConf hconf) { MoveWork mvWork = mvTask.getWork(); BasicStatsWork statsWork = null; @@ -1552,7 +1552,7 @@ public static void addStatsTask(FileSinkOperator nd, MoveTask mvTask, columnStatsWork.truncateExisting(truncate); columnStatsWork.setSourceTask(currTask); - Task statsTask = TaskFactory.get(columnStatsWork); + Task statsTask = TaskFactory.get(columnStatsWork); // subscribe feeds from the MoveTask so that MoveTask can forward the list // of dynamic partition list to the StatsTask @@ -1780,7 +1780,7 @@ protected static MoveWork mergeMovePaths(Path condInputPath, MoveWork linkedMove */ @SuppressWarnings("unchecked") private static ConditionalTask createCondTask(HiveConf conf, - Task currTask, MoveWork mvWork, Serializable mergeWork, + Task currTask, MoveWork mvWork, Serializable mergeWork, Path condInputPath, Path condOutputPath, Task moveTaskToLink, DependencyCollectionTask dependencyTask, LineageState lineageState) { if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) { @@ -1807,10 +1807,10 @@ private static ConditionalTask createCondTask(HiveConf conf, // conflicts. // TODO: if we are not dealing with concatenate DDL, we should not create a merge+move path // because it should be impossible to get incompatible outputs. 
- Task mergeOnlyMergeTask = TaskFactory.get(mergeWork); - Task moveOnlyMoveTask = TaskFactory.get(workForMoveOnlyTask); - Task mergeAndMoveMergeTask = TaskFactory.get(mergeWork); - Task mergeAndMoveMoveTask = TaskFactory.get(moveWork); + Task mergeOnlyMergeTask = TaskFactory.get(mergeWork); + Task moveOnlyMoveTask = TaskFactory.get(workForMoveOnlyTask); + Task mergeAndMoveMergeTask = TaskFactory.get(mergeWork); + Task mergeAndMoveMoveTask = TaskFactory.get(moveWork); // NOTE! It is necessary merge task is the parent of the move task, and not // the other way around, for the proper execution of the execute method of @@ -1823,7 +1823,7 @@ private static ConditionalTask createCondTask(HiveConf conf, ConditionalWork cndWork = new ConditionalWork(listWorks); - List> listTasks = new ArrayList>(); + List> listTasks = new ArrayList>(); listTasks.add(moveOnlyMoveTask); listTasks.add(mergeOnlyMergeTask); listTasks.add(mergeAndMoveMergeTask); @@ -1902,7 +1902,7 @@ public static boolean isSkewedStoredAsDirs(FileSinkDesc fsInputDesc) { * Returns true iff the fsOp requires a merge */ public static boolean isMergeRequired(List> mvTasks, HiveConf hconf, - FileSinkOperator fsOp, Task currTask, boolean isInsertTable) { + FileSinkOperator fsOp, Task currTask, boolean isInsertTable) { // Has the user enabled merging of files for map-only jobs or for all jobs if (mvTasks == null || mvTasks.isEmpty()) { return false; @@ -1939,7 +1939,7 @@ public static boolean isMergeRequired(List> mvTasks, HiveConf hco } private static boolean isMergeRequiredForMr(HiveConf hconf, - FileSinkOperator fsOp, Task currTask) { + FileSinkOperator fsOp, Task currTask) { if (fsOp.getConf().isLinkedFileSink()) { // If the user has HIVEMERGEMAPREDFILES set to false, the idea was the // number of reducers are few, so the number of files anyway are small. 
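Earlier in this file, setKeyAndValueDescForTaskTree and linkMoveTask both walk the task tree the same way: recurse into a ConditionalTask's getListTasks() branches, then into getChildTasks(). A stripped-down sketch of that traversal pattern, assuming the post-patch accessors return List<Task<?>>; TaskTreeWalker is an illustrative name.

import java.util.List;

import org.apache.hadoop.hive.ql.exec.ConditionalTask;
import org.apache.hadoop.hive.ql.exec.Task;

final class TaskTreeWalker {
  private TaskTreeWalker() {}

  static void visit(Task<?> task) {
    System.out.println("visiting " + task.getId());
    if (task instanceof ConditionalTask) {
      // a ConditionalTask fans out into candidate branches rather than ordinary children
      for (Task<?> branch : ((ConditionalTask) task).getListTasks()) {
        visit(branch);
      }
    }
    List<Task<?>> children = task.getChildTasks();
    if (children != null) {
      for (Task<?> child : children) {
        visit(child);
      }
    }
  }
}

Since the compiled plan is a DAG rather than a strict tree, callers that may hit shared subtrees pair this recursion with a visited set; see the breadth-first variant sketched at the end of the section.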
@@ -1977,7 +1977,7 @@ private static boolean isMergeRequiredForMr(HiveConf hconf, * @param dependencyTask * @return */ - public static Path createMoveTask(Task currTask, boolean chDir, + public static Path createMoveTask(Task currTask, boolean chDir, FileSinkOperator fsOp, ParseContext parseCtx, List> mvTasks, HiveConf hconf, DependencyCollectionTask dependencyTask) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinFactory.java index 6bf4deb0da..21d792e2ee 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinFactory.java @@ -146,7 +146,7 @@ public static void setupBucketMapJoinInfo(MapWork plan, * position of the parent */ private static void initMapJoinPlan(AbstractMapJoinOperator op, - Task currTask, + Task currTask, GenMRProcContext opProcCtx, boolean local) throws SemanticException { @@ -171,7 +171,7 @@ private static void initMapJoinPlan(AbstractMapJoinOperator oldTask, + private static void joinMapJoinPlan(Task oldTask, GenMRProcContext opProcCtx, boolean local) throws SemanticException { TableScanOperator currTopOp = opProcCtx.getCurrTopOp(); @@ -199,12 +199,12 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, Map, GenMapRedCtx> mapCurrCtx = ctx .getMapCurrCtx(); GenMapRedCtx mapredCtx = mapCurrCtx.get(mapJoin.getParentOperators().get(pos)); - Task currTask = mapredCtx.getCurrTask(); + Task currTask = mapredCtx.getCurrTask(); MapredWork currPlan = (MapredWork) currTask.getWork(); String currAliasId = mapredCtx.getCurrAliasId(); - HashMap, Task> opTaskMap = + HashMap, Task> opTaskMap = ctx.getOpTaskMap(); - Task oldTask = opTaskMap.get(mapJoin); + Task oldTask = opTaskMap.get(mapJoin); ctx.setCurrAliasId(currAliasId); ctx.setCurrTask(currTask); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/AbstractSemanticAnalyzerHook.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/AbstractSemanticAnalyzerHook.java index 32b89a7299..4971b066ce 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/AbstractSemanticAnalyzerHook.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/AbstractSemanticAnalyzerHook.java @@ -32,6 +32,6 @@ public ASTNode preAnalyze(HiveSemanticAnalyzerHookContext context,ASTNode ast) } public void postAnalyze(HiveSemanticAnalyzerHookContext context, - List> rootTasks) throws SemanticException { + List> rootTasks) throws SemanticException { } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java index 370697380e..ec3d9d0c56 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java @@ -120,7 +120,7 @@ protected final Hive db; protected final HiveConf conf; protected final QueryState queryState; - protected List> rootTasks; + protected List> rootTasks; protected FetchTask fetchTask; protected final Logger LOG; protected final LogHelper console; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java index 76c69cf24b..66cd6977a6 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java @@ -93,11 +93,11 @@ private Hive db; private HashSet inputs; private HashSet outputs; - private List> tasks; + private List> tasks; 
private Logger LOG; private Context ctx; private DumpType eventType = DumpType.EVENT_UNKNOWN; - private Task openTxnTask = null; + private Task openTxnTask = null; public HiveConf getConf() { return conf; @@ -115,7 +115,7 @@ public Hive getHive() { return outputs; } - public List> getTasks() { + public List> getTasks() { return tasks; } @@ -138,7 +138,7 @@ public DumpType getEventType() { public SemanticAnalyzerWrapperContext(HiveConf conf, Hive db, HashSet inputs, HashSet outputs, - List> tasks, + List> tasks, Logger LOG, Context ctx){ this.conf = conf; this.db = db; @@ -149,10 +149,10 @@ public SemanticAnalyzerWrapperContext(HiveConf conf, Hive db, this.ctx = ctx; } - public Task getOpenTxnTask() { + public Task getOpenTxnTask() { return openTxnTask; } - public void setOpenTxnTask(Task openTxnTask) { + public void setOpenTxnTask(Task openTxnTask) { this.openTxnTask = openTxnTask; } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezProcContext.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezProcContext.java index f977fc1b59..2c0d21a852 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezProcContext.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezProcContext.java @@ -68,7 +68,7 @@ public final List> moveTask; // rootTasks is the entry point for all generated tasks - public final List> rootTasks; + public final List> rootTasks; public final Set inputs; public final Set outputs; @@ -164,7 +164,7 @@ @SuppressWarnings("unchecked") public GenTezProcContext(HiveConf conf, ParseContext parseContext, - List> moveTask, List> rootTasks, + List> moveTask, List> rootTasks, Set inputs, Set outputs) { this.conf = conf; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveSemanticAnalyzerHook.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveSemanticAnalyzerHook.java index 08aa7e0589..65d08d752d 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveSemanticAnalyzerHook.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveSemanticAnalyzerHook.java @@ -72,5 +72,5 @@ public ASTNode preAnalyze( */ public void postAnalyze( HiveSemanticAnalyzerHookContext context, - List> rootTasks) throws SemanticException; + List> rootTasks) throws SemanticException; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java index e955989d92..ec75fa4e9a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java @@ -548,7 +548,7 @@ private static ImportTableDesc getBaseCreateTableDescFromTable(String dbName, return TaskFactory.get(new DDLWork(x.getInputs(), x.getOutputs(), dropTblDesc), x.getConf()); } - private static Task alterTableTask(ImportTableDesc tableDesc, + private static Task alterTableTask(ImportTableDesc tableDesc, EximUtil.SemanticAnalyzerWrapperContext x, ReplicationSpec replicationSpec) { tableDesc.setReplaceMode(true); @@ -558,7 +558,7 @@ private static ImportTableDesc getBaseCreateTableDescFromTable(String dbName, return tableDesc.getCreateTableTask(x.getInputs(), x.getOutputs(), x.getConf()); } - private static Task alterSinglePartition( + private static Task alterSinglePartition( ImportTableDesc tblDesc, Table table, Warehouse wh, AlterTableAddPartitionDesc addPartitionDesc, ReplicationSpec replicationSpec, org.apache.hadoop.hive.ql.metadata.Partition ptn, EximUtil.SemanticAnalyzerWrapperContext x) throws MetaException, 
IOException, HiveException { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/MapReduceCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/MapReduceCompiler.java index 42637dfa77..bcba4d7670 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/MapReduceCompiler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/MapReduceCompiler.java @@ -93,7 +93,7 @@ public void init(QueryState queryState, LogHelper console, Hive db) { // loop over all the tasks recursively @Override - protected void setInputFormat(Task task) { + protected void setInputFormat(Task task) { if (task instanceof ExecDriver) { MapWork work = ((MapredWork) task.getWork()).getMapWork(); Map> opMap = work.getAliasToWork(); @@ -103,15 +103,15 @@ protected void setInputFormat(Task task) { } } } else if (task instanceof ConditionalTask) { - List> listTasks + List> listTasks = ((ConditionalTask) task).getListTasks(); - for (Task tsk : listTasks) { + for (Task tsk : listTasks) { setInputFormat(tsk); } } if (task.getChildTasks() != null) { - for (Task childTask : task.getChildTasks()) { + for (Task childTask : task.getChildTasks()) { setInputFormat(childTask); } } @@ -137,7 +137,7 @@ private void setInputFormat(MapWork work, Operator op) { } // loop over all the tasks recursively - private void breakTaskTree(Task task) { + private void breakTaskTree(Task task) { if (task instanceof ExecDriver) { Map> opMap = @@ -148,9 +148,9 @@ private void breakTaskTree(Task task) { } } } else if (task instanceof ConditionalTask) { - List> listTasks = ((ConditionalTask) task) + List> listTasks = ((ConditionalTask) task) .getListTasks(); - for (Task tsk : listTasks) { + for (Task tsk : listTasks) { breakTaskTree(tsk); } } @@ -159,7 +159,7 @@ private void breakTaskTree(Task task) { return; } - for (Task childTask : task.getChildTasks()) { + for (Task childTask : task.getChildTasks()) { breakTaskTree(childTask); } } @@ -191,7 +191,7 @@ private static int getNumberOfReducers(MapredWork mrwork, HiveConf conf) { } @Override - protected void decideExecMode(List> rootTasks, Context ctx, + protected void decideExecMode(List> rootTasks, Context ctx, GlobalLimitCtx globalLimitCtx) throws SemanticException { @@ -271,13 +271,13 @@ public boolean accept(Path file) { } @Override - protected void optimizeTaskPlan(List> rootTasks, + protected void optimizeTaskPlan(List> rootTasks, ParseContext pCtx, Context ctx) throws SemanticException { // reduce sink does not have any kids - since the plan by now has been // broken up into multiple // tasks, iterate over all tasks. 
// For each task, go over all operators recursively - for (Task rootTask : rootTasks) { + for (Task rootTask : rootTasks) { breakTaskTree(rootTask); } @@ -291,7 +291,7 @@ protected void optimizeTaskPlan(List> rootTasks, } @Override - protected void generateTaskTree(List> rootTasks, ParseContext pCtx, + protected void generateTaskTree(List> rootTasks, ParseContext pCtx, List> mvTask, Set inputs, Set outputs) throws SemanticException { // generate map reduce plans @@ -299,7 +299,7 @@ protected void generateTaskTree(List> rootTasks, Pa GenMRProcContext procCtx = new GenMRProcContext( conf, // Must be deterministic order map for consistent q-test output across Java versions - new LinkedHashMap, Task>(), + new LinkedHashMap, Task>(), tempParseContext, mvTask, rootTasks, new LinkedHashMap, GenMapRedCtx>(), inputs, outputs); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java index 67b4901b0c..feecda5bdf 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java @@ -110,7 +110,7 @@ private GlobalLimitCtx globalLimitCtx; private HashSet semanticInputs; - private List> rootTasks; + private List> rootTasks; private FetchTask fetchTask; private QueryProperties queryProperties; @@ -194,7 +194,7 @@ public ParseContext( HashMap opToSamplePruner, GlobalLimitCtx globalLimitCtx, HashMap nameToSplitSample, - HashSet semanticInputs, List> rootTasks, + HashSet semanticInputs, List> rootTasks, Map> opToPartToSkewedPruner, Map viewAliasToInput, List reduceSinkOperatorsAddedByEnforceBucketingSorting, @@ -511,8 +511,8 @@ public void setGlobalLimitCtx(GlobalLimitCtx globalLimitCtx) { return semanticInputs; } - public void replaceRootTask(Task rootTask, - List> tasks) { + public void replaceRootTask(Task rootTask, + List> tasks) { this.rootTasks.remove(rootTask); this.rootTasks.addAll(tasks); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java index 0b55a39a8a..cf62953027 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java @@ -121,7 +121,7 @@ public void init(QueryState queryState, LogHelper console, Hive db) { @SuppressWarnings("nls") public void compile(final ParseContext pCtx, - final List> rootTasks, + final List> rootTasks, final HashSet inputs, final HashSet outputs) throws SemanticException { Context ctx = pCtx.getContext(); @@ -225,7 +225,7 @@ public void compile(final ParseContext pCtx, // The idea here is to keep an object reference both in FileSink and in FetchTask for list of files // to be fetched. During Job close file sink will populate the list and fetch task later will use it // to fetch the results. 
- Collection> tableScanOps = + Collection> tableScanOps = Lists.>newArrayList(pCtx.getTopOps().values()); Set fsOps = OperatorUtils.findOperators(tableScanOps, FileSinkOperator.class); if(fsOps != null && fsOps.size() == 1) { @@ -278,13 +278,13 @@ public void compile(final ParseContext pCtx, generateTaskTree(rootTasks, pCtx, mvTask, inputs, outputs); // For each task, set the key descriptor for the reducer - for (Task rootTask : rootTasks) { + for (Task rootTask : rootTasks) { GenMapRedUtils.setKeyAndValueDescForTaskTree(rootTask); } // If a task contains an operator which instructs bucketizedhiveinputformat // to be used, please do so - for (Task rootTask : rootTasks) { + for (Task rootTask : rootTasks) { setInputFormat(rootTask); } @@ -308,7 +308,7 @@ public void compile(final ParseContext pCtx, throw new SemanticException("Can not find correct root task!"); } try { - Task root = rootTasks.iterator().next(); + Task root = rootTasks.iterator().next(); StatsTask tsk = (StatsTask) genTableStats(pCtx, pCtx.getTopOps().values() .iterator().next(), root, outputs); root.addDependentTask(tsk); @@ -318,10 +318,10 @@ public void compile(final ParseContext pCtx, } genColumnStatsTask(pCtx.getAnalyzeRewrite(), loadFileWork, map, outerQueryLimit, 0); } else { - Set> leafTasks = new LinkedHashSet>(); + Set> leafTasks = new LinkedHashSet>(); getLeafTasks(rootTasks, leafTasks); - List> nonStatsLeafTasks = new ArrayList<>(); - for (Task tsk : leafTasks) { + List> nonStatsLeafTasks = new ArrayList<>(); + for (Task tsk : leafTasks) { // map table name to the correct ColumnStatsTask if (tsk instanceof StatsTask) { map.put(extractTableFullName((StatsTask) tsk), (StatsTask) tsk); @@ -330,8 +330,8 @@ public void compile(final ParseContext pCtx, } } // add cStatsTask as a dependent of all the nonStatsLeafTasks - for (Task tsk : nonStatsLeafTasks) { - for (Task cStatsTask : map.values()) { + for (Task tsk : nonStatsLeafTasks) { + for (Task cStatsTask : map.values()) { tsk.addDependentTask(cStatsTask); } } @@ -360,13 +360,13 @@ public void compile(final ParseContext pCtx, // generate a DDL task and make it a dependent task of the leaf CreateTableDesc crtTblDesc = pCtx.getCreateTable(); crtTblDesc.validate(conf); - Task crtTblTask = TaskFactory.get(new DDLWork(inputs, outputs, crtTblDesc)); + Task crtTblTask = TaskFactory.get(new DDLWork(inputs, outputs, crtTblDesc)); patchUpAfterCTASorMaterializedView(rootTasks, inputs, outputs, crtTblTask, CollectionUtils.isEmpty(crtTblDesc.getPartColNames())); } else if (pCtx.getQueryProperties().isMaterializedView()) { // generate a DDL task and make it a dependent task of the leaf CreateViewDesc viewDesc = pCtx.getCreateViewDesc(); - Task crtViewTask = TaskFactory.get(new DDLWork( + Task crtViewTask = TaskFactory.get(new DDLWork( inputs, outputs, viewDesc)); patchUpAfterCTASorMaterializedView(rootTasks, inputs, outputs, crtViewTask, CollectionUtils.isEmpty(viewDesc.getPartColNames())); @@ -375,10 +375,10 @@ public void compile(final ParseContext pCtx, // of the tree. 
MaterializedViewUpdateDesc materializedViewDesc = pCtx.getMaterializedViewUpdateDesc(); DDLWork ddlWork = new DDLWork(inputs, outputs, materializedViewDesc); - Set> leafTasks = new LinkedHashSet>(); + Set> leafTasks = new LinkedHashSet>(); getLeafTasks(rootTasks, leafTasks); - Task materializedViewTask = TaskFactory.get(ddlWork, conf); - for (Task task : leafTasks) { + Task materializedViewTask = TaskFactory.get(ddlWork, conf); + for (Task task : leafTasks) { task.addDependentTask(materializedViewTask); } } @@ -494,8 +494,8 @@ private Path getDefaultCtasLocation(final ParseContext pCtx) throws SemanticExce } } - private void patchUpAfterCTASorMaterializedView(List> rootTasks, - Set inputs, Set outputs, Task createTask, + private void patchUpAfterCTASorMaterializedView(List> rootTasks, + Set inputs, Set outputs, Task createTask, boolean createTaskAfterMoveTask) { // clear the mapredWork output file from outputs for CTAS // DDLWork at the tail of the chain will have the output @@ -512,15 +512,15 @@ private void patchUpAfterCTASorMaterializedView(List> leaves = new LinkedHashSet<>(); + Set> leaves = new LinkedHashSet<>(); getLeafTasks(rootTasks, leaves); assert (leaves.size() > 0); // Target task is supposed to be the last task - Task targetTask = createTask; - for (Task task : leaves) { + Task targetTask = createTask; + for (Task task : leaves) { if (task instanceof StatsTask) { // StatsTask require table to already exist - for (Task parentOfStatsTask : task.getParentTasks()) { + for (Task parentOfStatsTask : task.getParentTasks()) { if (parentOfStatsTask instanceof MoveTask && !createTaskAfterMoveTask) { // For partitioned CTAS, we need to create the table before the move task // as we need to create the partitions in metastore and for that we should @@ -530,7 +530,7 @@ private void patchUpAfterCTASorMaterializedView(List parentOfCrtTblTask : createTask.getParentTasks()) { + for (Task parentOfCrtTblTask : createTask.getParentTasks()) { parentOfCrtTblTask.removeDependentTask(task); } createTask.addDependentTask(task); @@ -577,11 +577,11 @@ private void patchUpAfterCTASorMaterializedView(List dependentTask, Task task) { - for (Task parentOfStatsTask : dependentTask.getParentTasks()) { + private void interleaveTask(Task dependentTask, Task task) { + for (Task parentOfStatsTask : dependentTask.getParentTasks()) { parentOfStatsTask.addDependentTask(task); } - for (Task parentOfCrtTblTask : task.getParentTasks()) { + for (Task parentOfCrtTblTask : task.getParentTasks()) { parentOfCrtTblTask.removeDependentTask(dependentTask); } task.addDependentTask(dependentTask); @@ -638,16 +638,16 @@ protected void genColumnStatsTask(AnalyzeRewriteContext analyzeRewrite, /** * Find all leaf tasks of the list of root tasks. 
*/ - private void getLeafTasks(List> rootTasks, - Set> leaves) { + private void getLeafTasks(List> rootTasks, + Set> leaves) { - for (Task root : rootTasks) { + for (Task root : rootTasks) { getLeafTasks(root, leaves); } } - private void getLeafTasks(Task task, - Set> leaves) { + private void getLeafTasks(Task task, + Set> leaves) { if (task.getDependentTasks() == null) { if (!leaves.contains(task)) { leaves.add(task); @@ -660,7 +660,7 @@ private void getLeafTasks(Task task, /* * Called to transform tasks into local tasks where possible/desirable */ - protected abstract void decideExecMode(List> rootTasks, Context ctx, + protected abstract void decideExecMode(List> rootTasks, Context ctx, GlobalLimitCtx globalLimitCtx) throws SemanticException; /* @@ -673,18 +673,18 @@ protected void optimizeOperatorPlan(ParseContext pCtxSet, Set inputs /* * Called after the tasks have been generated to run another round of optimization */ - protected abstract void optimizeTaskPlan(List> rootTasks, + protected abstract void optimizeTaskPlan(List> rootTasks, ParseContext pCtx, Context ctx) throws SemanticException; /* * Called to set the appropriate input format for tasks */ - protected abstract void setInputFormat(Task rootTask); + protected abstract void setInputFormat(Task rootTask); /* * Called to generate the taks tree from the parse context/operator tree */ - protected abstract void generateTaskTree(List> rootTasks, ParseContext pCtx, + protected abstract void generateTaskTree(List> rootTasks, ParseContext pCtx, List> mvTask, Set inputs, Set outputs) throws SemanticException; /* @@ -714,7 +714,7 @@ protected void runDynPartitionSortOptimizations(ParseContext parseContext, HiveC /** * Create a clone of the parse context */ - public ParseContext getParseContext(ParseContext pCtx, List> rootTasks) { + public ParseContext getParseContext(ParseContext pCtx, List> rootTasks) { ParseContext clone = new ParseContext(queryState, pCtx.getOpToPartPruner(), pCtx.getOpToPartList(), pCtx.getTopOps(), pCtx.getJoinOps(), pCtx.getSmbMapJoinOps(), diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java index 5000ba47b3..24487568fb 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java @@ -597,7 +597,7 @@ private void runDynamicPartitionPruning(OptimizeTezProcContext procCtx, Set> rootTasks, ParseContext pCtx, + protected void generateTaskTree(List> rootTasks, ParseContext pCtx, List> mvTask, Set inputs, Set outputs) throws SemanticException { @@ -690,7 +690,7 @@ protected void generateTaskTree(List> rootTasks, Pa } @Override - protected void setInputFormat(Task task) { + protected void setInputFormat(Task task) { if (task instanceof TezTask) { TezWork work = ((TezTask)task).getWork(); List all = work.getAllWork(); @@ -706,15 +706,15 @@ protected void setInputFormat(Task task) { } } } else if (task instanceof ConditionalTask) { - List> listTasks + List> listTasks = ((ConditionalTask) task).getListTasks(); - for (Task tsk : listTasks) { + for (Task tsk : listTasks) { setInputFormat(tsk); } } if (task.getChildTasks() != null) { - for (Task childTask : task.getChildTasks()) { + for (Task childTask : task.getChildTasks()) { setInputFormat(childTask); } } @@ -737,7 +737,7 @@ private void setInputFormat(MapWork work, Operator op) { } @Override - protected void decideExecMode(List> rootTasks, Context ctx, + protected void decideExecMode(List> rootTasks, Context 
ctx, GlobalLimitCtx globalLimitCtx) throws SemanticException { // currently all Tez work is on the cluster @@ -745,7 +745,7 @@ protected void decideExecMode(List> rootTasks, Cont } @Override - protected void optimizeTaskPlan(List> rootTasks, ParseContext pCtx, + protected void optimizeTaskPlan(List> rootTasks, ParseContext pCtx, Context ctx) throws SemanticException { PerfLogger perfLogger = SessionState.getPerfLogger(); perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolver.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolver.java index 1983ab75fc..e07794f2b8 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolver.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolver.java @@ -38,6 +38,6 @@ * opaque context * @return position of the task */ - List> getTasks(HiveConf conf, Object ctx); + List> getTasks(HiveConf conf, Object ctx); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverCommonJoin.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverCommonJoin.java index cc5baeef9c..c1b9b27ff4 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverCommonJoin.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverCommonJoin.java @@ -50,10 +50,10 @@ public static class ConditionalResolverCommonJoinCtx implements Serializable { private static final long serialVersionUID = 1L; - private HashMap, Set> taskToAliases; + private HashMap, Set> taskToAliases; Map> pathToAliases; Map aliasToKnownSize; - private Task commonJoinTask; + private Task commonJoinTask; private Path localTmpDir; private Path hdfsTmpDir; @@ -61,19 +61,19 @@ public ConditionalResolverCommonJoinCtx() { } - public HashMap, Set> getTaskToAliases() { + public HashMap, Set> getTaskToAliases() { return taskToAliases; } - public void setTaskToAliases(HashMap, Set> taskToAliases) { + public void setTaskToAliases(HashMap, Set> taskToAliases) { this.taskToAliases = taskToAliases; } - public Task getCommonJoinTask() { + public Task getCommonJoinTask() { return commonJoinTask; } - public void setCommonJoinTask(Task commonJoinTask) { + public void setCommonJoinTask(Task commonJoinTask) { this.commonJoinTask = commonJoinTask; } @@ -129,12 +129,12 @@ public ConditionalResolverCommonJoin() { } @Override - public List> getTasks(HiveConf conf, Object objCtx) { + public List> getTasks(HiveConf conf, Object objCtx) { ConditionalResolverCommonJoinCtx ctx = ((ConditionalResolverCommonJoinCtx) objCtx).clone(); - List> resTsks = new ArrayList>(); + List> resTsks = new ArrayList>(); // get aliasToPath and pass it to the heuristic - Task task = resolveDriverAlias(ctx, conf); + Task task = resolveDriverAlias(ctx, conf); if (task == null) { // run common join task @@ -151,7 +151,7 @@ public ConditionalResolverCommonJoin() { return resTsks; } - private Task resolveDriverAlias(ConditionalResolverCommonJoinCtx ctx, HiveConf conf) { + private Task resolveDriverAlias(ConditionalResolverCommonJoinCtx ctx, HiveConf conf) { try { resolveUnknownSizes(ctx, conf); return resolveMapJoinTask(ctx, conf); @@ -161,20 +161,20 @@ public ConditionalResolverCommonJoin() { return null; } - protected Task resolveMapJoinTask( + protected Task resolveMapJoinTask( ConditionalResolverCommonJoinCtx ctx, HiveConf conf) throws Exception { Set participants = getParticipants(ctx); Map aliasToKnownSize = ctx.getAliasToKnownSize(); - Map, Set> taskToAliases = 
ctx.getTaskToAliases(); + Map, Set> taskToAliases = ctx.getTaskToAliases(); long threshold = HiveConf.getLongVar(conf, HiveConf.ConfVars.HIVESMALLTABLESFILESIZE); Long bigTableSize = null; Long smallTablesSize = null; - Map.Entry, Set> nextTask = null; - for (Map.Entry, Set> entry : taskToAliases.entrySet()) { + Map.Entry, Set> nextTask = null; + for (Map.Entry, Set> entry : taskToAliases.entrySet()) { Set aliases = entry.getValue(); long sumOfOthers = Utilities.sumOfExcept(aliasToKnownSize, participants, aliases); if (sumOfOthers < 0 || sumOfOthers > threshold) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java index a828809349..da1376c0e9 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java @@ -56,7 +56,7 @@ public ConditionalResolverMergeFiles() { */ public static class ConditionalResolverMergeFilesCtx implements Serializable { private static final long serialVersionUID = 1L; - List> listTasks; + List> listTasks; private String dir; private DynamicPartitionCtx dpCtx; // merge task could be after dynamic partition insert private ListBucketingCtx lbCtx; @@ -68,7 +68,7 @@ public ConditionalResolverMergeFilesCtx() { * @param dir */ public ConditionalResolverMergeFilesCtx( - List> listTasks, String dir) { + List> listTasks, String dir) { this.listTasks = listTasks; this.dir = dir; } @@ -83,7 +83,7 @@ public String getDir() { /** * @return the listTasks */ - public List> getListTasks() { + public List> getListTasks() { return listTasks; } @@ -91,7 +91,7 @@ public String getDir() { * @param listTasks * the listTasks to set */ - public void setListTasks(List> listTasks) { + public void setListTasks(List> listTasks) { this.listTasks = listTasks; } @@ -118,11 +118,11 @@ public void setLbCtx(ListBucketingCtx lbCtx) { } } - public List> getTasks(HiveConf conf, Object objCtx) { + public List> getTasks(HiveConf conf, Object objCtx) { ConditionalResolverMergeFilesCtx ctx = (ConditionalResolverMergeFilesCtx) objCtx; String dirName = ctx.getDir(); - List> resTsks = new ArrayList>(); + List> resTsks = new ArrayList>(); // check if a map-reduce job is needed to merge the files // If the current size is smaller than the target, merge long trgtSize = conf.getLongVar(HiveConf.ConfVars.HIVEMERGEMAPFILESSIZE); @@ -130,9 +130,9 @@ public void setLbCtx(ListBucketingCtx lbCtx) { .getLongVar(HiveConf.ConfVars.HIVEMERGEMAPFILESAVGSIZE); trgtSize = Math.max(trgtSize, avgConditionSize); - Task mvTask = ctx.getListTasks().get(0); - Task mrTask = ctx.getListTasks().get(1); - Task mrAndMvTask = ctx.getListTasks().get(2); + Task mvTask = ctx.getListTasks().get(0); + Task mrTask = ctx.getListTasks().get(1); + Task mrAndMvTask = ctx.getListTasks().get(2); try { Path dirPath = new Path(dirName); @@ -228,9 +228,9 @@ public void setLbCtx(ListBucketingCtx lbCtx) { * @param dpLbLevel * @throws IOException */ - private void generateActualTasks(HiveConf conf, List> resTsks, - long trgtSize, long avgConditionSize, Task mvTask, - Task mrTask, Task mrAndMvTask, Path dirPath, + private void generateActualTasks(HiveConf conf, List> resTsks, + long trgtSize, long avgConditionSize, Task mvTask, + Task mrTask, Task mrAndMvTask, Path dirPath, FileSystem inpFs, ConditionalResolverMergeFilesCtx ctx, MapWork work, int dpLbLevel) throws IOException { DynamicPartitionCtx dpCtx = ctx.getDPCtx(); @@ -301,7 
+301,7 @@ private void generateActualTasks(HiveConf conf, List mergeAndMoveMoveTask = mrAndMvTask.getChildTasks().get(0); + Task mergeAndMoveMoveTask = mrAndMvTask.getChildTasks().get(0); MoveWork mvWork = (MoveWork) mergeAndMoveMoveTask.getWork(); LoadFileDesc lfd = mvWork.getLoadFileWork(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverSkewJoin.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverSkewJoin.java index 5dd7a258dc..0ca5caf1b7 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverSkewJoin.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverSkewJoin.java @@ -54,8 +54,8 @@ // tables into corresponding different dirs (one dir per table). // this map stores mapping from "big key dir" to its corresponding mapjoin // task. - private HashMap> dirToTaskMap; - private List> noSkewTask; + private HashMap> dirToTaskMap; + private List> noSkewTask; /** * For serialization use only. @@ -64,27 +64,27 @@ public ConditionalResolverSkewJoinCtx() { } public ConditionalResolverSkewJoinCtx( - HashMap> dirToTaskMap, - List> noSkewTask) { + HashMap> dirToTaskMap, + List> noSkewTask) { super(); this.dirToTaskMap = dirToTaskMap; this.noSkewTask = noSkewTask; } - public HashMap> getDirToTaskMap() { + public HashMap> getDirToTaskMap() { return dirToTaskMap; } public void setDirToTaskMap( - HashMap> dirToTaskMap) { + HashMap> dirToTaskMap) { this.dirToTaskMap = dirToTaskMap; } - public List> getNoSkewTask() { + public List> getNoSkewTask() { return noSkewTask; } - public void setNoSkewTask(List> noSkewTask) { + public void setNoSkewTask(List> noSkewTask) { this.noSkewTask = noSkewTask; } } @@ -93,26 +93,26 @@ public ConditionalResolverSkewJoin() { } @Override - public List> getTasks(HiveConf conf, + public List> getTasks(HiveConf conf, Object objCtx) { ConditionalResolverSkewJoinCtx ctx = (ConditionalResolverSkewJoinCtx) objCtx; - List> resTsks = new ArrayList>(); + List> resTsks = new ArrayList>(); - Map> dirToTaskMap = ctx + Map> dirToTaskMap = ctx .getDirToTaskMap(); - Iterator>> bigKeysPathsIter = dirToTaskMap + Iterator>> bigKeysPathsIter = dirToTaskMap .entrySet().iterator(); try { while (bigKeysPathsIter.hasNext()) { - Entry> entry = bigKeysPathsIter.next(); + Entry> entry = bigKeysPathsIter.next(); Path dirPath = entry.getKey(); FileSystem inpFs = dirPath.getFileSystem(conf); FileStatus[] fstatus = Utilities.listStatusIfExists(dirPath, inpFs); if (fstatus != null && fstatus.length > 0) { - Task task = entry.getValue(); - List> parentOps = task.getParentTasks(); + Task task = entry.getValue(); + List> parentOps = task.getParentTasks(); if(parentOps!=null){ - for(Task parentOp: parentOps){ + for(Task parentOp: parentOps){ //right now only one parent resTsks.add(parentOp); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java index 6e2754ae1f..c0957be200 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java @@ -326,7 +326,7 @@ public String getDatabaseName() { return dbName; } - public Task getCreateTableTask(HashSet inputs, HashSet outputs, + public Task getCreateTableTask(HashSet inputs, HashSet outputs, HiveConf conf) { switch (getDescType()) { case TABLE: diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestUtilities.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestUtilities.java index fbf948c4a2..18b5f270d8 100644 
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestUtilities.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestUtilities.java @@ -574,7 +574,7 @@ public void testGetInputPathsPoolAndFailure() throws IOException, ExecutionExcep verify(pool).shutdownNow(); } - private Task getDependencyCollectionTask(){ + private Task getDependencyCollectionTask(){ return TaskFactory.get(new DependencyCollectionWork()); } @@ -587,7 +587,7 @@ public void testGetInputPathsPoolAndFailure() throws IOException, ExecutionExcep * \ / * ---->DTc---- */ - private List> getTestDiamondTaskGraph(Task providedTask){ + private List> getTestDiamondTaskGraph(Task providedTask){ // Note: never instantiate a task without TaskFactory.get() if you're not // okay with .equals() breaking. Doing it via TaskFactory.get makes sure // that an id is generated, and two tasks of the same type don't show @@ -595,12 +595,12 @@ public void testGetInputPathsPoolAndFailure() throws IOException, ExecutionExcep // array. Without this, DTa, DTb, and DTc would show up as one item in // the list of children. Thus, we're instantiating via a helper method // that instantiates via TaskFactory.get() - Task root = getDependencyCollectionTask(); - Task DTa = getDependencyCollectionTask(); - Task DTb = getDependencyCollectionTask(); - Task DTc = getDependencyCollectionTask(); - Task DTd = getDependencyCollectionTask(); - Task DTe = getDependencyCollectionTask(); + Task root = getDependencyCollectionTask(); + Task DTa = getDependencyCollectionTask(); + Task DTb = getDependencyCollectionTask(); + Task DTc = getDependencyCollectionTask(); + Task DTd = getDependencyCollectionTask(); + Task DTe = getDependencyCollectionTask(); root.addDependentTask(DTa); root.addDependentTask(DTb); @@ -614,7 +614,7 @@ public void testGetInputPathsPoolAndFailure() throws IOException, ExecutionExcep providedTask.addDependentTask(DTe); - List> retVals = new ArrayList>(); + List> retVals = new ArrayList>(); retVals.add(root); return retVals; } @@ -626,21 +626,21 @@ public void testGetInputPathsPoolAndFailure() throws IOException, ExecutionExcep */ public class CountingWrappingTask extends DependencyCollectionTask { int count; - Task wrappedDep = null; + Task wrappedDep = null; - public CountingWrappingTask(Task dep) { + public CountingWrappingTask(Task dep) { count = 0; wrappedDep = dep; super.addDependentTask(wrappedDep); } @Override - public boolean addDependentTask(Task dependent) { + public boolean addDependentTask(Task dependent) { return wrappedDep.addDependentTask(dependent); } @Override - public List> getDependentTasks() { + public List> getDependentTasks() { count++; System.err.println("YAH:getDepTasks got called!"); (new Exception()).printStackTrace(System.err); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/bootstrap/AddDependencyToLeavesTest.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/bootstrap/AddDependencyToLeavesTest.java index 166cf874d7..bf11362e92 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/bootstrap/AddDependencyToLeavesTest.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/bootstrap/AddDependencyToLeavesTest.java @@ -53,21 +53,21 @@ public void shouldNotSkipIntermediateDependencyCollectionTasks() { Task collectionWorkTaskThree = TaskFactory.get(new DependencyCollectionWork()); - @SuppressWarnings("unchecked") Task rootTask = mock(Task.class); + @SuppressWarnings("unchecked") Task rootTask = mock(Task.class); when(rootTask.getDependentTasks()) .thenReturn( Arrays.asList(collectionWorkTaskOne, 
collectionWorkTaskTwo, collectionWorkTaskThree)); - @SuppressWarnings("unchecked") List> tasksPostCurrentGraph = + @SuppressWarnings("unchecked") List> tasksPostCurrentGraph = Arrays.asList(mock(Task.class), mock(Task.class)); DAGTraversal.traverse(Collections.singletonList(rootTask), new AddDependencyToLeaves(tasksPostCurrentGraph)); - List> dependentTasksForOne = + List> dependentTasksForOne = collectionWorkTaskOne.getDependentTasks(); - List> dependentTasksForTwo = + List> dependentTasksForTwo = collectionWorkTaskTwo.getDependentTasks(); - List> dependentTasksForThree = + List> dependentTasksForThree = collectionWorkTaskThree.getDependentTasks(); assertEquals(dependentTasksForOne.size(), 2); @@ -83,4 +83,4 @@ public void shouldNotSkipIntermediateDependencyCollectionTasks() { } -} \ No newline at end of file +} diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/TestTaskTracker.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/TestTaskTracker.java index 41ab447de8..bb9999db61 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/TestTaskTracker.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/TestTaskTracker.java @@ -32,7 +32,7 @@ @RunWith(PowerMockRunner.class) public class TestTaskTracker { @Mock - private Task task; + private Task task; @Test public void taskTrackerCompositionInitializesTheMaxTasksCorrectly() { @@ -44,4 +44,4 @@ public void taskTrackerCompositionInitializesTheMaxTasksCorrectly() { TaskTracker taskTracker2 = new TaskTracker(taskTracker); assertFalse(taskTracker2.canAddMoreTasks()); } -} \ No newline at end of file +} diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/util/DAGTraversalTest.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/util/DAGTraversalTest.java index 6dcecde1dd..f482e3be6e 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/util/DAGTraversalTest.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/util/DAGTraversalTest.java @@ -37,24 +37,24 @@ int count = 0; @Override - public void process(Task task) { + public void process(Task task) { if (task.getDependentTasks() == null || task.getDependentTasks().isEmpty()) { count++; } } @Override - public boolean skipProcessing(Task task) { + public boolean skipProcessing(Task task) { return false; } } @Test public void shouldCountNumberOfLeafNodesCorrectly() { - Task taskWith5NodeTree = linearTree(5); - Task taskWith1NodeTree = linearTree(1); - Task taskWith3NodeTree = linearTree(3); - @SuppressWarnings("unchecked") Task rootTask = mock(Task.class); + Task taskWith5NodeTree = linearTree(5); + Task taskWith1NodeTree = linearTree(1); + Task taskWith3NodeTree = linearTree(3); + @SuppressWarnings("unchecked") Task rootTask = mock(Task.class); when(rootTask.getDependentTasks()) .thenReturn(Arrays.asList(taskWith1NodeTree, taskWith3NodeTree, taskWith5NodeTree)); @@ -63,10 +63,10 @@ public void shouldCountNumberOfLeafNodesCorrectly() { assertEquals(3, function.count); } - private Task linearTree(int numOfNodes) { - Task current = null, head = null; + private Task linearTree(int numOfNodes) { + Task current = null, head = null; for (int i = 0; i < numOfNodes; i++) { - @SuppressWarnings("unchecked") Task task = mock(Task.class); + @SuppressWarnings("unchecked") Task task = mock(Task.class); if (current != null) { when(current.getDependentTasks()).thenReturn(Collections.singletonList(task)); } @@ -78,4 +78,4 @@ public void shouldCountNumberOfLeafNodesCorrectly() { return head; } -} \ No newline at end of file +} diff 
--git a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/TestGenMapRedUtilsCreateConditionalTask.java b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/TestGenMapRedUtilsCreateConditionalTask.java index a40ad247e7..a96d93e042 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/TestGenMapRedUtilsCreateConditionalTask.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/TestGenMapRedUtilsCreateConditionalTask.java @@ -59,7 +59,7 @@ Licensed to the Apache Software Foundation (ASF) under one public class TestGenMapRedUtilsCreateConditionalTask { private static HiveConf hiveConf; - private Task dummyMRTask; + private Task dummyMRTask; @BeforeClass public static void initializeSessionState() { @@ -187,9 +187,9 @@ public void testConditionalMoveTaskIsOptimized() throws SemanticException { GenMapRedUtils.createMRWorkForMergingFiles(fileSinkOperator, finalDirName, null, moveTaskList, hiveConf, dummyMRTask, new LineageState()); ConditionalTask conditionalTask = (ConditionalTask)dummyMRTask.getChildTasks().get(0); - Task moveOnlyTask = conditionalTask.getListTasks().get(0); - Task mergeOnlyTask = conditionalTask.getListTasks().get(1); - Task mergeAndMoveTask = conditionalTask.getListTasks().get(2); + Task moveOnlyTask = conditionalTask.getListTasks().get(0); + Task mergeOnlyTask = conditionalTask.getListTasks().get(1); + Task mergeAndMoveTask = conditionalTask.getListTasks().get(2); /* * OPTIMIZATION @@ -228,9 +228,9 @@ public void testConditionalMoveTaskIsNotOptimized() throws SemanticException { GenMapRedUtils.createMRWorkForMergingFiles(fileSinkOperator, finalDirName, null, moveTaskList, hiveConf, dummyMRTask, new LineageState()); ConditionalTask conditionalTask = (ConditionalTask)dummyMRTask.getChildTasks().get(0); - Task moveOnlyTask = conditionalTask.getListTasks().get(0); - Task mergeOnlyTask = conditionalTask.getListTasks().get(1); - Task mergeAndMoveTask = conditionalTask.getListTasks().get(2); + Task moveOnlyTask = conditionalTask.getListTasks().get(0); + Task mergeOnlyTask = conditionalTask.getListTasks().get(1); + Task mergeAndMoveTask = conditionalTask.getListTasks().get(2); // Verify moveOnlyTask is NOT optimized assertEquals(1, moveOnlyTask.getChildTasks().size()); @@ -263,9 +263,9 @@ public void testConditionalMoveOnHdfsIsNotOptimized() throws SemanticException { GenMapRedUtils.createMRWorkForMergingFiles(fileSinkOperator, finalDirName, null, moveTaskList, hiveConf, dummyMRTask, new LineageState()); ConditionalTask conditionalTask = (ConditionalTask)dummyMRTask.getChildTasks().get(0); - Task moveOnlyTask = conditionalTask.getListTasks().get(0); - Task mergeOnlyTask = conditionalTask.getListTasks().get(1); - Task mergeAndMoveTask = conditionalTask.getListTasks().get(2); + Task moveOnlyTask = conditionalTask.getListTasks().get(0); + Task mergeOnlyTask = conditionalTask.getListTasks().get(1); + Task mergeAndMoveTask = conditionalTask.getListTasks().get(2); // Verify moveOnlyTask is NOT optimized assertEquals(1, moveOnlyTask.getChildTasks().size()); @@ -309,7 +309,7 @@ private FileSinkOperator createFileSinkOperator(Path finalDirName) { return moveTask; } - private void verifyMoveTask(Task task, Path source, Path target) { + private void verifyMoveTask(Task task, Path source, Path target) { MoveTask moveTask = (MoveTask)task; assertEquals(source, moveTask.getWork().getLoadFileWork().getSourcePath()); assertEquals(target, moveTask.getWork().getLoadFileWork().getTargetDir()); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/parse/TestGenTezWork.java 
b/ql/src/test/org/apache/hadoop/hive/ql/parse/TestGenTezWork.java index b5d3b8f55a..888e4efcbc 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/parse/TestGenTezWork.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/parse/TestGenTezWork.java @@ -80,7 +80,7 @@ public void setUp() throws Exception { conf, pctx, Collections.EMPTY_LIST, - new ArrayList>(), + new ArrayList>(), Collections.EMPTY_SET, Collections.EMPTY_SET); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/plan/TestConditionalResolverCommonJoin.java b/ql/src/test/org/apache/hadoop/hive/ql/plan/TestConditionalResolverCommonJoin.java index 3fc82adcf8..780fb2a58e 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/plan/TestConditionalResolverCommonJoin.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/plan/TestConditionalResolverCommonJoin.java @@ -58,8 +58,8 @@ public void testResolvingDriverAlias() throws Exception { // joins alias1, alias2, alias3 (alias1 was not eligible for big pos) // Must be deterministic order map for consistent q-test output across Java versions - HashMap, Set> taskToAliases = - new LinkedHashMap, Set>(); + HashMap, Set> taskToAliases = + new LinkedHashMap, Set>(); taskToAliases.put(task1, new HashSet(Arrays.asList("alias2"))); taskToAliases.put(task2, new HashSet(Arrays.asList("alias3"))); @@ -88,4 +88,4 @@ public void testResolvingDriverAlias() throws Exception { resolved = resolver.resolveMapJoinTask(ctx, conf); Assert.assertNull(resolved); } -} \ No newline at end of file +} diff --git a/ql/src/test/org/apache/hadoop/hive/ql/plan/TestReadEntityDirect.java b/ql/src/test/org/apache/hadoop/hive/ql/plan/TestReadEntityDirect.java index 51f610d9eb..bfa0efef77 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/plan/TestReadEntityDirect.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/plan/TestReadEntityDirect.java @@ -196,7 +196,7 @@ private static Driver createDriver() { @Override public void postAnalyze(HiveSemanticAnalyzerHookContext context, - List> rootTasks) throws SemanticException { + List> rootTasks) throws SemanticException { readEntities = context.getInputs(); } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/plan/TestViewEntity.java b/ql/src/test/org/apache/hadoop/hive/ql/plan/TestViewEntity.java index 97ef3c4de3..78366259b2 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/plan/TestViewEntity.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/plan/TestViewEntity.java @@ -45,7 +45,7 @@ @Override public void postAnalyze(HiveSemanticAnalyzerHookContext context, - List> rootTasks) throws SemanticException { + List> rootTasks) throws SemanticException { readEntities = context.getInputs().toArray(new ReadEntity[0]); } diff --git a/service/src/test/org/apache/hive/service/cli/CLIServiceTest.java b/service/src/test/org/apache/hive/service/cli/CLIServiceTest.java index fa61d3c653..c9a57c53ed 100644 --- a/service/src/test/org/apache/hive/service/cli/CLIServiceTest.java +++ b/service/src/test/org/apache/hive/service/cli/CLIServiceTest.java @@ -366,7 +366,7 @@ public ASTNode preAnalyze(HiveSemanticAnalyzerHookContext context, @Override public void postAnalyze(HiveSemanticAnalyzerHookContext context, - List> rootTasks) throws SemanticException { + List> rootTasks) throws SemanticException { } }
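
Every hunk above applies the same mechanical edit: field, parameter, and local declarations built around `Task` get their type arguments rewritten. As a reference point, here is a minimal, self-contained sketch of that pattern, modelled on the `getLeafTasks` walk in TaskCompiler.java. It assumes the patch moves declarations from the bounded wildcard `Task<? extends Serializable>` to the plain `Task<?>`; the `TaskWildcardSketch` class and its cut-down `Task` stand-in are illustrative only, not Hive code.

```java
import java.io.Serializable;
import java.util.ArrayList;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

// Minimal stand-in for org.apache.hadoop.hive.ql.exec.Task; the real class also
// carries the work descriptor, parent/backup task links, ids, and so on.
class Task<T extends Serializable> {
  private final List<Task<?>> dependentTasks = new ArrayList<>();

  List<Task<?>> getDependentTasks() {
    return dependentTasks;
  }

  boolean addDependentTask(Task<?> dependent) {
    return dependentTasks.add(dependent);
  }
}

public class TaskWildcardSketch {

  // Shape of the getLeafTasks helpers after the patch: the unbounded wildcard is
  // enough because nothing here ever touches the task's work type. Before the
  // patch these declarations would (by assumption) have been spelled
  // List<Task<? extends Serializable>> and Set<Task<? extends Serializable>>.
  static void getLeafTasks(List<Task<?>> rootTasks, Set<Task<?>> leaves) {
    for (Task<?> root : rootTasks) {
      getLeafTasks(root, leaves);
    }
  }

  static void getLeafTasks(Task<?> task, Set<Task<?>> leaves) {
    if (task.getDependentTasks() == null || task.getDependentTasks().isEmpty()) {
      leaves.add(task);                      // no children: this task is a leaf
    } else {
      for (Task<?> child : task.getDependentTasks()) {
        getLeafTasks(child, leaves);         // otherwise keep walking the DAG
      }
    }
  }

  public static void main(String[] args) {
    // Hypothetical three-node chain: root -> middle -> leaf.
    Task<?> root = new Task<>();
    Task<?> middle = new Task<>();
    root.addDependentTask(middle);
    middle.addDependentTask(new Task<>());

    List<Task<?>> roots = new ArrayList<>();
    roots.add(root);
    Set<Task<?>> leaves = new LinkedHashSet<>();
    getLeafTasks(roots, leaves);
    System.out.println(leaves.size());       // prints 1
  }
}
```

Either spelling compiles on its own, but `List<Task<?>>` and `List<Task<? extends Serializable>>` are distinct, non-interchangeable parameterized types in Java, which is presumably why a change like this has to touch every declaration in one sweep, from the semantic-analyzer hooks through the compilers and conditional resolvers down to the tests.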