diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateDatabaseHook.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateDatabaseHook.java
index 34157d91ba..369c663ca3 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateDatabaseHook.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateDatabaseHook.java
@@ -79,7 +79,7 @@ public ASTNode preAnalyze(HiveSemanticAnalyzerHookContext context, ASTNode ast)
 
   @Override
   public void postAnalyze(HiveSemanticAnalyzerHookContext context,
-    List> rootTasks) throws SemanticException {
+    List> rootTasks) throws SemanticException {
     context.getConf().set(HCatConstants.HCAT_CREATE_DB_NAME, databaseName);
     super.postAnalyze(context, rootTasks);
   }
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateTableHook.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateTableHook.java
index 3dad6d2033..540ecd1546 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateTableHook.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateTableHook.java
@@ -132,7 +132,7 @@ public ASTNode preAnalyze(HiveSemanticAnalyzerHookContext context,
 
   @Override
   public void postAnalyze(HiveSemanticAnalyzerHookContext context,
-    List> rootTasks)
+    List> rootTasks)
     throws SemanticException {
 
     if (rootTasks.size() == 0) {
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java
index 2a96e0594c..b86a65f7e5 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java
@@ -152,7 +152,7 @@ public ASTNode preAnalyze(HiveSemanticAnalyzerHookContext context, ASTNode ast)
 
   @Override
   public void postAnalyze(HiveSemanticAnalyzerHookContext context,
-    List> rootTasks) throws SemanticException {
+    List> rootTasks) throws SemanticException {
 
     try {
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzerBase.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzerBase.java
index 970733c107..8487e3a915 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzerBase.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzerBase.java
@@ -55,7 +55,7 @@ public HiveAuthorizationProvider getAuthProvider() {
 
   @Override
   public void postAnalyze(HiveSemanticAnalyzerHookContext context,
-    List> rootTasks) throws SemanticException {
+    List> rootTasks) throws SemanticException {
     super.postAnalyze(context, rootTasks);
 
     //Authorize the operation.
@@ -86,7 +86,7 @@ public void postAnalyze(HiveSemanticAnalyzerHookContext context,
    * @see https://issues.apache.org/jira/browse/HCATALOG-245
    */
   protected void authorizeDDL(HiveSemanticAnalyzerHookContext context,
-    List> rootTasks) throws SemanticException {
+    List> rootTasks) throws SemanticException {
 
     if (!HCatAuthUtil.isAuthorizationEnabled(context.getConf())) {
       return;
@@ -96,7 +96,7 @@ protected void authorizeDDL(HiveSemanticAnalyzerHookContext context,
 
     try {
       hive = context.getHive();
-      for (Task task : rootTasks) {
+      for (Task task : rootTasks) {
         if (task.getWork() instanceof DDLWork) {
           DDLWork work = (DDLWork) task.getWork();
           if (work != null) {
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/hooks/TestHs2Hooks.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/hooks/TestHs2Hooks.java
index 509b178880..7d441b6acc 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/hooks/TestHs2Hooks.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/hooks/TestHs2Hooks.java
@@ -121,7 +121,7 @@ public ASTNode preAnalyze(HiveSemanticAnalyzerHookContext context,
 
     @Override
     public void postAnalyze(HiveSemanticAnalyzerHookContext context,
-        List> rootTasks) throws SemanticException {
+        List> rootTasks) throws SemanticException {
       try {
         userName = context.getUserName();
         ipAddress = context.getIpAddress();
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
index ce70952de3..36e0fcf6d7 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
@@ -356,11 +356,11 @@ public boolean hasTask(Task rootTask) {
       if (validate(rootTask)) {
         return true;
       }
-      List> childTasks = rootTask.getChildTasks();
+      List> childTasks = rootTask.getChildTasks();
       if (childTasks == null) {
         return false;
       }
-      for (Task childTask : childTasks) {
+      for (Task childTask : childTasks) {
         if (hasTask(childTask)) {
           return true;
         }
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/miniHS2/TestHs2Metrics.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/miniHS2/TestHs2Metrics.java
index 9686445f2b..d6631729d1 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/miniHS2/TestHs2Metrics.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/miniHS2/TestHs2Metrics.java
@@ -67,7 +67,7 @@ public ASTNode preAnalyze(HiveSemanticAnalyzerHookContext context,
 
     @Override
     public void postAnalyze(HiveSemanticAnalyzerHookContext context,
-        List> rootTasks) throws SemanticException {
+        List> rootTasks) throws SemanticException {
     }
   }
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/VerifyHiveSortedInputFormatUsedHook.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/VerifyHiveSortedInputFormatUsedHook.java
index b1c5521ae7..b87e497b20 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/VerifyHiveSortedInputFormatUsedHook.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/VerifyHiveSortedInputFormatUsedHook.java
@@ -33,9 +33,9 @@ public void run(HookContext hookContext) {
 
     // Go through the root tasks, and verify the input format of the map reduce task(s) is
     // HiveSortedInputFormat
-    ArrayList> rootTasks =
+    ArrayList> rootTasks =
       hookContext.getQueryPlan().getRootTasks();
-    for (Task rootTask : rootTasks) {
+    for (Task rootTask : rootTasks) {
       if (rootTask.getWork() instanceof MapredWork) {
        Assert.assertTrue("The root map reduce task's input was not marked as sorted.",
          ((MapredWork)rootTask.getWork()).getMapWork().isInputFormatSorted());
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/VerifyHooksRunInOrder.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/VerifyHooksRunInOrder.java
index 623d845677..f272a1504a 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/VerifyHooksRunInOrder.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/VerifyHooksRunInOrder.java
@@ -110,7 +110,7 @@ public ASTNode preAnalyze(HiveSemanticAnalyzerHookContext context,ASTNode ast)
 
     @Override
     public void postAnalyze(HiveSemanticAnalyzerHookContext context,
-        List> rootTasks) throws SemanticException {
+        List> rootTasks) throws SemanticException {
 
       LogHelper console = SessionState.getConsole();
       if (console == null) {
@@ -145,7 +145,7 @@ public ASTNode preAnalyze(HiveSemanticAnalyzerHookContext context,ASTNode ast)
 
     @Override
     public void postAnalyze(HiveSemanticAnalyzerHookContext context,
-        List> rootTasks) throws SemanticException {
+        List> rootTasks) throws SemanticException {
 
       LogHelper console = SessionState.getConsole();
       if (console == null) {
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/metadata/DummySemanticAnalyzerHook.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/metadata/DummySemanticAnalyzerHook.java
index 88c3bd1943..8ccbf97067 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/metadata/DummySemanticAnalyzerHook.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/metadata/DummySemanticAnalyzerHook.java
@@ -61,7 +61,7 @@ public DummySemanticAnalyzerHook() {
 
   @Override
   public void postAnalyze(HiveSemanticAnalyzerHookContext context,
-    List> rootTasks) throws SemanticException {
+    List> rootTasks) throws SemanticException {
 
    if(hook != null) {
      hook.postAnalyze(context, rootTasks);
@@ -91,7 +91,7 @@ public ASTNode preAnalyze(HiveSemanticAnalyzerHookContext context, ASTNode ast)
 
   @Override
   public void postAnalyze(HiveSemanticAnalyzerHookContext context,
-    List> rootTasks) throws SemanticException {
+    List> rootTasks) throws SemanticException {
     CreateTableDesc desc = (CreateTableDesc) ((DDLTask)rootTasks.get(rootTasks.size()-1)).getWork().getDDLDesc();
     Map tblProps = desc.getTblProps();
     if(tblProps == null) {
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/metadata/DummySemanticAnalyzerHook1.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/metadata/DummySemanticAnalyzerHook1.java
index 59c340641a..00e7582ecb 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/metadata/DummySemanticAnalyzerHook1.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/metadata/DummySemanticAnalyzerHook1.java
@@ -56,7 +56,7 @@ public DummySemanticAnalyzerHook1() {
 
   @Override
   public void postAnalyze(HiveSemanticAnalyzerHookContext context,
-    List> rootTasks) throws SemanticException {
+    List> rootTasks) throws SemanticException {
     count = 0;
     if (!isCreateTable) {
       return;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
index 64375c159c..8f8c186331 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
@@ -1602,10 +1602,10 @@ private boolean requiresLock() {
     if (!HiveConf.getBoolVar(conf, ConfVars.HIVE_LOCK_MAPRED_ONLY)) {
       return true;
     }
- Queue> taskQueue = new LinkedList>(); + Queue> taskQueue = new LinkedList>(); taskQueue.addAll(plan.getRootTasks()); while (taskQueue.peek() != null) { - Task tsk = taskQueue.remove(); + Task tsk = taskQueue.remove(); if (tsk.requireLock()) { return true; } @@ -1818,7 +1818,7 @@ private void execute() throws CommandProcessorResponse { SessionState.get().setLocalMapRedErrors(new HashMap<>()); // Add root Tasks to runnable - for (Task tsk : plan.getRootTasks()) { + for (Task tsk : plan.getRootTasks()) { // This should never happen, if it does, it's a bug with the potential to produce // incorrect results. assert tsk.getParentTasks() == null || tsk.getParentTasks().isEmpty(); @@ -1835,7 +1835,7 @@ private void execute() throws CommandProcessorResponse { // Loop while you either have tasks running, or tasks queued up while (driverCxt.isRunning()) { // Launch upto maxthreads tasks - Task task; + Task task; while ((task = driverCxt.getRunnable(maxthreads)) != null) { TaskRunner runner = launchTask(task, queryId, noName, jobname, jobs, driverCxt); if (!runner.isRunning()) { @@ -1863,14 +1863,14 @@ private void execute() throws CommandProcessorResponse { queryDisplay.setTaskResult(tskRun.getTask().getId(), tskRun.getTaskResult()); - Task tsk = tskRun.getTask(); + Task tsk = tskRun.getTask(); TaskResult result = tskRun.getTaskResult(); int exitVal = result.getExitVal(); checkInterrupted("when checking the execution result.", hookContext, perfLogger); if (exitVal != 0) { - Task backupTask = tsk.getAndInitBackupTask(); + Task backupTask = tsk.getAndInitBackupTask(); if (backupTask != null) { setErrorMsgAndDetail(exitVal, result.getTaskError(), tsk); console.printError(errorMessage); @@ -1920,7 +1920,7 @@ private void execute() throws CommandProcessorResponse { } if (tsk.getChildTasks() != null) { - for (Task child : tsk.getChildTasks()) { + for (Task child : tsk.getChildTasks()) { if (DriverContext.isLaunchable(child)) { driverCxt.addToRunnable(child); } @@ -2077,20 +2077,20 @@ private void releasePlan(QueryPlan plan) { } } - private void setQueryDisplays(List> tasks) { + private void setQueryDisplays(List> tasks) { if (tasks != null) { - Set> visited = new HashSet>(); + Set> visited = new HashSet>(); while (!tasks.isEmpty()) { tasks = setQueryDisplays(tasks, visited); } } } - private List> setQueryDisplays( - List> tasks, - Set> visited) { - List> childTasks = new ArrayList<>(); - for (Task task : tasks) { + private List> setQueryDisplays( + List> tasks, + Set> visited) { + List> childTasks = new ArrayList<>(); + for (Task task : tasks) { if (visited.contains(task)) { continue; } @@ -2156,7 +2156,7 @@ private void invokeFailureHooks(PerfLogger perfLogger, * @param cxt * the driver context */ - private TaskRunner launchTask(Task tsk, String queryId, boolean noName, + private TaskRunner launchTask(Task tsk, String queryId, boolean noName, String jobname, int jobs, DriverContext cxt) throws HiveException { if (SessionState.get() != null) { SessionState.get().getHiveHistory().startTask(queryId, tsk, tsk.getClass().getName()); @@ -2495,7 +2495,7 @@ public StatsSource getStatsSource() { public boolean hasResultSet() { // TODO explain should use a FetchTask for reading - for (Task task : plan.getRootTasks()) { + for (Task task : plan.getRootTasks()) { if (task.getClass() == ExplainTask.class) { return true; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/DriverContext.java b/ql/src/java/org/apache/hadoop/hive/ql/DriverContext.java index d5392ab804..1b8260aa68 100644 --- 
a/ql/src/java/org/apache/hadoop/hive/ql/DriverContext.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/DriverContext.java @@ -54,7 +54,7 @@ private static final int SLEEP_TIME = 2000; - private Queue> runnable; + private Queue> runnable; private Queue running; // how many jobs have been started @@ -69,7 +69,7 @@ public DriverContext() { } public DriverContext(Context ctx) { - this.runnable = new ConcurrentLinkedQueue>(); + this.runnable = new ConcurrentLinkedQueue>(); this.running = new LinkedBlockingQueue(); this.ctx = ctx; } @@ -82,7 +82,7 @@ public synchronized boolean isRunning() { return !shutdown && (!running.isEmpty() || !runnable.isEmpty()); } - public synchronized void remove(Task task) { + public synchronized void remove(Task task) { runnable.remove(task); } @@ -91,7 +91,7 @@ public synchronized void launching(TaskRunner runner) throws HiveException { running.add(runner); } - public synchronized Task getRunnable(int maxthreads) throws HiveException { + public synchronized Task getRunnable(int maxthreads) throws HiveException { checkShutdown(); if (runnable.peek() != null && running.size() < maxthreads) { return runnable.remove(); @@ -161,13 +161,13 @@ public synchronized void shutdown() { * @return true if the task is launchable, false otherwise */ - public static boolean isLaunchable(Task tsk) { + public static boolean isLaunchable(Task tsk) { // A launchable task is one that hasn't been queued, hasn't been // initialized, and is runnable. return tsk.isNotInitialized() && tsk.isRunnable(); } - public synchronized boolean addToRunnable(Task tsk) throws HiveException { + public synchronized boolean addToRunnable(Task tsk) throws HiveException { if (runnable.contains(tsk)) { return false; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/HookRunner.java b/ql/src/java/org/apache/hadoop/hive/ql/HookRunner.java index a3105b631d..2ba170b949 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/HookRunner.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/HookRunner.java @@ -228,7 +228,7 @@ public boolean hasPreAnalyzeHooks() { } public void runPostAnalyzeHooks(HiveSemanticAnalyzerHookContext hookCtx, - List> allRootTasks) throws HiveException { + List> allRootTasks) throws HiveException { initialize(); try { for (HiveSemanticAnalyzerHook hook : saHooks) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/QueryDisplay.java b/ql/src/java/org/apache/hadoop/hive/ql/QueryDisplay.java index 79cfd8466c..ddeb954d9b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/QueryDisplay.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/QueryDisplay.java @@ -55,14 +55,14 @@ private final LinkedHashMap tasks = new LinkedHashMap(); - public synchronized void updateTaskStatus(Task tTask) { + public void updateTaskStatus(Task tTask) { if (!tasks.containsKey(tTask.getId())) { tasks.put(tTask.getId(), new TaskDisplay(tTask)); } tasks.get(tTask.getId()).updateStatus(tTask); } - public synchronized void updateTaskStatistics(MapRedStats mapRedStats, + public synchronized void updateTaskStatistics(MapRedStats mapRedStats, RunningJob rj, String taskId) throws IOException, JSONException { if (tasks.containsKey(taskId)) { tasks.get(taskId).updateMapRedStatsJson(mapRedStats, rj); @@ -232,7 +232,7 @@ public synchronized String getExternalHandle() { return externalHandle; } - public synchronized void updateStatus(Task tTask) { + public void updateStatus(Task tTask) { this.taskState = tTask.getTaskState(); if (externalHandle == null && tTask.getExternalHandle() != null) { this.externalHandle = tTask.getExternalHandle(); 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java b/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java index 7e49b6c883..1d01509c5f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java @@ -76,7 +76,7 @@ private String optimizedCBOPlan; private String optimizedQueryString; - private ArrayList> rootTasks; + private ArrayList> rootTasks; private FetchTask fetchTask; private final List reducerTimeStatsPerJobList; @@ -134,7 +134,7 @@ public QueryPlan(String queryString, BaseSemanticAnalyzer sem, Long startTime, S HiveOperation operation, Schema resultSchema) { this.queryString = queryString; - rootTasks = new ArrayList>(sem.getAllRootTasks()); + rootTasks = new ArrayList>(sem.getAllRootTasks()); reducerTimeStatsPerJobList = new ArrayList(); fetchTask = sem.getFetchTask(); // Note that inputs and outputs can be changed when the query gets executed @@ -264,12 +264,12 @@ private void populateQueryPlan() throws IOException { query.setStageGraph(new org.apache.hadoop.hive.ql.plan.api.Graph()); query.getStageGraph().setNodeType(NodeType.STAGE); - Queue> tasksToVisit = - new LinkedList>(); - Set> tasksVisited = new HashSet>(); + Queue> tasksToVisit = + new LinkedList>(); + Set> tasksVisited = new HashSet>(); tasksToVisit.addAll(rootTasks); while (tasksToVisit.size() != 0) { - Task task = tasksToVisit.remove(); + Task task = tasksToVisit.remove(); tasksVisited.add(task); // populate stage org.apache.hadoop.hive.ql.plan.api.Stage stage = @@ -315,14 +315,14 @@ private void populateQueryPlan() throws IOException { listEntry.setNode(task.getId()); ConditionalTask t = (ConditionalTask) task; - for (Task listTask : t.getListTasks()) { + for (Task listTask : t.getListTasks()) { if (t.getChildTasks() != null) { org.apache.hadoop.hive.ql.plan.api.Adjacency childEntry = new org.apache.hadoop.hive.ql.plan.api.Adjacency(); childEntry.setAdjacencyType(AdjacencyType.DISJUNCTIVE); childEntry.setNode(listTask.getId()); // done processing the task - for (Task childTask : t.getChildTasks()) { + for (Task childTask : t.getChildTasks()) { childEntry.addToChildren(childTask.getId()); if (!tasksVisited.contains(childTask)) { tasksToVisit.add(childTask); @@ -343,7 +343,7 @@ private void populateQueryPlan() throws IOException { entry.setAdjacencyType(AdjacencyType.CONJUNCTIVE); entry.setNode(task.getId()); // done processing the task - for (Task childTask : task.getChildTasks()) { + for (Task childTask : task.getChildTasks()) { entry.addToChildren(childTask.getId()); if (!tasksVisited.contains(childTask)) { tasksToVisit.add(childTask); @@ -399,17 +399,17 @@ private void updateCountersInQueryPlan() { * Extract all the counters from tasks and operators. 
*/ private void extractCounters() throws IOException { - Queue> tasksToVisit = - new LinkedList>(); - Set> tasksVisited = - new HashSet>(); + Queue> tasksToVisit = + new LinkedList>(); + Set> tasksVisited = + new HashSet>(); tasksToVisit.addAll(rootTasks); while (tasksToVisit.peek() != null) { - Task task = tasksToVisit.remove(); + Task task = tasksToVisit.remove(); tasksVisited.add(task); // add children to tasksToVisit if (task.getChildTasks() != null) { - for (Task childTask : task.getChildTasks()) { + for (Task childTask : task.getChildTasks()) { if (!tasksVisited.contains(childTask)) { tasksToVisit.add(childTask); } @@ -450,7 +450,7 @@ private void extractCounters() throws IOException { } } else if (task instanceof ConditionalTask) { ConditionalTask cTask = (ConditionalTask) task; - for (Task listTask : cTask.getListTasks()) { + for (Task listTask : cTask.getListTasks()) { if (!tasksVisited.contains(listTask)) { tasksToVisit.add(listTask); } @@ -696,11 +696,11 @@ public void setDone() { return done; } - public ArrayList> getRootTasks() { + public ArrayList> getRootTasks() { return rootTasks; } - public void setRootTasks(ArrayList> rootTasks) { + public void setRootTasks(ArrayList> rootTasks) { this.rootTasks = rootTasks; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ConditionalTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ConditionalTask.java index c2b058ae5f..ecdf368b52 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ConditionalTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ConditionalTask.java @@ -33,11 +33,11 @@ public class ConditionalTask extends Task implements Serializable { private static final long serialVersionUID = 1L; - private List> listTasks; + private List> listTasks; private boolean resolved = false; - private List> resTasks; + private List> resTasks; private ConditionalResolver resolver; private Object resolverCtx; @@ -49,7 +49,7 @@ public ConditionalTask() { @Override public boolean isMapRedTask() { - for (Task task : listTasks) { + for (Task task : listTasks) { if (task.isMapRedTask()) { return true; } @@ -65,7 +65,7 @@ public boolean canExecuteInParallel() { @Override public boolean hasReduce() { - for (Task task : listTasks) { + for (Task task : listTasks) { if (task.hasReduce()) { return true; } @@ -89,7 +89,7 @@ public int execute(DriverContext driverContext) { } private void resolveTask(DriverContext driverContext) throws HiveException { - for (Task tsk : getListTasks()) { + for (Task tsk : getListTasks()) { if (!resTasks.contains(tsk)) { driverContext.remove(tsk); console.printInfo(tsk.getId() + " is filtered out by condition resolver."); @@ -101,7 +101,7 @@ private void resolveTask(DriverContext driverContext) throws HiveException { } else { if (getParentTasks() != null) { // This makes it so that we can go back up the tree later - for (Task task : getParentTasks()) { + for (Task task : getParentTasks()) { task.addDependentTask(tsk); } } @@ -140,20 +140,20 @@ public Object getResolverCtx() { @Override public boolean done() { boolean ret = true; - List> parentTasks = getParentTasks(); + List> parentTasks = getParentTasks(); if (parentTasks != null) { - for (Task par : parentTasks) { + for (Task par : parentTasks) { ret = ret && par.done(); } } - List> retTasks; + List> retTasks; if (resolved) { retTasks = resTasks; } else { retTasks = getListTasks(); } if (ret && retTasks != null) { - for (Task tsk : retTasks) { + for (Task tsk : retTasks) { ret = ret && tsk.done(); } } @@ -171,7 +171,7 @@ public void 
setResolverCtx(Object resolverCtx) { /** * @return the listTasks */ - public List> getListTasks() { + public List> getListTasks() { return listTasks; } @@ -179,7 +179,7 @@ public void setResolverCtx(Object resolverCtx) { * @param listTasks * the listTasks to set */ - public void setListTasks(List> listTasks) { + public void setListTasks(List> listTasks) { this.listTasks = listTasks; } @@ -200,11 +200,11 @@ public String getName() { * @return true if the task got added false if it already existed */ @Override - public boolean addDependentTask(Task dependent) { + public boolean addDependentTask(Task dependent) { boolean ret = false; if (getListTasks() != null) { ret = true; - for (Task tsk : getListTasks()) { + for (Task tsk : getListTasks()) { ret = ret & tsk.addDependentTask(dependent); } } @@ -212,7 +212,7 @@ public boolean addDependentTask(Task dependent) { } @Override - public List> getDependentTasks() { + public List> getDependentTasks() { return listTasks; } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/NodeUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/NodeUtils.java index f76bfddc1e..fc058374c4 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/NodeUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/NodeUtils.java @@ -34,7 +34,7 @@ public class NodeUtils { - public static void iterateTask(Collection> tasks, Class clazz, Function function) { + public static void iterateTask(Collection> tasks, Class clazz, Function function) { // Does a breadth first traversal of the tasks Set visited = new HashSet(); while (!tasks.isEmpty()) { @@ -43,7 +43,7 @@ return; } - private static Collection> iterateTask(Collection> tasks, + private static Collection> iterateTask(Collection> tasks, Class clazz, Function function, Set visited) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java index f54210568c..8eea9cfbea 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java @@ -64,8 +64,8 @@ protected transient DriverContext driverContext; protected transient boolean clonedConf = false; protected transient String jobID; - protected Task backupTask; - protected List> backupChildrenTasks = new ArrayList>(); + protected Task backupTask; + protected List> backupChildrenTasks = new ArrayList>(); protected static transient Logger LOG = LoggerFactory.getLogger(Task.class); protected int taskTag; private boolean isLocalMode =false; @@ -84,7 +84,7 @@ public static final int CONVERTED_SORTMERGEJOIN = 8; public QueryDisplay queryDisplay = null; // Descendants tasks who subscribe feeds from this task - protected transient List> feedSubscribers; + protected transient List> feedSubscribers; protected String id; protected T work; @@ -138,8 +138,8 @@ public String getStatusMessage() { protected boolean rootTask; - protected List> childTasks; - protected List> parentTasks; + protected List> childTasks; + protected List> parentTasks; /** * this can be set by the Task, to provide more info about the failure in TaskResult * where the Driver can find it. 
This is checked if {@link Task#execute(org.apache.hadoop.hive.ql.DriverContext)} @@ -235,7 +235,7 @@ public void setRootTask(boolean rootTask) { this.rootTask = rootTask; } - public void setChildTasks(List> childTasks) { + public void setChildTasks(List> childTasks) { this.childTasks = childTasks; } @@ -244,7 +244,7 @@ public void setChildTasks(List> childTasks) { return getChildTasks(); } - public List> getChildTasks() { + public List> getChildTasks() { return childTasks; } @@ -252,11 +252,11 @@ public int getNumChild() { return childTasks == null ? 0 : childTasks.size(); } - public void setParentTasks(List> parentTasks) { + public void setParentTasks(List> parentTasks) { this.parentTasks = parentTasks; } - public List> getParentTasks() { + public List> getParentTasks() { return parentTasks; } @@ -264,27 +264,27 @@ public int getNumParent() { return parentTasks == null ? 0 : parentTasks.size(); } - public Task getBackupTask() { + public Task getBackupTask() { return backupTask; } - public void setBackupTask(Task backupTask) { + public void setBackupTask(Task backupTask) { this.backupTask = backupTask; } - public List> getBackupChildrenTasks() { + public List> getBackupChildrenTasks() { return backupChildrenTasks; } - public void setBackupChildrenTasks(List> backupChildrenTasks) { + public void setBackupChildrenTasks(List> backupChildrenTasks) { this.backupChildrenTasks = backupChildrenTasks; } - public Task getAndInitBackupTask() { + public Task getAndInitBackupTask() { if (backupTask != null) { // first set back the backup task with its children task. if( backupChildrenTasks!= null) { - for (Task backupChild : backupChildrenTasks) { + for (Task backupChild : backupChildrenTasks) { backupChild.getParentTasks().add(backupTask); } } @@ -297,17 +297,17 @@ public void setBackupChildrenTasks(List> backupChil public void removeFromChildrenTasks() { - List> childrenTasks = this.getChildTasks(); + List> childrenTasks = this.getChildTasks(); if (childrenTasks == null) { return; } - for (Task childTsk : childrenTasks) { + for (Task childTsk : childrenTasks) { // remove this task from its children tasks childTsk.getParentTasks().remove(this); // recursively remove non-parent task from its children - List> siblingTasks = childTsk.getParentTasks(); + List> siblingTasks = childTsk.getParentTasks(); if (siblingTasks == null || siblingTasks.size() == 0) { childTsk.removeFromChildrenTasks(); } @@ -321,7 +321,7 @@ public void removeFromChildrenTasks() { * * @return a list of tasks that are dependent on this task. 
*/ - public List> getDependentTasks() { + public List> getDependentTasks() { return getChildTasks(); } @@ -331,16 +331,16 @@ public void removeFromChildrenTasks() { * * @return true if the task got added false if it already existed */ - public boolean addDependentTask(Task dependent) { + public boolean addDependentTask(Task dependent) { boolean ret = false; if (getChildTasks() == null) { - setChildTasks(new ArrayList>()); + setChildTasks(new ArrayList>()); } if (!getChildTasks().contains(dependent)) { ret = true; getChildTasks().add(dependent); if (dependent.getParentTasks() == null) { - dependent.setParentTasks(new ArrayList>()); + dependent.setParentTasks(new ArrayList>()); } if (!dependent.getParentTasks().contains(this)) { dependent.getParentTasks().add(this); @@ -350,7 +350,7 @@ public boolean addDependentTask(Task dependent) { } @SuppressWarnings({"unchecked", "rawtypes"}) - public static List> + public static List> findLeafs(List> rootTasks) { final List> leafTasks = new ArrayList>(); @@ -372,7 +372,7 @@ public void apply(Task task) { * @param dependent * the task to remove */ - public void removeDependentTask(Task dependent) { + public void removeDependentTask(Task dependent) { if ((getChildTasks() != null) && (getChildTasks().contains(dependent))) { getChildTasks().remove(dependent); if ((dependent.getParentTasks() != null) && (dependent.getParentTasks().contains(this))) { @@ -421,7 +421,7 @@ public synchronized boolean isNotInitialized() { public boolean isRunnable() { boolean isrunnable = true; if (parentTasks != null) { - for (Task parent : parentTasks) { + for (Task parent : parentTasks) { if (!parent.done()) { isrunnable = false; break; @@ -509,23 +509,23 @@ public boolean hasReduce() { * @param publisher * this feed provider. */ - public void subscribeFeed(Task publisher) { + public void subscribeFeed(Task publisher) { if (publisher != this && publisher.ancestorOrSelf(this)) { if (publisher.getFeedSubscribers() == null) { - publisher.setFeedSubscribers(new LinkedList>()); + publisher.setFeedSubscribers(new LinkedList>()); } publisher.getFeedSubscribers().add(this); } } // return true if this task is an ancestor of itself of parameter desc - private boolean ancestorOrSelf(Task desc) { + private boolean ancestorOrSelf(Task desc) { if (this == desc) { return true; } - List> deps = getDependentTasks(); + List> deps = getDependentTasks(); if (deps != null) { - for (Task d : deps) { + for (Task d : deps) { if (d.ancestorOrSelf(desc)) { return true; } @@ -534,18 +534,18 @@ private boolean ancestorOrSelf(Task desc) { return false; } - public List> getFeedSubscribers() { + public List> getFeedSubscribers() { return feedSubscribers; } - public void setFeedSubscribers(List> s) { + public void setFeedSubscribers(List> s) { feedSubscribers = s; } // push the feed to its subscribers protected void pushFeed(FeedType feedType, Object feedValue) { if (feedSubscribers != null) { - for (Task s : feedSubscribers) { + for (Task s : feedSubscribers) { s.receiveFeed(feedType, feedValue); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java index f70726409b..d6b266e7c9 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java @@ -181,12 +181,12 @@ public static void resetId() { @SafeVarargs public static void makeChild(Task ret, - Task... tasklist) { + Task... 
tasklist) { // Add the new task as child of each of the passed in tasks - for (Task tsk : tasklist) { - List> children = tsk.getChildTasks(); + for (Task tsk : tasklist) { + List> children = tsk.getChildTasks(); if (children == null) { - children = new ArrayList>(); + children = new ArrayList>(); } children.add(ret); tsk.setChildTasks(children); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskRunner.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskRunner.java index 13010aedb6..a5554c3004 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskRunner.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskRunner.java @@ -33,7 +33,7 @@ **/ public class TaskRunner extends Thread { - protected Task tsk; + protected Task tsk; protected TaskResult result; protected SessionState ss; private static AtomicLong taskCounter = new AtomicLong(0); @@ -50,14 +50,14 @@ protected Long initialValue() { private final DriverContext driverCtx; - public TaskRunner(Task tsk, DriverContext ctx) { + public TaskRunner(Task tsk, DriverContext ctx) { this.tsk = tsk; this.result = new TaskResult(); ss = SessionState.get(); driverCtx = ctx; } - public Task getTask() { + public Task getTask() { return tsk; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java index 3d6859c1fb..e08191bbb0 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java @@ -2597,24 +2597,24 @@ public static boolean isEmptyPath(Configuration job, Path dirPath) throws IOExce return true; } - public static List getTezTasks(List> tasks) { + public static List getTezTasks(List> tasks) { return getTasks(tasks, new TaskFilterFunction<>(TezTask.class)); } - public static List getSparkTasks(List> tasks) { + public static List getSparkTasks(List> tasks) { return getTasks(tasks, new TaskFilterFunction<>(SparkTask.class)); } - public static List getMRTasks(List> tasks) { + public static List getMRTasks(List> tasks) { return getTasks(tasks, new TaskFilterFunction<>(ExecDriver.class)); } - public static int getNumClusterJobs(List> tasks) { + public static int getNumClusterJobs(List> tasks) { return getMRTasks(tasks).size() + getTezTasks(tasks).size() + getSparkTasks(tasks).size(); } static class TaskFilterFunction implements DAGTraversal.Function { - private Set> visited = new HashSet<>(); + private Set> visited = new HashSet<>(); private Class requiredType; private List typeSpecificTasks = new ArrayList<>(); @@ -2623,7 +2623,7 @@ public static int getNumClusterJobs(List> tasks) { } @Override - public void process(Task task) { + public void process(Task task) { if (requiredType.isInstance(task) && !typeSpecificTasks.contains(task)) { typeSpecificTasks.add((T) task); } @@ -2635,12 +2635,12 @@ public void process(Task task) { } @Override - public boolean skipProcessing(Task task) { + public boolean skipProcessing(Task task) { return visited.contains(task); } } - private static List getTasks(List> tasks, + private static List getTasks(List> tasks, TaskFilterFunction function) { DAGTraversal.traverse(tasks, function); return function.getTasks(); @@ -2829,7 +2829,7 @@ public static double showTime(long time) { * @param conf * @throws SemanticException */ - public static void reworkMapRedWork(Task task, + public static void reworkMapRedWork(Task task, boolean reworkMapredWork, HiveConf conf) throws SemanticException { if (reworkMapredWork && (task instanceof MapRedTask)) { try { diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HadoopJobExecHelper.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HadoopJobExecHelper.java index d2ca33d96c..c365d41a82 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HadoopJobExecHelper.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HadoopJobExecHelper.java @@ -64,7 +64,7 @@ static final private org.slf4j.Logger LOG = LoggerFactory.getLogger(HadoopJobExecHelper.class.getName()); protected transient JobConf job; - protected Task task; + protected Task task; protected transient int mapProgress = -1; protected transient int reduceProgress = -1; @@ -142,7 +142,7 @@ public void setJobId(JobID jobId) { } public HadoopJobExecHelper(JobConf job, LogHelper console, - Task task, HadoopJobExecHook hookCallBack) { + Task task, HadoopJobExecHook hookCallBack) { this.queryId = HiveConf.getVar(job, HiveConf.ConfVars.HIVEQUERYID, "unknown-" + System.currentTimeMillis()); this.job = job; this.console = console; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ExternalTableCopyTaskBuilder.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ExternalTableCopyTaskBuilder.java index 0ee7425161..1af92271f3 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ExternalTableCopyTaskBuilder.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ExternalTableCopyTaskBuilder.java @@ -56,8 +56,8 @@ this.conf = conf; } - List> tasks(TaskTracker tracker) { - List> tasks = new ArrayList<>(); + List> tasks(TaskTracker tracker) { + List> tasks = new ArrayList<>(); Iterator itr = work.getPathsToCopyIterator(); while (tracker.canAddMoreTasks() && itr.hasNext()) { DirCopyWork dirCopyWork = itr.next(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java index 4e6be1ca40..9a541d2873 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java @@ -93,12 +93,12 @@ public StageType getType() { */ private static class Scope { boolean database = false, table = false, partition = false; - List> rootTasks = new ArrayList<>(); + List> rootTasks = new ArrayList<>(); } @Override public int execute(DriverContext driverContext) { - Task rootTask = work.getRootTask(); + Task rootTask = work.getRootTask(); if (rootTask != null) { rootTask.setChildTasks(null); } @@ -466,19 +466,19 @@ private void partitionsPostProcessing(BootstrapEventsIterator iterator, */ private void setUpDependencies(TaskTracker parentTasks, TaskTracker childTasks) { if (parentTasks.hasTasks()) { - for (Task parentTask : parentTasks.tasks()) { - for (Task childTask : childTasks.tasks()) { + for (Task parentTask : parentTasks.tasks()) { + for (Task childTask : childTasks.tasks()) { parentTask.addDependentTask(childTask); } } } else { - for (Task childTask : childTasks.tasks()) { + for (Task childTask : childTasks.tasks()) { parentTasks.addTask(childTask); } } } - private void createBuilderTask(List> rootTasks) { + private void createBuilderTask(List> rootTasks) { // Use loadTask as dependencyCollection Task loadTask = TaskFactory.get(work, conf); DAGTraversal.traverse(rootTasks, new AddDependencyToLeaves(loadTask)); @@ -507,7 +507,7 @@ private int executeIncrementalLoad(DriverContext driverContext) { } } - List> childTasks = new ArrayList<>(); + List> childTasks = new ArrayList<>(); int maxTasks = conf.getIntVar(HiveConf.ConfVars.REPL_APPROX_MAX_LOAD_TASKS); // First start the distcp tasks 
to copy the files related to external table. The distcp tasks should be @@ -548,7 +548,7 @@ private int executeIncrementalLoad(DriverContext driverContext) { AlterDatabaseSetPropertiesDesc alterDbDesc = new AlterDatabaseSetPropertiesDesc(dbName, mapProp, new ReplicationSpec(lastEventid, lastEventid)); - Task updateReplIdTask = + Task updateReplIdTask = TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), alterDbDesc), conf); DAGTraversal.traverse(childTasks, new AddDependencyToLeaves(updateReplIdTask)); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadWork.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadWork.java index 1d63cd89e7..44d2535a44 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadWork.java @@ -53,7 +53,7 @@ private DatabaseEvent.State state = null; private final transient BootstrapEventsIterator bootstrapIterator; private transient IncrementalLoadTasksBuilder incrementalLoadTasksBuilder; - private transient Task rootTask; + private transient Task rootTask; private final transient Iterator pathsToCopyIterator; /* @@ -143,11 +143,11 @@ IncrementalLoadTasksBuilder incrementalLoadTasksBuilder() { return incrementalLoadTasksBuilder; } - public Task getRootTask() { + public Task getRootTask() { return rootTask; } - public void setRootTask(Task rootTask) { + public void setRootTask(Task rootTask) { this.rootTask = rootTask; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadConstraint.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadConstraint.java index d603e69c9c..bc12b267b5 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadConstraint.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadConstraint.java @@ -85,7 +85,7 @@ public TaskTracker tasks() throws IOException, SemanticException { String fksString = json.getString("fks"); String uksString = json.getString("uks"); String nnsString = json.getString("nns"); - List> tasks = new ArrayList>(); + List> tasks = new ArrayList>(); if (pksString != null && !pksString.isEmpty() && !isPrimaryKeysAlreadyLoaded(pksString)) { AddPrimaryKeyHandler pkHandler = new AddPrimaryKeyHandler(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadDatabase.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadDatabase.java index c5378b4422..52777f3b20 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadDatabase.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadDatabase.java @@ -60,7 +60,7 @@ public LoadDatabase(Context context, DatabaseEvent event, String dbNameToLoadIn, public TaskTracker tasks() throws Exception { Database dbInMetadata = readDbMetadata(); String dbName = dbInMetadata.getName(); - Task dbRootTask = null; + Task dbRootTask = null; ReplLoadOpType loadDbType = getLoadDbType(dbName); switch (loadDbType) { case LOAD_NEW: @@ -115,7 +115,7 @@ private boolean isDbEmpty(String dbName) throws HiveException { return allTables.isEmpty() && allFunctions.isEmpty(); } - private Task createDbTask(Database dbObj) { + private Task createDbTask(Database dbObj) { // note that we do not set location - for repl load, we want that auto-created. 
CreateDatabaseDesc createDbDesc = new CreateDatabaseDesc(dbObj.getName(), dbObj.getDescription(), null, false, updateDbProps(dbObj, context.dumpDirectory)); @@ -126,12 +126,12 @@ private boolean isDbEmpty(String dbName) throws HiveException { return TaskFactory.get(work, context.hiveConf); } - private Task alterDbTask(Database dbObj) { + private Task alterDbTask(Database dbObj) { return alterDbTask(dbObj.getName(), updateDbProps(dbObj, context.dumpDirectory), context.hiveConf); } - private Task setOwnerInfoTask(Database dbObj) { + private Task setOwnerInfoTask(Database dbObj) { AlterDatabaseSetOwnerDesc alterDbDesc = new AlterDatabaseSetOwnerDesc(dbObj.getName(), new PrincipalDesc(dbObj.getOwnerName(), dbObj.getOwnerType()), null); DDLWork work = new DDLWork(new HashSet<>(), new HashSet<>(), alterDbDesc); @@ -160,7 +160,7 @@ private boolean isDbEmpty(String dbName) throws HiveException { return parameters; } - private static Task alterDbTask(String dbName, Map props, + private static Task alterDbTask(String dbName, Map props, HiveConf hiveConf) { AlterDatabaseSetPropertiesDesc alterDbDesc = new AlterDatabaseSetPropertiesDesc(dbName, props, null); DDLWork work = new DDLWork(new HashSet<>(), new HashSet<>(), alterDbDesc); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadFunction.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadFunction.java index 2c053ef717..19e1a8b0eb 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadFunction.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadFunction.java @@ -65,7 +65,7 @@ public LoadFunction(Context context, ReplLogger replLogger, FunctionEvent event, this.tracker = new TaskTracker(existingTracker); } - private void createFunctionReplLogTask(List> functionTasks, + private void createFunctionReplLogTask(List> functionTasks, String functionName) { ReplStateLogWork replLogWork = new ReplStateLogWork(replLogger, functionName); Task replLogTask = TaskFactory.get(replLogWork, context.hiveConf); @@ -82,7 +82,7 @@ public TaskTracker tasks() throws IOException, SemanticException { return tracker; } CreateFunctionHandler handler = new CreateFunctionHandler(); - List> tasks = handler.handle( + List> tasks = handler.handle( new MessageHandler.Context( dbNameToLoadIn, fromPath.toString(), null, null, context.hiveConf, context.hiveDb, context.nestedContext, LOG) diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java index 40020ed257..e45116115f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java @@ -117,7 +117,7 @@ public TaskTracker tasks() throws Exception { updateReplicationState(initialReplicationState()); if (!forNewTable().hasReplicationState()) { // Add ReplStateLogTask only if no pending table load tasks left for next cycle - Task replLogTask + Task replLogTask = ReplUtils.getTableReplLogTask(tableDesc, replLogger, context.hiveConf); tracker.addDependentTask(replLogTask); } @@ -131,7 +131,7 @@ public TaskTracker tasks() throws Exception { updateReplicationState(initialReplicationState()); if (!forExistingTable(lastReplicatedPartition).hasReplicationState()) { // Add ReplStateLogTask only if no pending table load tasks left for next cycle - Task replLogTask + Task 
replLogTask = ReplUtils.getTableReplLogTask(tableDesc, replLogger, context.hiveConf); tracker.addDependentTask(replLogTask); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java index 02993fce6a..8da2b2ed13 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java @@ -151,7 +151,7 @@ public TaskTracker tasks(boolean isBootstrapDuringInc) throws Exception { context.hiveConf ); if (!isPartitioned(tableDesc)) { - Task replLogTask + Task replLogTask = ReplUtils.getTableReplLogTask(tableDesc, replLogger, context.hiveConf); ckptTask.addDependentTask(replLogTask); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java index 964b7920aa..ed75df88f0 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java @@ -89,10 +89,10 @@ public IncrementalLoadTasksBuilder(String dbName, String loadPath, numIteration = 0; } - public Task build(DriverContext driverContext, Hive hive, Logger log, + public Task build(DriverContext driverContext, Hive hive, Logger log, TaskTracker tracker) throws Exception { - Task evTaskRoot = TaskFactory.get(new DependencyCollectionWork()); - Task taskChainTail = evTaskRoot; + Task evTaskRoot = TaskFactory.get(new DependencyCollectionWork()); + Task taskChainTail = evTaskRoot; Long lastReplayedEvent = null; this.log = log; numIteration++; @@ -133,13 +133,13 @@ public IncrementalLoadTasksBuilder(String dbName, String loadPath, MessageHandler.Context context = new MessageHandler.Context(dbName, location, taskChainTail, eventDmd, conf, hive, driverContext.getCtx(), this.log); - List> evTasks = analyzeEventLoad(context); + List> evTasks = analyzeEventLoad(context); if ((evTasks != null) && (!evTasks.isEmpty())) { ReplStateLogWork replStateLogWork = new ReplStateLogWork(replLogger, dir.getPath().getName(), eventDmd.getDumpType().toString()); - Task barrierTask = TaskFactory.get(replStateLogWork, conf); + Task barrierTask = TaskFactory.get(replStateLogWork, conf); AddDependencyToLeaves function = new AddDependencyToLeaves(barrierTask); DAGTraversal.traverse(evTasks, function); this.log.debug("Updated taskChainTail from {}:{} to {}:{}", @@ -152,14 +152,14 @@ public IncrementalLoadTasksBuilder(String dbName, String loadPath, if (!hasMoreWork()) { ReplRemoveFirstIncLoadPendFlagDesc desc = new ReplRemoveFirstIncLoadPendFlagDesc(dbName); - Task updateIncPendTask = TaskFactory.get(new DDLWork(inputs, outputs, desc), conf); + Task updateIncPendTask = TaskFactory.get(new DDLWork(inputs, outputs, desc), conf); taskChainTail.addDependentTask(updateIncPendTask); taskChainTail = updateIncPendTask; Map dbProps = new HashMap<>(); dbProps.put(ReplicationSpec.KEY.CURR_STATE_ID.toString(), String.valueOf(lastReplayedEvent)); ReplStateLogWork replStateLogWork = new ReplStateLogWork(replLogger, dbProps); - Task barrierTask = TaskFactory.get(replStateLogWork, conf); + Task barrierTask = TaskFactory.get(replStateLogWork, conf); taskChainTail.addDependentTask(barrierTask); this.log.debug("Added {}:{} as a precursor of barrier task {}:{}", 
taskChainTail.getClass(), taskChainTail.getId(), @@ -200,12 +200,12 @@ private boolean shouldReplayEvent(FileStatus dir, DumpType dumpType, String dbNa } } - private List> analyzeEventLoad(MessageHandler.Context context) throws SemanticException { + private List> analyzeEventLoad(MessageHandler.Context context) throws SemanticException { MessageHandler messageHandler = context.dmd.getDumpType().handler(); - List> tasks = messageHandler.handle(context); + List> tasks = messageHandler.handle(context); if (context.precursor != null) { - for (Task t : tasks) { + for (Task t : tasks) { context.precursor.addDependentTask(t); log.debug("Added {}:{} as a precursor of {}:{}", context.precursor.getClass(), context.precursor.getId(), t.getClass(), t.getId()); @@ -217,9 +217,9 @@ private boolean shouldReplayEvent(FileStatus dir, DumpType dumpType, String dbNa return addUpdateReplStateTasks(messageHandler.getUpdatedMetadata(), tasks); } - private Task getMigrationCommitTxnTask(String dbName, String tableName, + private Task getMigrationCommitTxnTask(String dbName, String tableName, List> partSpec, String replState, - Task preCursor) throws SemanticException { + Task preCursor) throws SemanticException { ReplLastIdInfo replLastIdInfo = new ReplLastIdInfo(dbName, Long.parseLong(replState)); replLastIdInfo.setTable(tableName); if (partSpec != null && !partSpec.isEmpty()) { @@ -234,7 +234,7 @@ private boolean shouldReplayEvent(FileStatus dir, DumpType dumpType, String dbNa replLastIdInfo.setPartitionList(partitionList); } - Task updateReplIdTxnTask = TaskFactory.get(new ReplTxnWork(replLastIdInfo, ReplTxnWork + Task updateReplIdTxnTask = TaskFactory.get(new ReplTxnWork(replLastIdInfo, ReplTxnWork .OperationType.REPL_MIGRATION_COMMIT_TXN), conf); if (preCursor != null) { @@ -245,9 +245,9 @@ private boolean shouldReplayEvent(FileStatus dir, DumpType dumpType, String dbNa return updateReplIdTxnTask; } - private Task tableUpdateReplStateTask(String dbName, String tableName, + private Task tableUpdateReplStateTask(String dbName, String tableName, Map partSpec, String replState, - Task preCursor) throws SemanticException { + Task preCursor) throws SemanticException { HashMap mapProp = new HashMap<>(); mapProp.put(ReplicationSpec.KEY.CURR_STATE_ID.toString(), replState); @@ -255,7 +255,7 @@ private boolean shouldReplayEvent(FileStatus dir, DumpType dumpType, String dbNa AlterTableSetPropertiesDesc alterTblDesc = new AlterTableSetPropertiesDesc(fqTableName, partSpec, new ReplicationSpec(replState, replState), false, mapProp, false, false, null); - Task updateReplIdTask = TaskFactory.get(new DDLWork(inputs, outputs, alterTblDesc), conf); + Task updateReplIdTask = TaskFactory.get(new DDLWork(inputs, outputs, alterTblDesc), conf); // Link the update repl state task with dependency collection task if (preCursor != null) { @@ -266,14 +266,14 @@ private boolean shouldReplayEvent(FileStatus dir, DumpType dumpType, String dbNa return updateReplIdTask; } - private Task dbUpdateReplStateTask(String dbName, String replState, - Task preCursor) { + private Task dbUpdateReplStateTask(String dbName, String replState, + Task preCursor) { HashMap mapProp = new HashMap<>(); mapProp.put(ReplicationSpec.KEY.CURR_STATE_ID.toString(), replState); AlterDatabaseSetPropertiesDesc alterDbDesc = new AlterDatabaseSetPropertiesDesc(dbName, mapProp, new ReplicationSpec(replState, replState)); - Task updateReplIdTask = TaskFactory.get(new DDLWork(inputs, outputs, alterDbDesc), conf); + Task updateReplIdTask = TaskFactory.get(new DDLWork(inputs, 
outputs, alterDbDesc), conf); // Link the update repl state task with dependency collection task if (preCursor != null) { @@ -284,9 +284,9 @@ private boolean shouldReplayEvent(FileStatus dir, DumpType dumpType, String dbNa return updateReplIdTask; } - private List> addUpdateReplStateTasks( + private List> addUpdateReplStateTasks( UpdatedMetaDataTracker updatedMetaDataTracker, - List> importTasks) throws SemanticException { + List> importTasks) throws SemanticException { // If no import tasks generated by the event then no need to update the repl state to any object. if (importTasks.isEmpty()) { log.debug("No objects need update of repl state: 0 import tasks"); @@ -302,10 +302,10 @@ private boolean shouldReplayEvent(FileStatus dir, DumpType dumpType, String dbNa } // Create a barrier task for dependency collection of import tasks - Task barrierTask = TaskFactory.get(new DependencyCollectionWork(), conf); + Task barrierTask = TaskFactory.get(new DependencyCollectionWork(), conf); - List> tasks = new ArrayList<>(); - Task updateReplIdTask; + List> tasks = new ArrayList<>(); + Task updateReplIdTask; for (UpdatedMetaDataTracker.UpdateMetaData updateMetaData : updatedMetaDataTracker.getUpdateMetaDataList()) { String replState = updateMetaData.getReplState(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/AddDependencyToLeaves.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/AddDependencyToLeaves.java index 284796f695..f0a6cfb4ff 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/AddDependencyToLeaves.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/AddDependencyToLeaves.java @@ -26,26 +26,26 @@ import java.util.List; public class AddDependencyToLeaves implements DAGTraversal.Function { - private List> postDependencyCollectionTasks; + private List> postDependencyCollectionTasks; - public AddDependencyToLeaves(List> postDependencyCollectionTasks) { + public AddDependencyToLeaves(List> postDependencyCollectionTasks) { this.postDependencyCollectionTasks = postDependencyCollectionTasks; } - public AddDependencyToLeaves(Task postDependencyTask) { + public AddDependencyToLeaves(Task postDependencyTask) { this(Collections.singletonList(postDependencyTask)); } @Override - public void process(Task task) { + public void process(Task task) { if (task.getChildTasks() == null) { postDependencyCollectionTasks.forEach(task::addDependentTask); } } @Override - public boolean skipProcessing(Task task) { + public boolean skipProcessing(Task task) { return postDependencyCollectionTasks.contains(task); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java index 23127c946c..98a0fa6fba 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java @@ -186,22 +186,22 @@ public static boolean isTableMigratingToTransactional(HiveConf conf, private static void addOpenTxnTaskForMigration(String actualDbName, String actualTblName, HiveConf conf, UpdatedMetaDataTracker updatedMetaDataTracker, - List> taskList, - Task childTask) { - Task replTxnTask = TaskFactory.get(new ReplTxnWork(actualDbName, actualTblName, + List> taskList, + Task childTask) { + Task replTxnTask = TaskFactory.get(new ReplTxnWork(actualDbName, actualTblName, ReplTxnWork.OperationType.REPL_MIGRATION_OPEN_TXN), conf); replTxnTask.addDependentTask(childTask); updatedMetaDataTracker.setNeedCommitTxn(true); 
     taskList.add(replTxnTask);
   }
 
-  public static List<Task<? extends Serializable>> addOpenTxnTaskForMigration(String actualDbName,
+  public static List<Task<?>> addOpenTxnTaskForMigration(String actualDbName,
                                  String actualTblName, HiveConf conf, UpdatedMetaDataTracker updatedMetaDataTracker,
-                                 Task<? extends Serializable> childTask,
+                                 Task<?> childTask,
                                  org.apache.hadoop.hive.metastore.api.Table tableObj) throws IOException, TException {
-    List<Task<? extends Serializable>> taskList = new ArrayList<>();
+    List<Task<?>> taskList = new ArrayList<>();
     taskList.add(childTask);
     if (isTableMigratingToTransactional(conf, tableObj) && updatedMetaDataTracker != null) {
       addOpenTxnTaskForMigration(actualDbName, actualTblName, conf, updatedMetaDataTracker,
@@ -210,13 +210,13 @@ private static void addOpenTxnTaskForMigration(String actualDbName, String actua
     return taskList;
   }
 
-  public static List<Task<? extends Serializable>> addTasksForLoadingColStats(ColumnStatistics colStats,
+  public static List<Task<?>> addTasksForLoadingColStats(ColumnStatistics colStats,
                                  HiveConf conf, UpdatedMetaDataTracker updatedMetadata,
                                  org.apache.hadoop.hive.metastore.api.Table tableObj,
                                  long writeId) throws IOException, TException {
-    List<Task<? extends Serializable>> taskList = new ArrayList<>();
+    List<Task<?>> taskList = new ArrayList<>();
     boolean isMigratingToTxn = ReplUtils.isTableMigratingToTransactional(conf, tableObj);
     ColumnStatsUpdateWork work = new ColumnStatsUpdateWork(colStats, isMigratingToTxn);
     work.setWriteId(writeId);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/TaskTracker.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/TaskTracker.java
index 20ede9c406..83aea8b3c6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/TaskTracker.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/TaskTracker.java
@@ -39,7 +39,7 @@
    * used to identify the list of tasks at root level for a given level like table / db / partition.
    * this does not include the task dependency notion of "table tasks < ---- partition task"
    */
-  private final List<Task<? extends Serializable>> tasks = new ArrayList<>();
+  private final List<Task<?>> tasks = new ArrayList<>();
   private ReplicationState replicationState = null;
   // since tasks themselves can be graphs we want to limit the number of created
   // tasks including all of dependencies.
@@ -59,16 +59,16 @@ public TaskTracker(TaskTracker existing) {
    * the graph however might get created in a disjoint fashion, in which case we can just update
    * the number of tasks using the "update" method.
    */
-  public void addTask(Task<? extends Serializable> task) {
+  public void addTask(Task<?> task) {
     tasks.add(task);
-    List <Task<? extends Serializable>> visited = new ArrayList<>();
+    List <Task<?>> visited = new ArrayList<>();
     updateTaskCount(task, visited);
   }
 
-  public void addTaskList(List <Task<? extends Serializable>> taskList) {
-    List <Task<? extends Serializable>> visited = new ArrayList<>();
-    for (Task<? extends Serializable> task : taskList) {
+  public void addTaskList(List <Task<?>> taskList) {
+    List <Task<?>> visited = new ArrayList<>();
+    for (Task<?> task : taskList) {
       if (!visited.contains(task)) {
         tasks.add(task);
         updateTaskCount(task, visited);
@@ -78,23 +78,23 @@ public void addTaskList(List <Task<? extends Serializable>> taskList) {
   // This method is used to traverse the DAG created in tasks list and add the dependent task to
   // the tail of each task chain.
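Before the diff continues with addDependentTask itself, a minimal standalone sketch of the leaf-append pattern that comment describes: walk the existing task chains and hang the new task off every leaf. The SimpleTask class below is hypothetical and only stands in for Hive's Task<?> DAG; it is not the project's API.

    import java.util.ArrayDeque;
    import java.util.ArrayList;
    import java.util.Deque;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    class SimpleTask {
      final String id;
      final List<SimpleTask> children = new ArrayList<>();

      SimpleTask(String id) { this.id = id; }

      void addDependent(SimpleTask t) { children.add(t); }
    }

    public class AddDependencyToLeavesSketch {
      /** Attach {@code dependent} to every leaf reachable from {@code roots}. */
      static void addDependentToLeaves(List<SimpleTask> roots, SimpleTask dependent) {
        Deque<SimpleTask> pending = new ArrayDeque<>(roots);
        Set<SimpleTask> visited = new HashSet<>();
        while (!pending.isEmpty()) {
          SimpleTask current = pending.poll();
          if (current == dependent || !visited.add(current)) {
            continue; // never process the dependent itself, and skip already-seen nodes
          }
          if (current.children.isEmpty()) {
            current.addDependent(dependent); // a leaf: chain the new task after it
          } else {
            pending.addAll(current.children);
          }
        }
      }

      public static void main(String[] args) {
        SimpleTask a = new SimpleTask("a");
        SimpleTask b = new SimpleTask("b");
        a.addDependent(b);                        // a -> b
        SimpleTask barrier = new SimpleTask("barrier");
        addDependentToLeaves(List.of(a), barrier);
        System.out.println(b.children.get(0).id); // prints "barrier"
      }
    }

The visited set plays the same role as the guard in the tracker code: when chains share children, the same node is neither re-wired nor counted twice.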
-  public void addDependentTask(Task<? extends Serializable> dependent) {
+  public void addDependentTask(Task<?> dependent) {
     if (tasks.isEmpty()) {
       addTask(dependent);
     } else {
       DAGTraversal.traverse(tasks, new AddDependencyToLeaves(dependent));
 
-      List<Task<? extends Serializable>> visited = new ArrayList<>();
+      List<Task<?>> visited = new ArrayList<>();
       updateTaskCount(dependent, visited);
     }
   }
 
-  private void updateTaskCount(Task<? extends Serializable> task,
-                               List <Task<? extends Serializable>> visited) {
+  private void updateTaskCount(Task<?> task,
+                               List <Task<?>> visited) {
     numberOfTasks += 1;
     visited.add(task);
     if (task.getChildTasks() != null) {
-      for (Task<? extends Serializable> childTask : task.getChildTasks()) {
+      for (Task<?> childTask : task.getChildTasks()) {
         if (visited.contains(childTask)) {
           continue;
         }
@@ -130,7 +130,7 @@ public ReplicationState replicationState() {
     return replicationState;
   }
 
-  public List<Task<? extends Serializable>> tasks() {
+  public List<Task<?>> tasks() {
     return tasks;
   }
 
@@ -142,4 +142,4 @@ public void debugLog(String forEventType) {
   public int numberOfTasks() {
     return numberOfTasks;
   }
-}
\ No newline at end of file
+}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/util/DAGTraversal.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/util/DAGTraversal.java
index 40f5f55ccb..ae49def829 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/util/DAGTraversal.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/util/DAGTraversal.java
@@ -32,8 +32,8 @@
  * stack overflow's, hence iteration based.
  */
 public class DAGTraversal {
-  public static void traverse(List<Task<? extends Serializable>> tasks, Function function) {
-    List<Task<? extends Serializable>> listOfTasks = new ArrayList<>(tasks);
+  public static void traverse(List<Task<?>> tasks, Function function) {
+    List<Task<?>> listOfTasks = new ArrayList<>(tasks);
     while (!listOfTasks.isEmpty()) {
       // HashSet will make sure that no duplicate children are added. If a task is added multiple
       // time to the children list then it may cause the list to grow exponentially. Lets take an example of
@@ -49,8 +49,8 @@ public static void traverse(List<Task<? extends Serializable>> tasks, Function f
      }
      // the children list and in next iteration ev2.task1 will be added 3 times and ev2.task2 will be added
      // 3 times. So in next iteration ev2.barrierTask will be added 6 times. As it goes like this, the next barrier
      // task will be added 12-15 times and may reaches millions with large number of events.
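A tiny self-contained illustration of the de-duplicated, level-by-level traversal that comment motivates, assuming a hypothetical string-keyed DAG instead of Hive's Task objects: collecting the next frontier in a Set rather than a List is what keeps a shared barrier task from being revisited an exponential number of times.

    import java.util.HashSet;
    import java.util.LinkedHashSet;
    import java.util.List;
    import java.util.Map;
    import java.util.Set;

    public class DedupTraversalSketch {
      public static void main(String[] args) {
        // ev1.task1 -> ev1.task2 -> barrier1 -> ev2.task1 -> ev2.task2 -> barrier2
        Map<String, List<String>> children = Map.of(
            "ev1.task1", List.of("ev1.task2"),
            "ev1.task2", List.of("barrier1"),
            "barrier1", List.of("ev2.task1", "ev2.task1"),   // duplicate edge on purpose
            "ev2.task1", List.of("ev2.task2"),
            "ev2.task2", List.of("barrier2"),
            "barrier2", List.of());

        Set<String> level = new LinkedHashSet<>(List.of("ev1.task1"));
        Set<String> processed = new HashSet<>();
        while (!level.isEmpty()) {
          Set<String> next = new LinkedHashSet<>();  // a set, not a list: duplicates collapse here
          for (String task : level) {
            if (processed.add(task)) {
              System.out.println("process " + task); // each task is handled exactly once
            }
            next.addAll(children.getOrDefault(task, List.of()));
          }
          level = next;
        }
      }
    }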
- Set> children = new HashSet<>(); - for (Task task : listOfTasks) { + Set> children = new HashSet<>(); + for (Task task : listOfTasks) { // skip processing has to be done first before continuing if (function.skipProcessing(task)) { continue; @@ -69,8 +69,8 @@ public static void traverse(List> tasks, Function f } public interface Function { - void process(Task task); + void process(Task task); - boolean skipProcessing(Task task); + boolean skipProcessing(Task task); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistory.java b/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistory.java index 327628f8a0..33bf6a969c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistory.java @@ -172,7 +172,7 @@ public void setTaskProperty(String queryId, String taskId, Keys propName, * * @param task */ - public void startTask(String queryId, Task task, + public void startTask(String queryId, Task task, String taskName); /** @@ -180,7 +180,7 @@ public void startTask(String queryId, Task task, * * @param task */ - public void endTask(String queryId, Task task); + public void endTask(String queryId, Task task); /** * Logs progress of a task if ConfVars.HIVE_LOG_INCREMENTAL_PLAN_PROGRESS is @@ -188,7 +188,7 @@ public void startTask(String queryId, Task task, * * @param task */ - public void progressTask(String queryId, Task task); + public void progressTask(String queryId, Task task); /** diff --git a/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistoryImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistoryImpl.java index 0bfa78dda4..80eaf001b5 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistoryImpl.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistoryImpl.java @@ -263,7 +263,7 @@ public void endQuery(String queryId) { } @Override - public void startTask(String queryId, Task task, + public void startTask(String queryId, Task task, String taskName) { TaskInfo ti = new TaskInfo(); @@ -279,7 +279,7 @@ public void startTask(String queryId, Task task, } @Override - public void endTask(String queryId, Task task) { + public void endTask(String queryId, Task task) { String id = queryId + ":" + task.getId(); TaskInfo ti = taskInfoMap.get(id); @@ -291,7 +291,7 @@ public void endTask(String queryId, Task task) { } @Override - public void progressTask(String queryId, Task task) { + public void progressTask(String queryId, Task task) { String id = queryId + ":" + task.getId(); TaskInfo ti = taskInfoMap.get(id); if (ti == null) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/NoOperatorReuseCheckerHook.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/NoOperatorReuseCheckerHook.java index 494459abd7..445c970885 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/NoOperatorReuseCheckerHook.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/NoOperatorReuseCheckerHook.java @@ -74,8 +74,8 @@ public void run(HookContext hookContext) throws Exception { List rootOps = Lists.newArrayList(); - ArrayList> roots = hookContext.getQueryPlan().getRootTasks(); - for (Task task : roots) { + ArrayList> roots = hookContext.getQueryPlan().getRootTasks(); + for (Task task : roots) { Object work = task.getWork(); if (work instanceof MapredWork) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lib/TaskGraphWalker.java b/ql/src/java/org/apache/hadoop/hive/ql/lib/TaskGraphWalker.java index edb7716b02..23cdb625c0 100644 --- 
a/ql/src/java/org/apache/hadoop/hive/ql/lib/TaskGraphWalker.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/lib/TaskGraphWalker.java @@ -148,31 +148,31 @@ public void walk(Node nd) throws SemanticException { opStack.push(nd); } - List> nextTaskList = null; - Set> nextTaskSet = new HashSet>(); - List> taskListInConditionalTask = null; + List> nextTaskList = null; + Set> nextTaskSet = new HashSet>(); + List> taskListInConditionalTask = null; if(nd instanceof ConditionalTask ){ //for conditional task, next task list should return the children tasks of each task, which //is contained in the conditional task. taskListInConditionalTask = ((ConditionalTask) nd).getListTasks(); - for(Task tsk: taskListInConditionalTask){ - List> childTask = tsk.getChildTasks(); + for(Task tsk: taskListInConditionalTask){ + List> childTask = tsk.getChildTasks(); if(childTask != null){ nextTaskSet.addAll(tsk.getChildTasks()); } } //convert the set into list if(nextTaskSet.size()>0){ - nextTaskList = new ArrayList>(); - for(Task tsk:nextTaskSet ){ + nextTaskList = new ArrayList>(); + for(Task tsk:nextTaskSet ){ nextTaskList.add(tsk); } } }else{ //for other tasks, just return its children tasks - nextTaskList = ((Task)nd).getChildTasks(); + nextTaskList = ((Task)nd).getChildTasks(); } if ((nextTaskList == null) diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java index 25c6b24f46..10a0405eee 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java @@ -74,7 +74,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx opProcCtx, Map, GenMapRedCtx> mapCurrCtx = ctx .getMapCurrCtx(); GenMapRedCtx mapredCtx = mapCurrCtx.get(fsOp.getParentOperators().get(0)); - Task currTask = mapredCtx.getCurrTask(); + Task currTask = mapredCtx.getCurrTask(); ctx.setCurrTask(currTask); ctx.addRootIfPossible(currTask); @@ -88,9 +88,9 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx opProcCtx, // If this file sink desc has been processed due to a linked file sink desc, // use that task - Map> fileSinkDescs = ctx.getLinkedFileDescTasks(); + Map> fileSinkDescs = ctx.getLinkedFileDescTasks(); if (fileSinkDescs != null) { - Task childTask = fileSinkDescs.get(fsOp.getConf()); + Task childTask = fileSinkDescs.get(fsOp.getConf()); processLinkedFileDesc(ctx, childTask); return true; } @@ -119,10 +119,10 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx opProcCtx, // There are linked file sink operators and child tasks are present if (fileSinkDesc.isLinkedFileSink() && (currTask.getChildTasks() != null) && (currTask.getChildTasks().size() == 1)) { - Map> linkedFileDescTasks = + Map> linkedFileDescTasks = ctx.getLinkedFileDescTasks(); if (linkedFileDescTasks == null) { - linkedFileDescTasks = new HashMap>(); + linkedFileDescTasks = new HashMap>(); ctx.setLinkedFileDescTasks(linkedFileDescTasks); } @@ -145,8 +145,8 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx opProcCtx, * Use the task created by the first linked file descriptor */ private void processLinkedFileDesc(GenMRProcContext ctx, - Task childTask) throws SemanticException { - Task currTask = ctx.getCurrTask(); + Task childTask) throws SemanticException { + Task currTask = ctx.getCurrTask(); TableScanOperator currTopOp = ctx.getCurrTopOp(); if (currTopOp != null && !ctx.isSeenOp(currTask, currTopOp)) { String currAliasId = ctx.getCurrAliasId(); @@ 
-176,7 +176,7 @@ private Path processFS(FileSinkOperator fsOp, Stack stack, NodeProcessorCtx opProcCtx, boolean chDir) throws SemanticException { GenMRProcContext ctx = (GenMRProcContext) opProcCtx; - Task currTask = ctx.getCurrTask(); + Task currTask = ctx.getCurrTask(); // If the directory needs to be changed, send the new directory Path dest = null; @@ -195,7 +195,7 @@ private Path processFS(FileSinkOperator fsOp, Stack stack, TableScanOperator currTopOp = ctx.getCurrTopOp(); String currAliasId = ctx.getCurrAliasId(); - HashMap, Task> opTaskMap = + HashMap, Task> opTaskMap = ctx.getOpTaskMap(); // In case of multi-table insert, the path to alias mapping is needed for @@ -203,7 +203,7 @@ private Path processFS(FileSinkOperator fsOp, Stack stack, // reducer, treat it as a plan with null reducer // If it is a map-only job, the task needs to be processed if (currTopOp != null) { - Task mapTask = opTaskMap.get(null); + Task mapTask = opTaskMap.get(null); if (mapTask == null) { if (!ctx.isSeenOp(currTask, currTopOp)) { GenMapRedUtils.setTaskPlan(currAliasId, currTopOp, currTask, false, ctx); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRProcContext.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRProcContext.java index a6e2f53b48..bbda668c08 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRProcContext.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRProcContext.java @@ -56,7 +56,7 @@ * GenMapRedCtx is used to keep track of the current state. */ public static class GenMapRedCtx { - Task currTask; + Task currTask; String currAliasId; public GenMapRedCtx() { @@ -67,7 +67,7 @@ public GenMapRedCtx() { * the current task * @param currAliasId */ - public GenMapRedCtx(Task currTask, String currAliasId) { + public GenMapRedCtx(Task currTask, String currAliasId) { this.currTask = currTask; this.currAliasId = currAliasId; } @@ -75,7 +75,7 @@ public GenMapRedCtx(Task currTask, String currAliasId) { /** * @return current task */ - public Task getCurrTask() { + public Task getCurrTask() { return currTask; } @@ -92,19 +92,19 @@ public String getCurrAliasId() { * */ public static class GenMRUnionCtx { - final Task uTask; + final Task uTask; List taskTmpDir; List tt_desc; List listTopOperators; - public GenMRUnionCtx(Task uTask) { + public GenMRUnionCtx(Task uTask) { this.uTask = uTask; taskTmpDir = new ArrayList(); tt_desc = new ArrayList(); listTopOperators = new ArrayList<>(); } - public Task getUTask() { + public Task getUTask() { return uTask; } @@ -135,19 +135,19 @@ public void addListTopOperators(TableScanOperator topOperator) { private HiveConf conf; private - HashMap, Task> opTaskMap; + HashMap, Task> opTaskMap; private - HashMap, List>> taskToSeenOps; + HashMap, List>> taskToSeenOps; private HashMap unionTaskMap; private List seenFileSinkOps; private ParseContext parseCtx; private List> mvTask; - private List> rootTasks; + private List> rootTasks; private LinkedHashMap, GenMapRedCtx> mapCurrCtx; - private Task currTask; + private Task currTask; private TableScanOperator currTopOp; private UnionOperator currUnionOp; private String currAliasId; @@ -155,7 +155,7 @@ public void addListTopOperators(TableScanOperator topOperator) { // If many fileSinkDescs are linked to each other, it is a good idea to keep track of // tasks for first fileSinkDesc. others can use it - private Map> linkedFileDescTasks; + private Map> linkedFileDescTasks; /** * Set of read entities. 
This list is generated by the walker and is passed to @@ -191,10 +191,10 @@ public GenMRProcContext() { */ public GenMRProcContext( HiveConf conf, - HashMap, Task> opTaskMap, + HashMap, Task> opTaskMap, ParseContext parseCtx, List> mvTask, - List> rootTasks, + List> rootTasks, LinkedHashMap, GenMapRedCtx> mapCurrCtx, Set inputs, Set outputs) { this.conf = conf; @@ -210,7 +210,7 @@ public GenMRProcContext( currUnionOp = null; currAliasId = null; unionTaskMap = new HashMap(); - taskToSeenOps = new HashMap, + taskToSeenOps = new HashMap, List>>(); dependencyTaskForMultiInsert = null; linkedFileDescTasks = null; @@ -231,7 +231,7 @@ public void reset() { * @return reducer to task mapping */ public HashMap, - Task> getOpTaskMap() { + Task> getOpTaskMap() { return opTaskMap; } @@ -240,7 +240,7 @@ public void reset() { * reducer to task mapping */ public void setOpTaskMap( - HashMap, Task> opTaskMap) { + HashMap, Task> opTaskMap) { this.opTaskMap = opTaskMap; } @@ -305,7 +305,7 @@ public void setMvTask(List> mvTask) { /** * @return root tasks for the plan */ - public List> getRootTasks() { + public List> getRootTasks() { return rootTasks; } @@ -313,11 +313,11 @@ public void setMvTask(List> mvTask) { * @param rootTasks * root tasks for the plan */ - public void setRootTasks(List> rootTasks) { + public void setRootTasks(List> rootTasks) { this.rootTasks = rootTasks; } - public boolean addRootIfPossible(Task task) { + public boolean addRootIfPossible(Task task) { if (task.getParentTasks() == null || task.getParentTasks().isEmpty()) { if (!rootTasks.contains(task)) { return rootTasks.add(task); @@ -345,7 +345,7 @@ public void setMapCurrCtx( /** * @return current task */ - public Task getCurrTask() { + public Task getCurrTask() { return currTask; } @@ -353,7 +353,7 @@ public void setMapCurrCtx( * @param currTask * current task */ - public void setCurrTask(Task currTask) { + public void setCurrTask(Task currTask) { this.currTask = currTask; } @@ -454,12 +454,12 @@ public DependencyCollectionTask getDependencyTaskForMultiInsert() { return dependencyTaskForMultiInsert; } - public Map> getLinkedFileDescTasks() { + public Map> getLinkedFileDescTasks() { return linkedFileDescTasks; } public void setLinkedFileDescTasks( - Map> linkedFileDescTasks) { + Map> linkedFileDescTasks) { this.linkedFileDescTasks = linkedFileDescTasks; } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink1.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink1.java index 8b30c8226c..033cbdc807 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink1.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink1.java @@ -61,7 +61,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx opProcCtx, Map, GenMapRedCtx> mapCurrCtx = ctx .getMapCurrCtx(); GenMapRedCtx mapredCtx = mapCurrCtx.get(stack.get(stack.size() - 2)); - Task currTask = mapredCtx.getCurrTask(); + Task currTask = mapredCtx.getCurrTask(); MapredWork currPlan = (MapredWork) currTask.getWork(); String currAliasId = mapredCtx.getCurrAliasId(); @@ -70,7 +70,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx opProcCtx, "But found multiple children : " + op.getChildOperators()); } Operator reducer = op.getChildOperators().get(0); - Task oldTask = ctx.getOpTaskMap().get(reducer); + Task oldTask = ctx.getOpTaskMap().get(reducer); ctx.setCurrAliasId(currAliasId); ctx.setCurrTask(currTask); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink2.java 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink2.java index 35cdc0402a..8c997c7754 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink2.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink2.java @@ -56,12 +56,12 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx opProcCtx, Map, GenMapRedCtx> mapCurrCtx = ctx .getMapCurrCtx(); GenMapRedCtx mapredCtx = mapCurrCtx.get(op.getParentOperators().get(0)); - Task currTask = mapredCtx.getCurrTask(); + Task currTask = mapredCtx.getCurrTask(); String currAliasId = mapredCtx.getCurrAliasId(); Operator reducer = op.getChildOperators().get(0); - Map, Task> opTaskMap = ctx + Map, Task> opTaskMap = ctx .getOpTaskMap(); - Task oldTask = opTaskMap.get(reducer); + Task oldTask = opTaskMap.get(reducer); ctx.setCurrAliasId(currAliasId); ctx.setCurrTask(currTask); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink3.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink3.java index fbab075a47..ed4bb30c08 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink3.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink3.java @@ -67,7 +67,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx opProcCtx, .getMapCurrCtx(); GenMapRedCtx mapredCtx = mapCurrCtx.get(union); - Task unionTask = null; + Task unionTask = null; if(mapredCtx != null) { unionTask = mapredCtx.getCurrTask(); } else { @@ -76,9 +76,9 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx opProcCtx, MapredWork plan = (MapredWork) unionTask.getWork(); - HashMap, Task> opTaskMap = ctx + HashMap, Task> opTaskMap = ctx .getOpTaskMap(); - Task reducerTask = opTaskMap.get(reducer); + Task reducerTask = opTaskMap.get(reducer); ctx.setCurrTask(unionTask); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRUnion1.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRUnion1.java index 8f01507a2f..abf363a348 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRUnion1.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRUnion1.java @@ -65,7 +65,7 @@ private Object processMapOnlyUnion(UnionOperator union, Stack stack, GenMRUnionCtx uCtxTask = ctx.getUnionTask(union); if (uCtxTask != null) { // get task associated with this union - Task uTask = ctx.getUnionTask(union).getUTask(); + Task uTask = ctx.getUnionTask(union).getUTask(); if (uTask != null) { if (ctx.getCurrTask() != null && ctx.getCurrTask() != uTask) { // if ctx.getCurrTask() is in rootTasks, should be removed @@ -88,7 +88,7 @@ private Object processMapOnlyUnion(UnionOperator union, Stack stack, ctx.setUnionTask(union, uCtxTask); } - Task uTask = ctx.getCurrTask(); + Task uTask = ctx.getCurrTask(); if (uTask.getParentTasks() == null || uTask.getParentTasks().isEmpty()) { if (!ctx.getRootTasks().contains(uTask)) { @@ -115,7 +115,7 @@ private Object processMapOnlyUnion(UnionOperator union, Stack stack, private void processSubQueryUnionCreateIntermediate( Operator parent, Operator child, - Task uTask, GenMRProcContext ctx, + Task uTask, GenMRProcContext ctx, GenMRUnionCtx uCtxTask) { ParseContext parseCtx = ctx.getParseCtx(); @@ -141,7 +141,7 @@ private void processSubQueryUnionCreateIntermediate( // assembled in the union context and later used to initialize the union // plan - Task currTask = ctx.getCurrTask(); + Task currTask = ctx.getCurrTask(); currTask.addDependentTask(uTask); if (ctx.getRootTasks().contains(uTask)) { ctx.getRootTasks().remove(uTask); 
@@ -168,7 +168,7 @@ private void processSubQueryUnionMerge(GenMRProcContext ctx, throws SemanticException { // The current plan can be thrown away after being merged with the union // plan - Task uTask = uCtxTask.getUTask(); + Task uTask = uCtxTask.getUTask(); ctx.setCurrTask(uTask); TableScanOperator topOp = ctx.getCurrTopOp(); if (topOp != null && !ctx.isSeenOp(uTask, topOp)) { @@ -220,10 +220,10 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx opProcCtx, assert uPrsCtx != null; - Task currTask = ctx.getCurrTask(); + Task currTask = ctx.getCurrTask(); int pos = UnionProcFactory.getPositionParent(union, stack); - Task uTask = null; + Task uTask = null; MapredWork uPlan = null; // union is encountered for the first time @@ -272,7 +272,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx opProcCtx, } private boolean shouldBeRootTask( - Task currTask) { + Task currTask) { return currTask.getParentTasks() == null || (currTask.getParentTasks().size() == 0); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java index 5d6143d6a4..afbf80c02c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java @@ -152,9 +152,9 @@ public static void initPlan(ReduceSinkOperator op, GenMRProcContext opProcCtx) Map, GenMapRedCtx> mapCurrCtx = opProcCtx.getMapCurrCtx(); GenMapRedCtx mapredCtx = mapCurrCtx.get(op.getParentOperators().get(0)); - Task currTask = mapredCtx.getCurrTask(); + Task currTask = mapredCtx.getCurrTask(); MapredWork plan = (MapredWork) currTask.getWork(); - HashMap, Task> opTaskMap = + HashMap, Task> opTaskMap = opProcCtx.getOpTaskMap(); TableScanOperator currTopOp = opProcCtx.getCurrTopOp(); @@ -195,11 +195,11 @@ public static void initPlan(ReduceSinkOperator op, GenMRProcContext opProcCtx) */ public static void initUnionPlan(ReduceSinkOperator op, UnionOperator currUnionOp, GenMRProcContext opProcCtx, - Task unionTask) throws SemanticException { + Task unionTask) throws SemanticException { Operator reducer = op.getChildOperators().get(0); MapredWork plan = (MapredWork) unionTask.getWork(); - HashMap, Task> opTaskMap = + HashMap, Task> opTaskMap = opProcCtx.getOpTaskMap(); opTaskMap.put(reducer, unionTask); @@ -219,7 +219,7 @@ public static void initUnionPlan(ReduceSinkOperator op, UnionOperator currUnionO } private static void setUnionPlan(GenMRProcContext opProcCtx, - boolean local, Task currTask, GenMRUnionCtx uCtx, + boolean local, Task currTask, GenMRUnionCtx uCtx, boolean mergeTask) throws SemanticException { TableScanOperator currTopOp = opProcCtx.getCurrTopOp(); @@ -265,7 +265,7 @@ private static void setUnionPlan(GenMRProcContext opProcCtx, * for the union. The plan has already been created. 
*/ public static void initUnionPlan(GenMRProcContext opProcCtx, UnionOperator currUnionOp, - Task currTask, boolean local) + Task currTask, boolean local) throws SemanticException { // In case of lateral views followed by a join, the same tree // can be traversed more than one @@ -281,8 +281,8 @@ public static void initUnionPlan(GenMRProcContext opProcCtx, UnionOperator currU */ public static void joinUnionPlan(GenMRProcContext opProcCtx, UnionOperator currUnionOp, - Task currentUnionTask, - Task existingTask, boolean local) + Task currentUnionTask, + Task existingTask, boolean local) throws SemanticException { assert currUnionOp != null; GenMRUnionCtx uCtx = opProcCtx.getUnionTask(currUnionOp); @@ -290,7 +290,7 @@ public static void joinUnionPlan(GenMRProcContext opProcCtx, setUnionPlan(opProcCtx, local, existingTask, uCtx, true); - List> parTasks = null; + List> parTasks = null; if (opProcCtx.getRootTasks().contains(currentUnionTask)) { opProcCtx.getRootTasks().remove(currentUnionTask); if (!opProcCtx.getRootTasks().contains(existingTask) && @@ -301,17 +301,17 @@ public static void joinUnionPlan(GenMRProcContext opProcCtx, if ((currentUnionTask != null) && (currentUnionTask.getParentTasks() != null) && !currentUnionTask.getParentTasks().isEmpty()) { - parTasks = new ArrayList>(); + parTasks = new ArrayList>(); parTasks.addAll(currentUnionTask.getParentTasks()); Object[] parTaskArr = parTasks.toArray(); for (Object parTask : parTaskArr) { - ((Task) parTask) + ((Task) parTask) .removeDependentTask(currentUnionTask); } } if ((currentUnionTask != null) && (parTasks != null)) { - for (Task parTask : parTasks) { + for (Task parTask : parTasks) { parTask.addDependentTask(existingTask); if (opProcCtx.getRootTasks().contains(existingTask)) { opProcCtx.getRootTasks().remove(existingTask); @@ -332,22 +332,22 @@ public static void joinUnionPlan(GenMRProcContext opProcCtx, * @param opProcCtx * processing context */ - public static void joinPlan(Task currTask, - Task oldTask, GenMRProcContext opProcCtx) + public static void joinPlan(Task currTask, + Task oldTask, GenMRProcContext opProcCtx) throws SemanticException { assert currTask != null && oldTask != null; TableScanOperator currTopOp = opProcCtx.getCurrTopOp(); - List> parTasks = null; + List> parTasks = null; // terminate the old task and make current task dependent on it if (currTask.getParentTasks() != null && !currTask.getParentTasks().isEmpty()) { - parTasks = new ArrayList>(); + parTasks = new ArrayList>(); parTasks.addAll(currTask.getParentTasks()); Object[] parTaskArr = parTasks.toArray(); for (Object element : parTaskArr) { - ((Task) element).removeDependentTask(currTask); + ((Task) element).removeDependentTask(currTask); } } @@ -356,7 +356,7 @@ public static void joinPlan(Task currTask, } if (parTasks != null) { - for (Task parTask : parTasks) { + for (Task parTask : parTasks) { parTask.addDependentTask(oldTask); } } @@ -374,7 +374,7 @@ public static void joinPlan(Task currTask, * If currTopOp is not set for input of the task, add input for to the task */ static boolean mergeInput(TableScanOperator currTopOp, - GenMRProcContext opProcCtx, Task task, boolean local) + GenMRProcContext opProcCtx, Task task, boolean local) throws SemanticException { if (!opProcCtx.isSeenOp(task, currTopOp)) { String currAliasId = opProcCtx.getCurrAliasId(); @@ -389,7 +389,7 @@ static boolean mergeInput(TableScanOperator currTopOp, * Split and link two tasks by temporary file : pRS-FS / TS-cRS-OP */ static void splitPlan(ReduceSinkOperator cRS, - Task 
parentTask, Task childTask, + Task parentTask, Task childTask, GenMRProcContext opProcCtx) throws SemanticException { assert parentTask != null && childTask != null; splitTasks(cRS, parentTask, childTask, opProcCtx); @@ -408,10 +408,10 @@ static void splitPlan(ReduceSinkOperator cRS, GenMRProcContext opProcCtx) throws SemanticException { // Generate a new task ParseContext parseCtx = opProcCtx.getParseCtx(); - Task parentTask = opProcCtx.getCurrTask(); + Task parentTask = opProcCtx.getCurrTask(); MapredWork childPlan = getMapRedWork(parseCtx); - Task childTask = TaskFactory.get(childPlan); + Task childTask = TaskFactory.get(childPlan); Operator reducer = cRS.getChildOperators().get(0); // Add the reducer @@ -850,12 +850,12 @@ public static void setKeyAndValueDesc(ReduceWork plan, * * @param task */ - public static void setKeyAndValueDescForTaskTree(Task task) { + public static void setKeyAndValueDescForTaskTree(Task task) { if (task instanceof ConditionalTask) { - List> listTasks = ((ConditionalTask) task) + List> listTasks = ((ConditionalTask) task) .getListTasks(); - for (Task tsk : listTasks) { + for (Task tsk : listTasks) { setKeyAndValueDescForTaskTree(tsk); } } else if (task instanceof ExecDriver) { @@ -887,7 +887,7 @@ public static void setKeyAndValueDescForTaskTree(Task ta return; } - for (Task childTask : task.getChildTasks()) { + for (Task childTask : task.getChildTasks()) { setKeyAndValueDescForTaskTree(childTask); } } @@ -919,7 +919,7 @@ public static void setKeyAndValueDescForTaskTree(Task ta * for an older release will also require picking HIVE-17195 at the least. */ public static void finalMapWorkChores( - List> tasks, Configuration conf, + List> tasks, Configuration conf, Interner interner) { List mrTasks = Utilities.getMRTasks(tasks); if (!mrTasks.isEmpty()) { @@ -1062,7 +1062,7 @@ public static TableScanOperator createTemporaryFile( * @param opProcCtx context **/ private static void splitTasks(ReduceSinkOperator op, - Task parentTask, Task childTask, + Task parentTask, Task childTask, GenMRProcContext opProcCtx) throws SemanticException { if (op.getNumParent() != 1) { throw new IllegalStateException("Expecting operator " + op + " to have one parent. 
" + @@ -1074,7 +1074,7 @@ private static void splitTasks(ReduceSinkOperator op, // Root Task cannot depend on any other task, therefore childTask cannot be // a root Task - List> rootTasks = opProcCtx.getRootTasks(); + List> rootTasks = opProcCtx.getRootTasks(); if (rootTasks.contains(childTask)) { rootTasks.remove(childTask); } @@ -1262,7 +1262,7 @@ public static void replaceMapWork(String sourceAlias, String targetAlias, public static void createMRWorkForMergingFiles(FileSinkOperator fsInput, Path finalName, DependencyCollectionTask dependencyTask, List> mvTasks, HiveConf conf, - Task currTask, LineageState lineageState) + Task currTask, LineageState lineageState) throws SemanticException { // @@ -1404,7 +1404,7 @@ public static void createMRWorkForMergingFiles(FileSinkOperator fsInput, * @param dependencyTask */ private static void linkMoveTask(Task mvTask, - Task task, HiveConf hconf, + Task task, HiveConf hconf, DependencyCollectionTask dependencyTask) { if (task.getDependentTasks() == null || task.getDependentTasks().isEmpty()) { @@ -1412,7 +1412,7 @@ private static void linkMoveTask(Task mvTask, addDependentMoveTasks(mvTask, hconf, task, dependencyTask); } else { // Otherwise, for each child run this method recursively - for (Task childTask : task.getDependentTasks()) { + for (Task childTask : task.getDependentTasks()) { linkMoveTask(mvTask, childTask, hconf, dependencyTask); } } @@ -1430,7 +1430,7 @@ private static void linkMoveTask(Task mvTask, * @param dependencyTask */ public static void addDependentMoveTasks(Task mvTask, HiveConf hconf, - Task parentTask, DependencyCollectionTask dependencyTask) { + Task parentTask, DependencyCollectionTask dependencyTask) { if (mvTask != null) { if (dependencyTask != null) { @@ -1483,7 +1483,7 @@ private static Path getTableLocationPath(final HiveConf hconf, final TableDesc t * HiveConf */ public static void addStatsTask(FileSinkOperator nd, MoveTask mvTask, - Task currTask, HiveConf hconf) { + Task currTask, HiveConf hconf) { MoveWork mvWork = mvTask.getWork(); BasicStatsWork statsWork = null; @@ -1552,7 +1552,7 @@ public static void addStatsTask(FileSinkOperator nd, MoveTask mvTask, columnStatsWork.truncateExisting(truncate); columnStatsWork.setSourceTask(currTask); - Task statsTask = TaskFactory.get(columnStatsWork); + Task statsTask = TaskFactory.get(columnStatsWork); // subscribe feeds from the MoveTask so that MoveTask can forward the list // of dynamic partition list to the StatsTask @@ -1780,7 +1780,7 @@ protected static MoveWork mergeMovePaths(Path condInputPath, MoveWork linkedMove */ @SuppressWarnings("unchecked") private static ConditionalTask createCondTask(HiveConf conf, - Task currTask, MoveWork mvWork, Serializable mergeWork, + Task currTask, MoveWork mvWork, Serializable mergeWork, Path condInputPath, Path condOutputPath, Task moveTaskToLink, DependencyCollectionTask dependencyTask, LineageState lineageState) { if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) { @@ -1807,10 +1807,10 @@ private static ConditionalTask createCondTask(HiveConf conf, // conflicts. // TODO: if we are not dealing with concatenate DDL, we should not create a merge+move path // because it should be impossible to get incompatible outputs. 
- Task mergeOnlyMergeTask = TaskFactory.get(mergeWork); - Task moveOnlyMoveTask = TaskFactory.get(workForMoveOnlyTask); - Task mergeAndMoveMergeTask = TaskFactory.get(mergeWork); - Task mergeAndMoveMoveTask = TaskFactory.get(moveWork); + Task mergeOnlyMergeTask = TaskFactory.get(mergeWork); + Task moveOnlyMoveTask = TaskFactory.get(workForMoveOnlyTask); + Task mergeAndMoveMergeTask = TaskFactory.get(mergeWork); + Task mergeAndMoveMoveTask = TaskFactory.get(moveWork); // NOTE! It is necessary merge task is the parent of the move task, and not // the other way around, for the proper execution of the execute method of @@ -1823,7 +1823,7 @@ private static ConditionalTask createCondTask(HiveConf conf, ConditionalWork cndWork = new ConditionalWork(listWorks); - List> listTasks = new ArrayList>(); + List> listTasks = new ArrayList>(); listTasks.add(moveOnlyMoveTask); listTasks.add(mergeOnlyMergeTask); listTasks.add(mergeAndMoveMergeTask); @@ -1902,7 +1902,7 @@ public static boolean isSkewedStoredAsDirs(FileSinkDesc fsInputDesc) { * Returns true iff the fsOp requires a merge */ public static boolean isMergeRequired(List> mvTasks, HiveConf hconf, - FileSinkOperator fsOp, Task currTask, boolean isInsertTable) { + FileSinkOperator fsOp, Task currTask, boolean isInsertTable) { // Has the user enabled merging of files for map-only jobs or for all jobs if (mvTasks == null || mvTasks.isEmpty()) { return false; @@ -1939,7 +1939,7 @@ public static boolean isMergeRequired(List> mvTasks, HiveConf hco } private static boolean isMergeRequiredForMr(HiveConf hconf, - FileSinkOperator fsOp, Task currTask) { + FileSinkOperator fsOp, Task currTask) { if (fsOp.getConf().isLinkedFileSink()) { // If the user has HIVEMERGEMAPREDFILES set to false, the idea was the // number of reducers are few, so the number of files anyway are small. 
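For orientation only: the run-time counterpart of the merge check above is a conditional resolver that looks at the average size of the files a job actually produced and only then schedules the extra merge stage. The sketch below is a simplified stand-in for that decision, not Hive's code; the method and threshold names are hypothetical.

    import java.util.List;

    public class MergeDecisionSketch {
      /** Schedule an extra merge stage when the average produced file is smaller than the threshold. */
      static boolean mergeRequired(List<Long> outputFileSizes, long avgSizeThreshold) {
        if (outputFileSizes.isEmpty()) {
          return false;                      // nothing was written, nothing to merge
        }
        long total = outputFileSizes.stream().mapToLong(Long::longValue).sum();
        long avg = total / outputFileSizes.size();
        return avg < avgSizeThreshold;       // many small files -> pay for one merge job now
      }

      public static void main(String[] args) {
        // e.g. a 16 MB average threshold and three small files produced by a map-only job
        System.out.println(mergeRequired(
            List.of(1_000_000L, 2_000_000L, 3_000_000L), 16_000_000L)); // true
      }
    }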
@@ -1977,7 +1977,7 @@ private static boolean isMergeRequiredForMr(HiveConf hconf, * @param dependencyTask * @return */ - public static Path createMoveTask(Task currTask, boolean chDir, + public static Path createMoveTask(Task currTask, boolean chDir, FileSinkOperator fsOp, ParseContext parseCtx, List> mvTasks, HiveConf hconf, DependencyCollectionTask dependencyTask) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinFactory.java index 6bf4deb0da..21d792e2ee 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinFactory.java @@ -146,7 +146,7 @@ public static void setupBucketMapJoinInfo(MapWork plan, * position of the parent */ private static void initMapJoinPlan(AbstractMapJoinOperator op, - Task currTask, + Task currTask, GenMRProcContext opProcCtx, boolean local) throws SemanticException { @@ -171,7 +171,7 @@ private static void initMapJoinPlan(AbstractMapJoinOperator oldTask, + private static void joinMapJoinPlan(Task oldTask, GenMRProcContext opProcCtx, boolean local) throws SemanticException { TableScanOperator currTopOp = opProcCtx.getCurrTopOp(); @@ -199,12 +199,12 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, Map, GenMapRedCtx> mapCurrCtx = ctx .getMapCurrCtx(); GenMapRedCtx mapredCtx = mapCurrCtx.get(mapJoin.getParentOperators().get(pos)); - Task currTask = mapredCtx.getCurrTask(); + Task currTask = mapredCtx.getCurrTask(); MapredWork currPlan = (MapredWork) currTask.getWork(); String currAliasId = mapredCtx.getCurrAliasId(); - HashMap, Task> opTaskMap = + HashMap, Task> opTaskMap = ctx.getOpTaskMap(); - Task oldTask = opTaskMap.get(mapJoin); + Task oldTask = opTaskMap.get(mapJoin); ctx.setCurrAliasId(currAliasId); ctx.setCurrTask(currTask); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/AbstractJoinTaskDispatcher.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/AbstractJoinTaskDispatcher.java index 4ac256762c..ba05ec7139 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/AbstractJoinTaskDispatcher.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/AbstractJoinTaskDispatcher.java @@ -46,18 +46,18 @@ public AbstractJoinTaskDispatcher(PhysicalContext context) { physicalContext = context; } - public abstract Task processCurrentTask(MapRedTask currTask, + public abstract Task processCurrentTask(MapRedTask currTask, ConditionalTask conditionalTask, Context context) throws SemanticException; protected void replaceTaskWithConditionalTask( - Task currTask, ConditionalTask cndTsk) { + Task currTask, ConditionalTask cndTsk) { // add this task into task tree // set all parent tasks - List> parentTasks = currTask.getParentTasks(); + List> parentTasks = currTask.getParentTasks(); currTask.setParentTasks(null); if (parentTasks != null) { - for (Task tsk : parentTasks) { + for (Task tsk : parentTasks) { // make new generated task depends on all the parent tasks of current task. 
tsk.addDependentTask(cndTsk); // remove the current task from its original parent task's dependent task @@ -69,13 +69,13 @@ protected void replaceTaskWithConditionalTask( physicalContext.addToRootTask(cndTsk); } // set all child tasks - List> oldChildTasks = currTask.getChildTasks(); + List> oldChildTasks = currTask.getChildTasks(); if (oldChildTasks != null) { - for (Task tsk : cndTsk.getListTasks()) { + for (Task tsk : cndTsk.getListTasks()) { if (tsk.equals(currTask)) { continue; } - for (Task oldChild : oldChildTasks) { + for (Task oldChild : oldChildTasks) { tsk.addDependentTask(oldChild); } } @@ -85,13 +85,13 @@ protected void replaceTaskWithConditionalTask( // Replace the task with the new task. Copy the children and parents of the old // task to the new task. protected void replaceTask( - Task currTask, Task newTask) { + Task currTask, Task newTask) { // add this task into task tree // set all parent tasks - List> parentTasks = currTask.getParentTasks(); + List> parentTasks = currTask.getParentTasks(); currTask.setParentTasks(null); if (parentTasks != null) { - for (Task tsk : parentTasks) { + for (Task tsk : parentTasks) { // remove the current task from its original parent task's dependent task tsk.removeDependentTask(currTask); // make new generated task depends on all the parent tasks of current task. @@ -104,10 +104,10 @@ protected void replaceTask( } // set all child tasks - List> oldChildTasks = currTask.getChildTasks(); + List> oldChildTasks = currTask.getChildTasks(); currTask.setChildTasks(null); if (oldChildTasks != null) { - for (Task tsk : oldChildTasks) { + for (Task tsk : oldChildTasks) { // remove the current task from its original parent task's dependent task tsk.getParentTasks().remove(currTask); // make new generated task depends on all the parent tasks of current task. @@ -160,21 +160,21 @@ public Object dispatch(Node nd, Stack stack, Object... nodeOutputs) TaskGraphWalkerContext walkerCtx = (TaskGraphWalkerContext) nodeOutputs[0]; - Task currTask = (Task) nd; + Task currTask = (Task) nd; // not map reduce task or not conditional task, just skip if (currTask.isMapRedTask()) { if (currTask instanceof ConditionalTask) { // get the list of task - List> taskList = ((ConditionalTask) currTask).getListTasks(); - for (Task tsk : taskList) { + List> taskList = ((ConditionalTask) currTask).getListTasks(); + for (Task tsk : taskList) { if (tsk.isMapRedTask()) { - Task newTask = this.processCurrentTask((MapRedTask) tsk, + Task newTask = this.processCurrentTask((MapRedTask) tsk, ((ConditionalTask) currTask), physicalContext.getContext()); walkerCtx.addToDispatchList(newTask); } } } else { - Task newTask = + Task newTask = this.processCurrentTask((MapRedTask) currTask, null, physicalContext.getContext()); walkerCtx.addToDispatchList(newTask); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/AnnotateRunTimeStatsOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/AnnotateRunTimeStatsOptimizer.java index c04b4717e7..2727e809af 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/AnnotateRunTimeStatsOptimizer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/AnnotateRunTimeStatsOptimizer.java @@ -68,7 +68,7 @@ public AnnotateRunTimeStatsDispatcher(PhysicalContext context, Map stack, Object... 
nodeOutputs) throws SemanticException { - Task currTask = (Task) nd; + Task currTask = (Task) nd; Set> ops = new HashSet<>(); if (currTask instanceof MapRedTask) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/CommonJoinTaskDispatcher.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/CommonJoinTaskDispatcher.java index 0d9d5e0169..9c4a0c24e7 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/CommonJoinTaskDispatcher.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/CommonJoinTaskDispatcher.java @@ -197,7 +197,7 @@ private void mergeMapJoinTaskIntoItsChildMapRedTask(MapRedTask mapJoinTask, Conf return; } - Task childTask = mapJoinTask.getChildTasks().get(0); + Task childTask = mapJoinTask.getChildTasks().get(0); if (!(childTask instanceof MapRedTask)) { // Nothing to do if it is not a MapReduce task. return; @@ -334,13 +334,13 @@ private void mergeMapJoinTaskIntoItsChildMapRedTask(MapRedTask mapJoinTask, Conf } // Step 2.4: Remove this MapJoin task - List> parentTasks = mapJoinTask.getParentTasks(); + List> parentTasks = mapJoinTask.getParentTasks(); mapJoinTask.setParentTasks(null); mapJoinTask.setChildTasks(null); childMapRedTask.getParentTasks().remove(mapJoinTask); if (parentTasks != null) { childMapRedTask.getParentTasks().addAll(parentTasks); - for (Task parentTask : parentTasks) { + for (Task parentTask : parentTasks) { parentTask.getChildTasks().remove(mapJoinTask); if (!parentTask.getChildTasks().contains(childMapRedTask)) { parentTask.getChildTasks().add(childMapRedTask); @@ -371,7 +371,7 @@ public static boolean cannotConvert(long aliasKnownSize, } @Override - public Task processCurrentTask(MapRedTask currTask, + public Task processCurrentTask(MapRedTask currTask, ConditionalTask conditionalTask, Context context) throws SemanticException { @@ -386,12 +386,12 @@ public static boolean cannotConvert(long aliasKnownSize, // create conditional work list and task list List listWorks = new ArrayList(); - List> listTasks = new ArrayList>(); + List> listTasks = new ArrayList>(); // create task to aliases mapping and alias to input file mapping for resolver // Must be deterministic order map for consistent q-test output across Java versions - HashMap, Set> taskToAliases = - new LinkedHashMap, Set>(); + HashMap, Set> taskToAliases = + new LinkedHashMap, Set>(); Map> pathToAliases = currWork.getPathToAliases(); Map> aliasToWork = currWork.getAliasToWork(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/CrossProductHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/CrossProductHandler.java index 71d060a295..50b9c2bb6a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/CrossProductHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/CrossProductHandler.java @@ -101,16 +101,16 @@ public PhysicalContext resolve(PhysicalContext pctx) throws SemanticException { public Object dispatch(Node nd, Stack stack, Object... 
nodeOutputs) throws SemanticException { @SuppressWarnings("unchecked") - Task currTask = (Task) nd; + Task currTask = (Task) nd; if (currTask instanceof MapRedTask) { MapRedTask mrTsk = (MapRedTask)currTask; MapredWork mrWrk = mrTsk.getWork(); checkMapJoins(mrTsk); checkMRReducer(currTask.toString(), mrWrk); } else if (currTask instanceof ConditionalTask ) { - List> taskListInConditionalTask = + List> taskListInConditionalTask = ((ConditionalTask) currTask).getListTasks(); - for(Task tsk: taskListInConditionalTask){ + for(Task tsk: taskListInConditionalTask){ dispatch(tsk, stack, nodeOutputs); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenMRSkewJoinProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenMRSkewJoinProcessor.java index f7cedfe3be..5be1329546 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenMRSkewJoinProcessor.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenMRSkewJoinProcessor.java @@ -108,7 +108,7 @@ private GenMRSkewJoinProcessor() { */ @SuppressWarnings("unchecked") public static void processSkewJoin(JoinOperator joinOp, - Task currTask, ParseContext parseCtx) + Task currTask, ParseContext parseCtx) throws SemanticException { // We are trying to adding map joins to handle skew keys, and map join right @@ -117,7 +117,7 @@ public static void processSkewJoin(JoinOperator joinOp, return; } - List> children = currTask.getChildTasks(); + List> children = currTask.getChildTasks(); Path baseTmpDir = parseCtx.getContext().getMRTmpPath(); @@ -149,10 +149,10 @@ public static void processSkewJoin(JoinOperator joinOp, joinDescriptor.setSkewKeyDefinition(HiveConf.getIntVar(parseCtx.getConf(), HiveConf.ConfVars.HIVESKEWJOINKEY)); - HashMap> bigKeysDirToTaskMap = - new HashMap>(); + HashMap> bigKeysDirToTaskMap = + new HashMap>(); List listWorks = new ArrayList(); - List> listTasks = new ArrayList>(); + List> listTasks = new ArrayList>(); MapredWork currPlan = (MapredWork) currTask.getWork(); TableDesc keyTblDesc = (TableDesc) currPlan.getReduceWork().getKeyDesc().clone(); @@ -331,7 +331,7 @@ public static void processSkewJoin(JoinOperator joinOp, MapredWork w = new MapredWork(); w.setMapWork(newPlan); - Task skewJoinMapJoinTask = TaskFactory.get(w); + Task skewJoinMapJoinTask = TaskFactory.get(w); skewJoinMapJoinTask.setFetchSource(currTask.isFetchSource()); bigKeysDirToTaskMap.put(bigKeyDirPath, skewJoinMapJoinTask); @@ -339,13 +339,13 @@ public static void processSkewJoin(JoinOperator joinOp, listTasks.add(skewJoinMapJoinTask); } if (children != null) { - for (Task tsk : listTasks) { - for (Task oldChild : children) { + for (Task tsk : listTasks) { + for (Task oldChild : children) { tsk.addDependentTask(oldChild); } } - currTask.setChildTasks(new ArrayList>()); - for (Task oldChild : children) { + currTask.setChildTasks(new ArrayList>()); + for (Task oldChild : children) { oldChild.getParentTasks().remove(currTask); } listTasks.addAll(children); @@ -358,7 +358,7 @@ public static void processSkewJoin(JoinOperator joinOp, cndTsk.setListTasks(listTasks); cndTsk.setResolver(new ConditionalResolverSkewJoin()); cndTsk.setResolverCtx(context); - currTask.setChildTasks(new ArrayList>()); + currTask.setChildTasks(new ArrayList>()); currTask.addDependentTask(cndTsk); return; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenSparkSkewJoinProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenSparkSkewJoinProcessor.java index 7f7f49ba8c..8f96fd62b2 100644 
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenSparkSkewJoinProcessor.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenSparkSkewJoinProcessor.java @@ -82,7 +82,7 @@ private GenSparkSkewJoinProcessor() { } @SuppressWarnings("unchecked") - public static void processSkewJoin(JoinOperator joinOp, Task currTask, + public static void processSkewJoin(JoinOperator joinOp, Task currTask, ReduceWork reduceWork, ParseContext parseCtx) throws SemanticException { SparkWork currentWork = ((SparkTask) currTask).getWork(); @@ -91,7 +91,7 @@ public static void processSkewJoin(JoinOperator joinOp, Task> children = currTask.getChildTasks(); + List> children = currTask.getChildTasks(); Path baseTmpDir = parseCtx.getContext().getMRTmpPath(); @@ -214,16 +214,16 @@ public static void processSkewJoin(JoinOperator joinOp, Task> bigKeysDirToTaskMap = - new HashMap>(); + HashMap> bigKeysDirToTaskMap = + new HashMap>(); List listWorks = new ArrayList(); - List> listTasks = new ArrayList>(); + List> listTasks = new ArrayList>(); for (int i = 0; i < numAliases - 1; i++) { Byte src = tags[i]; HiveConf hiveConf = new HiveConf(parseCtx.getConf(), GenSparkSkewJoinProcessor.class); SparkWork sparkWork = new SparkWork(parseCtx.getConf().getVar(HiveConf.ConfVars.HIVEQUERYID)); - Task skewJoinMapJoinTask = TaskFactory.get(sparkWork); + Task skewJoinMapJoinTask = TaskFactory.get(sparkWork); skewJoinMapJoinTask.setFetchSource(currTask.isFetchSource()); // create N TableScans @@ -328,17 +328,17 @@ public static void processSkewJoin(JoinOperator joinOp, Task tsk : listTasks) { - for (Task oldChild : children) { + for (Task tsk : listTasks) { + for (Task oldChild : children) { tsk.addDependentTask(oldChild); } } - currTask.setChildTasks(new ArrayList>()); - for (Task oldChild : children) { + currTask.setChildTasks(new ArrayList>()); + for (Task oldChild : children) { oldChild.getParentTasks().remove(currTask); } listTasks.addAll(children); - for (Task oldChild : children) { + for (Task oldChild : children) { listWorks.add(oldChild.getWork()); } } @@ -350,7 +350,7 @@ public static void processSkewJoin(JoinOperator joinOp, Task>()); + currTask.setChildTasks(new ArrayList>()); currTask.addDependentTask(cndTsk); } @@ -397,7 +397,7 @@ private static void insertSHTS(byte tag, TableScanOperator tableScan, MapWork bi hashTableSinkOp.getConf().setTag(tag); } - private static void setMemUsage(MapJoinOperator mapJoinOp, Task task, + private static void setMemUsage(MapJoinOperator mapJoinOp, Task task, ParseContext parseContext) { MapJoinResolver.LocalMapJoinProcCtx context = new MapJoinResolver.LocalMapJoinProcCtx(task, parseContext); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LlapDecider.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LlapDecider.java index 2f2f04f846..6c370f6e6b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LlapDecider.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LlapDecider.java @@ -139,7 +139,7 @@ public LlapDecisionDispatcher(PhysicalContext pctx, LlapMode mode) { public Object dispatch(Node nd, Stack stack, Object... 
nodeOutputs) throws SemanticException { @SuppressWarnings("unchecked") - Task currTask = (Task) nd; + Task currTask = (Task) nd; if (currTask instanceof TezTask) { TezWork work = ((TezTask) currTask).getWork(); for (BaseWork w: work.getAllWork()) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LlapPreVectorizationPass.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LlapPreVectorizationPass.java index ec066efae4..16fbe95e2e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LlapPreVectorizationPass.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LlapPreVectorizationPass.java @@ -82,7 +82,7 @@ public PhysicalContext resolve(PhysicalContext pctx) throws SemanticException { public Object dispatch(Node nd, Stack stack, Object... nodeOutputs) throws SemanticException { @SuppressWarnings("unchecked") - Task currTask = (Task) nd; + Task currTask = (Task) nd; if (currTask instanceof TezTask) { TezWork work = ((TezTask) currTask).getWork(); for (BaseWork w: work.getAllWork()) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/MapJoinResolver.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/MapJoinResolver.java index eac4768951..484369b007 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/MapJoinResolver.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/MapJoinResolver.java @@ -96,7 +96,7 @@ public LocalMapJoinTaskDispatcher(PhysicalContext context) { physicalContext = context; } - private void processCurrentTask(Task currTask, + private void processCurrentTask(Task currTask, ConditionalTask conditionalTask) throws SemanticException { // get current mapred work and its local work MapredWork mapredWork = (MapredWork) currTask.getWork(); @@ -145,10 +145,10 @@ private void processCurrentTask(Task currTask, } newLocalWork.setHasStagedAlias(true); // get all parent tasks - List> parentTasks = currTask.getParentTasks(); + List> parentTasks = currTask.getParentTasks(); currTask.setParentTasks(null); if (parentTasks != null) { - for (Task tsk : parentTasks) { + for (Task tsk : parentTasks) { // make new generated task depends on all the parent tasks of current task. 
tsk.addDependentTask(localTask); // remove the current task from its original parent task's dependent task @@ -162,7 +162,7 @@ private void processCurrentTask(Task currTask, physicalContext.removeFromRootTask(currTask); } else { // set list task - List> listTask = conditionalTask.getListTasks(); + List> listTask = conditionalTask.getListTasks(); ConditionalWork conditionalWork = conditionalTask.getWork(); int index = listTask.indexOf(currTask); listTask.set(index, localTask); @@ -176,14 +176,14 @@ private void processCurrentTask(Task currTask, // get bigKeysDirToTaskMap ConditionalResolverSkewJoinCtx context = (ConditionalResolverSkewJoinCtx) conditionalTask .getResolverCtx(); - HashMap> bigKeysDirToTaskMap = context + HashMap> bigKeysDirToTaskMap = context .getDirToTaskMap(); // to avoid concurrent modify the hashmap - HashMap> newbigKeysDirToTaskMap = new HashMap>(); + HashMap> newbigKeysDirToTaskMap = new HashMap>(); // reset the resolver - for (Map.Entry> entry : bigKeysDirToTaskMap + for (Map.Entry> entry : bigKeysDirToTaskMap .entrySet()) { - Task task = entry.getValue(); + Task task = entry.getValue(); Path key = entry.getKey(); if (task.equals(currTask)) { newbigKeysDirToTaskMap.put(key, localTask); @@ -197,14 +197,14 @@ private void processCurrentTask(Task currTask, // get bigKeysDirToTaskMap ConditionalResolverCommonJoinCtx context = (ConditionalResolverCommonJoinCtx) conditionalTask .getResolverCtx(); - HashMap, Set> taskToAliases = context.getTaskToAliases(); + HashMap, Set> taskToAliases = context.getTaskToAliases(); // to avoid concurrent modify the hashmap // Must be deterministic order map for consistent q-test output across Java versions - HashMap, Set> newTaskToAliases = - new LinkedHashMap, Set>(); + HashMap, Set> newTaskToAliases = + new LinkedHashMap, Set>(); // reset the resolver - for (Map.Entry, Set> entry : taskToAliases.entrySet()) { - Task task = entry.getKey(); + for (Map.Entry, Set> entry : taskToAliases.entrySet()) { + Task task = entry.getKey(); Set key = new HashSet(entry.getValue()); if (task.equals(currTask)) { @@ -227,13 +227,13 @@ private void processCurrentTask(Task currTask, @Override public Object dispatch(Node nd, Stack stack, Object... nodeOutputs) throws SemanticException { - Task currTask = (Task) nd; + Task currTask = (Task) nd; // not map reduce task or not conditional task, just skip if (currTask.isMapRedTask()) { if (currTask instanceof ConditionalTask) { // get the list of task - List> taskList = ((ConditionalTask) currTask).getListTasks(); - for (Task tsk : taskList) { + List> taskList = ((ConditionalTask) currTask).getListTasks(); + for (Task tsk : taskList) { if (tsk.isMapRedTask()) { this.processCurrentTask(tsk, ((ConditionalTask) currTask)); } @@ -278,14 +278,14 @@ public void setPhysicalContext(PhysicalContext physicalContext) { * A container of current task and parse context. 
*/ public static class LocalMapJoinProcCtx implements NodeProcessorCtx { - private Task currentTask; + private Task currentTask; private ParseContext parseCtx; private List> dummyParentOp = null; private boolean isFollowedByGroupBy; private Map>> directWorks; - public LocalMapJoinProcCtx(Task task, ParseContext parseCtx) { + public LocalMapJoinProcCtx(Task task, ParseContext parseCtx) { currentTask = task; this.parseCtx = parseCtx; dummyParentOp = new ArrayList>(); @@ -293,11 +293,11 @@ public LocalMapJoinProcCtx(Task task, ParseContext parse isFollowedByGroupBy = false; } - public Task getCurrentTask() { + public Task getCurrentTask() { return currentTask; } - public void setCurrentTask(Task currentTask) { + public void setCurrentTask(Task currentTask) { this.currentTask = currentTask; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/MemoryDecider.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/MemoryDecider.java index 64f1e7b830..11dba77728 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/MemoryDecider.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/MemoryDecider.java @@ -84,7 +84,7 @@ public MemoryCalculator(PhysicalContext pctx) { @Override public Object dispatch(Node nd, Stack stack, Object... nodeOutputs) throws SemanticException { - Task currTask = (Task) nd; + Task currTask = (Task) nd; if (currTask instanceof StatsTask) { currTask = ((StatsTask) currTask).getWork().getSourceTask(); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/NullScanTaskDispatcher.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/NullScanTaskDispatcher.java index b7dd90d4fe..2fb666da5e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/NullScanTaskDispatcher.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/NullScanTaskDispatcher.java @@ -168,7 +168,7 @@ private String encode(Map partSpec) { @Override public Object dispatch(Node nd, Stack stack, Object... 
    nodeOutputs) throws SemanticException {
-    Task<? extends Serializable> task = (Task<? extends Serializable>) nd;
+    Task<?> task = (Task<?>) nd;
 
     // create a the context for walking operators
     ParseContext parseContext = physicalContext.getParseContext();
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalContext.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalContext.java
index 321dcbe1b6..18e1ceb566 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalContext.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalContext.java
@@ -34,12 +34,12 @@
   protected HiveConf conf;
   private ParseContext parseContext;
   private Context context;
-  protected List<Task<? extends Serializable>> rootTasks;
-  protected Task<? extends Serializable> fetchTask;
+  protected List<Task<?>> rootTasks;
+  protected Task<?> fetchTask;
 
   public PhysicalContext(HiveConf conf, ParseContext parseContext,
-      Context context, List<Task<? extends Serializable>> rootTasks,
-      Task<? extends Serializable> fetchTask) {
+      Context context, List<Task<?>> rootTasks,
+      Task<?> fetchTask) {
     super();
     this.conf = conf;
     this.parseContext = parseContext;
@@ -72,27 +72,27 @@ public void setContext(Context context) {
     this.context = context;
   }
 
-  public List<Task<? extends Serializable>> getRootTasks() {
+  public List<Task<?>> getRootTasks() {
     return rootTasks;
   }
 
-  public void setRootTasks(List<Task<? extends Serializable>> rootTasks) {
+  public void setRootTasks(List<Task<?>> rootTasks) {
     this.rootTasks = rootTasks;
   }
 
-  public Task<? extends Serializable> getFetchTask() {
+  public Task<?> getFetchTask() {
     return fetchTask;
   }
 
-  public void setFetchTask(Task<? extends Serializable> fetchTask) {
+  public void setFetchTask(Task<?> fetchTask) {
     this.fetchTask = fetchTask;
   }
 
-  public void addToRootTask(Task<? extends Serializable> tsk){
+  public void addToRootTask(Task<?> tsk){
     rootTasks.add(tsk);
   }
 
-  public void removeFromRootTask(Task<? extends Serializable> tsk){
+  public void removeFromRootTask(Task<?> tsk){
     rootTasks.remove(tsk);
   }
 }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SerializeFilter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SerializeFilter.java
index 922435077c..c15d05082f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SerializeFilter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SerializeFilter.java
@@ -68,7 +68,7 @@ public Serializer(PhysicalContext pctx) {
     @Override
     public Object dispatch(Node nd, Stack<Node> stack, Object...
nodeOutputs) throws SemanticException { - Task currTask = (Task) nd; + Task currTask = (Task) nd; if (currTask instanceof StatsTask) { currTask = ((StatsTask) currTask).getWork().getSourceTask(); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SkewJoinProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SkewJoinProcFactory.java index 4f76b19283..093226c6c3 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SkewJoinProcFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SkewJoinProcFactory.java @@ -56,7 +56,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, return null; } ParseContext parseContext = context.getParseCtx(); - Task currentTsk = context.getCurrentTask(); + Task currentTsk = context.getCurrentTask(); GenMRSkewJoinProcessor.processSkewJoin(op, currentTsk, parseContext); return null; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SkewJoinResolver.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SkewJoinResolver.java index bca6781183..3cabd38360 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SkewJoinResolver.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SkewJoinResolver.java @@ -76,7 +76,7 @@ public SkewJoinTaskDispatcher(PhysicalContext context) { @Override public Object dispatch(Node nd, Stack stack, Object... nodeOutputs) throws SemanticException { - Task task = (Task) nd; + Task task = (Task) nd; if (!task.isMapRedTask() || task instanceof ConditionalTask || ((MapredWork) task.getWork()).getReduceWork() == null) { @@ -140,20 +140,20 @@ public void setPhysicalContext(PhysicalContext physicalContext) { * A container of current task and parse context. 
*/ public static class SkewJoinProcCtx implements NodeProcessorCtx { - private Task currentTask; + private Task currentTask; private ParseContext parseCtx; - public SkewJoinProcCtx(Task task, + public SkewJoinProcCtx(Task task, ParseContext parseCtx) { currentTask = task; this.parseCtx = parseCtx; } - public Task getCurrentTask() { + public Task getCurrentTask() { return currentTask; } - public void setCurrentTask(Task currentTask) { + public void setCurrentTask(Task currentTask) { this.currentTask = currentTask; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SortMergeJoinTaskDispatcher.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SortMergeJoinTaskDispatcher.java index ebf17085fc..fbf6852013 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SortMergeJoinTaskDispatcher.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SortMergeJoinTaskDispatcher.java @@ -223,7 +223,7 @@ private boolean isEligibleForOptimization(SMBMapJoinOperator originalSMBJoinOp) } @Override - public Task processCurrentTask(MapRedTask currTask, + public Task processCurrentTask(MapRedTask currTask, ConditionalTask conditionalTask, Context context) throws SemanticException { // whether it contains a sort merge join operator @@ -252,12 +252,12 @@ private boolean isEligibleForOptimization(SMBMapJoinOperator originalSMBJoinOp) // create conditional work list and task list List listWorks = new ArrayList(); - List> listTasks = new ArrayList>(); + List> listTasks = new ArrayList>(); // create task to aliases mapping and alias to input file mapping for resolver // Must be deterministic order map for consistent q-test output across Java versions - HashMap, Set> taskToAliases = - new LinkedHashMap, Set>(); + HashMap, Set> taskToAliases = + new LinkedHashMap, Set>(); // Note that pathToAlias will behave as if the original plan was a join plan Map> pathToAliases = currJoinWork.getMapWork().getPathToAliases(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SparkCrossProductCheck.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SparkCrossProductCheck.java index 50245963a0..80248d62b0 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SparkCrossProductCheck.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SparkCrossProductCheck.java @@ -59,14 +59,14 @@ public Object dispatch(Node nd, Stack stack, Object... 
nodeOutputs) throws SemanticException { @SuppressWarnings("unchecked") - Task currTask = (Task) nd; + Task currTask = (Task) nd; if (currTask instanceof SparkTask) { SparkWork sparkWork = ((SparkTask) currTask).getWork(); checkShuffleJoin(sparkWork); checkMapJoin((SparkTask) currTask); } else if (currTask instanceof ConditionalTask) { - List> taskList = ((ConditionalTask) currTask).getListTasks(); - for (Task task : taskList) { + List> taskList = ((ConditionalTask) currTask).getListTasks(); + for (Task task : taskList) { dispatch(task, stack, nodeOutputs); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SparkDynamicPartitionPruningResolver.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SparkDynamicPartitionPruningResolver.java index 124138361b..98d9c6a4f4 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SparkDynamicPartitionPruningResolver.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SparkDynamicPartitionPruningResolver.java @@ -81,7 +81,7 @@ public PhysicalContext resolve(PhysicalContext pctx) throws SemanticException { @Override public Object dispatch(Node nd, Stack stack, Object... nodeOutputs) throws SemanticException { - Task task = (Task) nd; + Task task = (Task) nd; // If the given Task is a SparkTask then search its Work DAG for SparkPartitionPruningSinkOperator if (task instanceof SparkTask) { @@ -124,12 +124,12 @@ public Object dispatch(Node nd, Stack stack, Object... nodeOutputs) throws * Recursively go through the children of the given {@link Task} and check if any child {@link SparkTask} contains * the specified {@link MapWork} object. */ - private boolean taskContainsDependentMapWork(Task task, + private boolean taskContainsDependentMapWork(Task task, MapWork work) throws SemanticException { if (task == null || task.getChildTasks() == null) { return false; } - for (Task childTask : task.getChildTasks()) { + for (Task childTask : task.getChildTasks()) { if (childTask != null && childTask instanceof SparkTask && childTask.getMapWork().contains(work)) { return true; } else if (taskContainsDependentMapWork(childTask, work)) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SparkMapJoinResolver.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SparkMapJoinResolver.java index 55b46e5029..97cb4a4857 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SparkMapJoinResolver.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SparkMapJoinResolver.java @@ -61,7 +61,7 @@ public class SparkMapJoinResolver implements PhysicalPlanResolver { // prevents a task from being processed multiple times - private final Set> visitedTasks = new HashSet<>(); + private final Set> visitedTasks = new HashSet<>(); @Override public PhysicalContext resolve(PhysicalContext pctx) throws SemanticException { @@ -245,11 +245,11 @@ private SparkTask createSparkTask(SparkTask originalTask, } } else { if (originalTask != resultTask) { - List> parentTasks = originalTask.getParentTasks(); + List> parentTasks = originalTask.getParentTasks(); if (parentTasks != null && parentTasks.size() > 0) { // avoid concurrent modification - originalTask.setParentTasks(new ArrayList>()); - for (Task parentTask : parentTasks) { + originalTask.setParentTasks(new ArrayList>()); + for (Task parentTask : parentTasks) { parentTask.addDependentTask(resultTask); parentTask.removeDependentTask(originalTask); } @@ -271,12 +271,12 @@ private SparkTask createSparkTask(SparkTask originalTask, 
@Override public Object dispatch(Node nd, Stack stack, Object... nos) throws SemanticException { - Task currentTask = (Task) nd; + Task currentTask = (Task) nd; if(currentTask.isMapRedTask()) { if (currentTask instanceof ConditionalTask) { - List> taskList = + List> taskList = ((ConditionalTask) currentTask).getListTasks(); - for (Task tsk : taskList) { + for (Task tsk : taskList) { if (tsk instanceof SparkTask) { processCurrentTask((SparkTask) tsk, (ConditionalTask) currentTask); visitedTasks.add(tsk); @@ -350,7 +350,7 @@ private void updateConditionalTask(ConditionalTask conditionalTask, ConditionalWork conditionalWork = conditionalTask.getWork(); SparkWork originWork = originalTask.getWork(); SparkWork newWork = newTask.getWork(); - List> listTask = conditionalTask.getListTasks(); + List> listTask = conditionalTask.getListTasks(); List listWork = (List) conditionalWork.getListWorks(); int taskIndex = listTask.indexOf(originalTask); int workIndex = listWork.indexOf(originWork); @@ -365,15 +365,15 @@ private void updateConditionalTask(ConditionalTask conditionalTask, ConditionalResolverSkewJoin.ConditionalResolverSkewJoinCtx context = (ConditionalResolverSkewJoin.ConditionalResolverSkewJoinCtx) conditionalTask .getResolverCtx(); - HashMap> bigKeysDirToTaskMap = context + HashMap> bigKeysDirToTaskMap = context .getDirToTaskMap(); // to avoid concurrent modify the hashmap - HashMap> newbigKeysDirToTaskMap = - new HashMap>(); + HashMap> newbigKeysDirToTaskMap = + new HashMap>(); // reset the resolver - for (Map.Entry> entry : + for (Map.Entry> entry : bigKeysDirToTaskMap.entrySet()) { - Task task = entry.getValue(); + Task task = entry.getValue(); Path bigKeyDir = entry.getKey(); if (task.equals(originalTask)) { newbigKeysDirToTaskMap.put(bigKeyDir, newTask); @@ -384,7 +384,7 @@ private void updateConditionalTask(ConditionalTask conditionalTask, context.setDirToTaskMap(newbigKeysDirToTaskMap); // update no skew task if (context.getNoSkewTask() != null && context.getNoSkewTask().equals(originalTask)) { - List> noSkewTask = new ArrayList<>(); + List> noSkewTask = new ArrayList<>(); noSkewTask.add(newTask); context.setNoSkewTask(noSkewTask); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/StageIDsRearranger.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/StageIDsRearranger.java index 00de47b6d9..6c874754a1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/StageIDsRearranger.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/StageIDsRearranger.java @@ -70,14 +70,14 @@ protected void accepted(Task task) { } } }; - for (Task task : tasks) { + for (Task task : tasks) { traverse.traverse(task); } return sources; } public static List getExplainOrder(HiveConf conf, List> tasks) { - for (Task task : tasks) { + for (Task task : tasks) { task.setRootTask(true); } String var = conf.getVar(HiveConf.ConfVars.HIVESTAGEIDREARRANGE); @@ -122,7 +122,7 @@ protected boolean isReady(Task task) { return type == ArrangeType.NONE || type == ArrangeType.IDONLY || super.isReady(task); } }; - for (Task task : tasks) { + for (Task task : tasks) { traverse.traverse(task); } return new ArrayList(traverse.traversed); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java index b650299a9a..68bc5675a4 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java +++ 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java @@ -990,7 +990,7 @@ private void fixupOtherParent( @Override public Object dispatch(Node nd, Stack stack, Object... nodeOutputs) throws SemanticException { - Task currTask = (Task) nd; + Task currTask = (Task) nd; if (currTask instanceof MapRedTask) { MapredWork mapredWork = ((MapRedTask) currTask).getWork(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkSkewJoinProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkSkewJoinProcFactory.java index a7c18b0f20..54b25506f9 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkSkewJoinProcFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkSkewJoinProcFactory.java @@ -83,7 +83,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, Object... nodeOutputs) throws SemanticException { SparkSkewJoinResolver.SparkSkewJoinProcCtx context = (SparkSkewJoinResolver.SparkSkewJoinProcCtx) procCtx; - Task currentTsk = context.getCurrentTask(); + Task currentTsk = context.getCurrentTask(); JoinOperator op = (JoinOperator) nd; ReduceWork reduceWork = context.getReducerToReduceWork().get(op); ParseContext parseContext = context.getParseCtx(); @@ -170,11 +170,11 @@ private static void splitTask(SparkTask currentTask, ReduceWork reduceWork, tableScanOp, mapWork, false, tableDesc); // insert the new task between current task and its child @SuppressWarnings("unchecked") - Task newTask = TaskFactory.get(newWork); - List> childTasks = currentTask.getChildTasks(); + Task newTask = TaskFactory.get(newWork); + List> childTasks = currentTask.getChildTasks(); // must have at most one child if (childTasks != null && childTasks.size() > 0) { - Task childTask = childTasks.get(0); + Task childTask = childTasks.get(0); currentTask.removeDependentTask(childTask); newTask.addDependentTask(childTask); } @@ -224,11 +224,11 @@ private static void copyWorkGraph(SparkWork originWork, SparkWork newWork, BaseW } private static boolean supportRuntimeSkewJoin(JoinOperator joinOp, ReduceWork reduceWork, - Task currTask, HiveConf hiveConf) { + Task currTask, HiveConf hiveConf) { if (currTask instanceof SparkTask && GenMRSkewJoinProcessor.skewJoinEnabled(hiveConf, joinOp)) { SparkWork sparkWork = ((SparkTask) currTask).getWork(); - List> children = currTask.getChildTasks(); + List> children = currTask.getChildTasks(); return !joinOp.getConf().isFixedAsSorted() && sparkWork.contains(reduceWork) && (children == null || children.size() <= 1) && OperatorUtils.getOp(reduceWork, CommonJoinOperator.class).size() == 1; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkSkewJoinResolver.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkSkewJoinResolver.java index 089438ae29..fbd53e0bb9 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkSkewJoinResolver.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkSkewJoinResolver.java @@ -77,7 +77,7 @@ public Object dispatch(Node nd, Stack stack, Object... 
nodeOutputs) throws SemanticException { @SuppressWarnings("unchecked") - Task task = (Task) nd; + Task task = (Task) nd; if (task instanceof SparkTask) { SparkWork sparkWork = ((SparkTask) task).getWork(); SparkSkewJoinProcCtx skewJoinProcCtx = @@ -114,7 +114,7 @@ public void setPhysicalContext(PhysicalContext physicalContext) { // need a map from the reducer to the corresponding ReduceWork private Map, ReduceWork> reducerToReduceWork; - public SparkSkewJoinProcCtx(Task task, + public SparkSkewJoinProcCtx(Task task, ParseContext parseCtx) { super(task, parseCtx); reducerToReduceWork = new HashMap, ReduceWork>(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SplitSparkWorkResolver.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SplitSparkWorkResolver.java index 3e1f85d11f..4d0331df8a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SplitSparkWorkResolver.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SplitSparkWorkResolver.java @@ -54,7 +54,7 @@ public class SplitSparkWorkResolver implements PhysicalPlanResolver { @Override public PhysicalContext resolve(PhysicalContext pctx) throws SemanticException { - for (Task task : pctx.getRootTasks()) { + for (Task task : pctx.getRootTasks()) { if (task instanceof SparkTask) { splitSparkWork(((SparkTask) task).getWork()); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/AbstractSemanticAnalyzerHook.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/AbstractSemanticAnalyzerHook.java index 32b89a7299..4971b066ce 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/AbstractSemanticAnalyzerHook.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/AbstractSemanticAnalyzerHook.java @@ -32,6 +32,6 @@ public ASTNode preAnalyze(HiveSemanticAnalyzerHookContext context,ASTNode ast) } public void postAnalyze(HiveSemanticAnalyzerHookContext context, - List> rootTasks) throws SemanticException { + List> rootTasks) throws SemanticException { } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java index 370697380e..ec3d9d0c56 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java @@ -120,7 +120,7 @@ protected final Hive db; protected final HiveConf conf; protected final QueryState queryState; - protected List> rootTasks; + protected List> rootTasks; protected FetchTask fetchTask; protected final Logger LOG; protected final LogHelper console; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java index 76c69cf24b..66cd6977a6 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java @@ -93,11 +93,11 @@ private Hive db; private HashSet inputs; private HashSet outputs; - private List> tasks; + private List> tasks; private Logger LOG; private Context ctx; private DumpType eventType = DumpType.EVENT_UNKNOWN; - private Task openTxnTask = null; + private Task openTxnTask = null; public HiveConf getConf() { return conf; @@ -115,7 +115,7 @@ public Hive getHive() { return outputs; } - public List> getTasks() { + public List> getTasks() { return tasks; } @@ -138,7 +138,7 @@ public DumpType getEventType() { public SemanticAnalyzerWrapperContext(HiveConf conf, Hive db, HashSet inputs, HashSet outputs, - List> tasks, + List> tasks, Logger LOG, 
        Context ctx){
      this.conf = conf;
      this.db = db;
@@ -149,10 +149,10 @@ public SemanticAnalyzerWrapperContext(HiveConf conf, Hive db,
      this.ctx = ctx;
    }
 
-    public Task<? extends Serializable> getOpenTxnTask() {
+    public Task<?> getOpenTxnTask() {
      return openTxnTask;
    }
 
-    public void setOpenTxnTask(Task<? extends Serializable> openTxnTask) {
+    public void setOpenTxnTask(Task<?> openTxnTask) {
      this.openTxnTask = openTxnTask;
    }
  }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezProcContext.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezProcContext.java
index f977fc1b59..2c0d21a852 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezProcContext.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezProcContext.java
@@ -68,7 +68,7 @@
   public final List<Task<MoveWork>> moveTask;
 
   // rootTasks is the entry point for all generated tasks
-  public final List<Task<? extends Serializable>> rootTasks;
+  public final List<Task<?>> rootTasks;
 
   public final Set<ReadEntity> inputs;
   public final Set<WriteEntity> outputs;
@@ -164,7 +164,7 @@
   @SuppressWarnings("unchecked")
   public GenTezProcContext(HiveConf conf, ParseContext parseContext,
-      List<Task<MoveWork>> moveTask, List<Task<? extends Serializable>> rootTasks,
+      List<Task<MoveWork>> moveTask, List<Task<?>> rootTasks,
       Set<ReadEntity> inputs, Set<WriteEntity> outputs) {
     this.conf = conf;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveSemanticAnalyzerHook.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveSemanticAnalyzerHook.java
index 08aa7e0589..65d08d752d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveSemanticAnalyzerHook.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveSemanticAnalyzerHook.java
@@ -72,5 +72,5 @@ public ASTNode preAnalyze(
    */
   public void postAnalyze(
     HiveSemanticAnalyzerHookContext context,
-    List<Task<? extends Serializable>> rootTasks) throws SemanticException;
+    List<Task<?>> rootTasks) throws SemanticException;
 }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
index e955989d92..ec75fa4e9a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
@@ -548,7 +548,7 @@ private static ImportTableDesc getBaseCreateTableDescFromTable(String dbName,
     return TaskFactory.get(new DDLWork(x.getInputs(), x.getOutputs(), dropTblDesc), x.getConf());
   }
 
-  private static Task<? extends Serializable> alterTableTask(ImportTableDesc tableDesc,
+  private static Task<?> alterTableTask(ImportTableDesc tableDesc,
       EximUtil.SemanticAnalyzerWrapperContext x, ReplicationSpec replicationSpec) {
     tableDesc.setReplaceMode(true);
@@ -558,7 +558,7 @@ private static ImportTableDesc getBaseCreateTableDescFromTable(String dbName,
     return tableDesc.getCreateTableTask(x.getInputs(), x.getOutputs(), x.getConf());
   }
 
-  private static Task<? extends Serializable> alterSinglePartition(
+  private static Task<?> alterSinglePartition(
       ImportTableDesc tblDesc, Table table, Warehouse wh, AlterTableAddPartitionDesc addPartitionDesc,
       ReplicationSpec replicationSpec, org.apache.hadoop.hive.ql.metadata.Partition ptn,
       EximUtil.SemanticAnalyzerWrapperContext x) throws MetaException, IOException, HiveException {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/MapReduceCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/MapReduceCompiler.java
index 42637dfa77..bcba4d7670 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/MapReduceCompiler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/MapReduceCompiler.java
@@ -93,7 +93,7 @@ public void init(QueryState queryState, LogHelper console, Hive db) {
 
   // loop over all the tasks recursively
   @Override
-  protected void setInputFormat(Task<? extends Serializable> task)
{ + protected void setInputFormat(Task task) { if (task instanceof ExecDriver) { MapWork work = ((MapredWork) task.getWork()).getMapWork(); Map> opMap = work.getAliasToWork(); @@ -103,15 +103,15 @@ protected void setInputFormat(Task task) { } } } else if (task instanceof ConditionalTask) { - List> listTasks + List> listTasks = ((ConditionalTask) task).getListTasks(); - for (Task tsk : listTasks) { + for (Task tsk : listTasks) { setInputFormat(tsk); } } if (task.getChildTasks() != null) { - for (Task childTask : task.getChildTasks()) { + for (Task childTask : task.getChildTasks()) { setInputFormat(childTask); } } @@ -137,7 +137,7 @@ private void setInputFormat(MapWork work, Operator op) { } // loop over all the tasks recursively - private void breakTaskTree(Task task) { + private void breakTaskTree(Task task) { if (task instanceof ExecDriver) { Map> opMap = @@ -148,9 +148,9 @@ private void breakTaskTree(Task task) { } } } else if (task instanceof ConditionalTask) { - List> listTasks = ((ConditionalTask) task) + List> listTasks = ((ConditionalTask) task) .getListTasks(); - for (Task tsk : listTasks) { + for (Task tsk : listTasks) { breakTaskTree(tsk); } } @@ -159,7 +159,7 @@ private void breakTaskTree(Task task) { return; } - for (Task childTask : task.getChildTasks()) { + for (Task childTask : task.getChildTasks()) { breakTaskTree(childTask); } } @@ -191,7 +191,7 @@ private static int getNumberOfReducers(MapredWork mrwork, HiveConf conf) { } @Override - protected void decideExecMode(List> rootTasks, Context ctx, + protected void decideExecMode(List> rootTasks, Context ctx, GlobalLimitCtx globalLimitCtx) throws SemanticException { @@ -271,13 +271,13 @@ public boolean accept(Path file) { } @Override - protected void optimizeTaskPlan(List> rootTasks, + protected void optimizeTaskPlan(List> rootTasks, ParseContext pCtx, Context ctx) throws SemanticException { // reduce sink does not have any kids - since the plan by now has been // broken up into multiple // tasks, iterate over all tasks. 
// For each task, go over all operators recursively - for (Task rootTask : rootTasks) { + for (Task rootTask : rootTasks) { breakTaskTree(rootTask); } @@ -291,7 +291,7 @@ protected void optimizeTaskPlan(List> rootTasks, } @Override - protected void generateTaskTree(List> rootTasks, ParseContext pCtx, + protected void generateTaskTree(List> rootTasks, ParseContext pCtx, List> mvTask, Set inputs, Set outputs) throws SemanticException { // generate map reduce plans @@ -299,7 +299,7 @@ protected void generateTaskTree(List> rootTasks, Pa GenMRProcContext procCtx = new GenMRProcContext( conf, // Must be deterministic order map for consistent q-test output across Java versions - new LinkedHashMap, Task>(), + new LinkedHashMap, Task>(), tempParseContext, mvTask, rootTasks, new LinkedHashMap, GenMapRedCtx>(), inputs, outputs); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java index 67b4901b0c..feecda5bdf 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java @@ -110,7 +110,7 @@ private GlobalLimitCtx globalLimitCtx; private HashSet semanticInputs; - private List> rootTasks; + private List> rootTasks; private FetchTask fetchTask; private QueryProperties queryProperties; @@ -194,7 +194,7 @@ public ParseContext( HashMap opToSamplePruner, GlobalLimitCtx globalLimitCtx, HashMap nameToSplitSample, - HashSet semanticInputs, List> rootTasks, + HashSet semanticInputs, List> rootTasks, Map> opToPartToSkewedPruner, Map viewAliasToInput, List reduceSinkOperatorsAddedByEnforceBucketingSorting, @@ -511,8 +511,8 @@ public void setGlobalLimitCtx(GlobalLimitCtx globalLimitCtx) { return semanticInputs; } - public void replaceRootTask(Task rootTask, - List> tasks) { + public void replaceRootTask(Task rootTask, + List> tasks) { this.rootTasks.remove(rootTask); this.rootTasks.addAll(tasks); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java index 0b55a39a8a..cf62953027 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java @@ -121,7 +121,7 @@ public void init(QueryState queryState, LogHelper console, Hive db) { @SuppressWarnings("nls") public void compile(final ParseContext pCtx, - final List> rootTasks, + final List> rootTasks, final HashSet inputs, final HashSet outputs) throws SemanticException { Context ctx = pCtx.getContext(); @@ -225,7 +225,7 @@ public void compile(final ParseContext pCtx, // The idea here is to keep an object reference both in FileSink and in FetchTask for list of files // to be fetched. During Job close file sink will populate the list and fetch task later will use it // to fetch the results. 
- Collection> tableScanOps = + Collection> tableScanOps = Lists.>newArrayList(pCtx.getTopOps().values()); Set fsOps = OperatorUtils.findOperators(tableScanOps, FileSinkOperator.class); if(fsOps != null && fsOps.size() == 1) { @@ -278,13 +278,13 @@ public void compile(final ParseContext pCtx, generateTaskTree(rootTasks, pCtx, mvTask, inputs, outputs); // For each task, set the key descriptor for the reducer - for (Task rootTask : rootTasks) { + for (Task rootTask : rootTasks) { GenMapRedUtils.setKeyAndValueDescForTaskTree(rootTask); } // If a task contains an operator which instructs bucketizedhiveinputformat // to be used, please do so - for (Task rootTask : rootTasks) { + for (Task rootTask : rootTasks) { setInputFormat(rootTask); } @@ -308,7 +308,7 @@ public void compile(final ParseContext pCtx, throw new SemanticException("Can not find correct root task!"); } try { - Task root = rootTasks.iterator().next(); + Task root = rootTasks.iterator().next(); StatsTask tsk = (StatsTask) genTableStats(pCtx, pCtx.getTopOps().values() .iterator().next(), root, outputs); root.addDependentTask(tsk); @@ -318,10 +318,10 @@ public void compile(final ParseContext pCtx, } genColumnStatsTask(pCtx.getAnalyzeRewrite(), loadFileWork, map, outerQueryLimit, 0); } else { - Set> leafTasks = new LinkedHashSet>(); + Set> leafTasks = new LinkedHashSet>(); getLeafTasks(rootTasks, leafTasks); - List> nonStatsLeafTasks = new ArrayList<>(); - for (Task tsk : leafTasks) { + List> nonStatsLeafTasks = new ArrayList<>(); + for (Task tsk : leafTasks) { // map table name to the correct ColumnStatsTask if (tsk instanceof StatsTask) { map.put(extractTableFullName((StatsTask) tsk), (StatsTask) tsk); @@ -330,8 +330,8 @@ public void compile(final ParseContext pCtx, } } // add cStatsTask as a dependent of all the nonStatsLeafTasks - for (Task tsk : nonStatsLeafTasks) { - for (Task cStatsTask : map.values()) { + for (Task tsk : nonStatsLeafTasks) { + for (Task cStatsTask : map.values()) { tsk.addDependentTask(cStatsTask); } } @@ -360,13 +360,13 @@ public void compile(final ParseContext pCtx, // generate a DDL task and make it a dependent task of the leaf CreateTableDesc crtTblDesc = pCtx.getCreateTable(); crtTblDesc.validate(conf); - Task crtTblTask = TaskFactory.get(new DDLWork(inputs, outputs, crtTblDesc)); + Task crtTblTask = TaskFactory.get(new DDLWork(inputs, outputs, crtTblDesc)); patchUpAfterCTASorMaterializedView(rootTasks, inputs, outputs, crtTblTask, CollectionUtils.isEmpty(crtTblDesc.getPartColNames())); } else if (pCtx.getQueryProperties().isMaterializedView()) { // generate a DDL task and make it a dependent task of the leaf CreateViewDesc viewDesc = pCtx.getCreateViewDesc(); - Task crtViewTask = TaskFactory.get(new DDLWork( + Task crtViewTask = TaskFactory.get(new DDLWork( inputs, outputs, viewDesc)); patchUpAfterCTASorMaterializedView(rootTasks, inputs, outputs, crtViewTask, CollectionUtils.isEmpty(viewDesc.getPartColNames())); @@ -375,10 +375,10 @@ public void compile(final ParseContext pCtx, // of the tree. 
MaterializedViewUpdateDesc materializedViewDesc = pCtx.getMaterializedViewUpdateDesc(); DDLWork ddlWork = new DDLWork(inputs, outputs, materializedViewDesc); - Set> leafTasks = new LinkedHashSet>(); + Set> leafTasks = new LinkedHashSet>(); getLeafTasks(rootTasks, leafTasks); - Task materializedViewTask = TaskFactory.get(ddlWork, conf); - for (Task task : leafTasks) { + Task materializedViewTask = TaskFactory.get(ddlWork, conf); + for (Task task : leafTasks) { task.addDependentTask(materializedViewTask); } } @@ -494,8 +494,8 @@ private Path getDefaultCtasLocation(final ParseContext pCtx) throws SemanticExce } } - private void patchUpAfterCTASorMaterializedView(List> rootTasks, - Set inputs, Set outputs, Task createTask, + private void patchUpAfterCTASorMaterializedView(List> rootTasks, + Set inputs, Set outputs, Task createTask, boolean createTaskAfterMoveTask) { // clear the mapredWork output file from outputs for CTAS // DDLWork at the tail of the chain will have the output @@ -512,15 +512,15 @@ private void patchUpAfterCTASorMaterializedView(List> leaves = new LinkedHashSet<>(); + Set> leaves = new LinkedHashSet<>(); getLeafTasks(rootTasks, leaves); assert (leaves.size() > 0); // Target task is supposed to be the last task - Task targetTask = createTask; - for (Task task : leaves) { + Task targetTask = createTask; + for (Task task : leaves) { if (task instanceof StatsTask) { // StatsTask require table to already exist - for (Task parentOfStatsTask : task.getParentTasks()) { + for (Task parentOfStatsTask : task.getParentTasks()) { if (parentOfStatsTask instanceof MoveTask && !createTaskAfterMoveTask) { // For partitioned CTAS, we need to create the table before the move task // as we need to create the partitions in metastore and for that we should @@ -530,7 +530,7 @@ private void patchUpAfterCTASorMaterializedView(List parentOfCrtTblTask : createTask.getParentTasks()) { + for (Task parentOfCrtTblTask : createTask.getParentTasks()) { parentOfCrtTblTask.removeDependentTask(task); } createTask.addDependentTask(task); @@ -577,11 +577,11 @@ private void patchUpAfterCTASorMaterializedView(List dependentTask, Task task) { - for (Task parentOfStatsTask : dependentTask.getParentTasks()) { + private void interleaveTask(Task dependentTask, Task task) { + for (Task parentOfStatsTask : dependentTask.getParentTasks()) { parentOfStatsTask.addDependentTask(task); } - for (Task parentOfCrtTblTask : task.getParentTasks()) { + for (Task parentOfCrtTblTask : task.getParentTasks()) { parentOfCrtTblTask.removeDependentTask(dependentTask); } task.addDependentTask(dependentTask); @@ -638,16 +638,16 @@ protected void genColumnStatsTask(AnalyzeRewriteContext analyzeRewrite, /** * Find all leaf tasks of the list of root tasks. 
*/ - private void getLeafTasks(List> rootTasks, - Set> leaves) { + private void getLeafTasks(List> rootTasks, + Set> leaves) { - for (Task root : rootTasks) { + for (Task root : rootTasks) { getLeafTasks(root, leaves); } } - private void getLeafTasks(Task task, - Set> leaves) { + private void getLeafTasks(Task task, + Set> leaves) { if (task.getDependentTasks() == null) { if (!leaves.contains(task)) { leaves.add(task); @@ -660,7 +660,7 @@ private void getLeafTasks(Task task, /* * Called to transform tasks into local tasks where possible/desirable */ - protected abstract void decideExecMode(List> rootTasks, Context ctx, + protected abstract void decideExecMode(List> rootTasks, Context ctx, GlobalLimitCtx globalLimitCtx) throws SemanticException; /* @@ -673,18 +673,18 @@ protected void optimizeOperatorPlan(ParseContext pCtxSet, Set inputs /* * Called after the tasks have been generated to run another round of optimization */ - protected abstract void optimizeTaskPlan(List> rootTasks, + protected abstract void optimizeTaskPlan(List> rootTasks, ParseContext pCtx, Context ctx) throws SemanticException; /* * Called to set the appropriate input format for tasks */ - protected abstract void setInputFormat(Task rootTask); + protected abstract void setInputFormat(Task rootTask); /* * Called to generate the taks tree from the parse context/operator tree */ - protected abstract void generateTaskTree(List> rootTasks, ParseContext pCtx, + protected abstract void generateTaskTree(List> rootTasks, ParseContext pCtx, List> mvTask, Set inputs, Set outputs) throws SemanticException; /* @@ -714,7 +714,7 @@ protected void runDynPartitionSortOptimizations(ParseContext parseContext, HiveC /** * Create a clone of the parse context */ - public ParseContext getParseContext(ParseContext pCtx, List> rootTasks) { + public ParseContext getParseContext(ParseContext pCtx, List> rootTasks) { ParseContext clone = new ParseContext(queryState, pCtx.getOpToPartPruner(), pCtx.getOpToPartList(), pCtx.getTopOps(), pCtx.getJoinOps(), pCtx.getSmbMapJoinOps(), diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java index 5000ba47b3..24487568fb 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java @@ -597,7 +597,7 @@ private void runDynamicPartitionPruning(OptimizeTezProcContext procCtx, Set> rootTasks, ParseContext pCtx, + protected void generateTaskTree(List> rootTasks, ParseContext pCtx, List> mvTask, Set inputs, Set outputs) throws SemanticException { @@ -690,7 +690,7 @@ protected void generateTaskTree(List> rootTasks, Pa } @Override - protected void setInputFormat(Task task) { + protected void setInputFormat(Task task) { if (task instanceof TezTask) { TezWork work = ((TezTask)task).getWork(); List all = work.getAllWork(); @@ -706,15 +706,15 @@ protected void setInputFormat(Task task) { } } } else if (task instanceof ConditionalTask) { - List> listTasks + List> listTasks = ((ConditionalTask) task).getListTasks(); - for (Task tsk : listTasks) { + for (Task tsk : listTasks) { setInputFormat(tsk); } } if (task.getChildTasks() != null) { - for (Task childTask : task.getChildTasks()) { + for (Task childTask : task.getChildTasks()) { setInputFormat(childTask); } } @@ -737,7 +737,7 @@ private void setInputFormat(MapWork work, Operator op) { } @Override - protected void decideExecMode(List> rootTasks, Context ctx, + protected void decideExecMode(List> rootTasks, Context 
ctx, GlobalLimitCtx globalLimitCtx) throws SemanticException { // currently all Tez work is on the cluster @@ -745,7 +745,7 @@ protected void decideExecMode(List> rootTasks, Cont } @Override - protected void optimizeTaskPlan(List> rootTasks, ParseContext pCtx, + protected void optimizeTaskPlan(List> rootTasks, ParseContext pCtx, Context ctx) throws SemanticException { PerfLogger perfLogger = SessionState.getPerfLogger(); perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactory.java index 6713163ace..3031b702c7 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactory.java @@ -37,40 +37,40 @@ @LimitedPrivate(value = { "Apache Hive, Apache Sentry (incubating)" }) @Evolving public interface HiveAuthorizationTaskFactory { - public Task createCreateRoleTask(ASTNode node, HashSet inputs, + public Task createCreateRoleTask(ASTNode node, HashSet inputs, HashSet outputs) throws SemanticException; - public Task createDropRoleTask(ASTNode node, HashSet inputs, + public Task createDropRoleTask(ASTNode node, HashSet inputs, HashSet outputs) throws SemanticException; - public Task createShowRoleGrantTask(ASTNode node, Path resultFile, + public Task createShowRoleGrantTask(ASTNode node, Path resultFile, HashSet inputs, HashSet outputs) throws SemanticException; - public Task createGrantRoleTask(ASTNode node, HashSet inputs, + public Task createGrantRoleTask(ASTNode node, HashSet inputs, HashSet outputs) throws SemanticException; - public Task createRevokeRoleTask(ASTNode node, HashSet inputs, + public Task createRevokeRoleTask(ASTNode node, HashSet inputs, HashSet outputs) throws SemanticException; - public Task createGrantTask(ASTNode node, HashSet inputs, + public Task createGrantTask(ASTNode node, HashSet inputs, HashSet outputs) throws SemanticException; - public Task createShowGrantTask(ASTNode node, Path resultFile, HashSet inputs, + public Task createShowGrantTask(ASTNode node, Path resultFile, HashSet inputs, HashSet outputs) throws SemanticException; - public Task createRevokeTask(ASTNode node, HashSet inputs, + public Task createRevokeTask(ASTNode node, HashSet inputs, HashSet outputs) throws SemanticException; - public Task createSetRoleTask(String roleName, + public Task createSetRoleTask(String roleName, HashSet inputs, HashSet outputs) throws SemanticException; - public Task createShowCurrentRoleTask(HashSet inputs, + public Task createShowCurrentRoleTask(HashSet inputs, HashSet outputs, Path resFile) throws SemanticException; - public Task createShowRolePrincipalsTask(ASTNode ast, Path resFile, + public Task createShowRolePrincipalsTask(ASTNode ast, Path resFile, HashSet inputs, HashSet outputs) throws SemanticException; - public Task createShowRolesTask(ASTNode ast, Path resFile, + public Task createShowRolesTask(ASTNode ast, Path resFile, HashSet inputs, HashSet outputs) throws SemanticException; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactoryImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactoryImpl.java index 8f7bcf5d43..40812e4af2 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactoryImpl.java +++ 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactoryImpl.java @@ -73,21 +73,21 @@ public HiveAuthorizationTaskFactoryImpl(HiveConf conf, Hive db) { } @Override - public Task createCreateRoleTask(ASTNode ast, HashSet inputs, + public Task createCreateRoleTask(ASTNode ast, HashSet inputs, HashSet outputs) { String roleName = BaseSemanticAnalyzer.unescapeIdentifier(ast.getChild(0).getText()); CreateRoleDesc createRoleDesc = new CreateRoleDesc(roleName); return TaskFactory.get(new DDLWork(inputs, outputs, createRoleDesc)); } @Override - public Task createDropRoleTask(ASTNode ast, HashSet inputs, + public Task createDropRoleTask(ASTNode ast, HashSet inputs, HashSet outputs) { String roleName = BaseSemanticAnalyzer.unescapeIdentifier(ast.getChild(0).getText()); DropRoleDesc dropRoleDesc = new DropRoleDesc(roleName); return TaskFactory.get(new DDLWork(inputs, outputs, dropRoleDesc)); } @Override - public Task createShowRoleGrantTask(ASTNode ast, Path resultFile, + public Task createShowRoleGrantTask(ASTNode ast, Path resultFile, HashSet inputs, HashSet outputs) { ASTNode child = (ASTNode) ast.getChild(0); PrincipalType principalType = PrincipalType.USER; @@ -107,7 +107,7 @@ public HiveAuthorizationTaskFactoryImpl(HiveConf conf, Hive db) { return TaskFactory.get(new DDLWork(inputs, outputs, showRoleGrantDesc)); } @Override - public Task createGrantTask(ASTNode ast, HashSet inputs, + public Task createGrantTask(ASTNode ast, HashSet inputs, HashSet outputs) throws SemanticException { List privilegeDesc = analyzePrivilegeListDef( (ASTNode) ast.getChild(0)); @@ -135,7 +135,7 @@ public HiveAuthorizationTaskFactoryImpl(HiveConf conf, Hive db) { } @Override - public Task createRevokeTask(ASTNode ast, HashSet inputs, + public Task createRevokeTask(ASTNode ast, HashSet inputs, HashSet outputs) throws SemanticException { List privilegeDesc = analyzePrivilegeListDef((ASTNode) ast.getChild(0)); List principalDesc = AuthorizationParseUtils.analyzePrincipalListDef((ASTNode) ast.getChild(1)); @@ -153,7 +153,7 @@ public HiveAuthorizationTaskFactoryImpl(HiveConf conf, Hive db) { return TaskFactory.get(new DDLWork(inputs, outputs, revokeDesc)); } @Override - public Task createShowGrantTask(ASTNode ast, Path resultFile, HashSet inputs, + public Task createShowGrantTask(ASTNode ast, Path resultFile, HashSet inputs, HashSet outputs) throws SemanticException { PrincipalDesc principalDesc = null; @@ -180,16 +180,16 @@ public HiveAuthorizationTaskFactoryImpl(HiveConf conf, Hive db) { return TaskFactory.get(new DDLWork(inputs, outputs, showGrant)); } @Override - public Task createGrantRoleTask(ASTNode ast, HashSet inputs, + public Task createGrantRoleTask(ASTNode ast, HashSet inputs, HashSet outputs) { return analyzeGrantRevokeRole(true, ast, inputs, outputs); } @Override - public Task createRevokeRoleTask(ASTNode ast, HashSet inputs, + public Task createRevokeRoleTask(ASTNode ast, HashSet inputs, HashSet outputs) { return analyzeGrantRevokeRole(false, ast, inputs, outputs); } - private Task analyzeGrantRevokeRole(boolean isGrant, ASTNode ast, + private Task analyzeGrantRevokeRole(boolean isGrant, ASTNode ast, HashSet inputs, HashSet outputs) { List principalDesc = AuthorizationParseUtils.analyzePrincipalListDef( (ASTNode) ast.getChild(0)); @@ -335,7 +335,7 @@ private String toMessage(ErrorMsg message, Object detail) { } @Override - public Task createSetRoleTask(String roleName, + public Task createSetRoleTask(String roleName, HashSet inputs, HashSet outputs) throws SemanticException { 
     SetRoleDesc setRoleDesc = new SetRoleDesc(roleName);
@@ -343,7 +343,7 @@ private String toMessage(ErrorMsg message, Object detail) {
   }
 
   @Override
-  public Task<? extends Serializable> createShowCurrentRoleTask(
+  public Task<?> createShowCurrentRoleTask(
     HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
     Path resFile) throws SemanticException {
     ShowCurrentRoleDesc showCurrentRoleDesc = new ShowCurrentRoleDesc(resFile.toString());
@@ -351,7 +351,7 @@ private String toMessage(ErrorMsg message, Object detail) {
   }
 
   @Override
-  public Task<? extends Serializable> createShowRolePrincipalsTask(ASTNode ast, Path resFile,
+  public Task<?> createShowRolePrincipalsTask(ASTNode ast, Path resFile,
       HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs) throws SemanticException {
     String roleName;
@@ -367,7 +367,7 @@ private String toMessage(ErrorMsg message, Object detail) {
   }
 
   @Override
-  public Task<? extends Serializable> createShowRolesTask(ASTNode ast, Path resFile,
+  public Task<?> createShowRolesTask(ASTNode ast, Path resFile,
       HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs) throws SemanticException {
     ShowRolesDesc showRolesDesc = new ShowRolesDesc(resFile.toString());
     return TaskFactory.get(new DDLWork(inputs, outputs, showRolesDesc));
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AbortTxnHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AbortTxnHandler.java
index 599503a884..b1c2709b33 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AbortTxnHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AbortTxnHandler.java
@@ -34,7 +34,7 @@
  */
 public class AbortTxnHandler extends AbstractMessageHandler {
   @Override
-  public List<Task<? extends Serializable>> handle(Context context)
+  public List<Task<?>> handle(Context context)
       throws SemanticException {
     if (!AcidUtils.isAcidEnabled(context.hiveConf)) {
       context.log.error("Cannot load transaction events as acid is not enabled");
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddForeignKeyHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddForeignKeyHandler.java
index 39bd021ea0..d8ed9e2d2f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddForeignKeyHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddForeignKeyHandler.java
@@ -33,7 +33,7 @@
 public class AddForeignKeyHandler extends AbstractMessageHandler {
   @Override
-  public List<Task<? extends Serializable>> handle(Context context)
+  public List<Task<?>> handle(Context context)
       throws SemanticException {
     AddForeignKeyMessage msg = deserializer.getAddForeignKeyMessage(context.dmd.getPayload());
@@ -48,7 +48,7 @@
       }
     }
 
-    List<Task<? extends Serializable>> tasks = new ArrayList<Task<? extends Serializable>>();
+    List<Task<?>> tasks = new ArrayList<Task<?>>();
     if (fks.isEmpty()) {
       return tasks;
     }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddNotNullConstraintHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddNotNullConstraintHandler.java
index e2644177ad..39f896ffcc 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddNotNullConstraintHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddNotNullConstraintHandler.java
@@ -33,7 +33,7 @@
 public class AddNotNullConstraintHandler extends AbstractMessageHandler {
   @Override
-  public List<Task<? extends Serializable>> handle(Context context)
+  public List<Task<?>> handle(Context context)
       throws SemanticException {
     AddNotNullConstraintMessage msg = deserializer.getAddNotNullConstraintMessage(context.dmd.getPayload());
@@ -48,7 +48,7 @@
       }
     }
 
-    List<Task<? extends Serializable>> tasks = new ArrayList<Task<? extends Serializable>>();
+    List<Task<?>> tasks = new ArrayList<Task<?>>();
     if (nns.isEmpty()) {
       return tasks;
     }
diff --git
a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddPrimaryKeyHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddPrimaryKeyHandler.java index 54a0638d8a..5bfced0398 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddPrimaryKeyHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddPrimaryKeyHandler.java @@ -33,7 +33,7 @@ public class AddPrimaryKeyHandler extends AbstractMessageHandler { @Override - public List> handle(Context context) + public List> handle(Context context) throws SemanticException { AddPrimaryKeyMessage msg = deserializer.getAddPrimaryKeyMessage(context.dmd.getPayload()); @@ -48,7 +48,7 @@ } } - List> tasks = new ArrayList>(); + List> tasks = new ArrayList>(); if (pks.isEmpty()) { return tasks; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddUniqueConstraintHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddUniqueConstraintHandler.java index a48b411c57..9cf5ffaa74 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddUniqueConstraintHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddUniqueConstraintHandler.java @@ -33,7 +33,7 @@ public class AddUniqueConstraintHandler extends AbstractMessageHandler { @Override - public List> handle(Context context) + public List> handle(Context context) throws SemanticException { AddUniqueConstraintMessage msg = deserializer.getAddUniqueConstraintMessage(context.dmd.getPayload()); @@ -48,7 +48,7 @@ } } - List> tasks = new ArrayList>(); + List> tasks = new ArrayList>(); if (uks.isEmpty()) { return tasks; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AllocWriteIdHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AllocWriteIdHandler.java index bb4402f556..f9a075076a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AllocWriteIdHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AllocWriteIdHandler.java @@ -34,7 +34,7 @@ */ public class AllocWriteIdHandler extends AbstractMessageHandler { @Override - public List> handle(Context context) + public List> handle(Context context) throws SemanticException { if (!AcidUtils.isAcidEnabled(context.hiveConf)) { context.log.error("Cannot load alloc write id event as acid is not enabled"); @@ -53,7 +53,7 @@ ReplTxnWork work = new ReplTxnWork(HiveUtils.getReplPolicy(context.dbName), dbName, tableName, ReplTxnWork.OperationType.REPL_ALLOC_WRITE_ID, msg.getTxnToWriteIdList(), context.eventOnlyReplicationSpec()); - Task allocWriteIdTask = TaskFactory.get(work, context.hiveConf); + Task allocWriteIdTask = TaskFactory.get(work, context.hiveConf); context.log.info("Added alloc write id task : {}", allocWriteIdTask.getId()); updatedMetadata.set(context.dmd.getEventTo().toString(), dbName, tableName, null); return Collections.singletonList(allocWriteIdTask); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AlterDatabaseHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AlterDatabaseHandler.java index f8a9bac1d7..76c7dd5194 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AlterDatabaseHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AlterDatabaseHandler.java @@ -44,7 +44,7 @@ */ public class AlterDatabaseHandler extends AbstractMessageHandler { @Override - public List> handle(Context 
context) + public List> handle(Context context) throws SemanticException { AlterDatabaseMessage msg = deserializer.getAlterDatabaseMessage(context.dmd.getPayload()); String actualDbName = context.isDbNameEmpty() ? msg.getDB() : context.dbName; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/CommitTxnHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/CommitTxnHandler.java index 96ca2213be..86f1cb9d53 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/CommitTxnHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/CommitTxnHandler.java @@ -40,7 +40,7 @@ */ public class CommitTxnHandler extends AbstractMessageHandler { @Override - public List> handle(Context context) + public List> handle(Context context) throws SemanticException { if (!AcidUtils.isAcidEnabled(context.hiveConf)) { context.log.error("Cannot load transaction events as acid is not enabled"); @@ -49,7 +49,7 @@ CommitTxnMessage msg = deserializer.getCommitTxnMessage(context.dmd.getPayload()); int numEntry = (msg.getTables() == null ? 0 : msg.getTables().size()); - List> tasks = new ArrayList<>(); + List> tasks = new ArrayList<>(); String dbName = context.dbName; String tableNamePrev = null; String tblName = null; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/CreateDatabaseHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/CreateDatabaseHandler.java index 0a232a9842..cc30131bdf 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/CreateDatabaseHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/CreateDatabaseHandler.java @@ -43,7 +43,7 @@ public class CreateDatabaseHandler extends AbstractMessageHandler { @Override - public List> handle(Context context) + public List> handle(Context context) throws SemanticException { MetaData metaData; try { @@ -80,4 +80,4 @@ .set(context.dmd.getEventTo().toString(), destinationDBName, null, null); return Collections.singletonList(createDBTask); } -} \ No newline at end of file +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/CreateFunctionHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/CreateFunctionHandler.java index ae663662b3..948d201ddc 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/CreateFunctionHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/CreateFunctionHandler.java @@ -56,7 +56,7 @@ public String getFunctionName() { } @Override - public List> handle(Context context) + public List> handle(Context context) throws SemanticException { try { FunctionDescBuilder builder = new FunctionDescBuilder(context); @@ -91,7 +91,7 @@ public String getFunctionName() { * add the 'many' to parent/root tasks. The execution environment will make sure that the child barrier task will not get executed unless all parents of the barrier task are complete, * which should only happen when the last task is finished, at which point the child of the barrier task is picked up. 
*/ - Task barrierTask = + Task barrierTask = TaskFactory.get(new DependencyCollectionWork(), context.hiveConf); builder.replCopyTasks.forEach(t -> t.addDependentTask(barrierTask)); barrierTask.addDependentTask(createTask); @@ -204,4 +204,4 @@ ResourceUri destinationResourceUri(ResourceUri resourceUri) return destinationUri; } } -} \ No newline at end of file +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DefaultHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DefaultHandler.java index 64e65180e0..9f6d8eac90 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DefaultHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DefaultHandler.java @@ -26,7 +26,7 @@ public class DefaultHandler extends AbstractMessageHandler { @Override - public List> handle(Context withinContext) + public List> handle(Context withinContext) throws SemanticException { return new ArrayList<>(); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DeletePartColStatHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DeletePartColStatHandler.java index f6153a64b8..b300c9dea4 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DeletePartColStatHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DeletePartColStatHandler.java @@ -32,7 +32,7 @@ */ public class DeletePartColStatHandler extends AbstractMessageHandler { @Override - public List> handle(Context context) + public List> handle(Context context) throws SemanticException { context.log.info("Replication of partition stat delete event is not supported yet"); if (!context.isDbNameEmpty()) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DeleteTableColStatHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DeleteTableColStatHandler.java index 404372a613..cb42cb6bcf 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DeleteTableColStatHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DeleteTableColStatHandler.java @@ -32,7 +32,7 @@ */ public class DeleteTableColStatHandler extends AbstractMessageHandler { @Override - public List> handle(Context context) + public List> handle(Context context) throws SemanticException { context.log.info("Replication of table stat delete event is not supported yet"); if (!context.isDbNameEmpty()) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropConstraintHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropConstraintHandler.java index 233ff9ef3a..0db9f190fa 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropConstraintHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropConstraintHandler.java @@ -30,7 +30,7 @@ public class DropConstraintHandler extends AbstractMessageHandler { @Override - public List> handle(Context context) + public List> handle(Context context) throws SemanticException { DropConstraintMessage msg = deserializer.getDropConstraintMessage(context.dmd.getPayload()); String actualDbName = context.isDbNameEmpty() ? 
msg.getDB() : context.dbName; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropDatabaseHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropDatabaseHandler.java index 4db6ab3fcb..c10174abd0 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropDatabaseHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropDatabaseHandler.java @@ -31,13 +31,13 @@ public class DropDatabaseHandler extends AbstractMessageHandler { @Override - public List> handle(Context context) + public List> handle(Context context) throws SemanticException { DropDatabaseMessage msg = deserializer.getDropDatabaseMessage(context.dmd.getPayload()); String actualDbName = context.isDbNameEmpty() ? msg.getDB() : context.dbName; DropDatabaseDesc desc = new DropDatabaseDesc(actualDbName, true, context.eventOnlyReplicationSpec()); - Task dropDBTask = + Task dropDBTask = TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), desc), context.hiveConf); context.log.info( "Added drop database task : {}:{}", dropDBTask.getId(), desc.getDatabaseName()); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropFunctionHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropFunctionHandler.java index 167679fefe..89cdaa579e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropFunctionHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropFunctionHandler.java @@ -31,7 +31,7 @@ public class DropFunctionHandler extends AbstractMessageHandler { @Override - public List> handle(Context context) + public List> handle(Context context) throws SemanticException { DropFunctionMessage msg = deserializer.getDropFunctionMessage(context.dmd.getPayload()); String actualDbName = context.isDbNameEmpty() ? 
msg.getDB() : context.dbName; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropPartitionHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropPartitionHandler.java index 0df68157d2..f65559706b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropPartitionHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropPartitionHandler.java @@ -34,7 +34,7 @@ public class DropPartitionHandler extends AbstractMessageHandler { @Override - public List> handle(Context context) + public List> handle(Context context) throws SemanticException { try { DropPartitionMessage msg = deserializer.getDropPartitionMessage(context.dmd.getPayload()); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropTableHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropTableHandler.java index 6e29d61cde..0f1f05bb24 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropTableHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropTableHandler.java @@ -32,7 +32,7 @@ public class DropTableHandler extends AbstractMessageHandler { @Override - public List> handle(Context context) + public List> handle(Context context) throws SemanticException { String actualDbName; String actualTblName; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/InsertHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/InsertHandler.java index 1eeacbf430..4b8274d5e6 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/InsertHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/InsertHandler.java @@ -36,7 +36,7 @@ private static final Logger LOG = LoggerFactory.getLogger(InsertHandler.class); @Override - public List> handle(Context withinContext) + public List> handle(Context withinContext) throws SemanticException { try { FileSystem fs = @@ -59,7 +59,7 @@ // Piggybacking in Import logic for now TableHandler tableHandler = new TableHandler(); - List> tasks = tableHandler.handle(currentContext); + List> tasks = tableHandler.handle(currentContext); readEntitySet.addAll(tableHandler.readEntities()); writeEntitySet.addAll(tableHandler.writeEntities()); getUpdatedMetadata().copyUpdatedMetadata(tableHandler.getUpdatedMetadata()); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/MessageHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/MessageHandler.java index ad3be67ee6..2851880c38 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/MessageHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/MessageHandler.java @@ -37,7 +37,7 @@ public interface MessageHandler { - List> handle(Context withinContext) throws SemanticException; + List> handle(Context withinContext) throws SemanticException; Set readEntities(); @@ -48,7 +48,7 @@ class Context { public String location; public final String dbName; - public final Task precursor; + public final Task precursor; public DumpMetaData dmd; final HiveConf hiveConf; final Hive db; @@ -56,7 +56,7 @@ final Logger log; public Context(String dbName, String location, - Task precursor, DumpMetaData dmd, HiveConf hiveConf, + Task precursor, DumpMetaData dmd, HiveConf hiveConf, Hive db, org.apache.hadoop.hive.ql.Context nestedContext, Logger log) { this.dbName = dbName; this.location = location; diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/OpenTxnHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/OpenTxnHandler.java index 6123371679..cd7274de60 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/OpenTxnHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/OpenTxnHandler.java @@ -34,7 +34,7 @@ */ public class OpenTxnHandler extends AbstractMessageHandler { @Override - public List> handle(Context context) + public List> handle(Context context) throws SemanticException { if (!AcidUtils.isAcidEnabled(context.hiveConf)) { context.log.error("Cannot load transaction events as acid is not enabled"); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenamePartitionHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenamePartitionHandler.java index 32162655e1..6dd69767fe 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenamePartitionHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenamePartitionHandler.java @@ -36,7 +36,7 @@ public class RenamePartitionHandler extends AbstractMessageHandler { @Override - public List> handle(Context context) + public List> handle(Context context) throws SemanticException { AlterPartitionMessage msg = deserializer.getAlterPartitionMessage(context.dmd.getPayload()); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenameTableHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenameTableHandler.java index 50958c8150..c810b8c517 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenameTableHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenameTableHandler.java @@ -33,7 +33,7 @@ public class RenameTableHandler extends AbstractMessageHandler { @Override - public List> handle(Context context) + public List> handle(Context context) throws SemanticException { AlterTableMessage msg = deserializer.getAlterTableMessage(context.dmd.getPayload()); try { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TableHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TableHandler.java index 664015f27b..266d034e25 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TableHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TableHandler.java @@ -45,9 +45,9 @@ private static final Logger LOG = LoggerFactory.getLogger(TableHandler.class); @Override - public List> handle(Context context) throws SemanticException { + public List> handle(Context context) throws SemanticException { try { - List> importTasks = new ArrayList<>(); + List> importTasks = new ArrayList<>(); boolean isExternal = false, isLocationSet = false; String parsedLocation = null; @@ -82,9 +82,9 @@ (context.precursor != null), parsedLocation, null, context.dbName, null, context.location, x, updatedMetadata, context.getTxnMgr(), tuple.writeId); - Task openTxnTask = x.getOpenTxnTask(); + Task openTxnTask = x.getOpenTxnTask(); if (openTxnTask != null && !importTasks.isEmpty()) { - for (Task t : importTasks) { + for (Task t : importTasks) { openTxnTask.addDependentTask(t); } importTasks.add(openTxnTask); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncatePartitionHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncatePartitionHandler.java index 
91687a038b..1b1efbcd8f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncatePartitionHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncatePartitionHandler.java @@ -34,7 +34,7 @@ public class TruncatePartitionHandler extends AbstractMessageHandler { @Override - public List> handle(Context context) throws SemanticException { + public List> handle(Context context) throws SemanticException { AlterPartitionMessage msg = deserializer.getAlterPartitionMessage(context.dmd.getPayload()); String actualDbName = context.isDbNameEmpty() ? msg.getDB() : context.dbName; String actualTblName = msg.getTable(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncateTableHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncateTableHandler.java index bcc15b45ed..c18529fb2d 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncateTableHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncateTableHandler.java @@ -30,7 +30,7 @@ public class TruncateTableHandler extends AbstractMessageHandler { @Override - public List> handle(Context context) throws SemanticException { + public List> handle(Context context) throws SemanticException { AlterTableMessage msg = deserializer.getAlterTableMessage(context.dmd.getPayload()); String actualDbName = context.isDbNameEmpty() ? msg.getDB() : context.dbName; String actualTblName = msg.getTable(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/UpdatePartColStatHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/UpdatePartColStatHandler.java index bea431c907..d3d8d12759 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/UpdatePartColStatHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/UpdatePartColStatHandler.java @@ -33,7 +33,7 @@ */ public class UpdatePartColStatHandler extends AbstractMessageHandler { @Override - public List> handle(Context context) + public List> handle(Context context) throws SemanticException { UpdatePartitionColumnStatMessage upcsm = deserializer.getUpdatePartitionColumnStatMessage(context.dmd.getPayload()); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/UpdateTableColStatHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/UpdateTableColStatHandler.java index 6160d438e2..139c50f822 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/UpdateTableColStatHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/UpdateTableColStatHandler.java @@ -33,7 +33,7 @@ */ public class UpdateTableColStatHandler extends AbstractMessageHandler { @Override - public List> handle(Context context) + public List> handle(Context context) throws SemanticException { UpdateTableColumnStatMessage utcsm = deserializer.getUpdateTableColumnStatMessage(context.dmd.getPayload()); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkProcContext.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkProcContext.java index e60f7153f0..767e4cbc74 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkProcContext.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkProcContext.java @@ -67,7 +67,7 @@ public final List> moveTask; // rootTasks is the entry point for all generated tasks - public final List> rootTasks; + public final List> rootTasks; public 
final Set inputs; public final Set outputs; @@ -151,7 +151,7 @@ public GenSparkProcContext(HiveConf conf, ParseContext parseContext, List> moveTask, - List> rootTasks, + List> rootTasks, Set inputs, Set outputs, Map topOps) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkUtils.java index 757cb7af4d..c102a69f8f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkUtils.java @@ -404,7 +404,7 @@ public void processFileSink(GenSparkProcContext context, FileSinkOperator fileSi * This is forked from {@link GenMapRedUtils}. The difference is that it doesn't check * 'isLinkedFileSink' and does not set parent dir for the linked file sinks. */ - public static Path createMoveTask(Task currTask, boolean chDir, + public static Path createMoveTask(Task currTask, boolean chDir, FileSinkOperator fsOp, ParseContext parseCtx, List> mvTasks, HiveConf hconf, DependencyCollectionTask dependencyTask) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/SparkCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/SparkCompiler.java index 6bc592546f..24429b4a1f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/SparkCompiler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/SparkCompiler.java @@ -361,7 +361,7 @@ private void runDynPartitionSortOptimizations(OptimizeSparkProcContext procCtx) * TODO: need to turn on rules that's commented out and add more if necessary. */ @Override - protected void generateTaskTree(List> rootTasks, ParseContext pCtx, + protected void generateTaskTree(List> rootTasks, ParseContext pCtx, List> mvTask, Set inputs, Set outputs) throws SemanticException { PERF_LOGGER.PerfLogBegin(CLASS_NAME, PerfLogger.SPARK_GENERATE_TASK_TREE); @@ -523,7 +523,7 @@ public Object process(Node currNode, Stack stack, } @Override - protected void setInputFormat(Task task) { + protected void setInputFormat(Task task) { if (task instanceof SparkTask) { SparkWork work = ((SparkTask)task).getWork(); List all = work.getAllWork(); @@ -539,15 +539,15 @@ protected void setInputFormat(Task task) { } } } else if (task instanceof ConditionalTask) { - List> listTasks + List> listTasks = ((ConditionalTask) task).getListTasks(); - for (Task tsk : listTasks) { + for (Task tsk : listTasks) { setInputFormat(tsk); } } if (task.getChildTasks() != null) { - for (Task childTask : task.getChildTasks()) { + for (Task childTask : task.getChildTasks()) { setInputFormat(childTask); } } @@ -567,14 +567,14 @@ private void setInputFormat(MapWork work, Operator op) { } @Override - protected void decideExecMode(List> rootTasks, Context ctx, + protected void decideExecMode(List> rootTasks, Context ctx, GlobalLimitCtx globalLimitCtx) throws SemanticException { // currently all Spark work is on the cluster return; } @Override - protected void optimizeTaskPlan(List> rootTasks, ParseContext pCtx, + protected void optimizeTaskPlan(List> rootTasks, ParseContext pCtx, Context ctx) throws SemanticException { PERF_LOGGER.PerfLogBegin(CLASS_NAME, PerfLogger.SPARK_OPTIMIZE_TASK_TREE); PhysicalContext physicalCtx = new PhysicalContext(conf, pCtx, pCtx.getContext(), rootTasks, diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolver.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolver.java index 1983ab75fc..e07794f2b8 100644 --- 
a/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolver.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolver.java @@ -38,6 +38,6 @@ * opaque context * @return position of the task */ - List> getTasks(HiveConf conf, Object ctx); + List> getTasks(HiveConf conf, Object ctx); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverCommonJoin.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverCommonJoin.java index cc5baeef9c..c1b9b27ff4 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverCommonJoin.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverCommonJoin.java @@ -50,10 +50,10 @@ public static class ConditionalResolverCommonJoinCtx implements Serializable { private static final long serialVersionUID = 1L; - private HashMap, Set> taskToAliases; + private HashMap, Set> taskToAliases; Map> pathToAliases; Map aliasToKnownSize; - private Task commonJoinTask; + private Task commonJoinTask; private Path localTmpDir; private Path hdfsTmpDir; @@ -61,19 +61,19 @@ public ConditionalResolverCommonJoinCtx() { } - public HashMap, Set> getTaskToAliases() { + public HashMap, Set> getTaskToAliases() { return taskToAliases; } - public void setTaskToAliases(HashMap, Set> taskToAliases) { + public void setTaskToAliases(HashMap, Set> taskToAliases) { this.taskToAliases = taskToAliases; } - public Task getCommonJoinTask() { + public Task getCommonJoinTask() { return commonJoinTask; } - public void setCommonJoinTask(Task commonJoinTask) { + public void setCommonJoinTask(Task commonJoinTask) { this.commonJoinTask = commonJoinTask; } @@ -129,12 +129,12 @@ public ConditionalResolverCommonJoin() { } @Override - public List> getTasks(HiveConf conf, Object objCtx) { + public List> getTasks(HiveConf conf, Object objCtx) { ConditionalResolverCommonJoinCtx ctx = ((ConditionalResolverCommonJoinCtx) objCtx).clone(); - List> resTsks = new ArrayList>(); + List> resTsks = new ArrayList>(); // get aliasToPath and pass it to the heuristic - Task task = resolveDriverAlias(ctx, conf); + Task task = resolveDriverAlias(ctx, conf); if (task == null) { // run common join task @@ -151,7 +151,7 @@ public ConditionalResolverCommonJoin() { return resTsks; } - private Task resolveDriverAlias(ConditionalResolverCommonJoinCtx ctx, HiveConf conf) { + private Task resolveDriverAlias(ConditionalResolverCommonJoinCtx ctx, HiveConf conf) { try { resolveUnknownSizes(ctx, conf); return resolveMapJoinTask(ctx, conf); @@ -161,20 +161,20 @@ public ConditionalResolverCommonJoin() { return null; } - protected Task resolveMapJoinTask( + protected Task resolveMapJoinTask( ConditionalResolverCommonJoinCtx ctx, HiveConf conf) throws Exception { Set participants = getParticipants(ctx); Map aliasToKnownSize = ctx.getAliasToKnownSize(); - Map, Set> taskToAliases = ctx.getTaskToAliases(); + Map, Set> taskToAliases = ctx.getTaskToAliases(); long threshold = HiveConf.getLongVar(conf, HiveConf.ConfVars.HIVESMALLTABLESFILESIZE); Long bigTableSize = null; Long smallTablesSize = null; - Map.Entry, Set> nextTask = null; - for (Map.Entry, Set> entry : taskToAliases.entrySet()) { + Map.Entry, Set> nextTask = null; + for (Map.Entry, Set> entry : taskToAliases.entrySet()) { Set aliases = entry.getValue(); long sumOfOthers = Utilities.sumOfExcept(aliasToKnownSize, participants, aliases); if (sumOfOthers < 0 || sumOfOthers > threshold) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java 
b/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java index a828809349..da1376c0e9 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java @@ -56,7 +56,7 @@ public ConditionalResolverMergeFiles() { */ public static class ConditionalResolverMergeFilesCtx implements Serializable { private static final long serialVersionUID = 1L; - List> listTasks; + List> listTasks; private String dir; private DynamicPartitionCtx dpCtx; // merge task could be after dynamic partition insert private ListBucketingCtx lbCtx; @@ -68,7 +68,7 @@ public ConditionalResolverMergeFilesCtx() { * @param dir */ public ConditionalResolverMergeFilesCtx( - List> listTasks, String dir) { + List> listTasks, String dir) { this.listTasks = listTasks; this.dir = dir; } @@ -83,7 +83,7 @@ public String getDir() { /** * @return the listTasks */ - public List> getListTasks() { + public List> getListTasks() { return listTasks; } @@ -91,7 +91,7 @@ public String getDir() { * @param listTasks * the listTasks to set */ - public void setListTasks(List> listTasks) { + public void setListTasks(List> listTasks) { this.listTasks = listTasks; } @@ -118,11 +118,11 @@ public void setLbCtx(ListBucketingCtx lbCtx) { } } - public List> getTasks(HiveConf conf, Object objCtx) { + public List> getTasks(HiveConf conf, Object objCtx) { ConditionalResolverMergeFilesCtx ctx = (ConditionalResolverMergeFilesCtx) objCtx; String dirName = ctx.getDir(); - List> resTsks = new ArrayList>(); + List> resTsks = new ArrayList>(); // check if a map-reduce job is needed to merge the files // If the current size is smaller than the target, merge long trgtSize = conf.getLongVar(HiveConf.ConfVars.HIVEMERGEMAPFILESSIZE); @@ -130,9 +130,9 @@ public void setLbCtx(ListBucketingCtx lbCtx) { .getLongVar(HiveConf.ConfVars.HIVEMERGEMAPFILESAVGSIZE); trgtSize = Math.max(trgtSize, avgConditionSize); - Task mvTask = ctx.getListTasks().get(0); - Task mrTask = ctx.getListTasks().get(1); - Task mrAndMvTask = ctx.getListTasks().get(2); + Task mvTask = ctx.getListTasks().get(0); + Task mrTask = ctx.getListTasks().get(1); + Task mrAndMvTask = ctx.getListTasks().get(2); try { Path dirPath = new Path(dirName); @@ -228,9 +228,9 @@ public void setLbCtx(ListBucketingCtx lbCtx) { * @param dpLbLevel * @throws IOException */ - private void generateActualTasks(HiveConf conf, List> resTsks, - long trgtSize, long avgConditionSize, Task mvTask, - Task mrTask, Task mrAndMvTask, Path dirPath, + private void generateActualTasks(HiveConf conf, List> resTsks, + long trgtSize, long avgConditionSize, Task mvTask, + Task mrTask, Task mrAndMvTask, Path dirPath, FileSystem inpFs, ConditionalResolverMergeFilesCtx ctx, MapWork work, int dpLbLevel) throws IOException { DynamicPartitionCtx dpCtx = ctx.getDPCtx(); @@ -301,7 +301,7 @@ private void generateActualTasks(HiveConf conf, List mergeAndMoveMoveTask = mrAndMvTask.getChildTasks().get(0); + Task mergeAndMoveMoveTask = mrAndMvTask.getChildTasks().get(0); MoveWork mvWork = (MoveWork) mergeAndMoveMoveTask.getWork(); LoadFileDesc lfd = mvWork.getLoadFileWork(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverSkewJoin.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverSkewJoin.java index 5dd7a258dc..0ca5caf1b7 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverSkewJoin.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverSkewJoin.java 
@@ -54,8 +54,8 @@ // tables into corresponding different dirs (one dir per table). // this map stores mapping from "big key dir" to its corresponding mapjoin // task. - private HashMap> dirToTaskMap; - private List> noSkewTask; + private HashMap> dirToTaskMap; + private List> noSkewTask; /** * For serialization use only. @@ -64,27 +64,27 @@ public ConditionalResolverSkewJoinCtx() { } public ConditionalResolverSkewJoinCtx( - HashMap> dirToTaskMap, - List> noSkewTask) { + HashMap> dirToTaskMap, + List> noSkewTask) { super(); this.dirToTaskMap = dirToTaskMap; this.noSkewTask = noSkewTask; } - public HashMap> getDirToTaskMap() { + public HashMap> getDirToTaskMap() { return dirToTaskMap; } public void setDirToTaskMap( - HashMap> dirToTaskMap) { + HashMap> dirToTaskMap) { this.dirToTaskMap = dirToTaskMap; } - public List> getNoSkewTask() { + public List> getNoSkewTask() { return noSkewTask; } - public void setNoSkewTask(List> noSkewTask) { + public void setNoSkewTask(List> noSkewTask) { this.noSkewTask = noSkewTask; } } @@ -93,26 +93,26 @@ public ConditionalResolverSkewJoin() { } @Override - public List> getTasks(HiveConf conf, + public List> getTasks(HiveConf conf, Object objCtx) { ConditionalResolverSkewJoinCtx ctx = (ConditionalResolverSkewJoinCtx) objCtx; - List> resTsks = new ArrayList>(); + List> resTsks = new ArrayList>(); - Map> dirToTaskMap = ctx + Map> dirToTaskMap = ctx .getDirToTaskMap(); - Iterator>> bigKeysPathsIter = dirToTaskMap + Iterator>> bigKeysPathsIter = dirToTaskMap .entrySet().iterator(); try { while (bigKeysPathsIter.hasNext()) { - Entry> entry = bigKeysPathsIter.next(); + Entry> entry = bigKeysPathsIter.next(); Path dirPath = entry.getKey(); FileSystem inpFs = dirPath.getFileSystem(conf); FileStatus[] fstatus = Utilities.listStatusIfExists(dirPath, inpFs); if (fstatus != null && fstatus.length > 0) { - Task task = entry.getValue(); - List> parentOps = task.getParentTasks(); + Task task = entry.getValue(); + List> parentOps = task.getParentTasks(); if(parentOps!=null){ - for(Task parentOp: parentOps){ + for(Task parentOp: parentOps){ //right now only one parent resTsks.add(parentOp); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java index 6e2754ae1f..c0957be200 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java @@ -326,7 +326,7 @@ public String getDatabaseName() { return dbName; } - public Task getCreateTableTask(HashSet inputs, HashSet outputs, + public Task getCreateTableTask(HashSet inputs, HashSet outputs, HiveConf conf) { switch (getDescType()) { case TABLE: diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestUtilities.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestUtilities.java index fbf948c4a2..18b5f270d8 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestUtilities.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestUtilities.java @@ -574,7 +574,7 @@ public void testGetInputPathsPoolAndFailure() throws IOException, ExecutionExcep verify(pool).shutdownNow(); } - private Task getDependencyCollectionTask(){ + private Task getDependencyCollectionTask(){ return TaskFactory.get(new DependencyCollectionWork()); } @@ -587,7 +587,7 @@ public void testGetInputPathsPoolAndFailure() throws IOException, ExecutionExcep * \ / * ---->DTc---- */ - private List> getTestDiamondTaskGraph(Task providedTask){ + private List> getTestDiamondTaskGraph(Task providedTask){ 
// Note: never instantiate a task without TaskFactory.get() if you're not // okay with .equals() breaking. Doing it via TaskFactory.get makes sure // that an id is generated, and two tasks of the same type don't show @@ -595,12 +595,12 @@ public void testGetInputPathsPoolAndFailure() throws IOException, ExecutionExcep // array. Without this, DTa, DTb, and DTc would show up as one item in // the list of children. Thus, we're instantiating via a helper method // that instantiates via TaskFactory.get() - Task root = getDependencyCollectionTask(); - Task DTa = getDependencyCollectionTask(); - Task DTb = getDependencyCollectionTask(); - Task DTc = getDependencyCollectionTask(); - Task DTd = getDependencyCollectionTask(); - Task DTe = getDependencyCollectionTask(); + Task root = getDependencyCollectionTask(); + Task DTa = getDependencyCollectionTask(); + Task DTb = getDependencyCollectionTask(); + Task DTc = getDependencyCollectionTask(); + Task DTd = getDependencyCollectionTask(); + Task DTe = getDependencyCollectionTask(); root.addDependentTask(DTa); root.addDependentTask(DTb); @@ -614,7 +614,7 @@ public void testGetInputPathsPoolAndFailure() throws IOException, ExecutionExcep providedTask.addDependentTask(DTe); - List> retVals = new ArrayList>(); + List> retVals = new ArrayList>(); retVals.add(root); return retVals; } @@ -626,21 +626,21 @@ public void testGetInputPathsPoolAndFailure() throws IOException, ExecutionExcep */ public class CountingWrappingTask extends DependencyCollectionTask { int count; - Task wrappedDep = null; + Task wrappedDep = null; - public CountingWrappingTask(Task dep) { + public CountingWrappingTask(Task dep) { count = 0; wrappedDep = dep; super.addDependentTask(wrappedDep); } @Override - public boolean addDependentTask(Task dependent) { + public boolean addDependentTask(Task dependent) { return wrappedDep.addDependentTask(dependent); } @Override - public List> getDependentTasks() { + public List> getDependentTasks() { count++; System.err.println("YAH:getDepTasks got called!"); (new Exception()).printStackTrace(System.err); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/bootstrap/AddDependencyToLeavesTest.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/bootstrap/AddDependencyToLeavesTest.java index 166cf874d7..bf11362e92 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/bootstrap/AddDependencyToLeavesTest.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/bootstrap/AddDependencyToLeavesTest.java @@ -53,21 +53,21 @@ public void shouldNotSkipIntermediateDependencyCollectionTasks() { Task collectionWorkTaskThree = TaskFactory.get(new DependencyCollectionWork()); - @SuppressWarnings("unchecked") Task rootTask = mock(Task.class); + @SuppressWarnings("unchecked") Task rootTask = mock(Task.class); when(rootTask.getDependentTasks()) .thenReturn( Arrays.asList(collectionWorkTaskOne, collectionWorkTaskTwo, collectionWorkTaskThree)); - @SuppressWarnings("unchecked") List> tasksPostCurrentGraph = + @SuppressWarnings("unchecked") List> tasksPostCurrentGraph = Arrays.asList(mock(Task.class), mock(Task.class)); DAGTraversal.traverse(Collections.singletonList(rootTask), new AddDependencyToLeaves(tasksPostCurrentGraph)); - List> dependentTasksForOne = + List> dependentTasksForOne = collectionWorkTaskOne.getDependentTasks(); - List> dependentTasksForTwo = + List> dependentTasksForTwo = collectionWorkTaskTwo.getDependentTasks(); - List> dependentTasksForThree = + List> dependentTasksForThree = collectionWorkTaskThree.getDependentTasks(); 
assertEquals(dependentTasksForOne.size(), 2); @@ -83,4 +83,4 @@ public void shouldNotSkipIntermediateDependencyCollectionTasks() { } -} \ No newline at end of file +} diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/TestTaskTracker.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/TestTaskTracker.java index 41ab447de8..bb9999db61 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/TestTaskTracker.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/TestTaskTracker.java @@ -32,7 +32,7 @@ @RunWith(PowerMockRunner.class) public class TestTaskTracker { @Mock - private Task task; + private Task task; @Test public void taskTrackerCompositionInitializesTheMaxTasksCorrectly() { @@ -44,4 +44,4 @@ public void taskTrackerCompositionInitializesTheMaxTasksCorrectly() { TaskTracker taskTracker2 = new TaskTracker(taskTracker); assertFalse(taskTracker2.canAddMoreTasks()); } -} \ No newline at end of file +} diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/util/DAGTraversalTest.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/util/DAGTraversalTest.java index 6dcecde1dd..f482e3be6e 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/util/DAGTraversalTest.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/util/DAGTraversalTest.java @@ -37,24 +37,24 @@ int count = 0; @Override - public void process(Task task) { + public void process(Task task) { if (task.getDependentTasks() == null || task.getDependentTasks().isEmpty()) { count++; } } @Override - public boolean skipProcessing(Task task) { + public boolean skipProcessing(Task task) { return false; } } @Test public void shouldCountNumberOfLeafNodesCorrectly() { - Task taskWith5NodeTree = linearTree(5); - Task taskWith1NodeTree = linearTree(1); - Task taskWith3NodeTree = linearTree(3); - @SuppressWarnings("unchecked") Task rootTask = mock(Task.class); + Task taskWith5NodeTree = linearTree(5); + Task taskWith1NodeTree = linearTree(1); + Task taskWith3NodeTree = linearTree(3); + @SuppressWarnings("unchecked") Task rootTask = mock(Task.class); when(rootTask.getDependentTasks()) .thenReturn(Arrays.asList(taskWith1NodeTree, taskWith3NodeTree, taskWith5NodeTree)); @@ -63,10 +63,10 @@ public void shouldCountNumberOfLeafNodesCorrectly() { assertEquals(3, function.count); } - private Task linearTree(int numOfNodes) { - Task current = null, head = null; + private Task linearTree(int numOfNodes) { + Task current = null, head = null; for (int i = 0; i < numOfNodes; i++) { - @SuppressWarnings("unchecked") Task task = mock(Task.class); + @SuppressWarnings("unchecked") Task task = mock(Task.class); if (current != null) { when(current.getDependentTasks()).thenReturn(Collections.singletonList(task)); } @@ -78,4 +78,4 @@ public void shouldCountNumberOfLeafNodesCorrectly() { return head; } -} \ No newline at end of file +} diff --git a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/TestGenMapRedUtilsCreateConditionalTask.java b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/TestGenMapRedUtilsCreateConditionalTask.java index a40ad247e7..a96d93e042 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/TestGenMapRedUtilsCreateConditionalTask.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/TestGenMapRedUtilsCreateConditionalTask.java @@ -59,7 +59,7 @@ Licensed to the Apache Software Foundation (ASF) under one public class TestGenMapRedUtilsCreateConditionalTask { private static HiveConf hiveConf; - private Task dummyMRTask; + private Task dummyMRTask; 
@BeforeClass public static void initializeSessionState() { @@ -187,9 +187,9 @@ public void testConditionalMoveTaskIsOptimized() throws SemanticException { GenMapRedUtils.createMRWorkForMergingFiles(fileSinkOperator, finalDirName, null, moveTaskList, hiveConf, dummyMRTask, new LineageState()); ConditionalTask conditionalTask = (ConditionalTask)dummyMRTask.getChildTasks().get(0); - Task moveOnlyTask = conditionalTask.getListTasks().get(0); - Task mergeOnlyTask = conditionalTask.getListTasks().get(1); - Task mergeAndMoveTask = conditionalTask.getListTasks().get(2); + Task moveOnlyTask = conditionalTask.getListTasks().get(0); + Task mergeOnlyTask = conditionalTask.getListTasks().get(1); + Task mergeAndMoveTask = conditionalTask.getListTasks().get(2); /* * OPTIMIZATION @@ -228,9 +228,9 @@ public void testConditionalMoveTaskIsNotOptimized() throws SemanticException { GenMapRedUtils.createMRWorkForMergingFiles(fileSinkOperator, finalDirName, null, moveTaskList, hiveConf, dummyMRTask, new LineageState()); ConditionalTask conditionalTask = (ConditionalTask)dummyMRTask.getChildTasks().get(0); - Task moveOnlyTask = conditionalTask.getListTasks().get(0); - Task mergeOnlyTask = conditionalTask.getListTasks().get(1); - Task mergeAndMoveTask = conditionalTask.getListTasks().get(2); + Task moveOnlyTask = conditionalTask.getListTasks().get(0); + Task mergeOnlyTask = conditionalTask.getListTasks().get(1); + Task mergeAndMoveTask = conditionalTask.getListTasks().get(2); // Verify moveOnlyTask is NOT optimized assertEquals(1, moveOnlyTask.getChildTasks().size()); @@ -263,9 +263,9 @@ public void testConditionalMoveOnHdfsIsNotOptimized() throws SemanticException { GenMapRedUtils.createMRWorkForMergingFiles(fileSinkOperator, finalDirName, null, moveTaskList, hiveConf, dummyMRTask, new LineageState()); ConditionalTask conditionalTask = (ConditionalTask)dummyMRTask.getChildTasks().get(0); - Task moveOnlyTask = conditionalTask.getListTasks().get(0); - Task mergeOnlyTask = conditionalTask.getListTasks().get(1); - Task mergeAndMoveTask = conditionalTask.getListTasks().get(2); + Task moveOnlyTask = conditionalTask.getListTasks().get(0); + Task mergeOnlyTask = conditionalTask.getListTasks().get(1); + Task mergeAndMoveTask = conditionalTask.getListTasks().get(2); // Verify moveOnlyTask is NOT optimized assertEquals(1, moveOnlyTask.getChildTasks().size()); @@ -309,7 +309,7 @@ private FileSinkOperator createFileSinkOperator(Path finalDirName) { return moveTask; } - private void verifyMoveTask(Task task, Path source, Path target) { + private void verifyMoveTask(Task task, Path source, Path target) { MoveTask moveTask = (MoveTask)task; assertEquals(source, moveTask.getWork().getLoadFileWork().getSourcePath()); assertEquals(target, moveTask.getWork().getLoadFileWork().getTargetDir()); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/parse/TestGenTezWork.java b/ql/src/test/org/apache/hadoop/hive/ql/parse/TestGenTezWork.java index b5d3b8f55a..888e4efcbc 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/parse/TestGenTezWork.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/parse/TestGenTezWork.java @@ -80,7 +80,7 @@ public void setUp() throws Exception { conf, pctx, Collections.EMPTY_LIST, - new ArrayList>(), + new ArrayList>(), Collections.EMPTY_SET, Collections.EMPTY_SET); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/plan/TestConditionalResolverCommonJoin.java b/ql/src/test/org/apache/hadoop/hive/ql/plan/TestConditionalResolverCommonJoin.java index 3fc82adcf8..780fb2a58e 100644 --- 
a/ql/src/test/org/apache/hadoop/hive/ql/plan/TestConditionalResolverCommonJoin.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/plan/TestConditionalResolverCommonJoin.java @@ -58,8 +58,8 @@ public void testResolvingDriverAlias() throws Exception { // joins alias1, alias2, alias3 (alias1 was not eligible for big pos) // Must be deterministic order map for consistent q-test output across Java versions - HashMap, Set> taskToAliases = - new LinkedHashMap, Set>(); + HashMap, Set> taskToAliases = + new LinkedHashMap, Set>(); taskToAliases.put(task1, new HashSet(Arrays.asList("alias2"))); taskToAliases.put(task2, new HashSet(Arrays.asList("alias3"))); @@ -88,4 +88,4 @@ public void testResolvingDriverAlias() throws Exception { resolved = resolver.resolveMapJoinTask(ctx, conf); Assert.assertNull(resolved); } -} \ No newline at end of file +} diff --git a/ql/src/test/org/apache/hadoop/hive/ql/plan/TestReadEntityDirect.java b/ql/src/test/org/apache/hadoop/hive/ql/plan/TestReadEntityDirect.java index 51f610d9eb..bfa0efef77 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/plan/TestReadEntityDirect.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/plan/TestReadEntityDirect.java @@ -196,7 +196,7 @@ private static Driver createDriver() { @Override public void postAnalyze(HiveSemanticAnalyzerHookContext context, - List> rootTasks) throws SemanticException { + List> rootTasks) throws SemanticException { readEntities = context.getInputs(); } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/plan/TestViewEntity.java b/ql/src/test/org/apache/hadoop/hive/ql/plan/TestViewEntity.java index 97ef3c4de3..78366259b2 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/plan/TestViewEntity.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/plan/TestViewEntity.java @@ -45,7 +45,7 @@ @Override public void postAnalyze(HiveSemanticAnalyzerHookContext context, - List> rootTasks) throws SemanticException { + List> rootTasks) throws SemanticException { readEntities = context.getInputs().toArray(new ReadEntity[0]); } diff --git a/service/src/test/org/apache/hive/service/cli/CLIServiceTest.java b/service/src/test/org/apache/hive/service/cli/CLIServiceTest.java index fa61d3c653..c9a57c53ed 100644 --- a/service/src/test/org/apache/hive/service/cli/CLIServiceTest.java +++ b/service/src/test/org/apache/hive/service/cli/CLIServiceTest.java @@ -366,7 +366,7 @@ public ASTNode preAnalyze(HiveSemanticAnalyzerHookContext context, @Override public void postAnalyze(HiveSemanticAnalyzerHookContext context, - List> rootTasks) throws SemanticException { + List> rootTasks) throws SemanticException { } }
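The CreateFunctionHandler hunk earlier in this patch describes a barrier built from a DependencyCollectionWork task: the many copy tasks all feed into the barrier, and the create-function task hangs off the barrier so that it only runs once every copy has finished, while only the copy tasks are returned as roots. A minimal sketch of that wiring is below, using only the task APIs visible in this patch (TaskFactory.get, addDependentTask, DependencyCollectionWork); the helper class, method, and variable names are illustrative, and the bounded-wildcard Task type is an assumption, since the exact type arguments differ between Hive releases.

    // Illustrative sketch only: wires N copy tasks to a single create task
    // through a DependencyCollectionWork "barrier", as described in the
    // CreateFunctionHandler javadoc. Class, method, and variable names are
    // hypothetical; only the Hive types and calls shown in the patch are real.
    import java.io.Serializable;
    import java.util.List;

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.ql.exec.Task;
    import org.apache.hadoop.hive.ql.exec.TaskFactory;
    import org.apache.hadoop.hive.ql.plan.DependencyCollectionWork;

    final class BarrierWiringSketch {

      /**
       * Returns the root tasks a message handler would hand back: only the
       * copy tasks are roots; the barrier and the create task are reachable
       * through dependency edges and run after every copy has finished.
       */
      static List<Task<? extends Serializable>> wire(
          List<Task<? extends Serializable>> copyTasks,
          Task<? extends Serializable> createTask,
          HiveConf conf) {
        // The barrier performs no work of its own; it only waits until all
        // of its parent tasks are complete.
        Task<? extends Serializable> barrierTask =
            TaskFactory.get(new DependencyCollectionWork(), conf);
        // Each copy task becomes a parent of the barrier.
        copyTasks.forEach(t -> t.addDependentTask(barrierTask));
        // The create task is released only once the barrier itself completes,
        // i.e. after the last copy task has finished.
        barrierTask.addDependentTask(createTask);
        return copyTasks;
      }
    }

The sketch mirrors the structure of the patched handler code: the execution environment guarantees the barrier's child is not picked up until all parents of the barrier are done, so returning just the copy tasks as roots is sufficient.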