Index: ql/src/java/org/apache/hadoop/hive/ql/Driver.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/Driver.java	(revision 1172015)
+++ ql/src/java/org/apache/hadoop/hive/ql/Driver.java	(working copy)
@@ -30,6 +30,7 @@
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
+import java.util.Map.Entry;
 import java.util.Queue;
 import java.util.Set;
 import java.util.concurrent.ConcurrentLinkedQueue;
@@ -47,6 +48,7 @@
 import org.apache.hadoop.hive.ql.exec.ConditionalTask;
 import org.apache.hadoop.hive.ql.exec.ExecDriver;
 import org.apache.hadoop.hive.ql.exec.FetchTask;
+import org.apache.hadoop.hive.ql.exec.HadoopJobExecHelper;
 import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.TableScanOperator;
 import org.apache.hadoop.hive.ql.exec.Task;
@@ -97,6 +99,7 @@
 import org.apache.hadoop.hive.ql.parse.VariableSubstitution;
 import org.apache.hadoop.hive.ql.plan.HiveOperation;
 import org.apache.hadoop.hive.ql.plan.TableDesc;
+import org.apache.hadoop.hive.ql.plan.api.StageType;
 import org.apache.hadoop.hive.ql.processors.CommandProcessor;
 import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
 import org.apache.hadoop.hive.ql.session.SessionState;
@@ -1147,7 +1150,7 @@
       if (exitVal != 0) {
         if (tsk.ifRetryCmdWhenFail()) {
           if (running.size() != 0) {
-            taskCleanup();
+            taskCleanup(running);
           }
           // in case we decided to run everything in local mode, restore the
           // the jobtracker setting to its initial value
@@ -1186,7 +1189,7 @@
           SQLState = "08S01";
           console.printError(errorMessage);
           if (running.size() != 0) {
-            taskCleanup();
+            taskCleanup(running);
           }
           // in case we decided to run everything in local mode, restore the
           // the jobtracker setting to its initial value
@@ -1352,12 +1355,21 @@
   /**
    * Cleans up remaining tasks in case of failure
    */
-
-  public void taskCleanup() {
-    // The currently existing Shutdown hooks will be automatically called,
-    // killing the map-reduce processes.
-    // The non MR processes will be killed as well.
-    System.exit(9);
+  public void taskCleanup(Map<TaskResult, TaskRunner> running) {
+    Set<Entry<TaskResult, TaskRunner>> entrySet = running.entrySet();
+    for (Entry<TaskResult, TaskRunner> entry : entrySet) {
+      TaskRunner value = entry.getValue();
+      Task<? extends Serializable> task = value.getTask();
+      // for killing spawned map-reduce processes.
+      if (task.getType().equals(StageType.MAPRED)) {
+        String jobKillURI = task.getJobKillURI();
+        if (jobKillURI != null) {
+          HadoopJobExecHelper.killRunningJob(jobKillURI);
+        }
+      }
+      // for killing non map-reduce processes.
+      value.interrupt();
+    }
   }
 
   /**
Index: ql/src/java/org/apache/hadoop/hive/ql/exec/ExecDriver.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/exec/ExecDriver.java	(revision 1172015)
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/ExecDriver.java	(working copy)
@@ -454,6 +454,7 @@
         HiveConf.setVar(job, HiveConf.ConfVars.METASTOREPWD, pwd);
       }
 
+      jobKillURI = rj.getTrackingURL() + "&action=kill";
       returnVal = jobExecHelper.progress(rj, jc);
       success = (returnVal == 0);
     } catch (Exception e) {
@@ -999,4 +1000,5 @@
   public void logPlanProgress(SessionState ss) throws IOException {
     ss.getHiveHistory().logPlanProgress(queryPlan);
   }
+
 }
Index: ql/src/java/org/apache/hadoop/hive/ql/exec/HadoopJobExecHelper.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/exec/HadoopJobExecHelper.java	(revision 1172015)
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/HadoopJobExecHelper.java	(working copy)
@@ -182,24 +182,28 @@
   public static void killRunningJobs() {
     synchronized (runningJobKillURIs) {
       for (String uri : runningJobKillURIs.values()) {
-        try {
-          System.err.println("killing job with: " + uri);
-          java.net.HttpURLConnection conn = (java.net.HttpURLConnection) new java.net.URL(uri)
-              .openConnection();
-          conn.setRequestMethod("POST");
-          int retCode = conn.getResponseCode();
-          if (retCode != 200) {
-            System.err.println("Got an error trying to kill job with URI: " + uri + " = "
-                + retCode);
-          }
-        } catch (Exception e) {
-          System.err.println("trying to kill job, caught: " + e);
-          // do nothing
-        }
+        killRunningJob(uri);
       }
     }
   }
 
+  public static void killRunningJob(String uri) {
+    try {
+      System.err.println("killing job with: " + uri);
+      java.net.HttpURLConnection conn = (java.net.HttpURLConnection) new java.net.URL(uri)
+          .openConnection();
+      conn.setRequestMethod("POST");
+      int retCode = conn.getResponseCode();
+      if (retCode != 200) {
+        System.err.println("Got an error trying to kill job with URI: " + uri + " = "
+            + retCode);
+      }
+    } catch (Exception e) {
+      System.err.println("trying to kill job, caught: " + e);
+      // do nothing
+    }
+  }
+
   public boolean checkFatalErrors(Counters ctrs, StringBuilder errMsg) {
     if (ctrs == null) {
       // hadoop might return null if it cannot locate the job.
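Note: the ExecDriver and HadoopJobExecHelper changes above all hinge on one mechanism: a running Hadoop job is killed by POSTing to its JobTracker tracking URL with "&action=kill" appended. The following is a minimal standalone sketch of that request outside Hive; the hostname and job id in the URL are invented placeholders, not values taken from this patch.

import java.net.HttpURLConnection;
import java.net.URL;

public class KillJobSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical kill URI; a real one is RunningJob.getTrackingURL() + "&action=kill".
    String uri = "http://jobtracker:50030/jobdetails.jsp?jobid=job_201109180001_0042&action=kill";
    HttpURLConnection conn = (HttpURLConnection) new URL(uri).openConnection();
    conn.setRequestMethod("POST"); // the JobTracker kills the job on a POST to this URL
    int retCode = conn.getResponseCode();
    if (retCode != 200) {
      System.err.println("kill failed: HTTP " + retCode);
    }
    conn.disconnect();
  }
}
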
Index: ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java	(revision 1172015)
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java	(working copy)
@@ -64,9 +64,11 @@
   protected List<Task<? extends Serializable>> backupChildrenTasks = new ArrayList<Task<? extends Serializable>>();
   protected static transient Log LOG = LogFactory.getLog(Task.class);
   protected int taskTag;
+  protected String jobKillURI;
   private boolean isLocalMode =false;
   private boolean retryCmdWhenFail = false;
 
+
   public static final int NO_TAG = 0;
   public static final int COMMON_JOIN = 1;
   public static final int CONVERTED_MAPJOIN = 2;
@@ -498,4 +500,8 @@
   public String getJobID() {
     return jobID;
   }
+
+  public String getJobKillURI() {
+    return jobKillURI;
+  }
 }
Index: ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/BlockMergeTask.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/BlockMergeTask.java	(revision 1172015)
+++ ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/BlockMergeTask.java	(working copy)
@@ -192,7 +192,7 @@
       HiveConf.setVar(job, HiveConf.ConfVars.METASTOREPWD, "HIVE");
     }
     JobClient jc = new JobClient(job);
-    
+
     String addedJars = ExecDriver.getResourceFiles(job, SessionState.ResourceType.JAR);
     if (!addedJars.isEmpty()) {
       job.set("tmpjars", addedJars);
@@ -204,6 +204,7 @@
 
     // Finally SUBMIT the JOB!
     rj = jc.submitJob(job);
 
+    jobKillURI = rj.getTrackingURL() + "&action=kill";
     returnVal = jobExecHelper.progress(rj, jc);
     success = (returnVal == 0);
Index: ql/src/test/queries/clientnegative/alter_exit.q
===================================================================
--- ql/src/test/queries/clientnegative/alter_exit.q	(revision 0)
+++ ql/src/test/queries/clientnegative/alter_exit.q	(revision 0)
@@ -0,0 +1,3 @@
+CREATE TABLE SAMPLETABLE(IP STRING , showtime BIGINT ) partitioned by (ds string,ipz int) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\040';
+
+ALTER TABLE SAMPLETABLE add Partition(ds='sf') location '/user/hive/warehouse' Partition(ipz=100) location '/user/hive/warehouse';
Index: ql/src/test/results/clientnegative/alter_exit.q.out
===================================================================
--- ql/src/test/results/clientnegative/alter_exit.q.out	(revision 0)
+++ ql/src/test/results/clientnegative/alter_exit.q.out	(revision 0)
@@ -0,0 +1,11 @@
+PREHOOK: query: CREATE TABLE SAMPLETABLE(IP STRING , showtime BIGINT ) partitioned by (ds string,ipz int) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\040'
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: CREATE TABLE SAMPLETABLE(IP STRING , showtime BIGINT ) partitioned by (ds string,ipz int) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\040'
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@SAMPLETABLE
+PREHOOK: query: ALTER TABLE SAMPLETABLE add Partition(ds='sf') location '/user/hive/warehouse' Partition(ipz=100) location '/user/hive/warehouse'
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Input: default@sampletable
+FAILED: Error in metadata: table is partitioned but partition spec is not specified or does not fully match table partitioning: {ds=sf}
+FAILED: Error in metadata: table is partitioned but partition spec is not specified or does not fully match table partitioning: {ipz=100}
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask
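Note: for non map-reduce tasks, the new taskCleanup(running) relies on each TaskRunner being a Thread: value.interrupt() asks the task to wind down in-process, where the old code called System.exit(9) and tore down the whole client JVM. The sketch below is illustrative only (not Hive code); it shows the interrupt-driven stop pattern that cleanup path assumes, with the worker loop standing in for a long-running non-MR task.

public class InterruptDemo {
  public static void main(String[] args) throws InterruptedException {
    Thread worker = new Thread(new Runnable() {
      public void run() {
        try {
          while (!Thread.currentThread().isInterrupted()) {
            Thread.sleep(500); // stands in for a long-running non-MR task
          }
        } catch (InterruptedException e) {
          // sleep() throws once interrupted; fall through and stop cleanly
        }
        System.err.println("worker stopped");
      }
    });
    worker.start();
    Thread.sleep(1000);
    worker.interrupt(); // what taskCleanup(running) does per TaskRunner
    worker.join();
  }
}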