Index: ql/src/java/org/apache/hadoop/hive/ql/Driver.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/Driver.java	(revision 1197183)
+++ ql/src/java/org/apache/hadoop/hive/ql/Driver.java	(working copy)
@@ -30,6 +30,7 @@
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
+import java.util.Map.Entry;
 import java.util.Queue;
 import java.util.Set;
 import java.util.concurrent.ConcurrentLinkedQueue;
@@ -47,6 +48,7 @@
 import org.apache.hadoop.hive.ql.exec.ConditionalTask;
 import org.apache.hadoop.hive.ql.exec.ExecDriver;
 import org.apache.hadoop.hive.ql.exec.FetchTask;
+import org.apache.hadoop.hive.ql.exec.HadoopJobExecHelper;
 import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.TableScanOperator;
 import org.apache.hadoop.hive.ql.exec.Task;
@@ -97,6 +99,7 @@
 import org.apache.hadoop.hive.ql.parse.VariableSubstitution;
 import org.apache.hadoop.hive.ql.plan.HiveOperation;
 import org.apache.hadoop.hive.ql.plan.TableDesc;
+import org.apache.hadoop.hive.ql.plan.api.StageType;
 import org.apache.hadoop.hive.ql.processors.CommandProcessor;
 import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
 import org.apache.hadoop.hive.ql.session.SessionState;
@@ -1128,7 +1131,7 @@
       if (exitVal != 0) {
         if (tsk.ifRetryCmdWhenFail()) {
           if (running.size() != 0) {
-            taskCleanup();
+            taskCleanup(running);
           }
           // in case we decided to run everything in local mode, restore the
           // the jobtracker setting to its initial value
@@ -1167,7 +1170,7 @@
         SQLState = "08S01";
         console.printError(errorMessage);
         if (running.size() != 0) {
-          taskCleanup();
+          taskCleanup(running);
         }
         // in case we decided to run everything in local mode, restore the
         // the jobtracker setting to its initial value
@@ -1333,12 +1336,21 @@
   /**
    * Cleans up remaining tasks in case of failure
    */
-
-  public void taskCleanup() {
-    // The currently existing Shutdown hooks will be automatically called,
-    // killing the map-reduce processes.
-    // The non MR processes will be killed as well.
-    System.exit(9);
+  public void taskCleanup(Map<TaskResult, TaskRunner> running) {
+    Set<Entry<TaskResult, TaskRunner>> entrySet = running.entrySet();
+    for (Entry<TaskResult, TaskRunner> entry : entrySet) {
+      TaskRunner value = entry.getValue();
+      Task<? extends Serializable> task = value.getTask();
+      // for killing spawned map-reduce processes.
+      if (task.getType().equals(StageType.MAPRED)) {
+        String jobKillURI = task.getJobKillURI();
+        if (jobKillURI != null) {
+          HadoopJobExecHelper.killRunningJob(jobKillURI);
+        }
+      }
+      // for killing non map-reduce processes.
+      value.interrupt();
+    }
   }
 
   /**
Index: ql/src/java/org/apache/hadoop/hive/ql/exec/ExecDriver.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/exec/ExecDriver.java	(revision 1197183)
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/ExecDriver.java	(working copy)
@@ -455,6 +455,7 @@
         HiveConf.setVar(job, HiveConf.ConfVars.METASTOREPWD, pwd);
       }
 
+      jobKillURI = rj.getTrackingURL() + "&action=kill";
       returnVal = jobExecHelper.progress(rj, jc);
       success = (returnVal == 0);
     } catch (Exception e) {
@@ -774,7 +775,7 @@
   public Collection<Operator<? extends Serializable>> getTopOperators() {
     return getWork().getAliasToWork().values();
   }
-
+
   @Override
   public boolean hasReduce() {
     MapredWork w = getWork();
Index: ql/src/java/org/apache/hadoop/hive/ql/exec/HadoopJobExecHelper.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/exec/HadoopJobExecHelper.java	(revision 1197183)
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/HadoopJobExecHelper.java	(working copy)
@@ -180,24 +180,28 @@
   public static void killRunningJobs() {
     synchronized (runningJobKillURIs) {
       for (String uri : runningJobKillURIs.values()) {
-        try {
-          System.err.println("killing job with: " + uri);
-          java.net.HttpURLConnection conn = (java.net.HttpURLConnection) new java.net.URL(uri)
-              .openConnection();
-          conn.setRequestMethod("POST");
-          int retCode = conn.getResponseCode();
-          if (retCode != 200) {
-            System.err.println("Got an error trying to kill job with URI: " + uri + " = "
-                + retCode);
-          }
-        } catch (Exception e) {
-          System.err.println("trying to kill job, caught: " + e);
-          // do nothing
-        }
+        killRunningJob(uri);
       }
     }
   }
 
+  public static void killRunningJob(String uri) {
+    try {
+      System.err.println("killing job with: " + uri);
+      java.net.HttpURLConnection conn = (java.net.HttpURLConnection) new java.net.URL(uri)
+          .openConnection();
+      conn.setRequestMethod("POST");
+      int retCode = conn.getResponseCode();
+      if (retCode != 200) {
+        System.err.println("Got an error trying to kill job with URI: " + uri + " = "
+            + retCode);
+      }
+    } catch (Exception e) {
+      System.err.println("trying to kill job, caught: " + e);
+      // do nothing
+    }
+  }
+
   public boolean checkFatalErrors(Counters ctrs, StringBuilder errMsg) {
     if (ctrs == null) {
       // hadoop might return null if it cannot locate the job.
@@ -310,7 +314,7 @@
 
     errMsg.setLength(0);
     updateCounters(ctrs, rj);
-
+
     // Prepare data for Client Stat Publishers (if any present) and execute them
     if (clientStatPublishers.size() > 0 && ctrs != null) {
       Map<String, Double> exctractedCounters = extractAllCounterValues(ctrs);
Index: ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java	(revision 1197183)
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java	(working copy)
@@ -65,6 +65,7 @@
   protected List<Task<? extends Serializable>> backupChildrenTasks = new ArrayList<Task<? extends Serializable>>();
   protected static transient Log LOG = LogFactory.getLog(Task.class);
   protected int taskTag;
+  protected String jobKillURI;
   private boolean isLocalMode =false;
   private boolean retryCmdWhenFail = false;
@@ -352,7 +353,7 @@
   public Collection<Operator<? extends Serializable>> getTopOperators() {
     return new LinkedList<Operator<? extends Serializable>>();
   }
-
+
   public boolean hasReduce() {
     return false;
   }
@@ -360,7 +361,7 @@
   public Operator<? extends Serializable> getReducer() {
     return null;
   }
-
+
   public HashMap<String, Long> getCounters() {
     return taskCounters;
   }
@@ -507,4 +508,8 @@
   public String getJobID() {
     return jobID;
   }
+
+  public String getJobKillURI() {
+    return jobKillURI;
+  }
 }
Index: ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/BlockMergeTask.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/BlockMergeTask.java	(revision 1197183)
+++ ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/BlockMergeTask.java	(working copy)
@@ -192,7 +192,7 @@
       HiveConf.setVar(job, HiveConf.ConfVars.METASTOREPWD, "HIVE");
     }
     JobClient jc = new JobClient(job);
-
+
     String addedJars = ExecDriver.getResourceFiles(job, SessionState.ResourceType.JAR);
     if (!addedJars.isEmpty()) {
       job.set("tmpjars", addedJars);
@@ -204,6 +204,7 @@
     // Finally SUBMIT the JOB!
     rj = jc.submitJob(job);
+    jobKillURI = rj.getTrackingURL() + "&action=kill";
     returnVal = jobExecHelper.progress(rj, jc);
     success = (returnVal == 0);
Index: ql/src/test/queries/clientnegative/alter_exit.q
===================================================================
--- ql/src/test/queries/clientnegative/alter_exit.q	(revision 0)
+++ ql/src/test/queries/clientnegative/alter_exit.q	(revision 0)
@@ -0,0 +1,3 @@
+CREATE TABLE SAMPLETABLE(IP STRING , showtime BIGINT ) partitioned by (ds string,ipz int) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\040';
+
+ALTER TABLE SAMPLETABLE add Partition(ds='sf') location '/user/hive/warehouse' Partition(ipz=100) location '/user/hive/warehouse';
Index: ql/src/test/results/clientnegative/alter_exit.q.out
===================================================================
--- ql/src/test/results/clientnegative/alter_exit.q.out	(revision 0)
+++ ql/src/test/results/clientnegative/alter_exit.q.out	(revision 0)
@@ -0,0 +1,11 @@
+PREHOOK: query: CREATE TABLE SAMPLETABLE(IP STRING , showtime BIGINT ) partitioned by (ds string,ipz int) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\040'
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: CREATE TABLE SAMPLETABLE(IP STRING , showtime BIGINT ) partitioned by (ds string,ipz int) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\040'
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@SAMPLETABLE
+PREHOOK: query: ALTER TABLE SAMPLETABLE add Partition(ds='sf') location '/user/hive/warehouse' Partition(ipz=100) location '/user/hive/warehouse'
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Input: default@sampletable
+FAILED: Error in metadata: table is partitioned but partition spec is not specified or does not fully match table partitioning: {ds=sf}
+FAILED: Error in metadata: table is partitioned but partition spec is not specified or does not fully match table partitioning: {ipz=100}
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask
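
Note (not part of the patch): the net effect of the change above is that Driver.taskCleanup no longer calls System.exit(9). Each MAPRED task records rj.getTrackingURL() + "&action=kill" as its jobKillURI at submit time; on failure the driver walks the map of running tasks, POSTs to that URI through HadoopJobExecHelper.killRunningJob, and interrupts non map-reduce task runners. A minimal standalone sketch of the kill request is below; the JobKillSketch class name is illustrative only and assumes the same tracking-URL convention the patch uses.

import java.net.HttpURLConnection;
import java.net.URL;

// Illustrative sketch: mirrors what HadoopJobExecHelper.killRunningJob(uri)
// does in the patch -- POST to the job tracker's kill URL and check the status.
public class JobKillSketch {
  public static void kill(String trackingUrl) {
    // The patch builds the kill URI as rj.getTrackingURL() + "&action=kill".
    String killUri = trackingUrl + "&action=kill";
    try {
      HttpURLConnection conn = (HttpURLConnection) new URL(killUri).openConnection();
      conn.setRequestMethod("POST");
      int retCode = conn.getResponseCode();
      if (retCode != 200) {
        System.err.println("Got an error trying to kill job with URI: " + killUri + " = " + retCode);
      }
    } catch (Exception e) {
      // Best-effort, same as the patch: log and continue so cleanup of other tasks proceeds.
      System.err.println("trying to kill job, caught: " + e);
    }
  }
}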