commit ed441986de3cddf444ad9691e068f9de5bc7e5bf
Author: Sahil Takiar
Date:   Wed Oct 18 17:47:17 2017 -0700

    HIVE-17835: HS2 Logs print unnecessary stack trace when HoS query is cancelled

diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkTask.java
index 2ee8c9359d..6ef3462ff5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkTask.java
@@ -114,6 +114,7 @@ public int execute(DriverContext driverContext) {
       perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.SPARK_SUBMIT_JOB);
 
       if (driverContext.isShutdown()) {
+        LOG.warn("Killing Spark job");
         killJob();
         throw new HiveException("Operation is cancelled.");
       }
@@ -329,7 +330,7 @@ private void killJob() {
       try {
         jobRef.cancelJob();
       } catch (Exception e) {
-        LOG.warn("failed to kill job", e);
+        LOG.warn("Failed to kill Spark job", e);
       }
     }
   }
@@ -415,6 +416,7 @@ private void getSparkJobInfo(SparkJobStatus sparkJobStatus, int rc) {
     if ((error instanceof InterruptedException) ||
         (error instanceof HiveException &&
             error.getCause() instanceof InterruptedException)) {
+      LOG.info("Killing Spark job since query was interrupted");
       killJob();
     }
     setException(error);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/RemoteSparkJobMonitor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/RemoteSparkJobMonitor.java
index 37b8363d25..b1ce0b232c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/RemoteSparkJobMonitor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/RemoteSparkJobMonitor.java
@@ -26,6 +26,7 @@
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.exec.spark.status.impl.RemoteSparkJobStatus;
 import org.apache.hadoop.hive.ql.log.PerfLogger;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hive.spark.client.JobHandle;
 import org.apache.spark.JobExecutionStatus;
 
@@ -180,14 +181,19 @@ public int startMonitor() {
         if (!done) {
           Thread.sleep(checkInterval);
         }
-      } catch (Exception e) {
-        String msg = " with exception '" + Utilities.getNameMessage(e) + "'";
-        msg = "Failed to monitor Job[" + sparkJobStatus.getJobId() + "]" + msg;
-
-        // Has to use full name to make sure it does not conflict with
-        // org.apache.commons.lang.StringUtils
-        LOG.error(msg, e);
-        console.printError(msg, "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
+      } catch (Exception e) {
+        if (e instanceof InterruptedException ||
+            (e instanceof HiveException && e.getCause() instanceof InterruptedException)) {
+          LOG.warn("Interrupted while monitoring the Hive on Spark application, exiting");
+        } else {
+          String msg = " with exception '" + Utilities.getNameMessage(e) + "'";
+          msg = "Failed to monitor Job[" + sparkJobStatus.getJobId() + "]" + msg;
+
+          // Has to use full name to make sure it does not conflict with
+          // org.apache.commons.lang.StringUtils
+          LOG.error(msg, e);
+          console.printError(msg, "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
+        }
         rc = 1;
         done = true;
         sparkJobStatus.setError(e);
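
The heart of the patch is the check that distinguishes a cancelled query from a genuine monitoring failure: a raw InterruptedException, or a HiveException wrapping one, is treated as cancellation and logged as a one-line warning instead of a full stack trace. Below is a minimal standalone sketch of that pattern, assuming RuntimeException as a stand-in for Hive's HiveException so it compiles without Hive on the classpath; the class and method names are illustrative, not part of Hive.

    // Sketch of the cancellation-vs-failure test added to RemoteSparkJobMonitor.
    public final class CancellationCheckSketch {

      // True when the exception signals interruption, either directly or as
      // the cause of a wrapping exception (HiveException in the real code).
      static boolean isCancellation(Exception e) {
        return e instanceof InterruptedException
            || (e instanceof RuntimeException  // stand-in for HiveException
                && e.getCause() instanceof InterruptedException);
      }

      public static void main(String[] args) {
        Exception direct = new InterruptedException("query cancelled");
        Exception wrapped = new RuntimeException(new InterruptedException());
        Exception failure = new IllegalStateException("real monitoring failure");

        System.out.println(isCancellation(direct));   // true  -> short warn log
        System.out.println(isCancellation(wrapped));  // true  -> short warn log
        System.out.println(isCancellation(failure));  // false -> full stack trace
      }
    }

Note that in both branches the monitor still sets rc = 1, marks the loop done, and records the error via sparkJobStatus.setError(e); only the logging differs, which is exactly the "unnecessary stack trace" the JIRA title describes.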