diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
index b9077d1..73a1f8e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
@@ -333,13 +333,21 @@ private void launchCompactionJob(JobConf job, Path baseDir, CompactionType compa
       job.getJobName() + "' to " + job.getQueueName() + " queue. " +
       "(current delta dirs count=" + curDirNumber +
       ", obsolete delta dirs count=" + obsoleteDirNumber + ". TxnIdRange[" + minTxn + "," + maxTxn + "]");
-    RunningJob rj = new JobClient(job).submitJob(job);
-    LOG.info("Submitted compaction job '" + job.getJobName() + "' with jobID=" + rj.getID() + " compaction ID=" + id);
-    txnHandler.setHadoopJobId(rj.getID().toString(), id);
-    rj.waitForCompletion();
-    if (!rj.isSuccessful()) {
-      throw new IOException(compactionType == CompactionType.MAJOR ? "Major" : "Minor" +
-          " compactor job failed for " + jobName + "! Hadoop JobId: " + rj.getID() );
+    JobClient jc = null;
+    try {
+      jc = new JobClient(job);
+      RunningJob rj = jc.submitJob(job);
+      LOG.info("Submitted compaction job '" + job.getJobName() + "' with jobID=" + rj.getID() + " compaction ID=" + id);
+      txnHandler.setHadoopJobId(rj.getID().toString(), id);
+      rj.waitForCompletion();
+      if (!rj.isSuccessful()) {
+        throw new IOException((compactionType == CompactionType.MAJOR ? "Major" : "Minor") +
+            " compactor job failed for " + jobName + "! Hadoop JobId: " + rj.getID());
+      }
+    } finally {
+      if (jc != null) {
+        jc.close();
+      }
     }
   }
   /**
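
For context, the following is a minimal, self-contained sketch of the submit-then-always-close pattern this patch introduces, written against Hadoop's classic org.apache.hadoop.mapred API. The SubmitAndCloseSketch class and the JobIdRecorder callback (standing in for txnHandler.setHadoopJobId) are illustrative assumptions only and are not part of Hive or Hadoop.

import java.io.IOException;

import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RunningJob;

public final class SubmitAndCloseSketch {

  /** Hypothetical callback standing in for txnHandler.setHadoopJobId(...). */
  public interface JobIdRecorder {
    void record(String hadoopJobId) throws IOException;
  }

  /**
   * Submits the job, records its id, waits for completion, and always closes
   * the JobClient, even when submission or the job itself fails.
   */
  public static void submitAndWait(JobConf job, JobIdRecorder recorder) throws IOException {
    JobClient jc = null;
    try {
      jc = new JobClient(job);
      RunningJob rj = jc.submitJob(job);
      recorder.record(rj.getID().toString());
      rj.waitForCompletion();                  // blocks until the MR job finishes
      if (!rj.isSuccessful()) {
        throw new IOException("Compaction job failed for " + job.getJobName()
            + "! Hadoop JobId: " + rj.getID());
      }
    } finally {
      // Release the client's RPC and filesystem resources on every code path.
      if (jc != null) {
        jc.close();
      }
    }
  }

  private SubmitAndCloseSketch() {
  }
}

The point of the try/finally (rather than closing at the end of the happy path) is that waitForCompletion() and isSuccessful() can throw, and the job can fail; without the finally block the JobClient created for each compaction would leak in those cases.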