diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
index 16657729042294fc9e3fe805f00be2cccc1ef27b..286e6ee54e484f1283ad9629733adc33b4449359 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
@@ -442,18 +442,27 @@ public void closeOp(boolean abort) throws HiveException {
 
     if (spilled) {
       for (MapJoinTableContainer tableContainer : mapJoinTables) {
-        if (tableContainer != null) {
-          if (tableContainer instanceof HybridHashTableContainer) {
-            HybridHashTableContainer hybridHtContainer = (HybridHashTableContainer) tableContainer;
-            hybridHtContainer.dumpStats();
-
-            HashPartition[] hashPartitions = hybridHtContainer.getHashPartitions();
+        if (tableContainer != null && tableContainer instanceof HybridHashTableContainer) {
+          HybridHashTableContainer hybridHtContainer = (HybridHashTableContainer) tableContainer;
+          hybridHtContainer.dumpStats();
+
+          HashPartition[] hashPartitions = hybridHtContainer.getHashPartitions();
+          // if abort is true just cleanup the spilled partitions and move on
+          if (abort) {
+            for (HashPartition hashPartition : hashPartitions) {
+              try {
+                hashPartition.clear();
+              } catch (IOException e) {
+                throw new HiveException(e);
+              }
+            }
+          } else {
             // Clear all in memory partitions first
             for (int i = 0; i < hashPartitions.length; i++) {
               if (!hashPartitions[i].isHashMapOnDisk()) {
                 hybridHtContainer.setTotalInMemRowCount(
                     hybridHtContainer.getTotalInMemRowCount() -
-                    hashPartitions[i].getHashMapFromMemory().getNumValues());
+                        hashPartitions[i].getHashMapFromMemory().getNumValues());
                 hashPartitions[i].getHashMapFromMemory().clear();
               }
             }
@@ -466,12 +475,8 @@ public void closeOp(boolean abort) throws HiveException {
                   hybridMapJoinLeftover = true;
                   hashMapRowGetters[smallTable] = null;
                   continueProcess(hashPartitions[i], hybridHtContainer);
-                } catch (IOException e) {
-                  e.printStackTrace();
-                } catch (ClassNotFoundException e) {
-                  e.printStackTrace();
-                } catch (SerDeException e) {
-                  e.printStackTrace();
+                } catch (IOException | ClassNotFoundException | SerDeException e) {
+                  throw new HiveException(e);
                 }
               }
               hybridMapJoinLeftover = false;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
index cb9083dd0cde84a342681eca0e5e1656ab20d749..a1ce92958719a953b000bc9eea915ad21b57b2d1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
@@ -180,6 +180,20 @@ public VectorMapJoinRowBytesContainer getMatchfileRowBytesContainer() {
     public boolean isHashMapOnDisk() {
       return hashMapOnDisk;
     }
+
+    public void clear() throws IOException {
+      if (hashMapLocalPath != null) {
+        Files.delete(hashMapLocalPath);
+      }
+
+      if (sidefileKVContainer != null) {
+        sidefileKVContainer.clear();
+      }
+
+      if (matchfileRowBytesContainer != null) {
+        matchfileRowBytesContainer.clear();
+      }
+    }
   }
 
   public HybridHashTableContainer(Configuration hconf, long keyCount, long memUsage, long tableSize)
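
For reference (not part of the patch): a minimal standalone sketch of the abort-path cleanup this change introduces, where each spilled partition is cleared and any IOException is rethrown as the operator's checked exception. The SketchPartition, SketchHiveException, and AbortCleanupSketch names are hypothetical stand-ins for HashPartition, HiveException, and MapJoinOperator.closeOp, and the single spill file per partition is a simplifying assumption.

// AbortCleanupSketch.java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

// Hypothetical stand-in for HiveException: wraps the underlying cause.
class SketchHiveException extends Exception {
  SketchHiveException(Throwable cause) { super(cause); }
}

// Hypothetical stand-in for HashPartition: may or may not have spilled to disk.
class SketchPartition {
  private final Path spillFile; // null if this partition never spilled

  SketchPartition(Path spillFile) { this.spillFile = spillFile; }

  // Analogue of HashPartition.clear(): drop any on-disk state.
  void clear() throws IOException {
    if (spillFile != null) {
      Files.deleteIfExists(spillFile);
    }
  }
}

public class AbortCleanupSketch {
  // On abort, skip reprocessing entirely: clear every partition and convert
  // IOException into the checked exception the caller is declared to throw.
  static void cleanupOnAbort(SketchPartition[] partitions) throws SketchHiveException {
    for (SketchPartition partition : partitions) {
      try {
        partition.clear();
      } catch (IOException e) {
        throw new SketchHiveException(e);
      }
    }
  }

  public static void main(String[] args) throws Exception {
    Path tmp = Files.createTempFile("spill", ".bin");
    cleanupOnAbort(new SketchPartition[] { new SketchPartition(tmp), new SketchPartition(null) });
    System.out.println("spill file removed: " + !Files.exists(tmp));
  }
}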