diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
index 32bfcf5..6392152 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
@@ -525,8 +525,7 @@ protected void createBucketFiles(FSPaths fsp) throws HiveException {
           int numReducers = totalFiles / numFiles;
 
           if (numReducers > 1) {
-            int currReducer = Integer.valueOf(Utilities.getTaskIdFromFilename(Utilities
-                .getTaskId(hconf)));
+            int currReducer = Integer.valueOf(Utilities.getTaskIdFromFilename(taskId));
 
             int reducerIdx = prtner.getPartition(key, null, numReducers);
             if (currReducer != reducerIdx) {
@@ -541,7 +540,7 @@ protected void createBucketFiles(FSPaths fsp) throws HiveException {
           seenBuckets.add(bucketNum);
 
           bucketMap.put(bucketNum, filesIdx);
-          taskId = Utilities.replaceTaskIdFromFilename(Utilities.getTaskId(hconf), bucketNum);
+          taskId = Utilities.replaceTaskIdFromFilename(taskId, bucketNum);
         }
         createBucketForFileIdx(fsp, filesIdx);
         filesIdx++;
@@ -900,7 +899,7 @@ protected FSPaths getDynOutPaths(List<String> row, String lbDirName) throws Hive
       pathKey = dpDir;
       if(conf.getDpSortState().equals(DPSortState.PARTITION_BUCKET_SORTED)) {
         String buckNum = row.get(row.size() - 1);
-        taskId = Utilities.replaceTaskIdFromFilename(Utilities.getTaskId(hconf), buckNum);
+        taskId = Utilities.replaceTaskIdFromFilename(taskId, buckNum);
         pathKey = appendToSource(taskId, dpDir);
       }
       FSPaths fsp2 = valToPaths.get(pathKey);
@@ -1145,7 +1144,6 @@ private void publishStats() throws HiveException {
       return;
     }
 
-    String taskID = Utilities.getTaskIdFromFilename(Utilities.getTaskId(hconf));
     String spSpec = conf.getStaticSpec();
 
     for (Map.Entry<String, FSPaths> entry : valToPaths.entrySet()) {
@@ -1155,7 +1153,7 @@ private void publishStats() throws HiveException {
       // for bucketed tables, hive.optimize.sort.dynamic.partition optimization
       // adds the taskId to the fspKey.
       if (conf.getDpSortState().equals(DPSortState.PARTITION_BUCKET_SORTED)) {
-        taskID = Utilities.getTaskIdFromFilename(fspKey);
+        String taskID = Utilities.getTaskIdFromFilename(fspKey);
         // if length of (prefix/ds=__HIVE_DEFAULT_PARTITION__/000000_0) is greater than max key prefix
         // and if (prefix/ds=10/000000_0) is less than max key prefix, then former will get hashed
         // to a smaller prefix (MD5hash/000000_0) and later will stored as such in staging stats table.
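
Note: the hunks above all apply the same pattern: the per-task id is resolved from the configuration once and cached in the operator's taskId field, and later call sites reuse that field instead of re-deriving it via Utilities.getTaskId(hconf). The standalone sketch below is illustrative only; every class and method name in it is a hypothetical stand-in (it is not Hive code), and only the caching shape mirrors the patch.

// Hypothetical, self-contained illustration only; not part of the patch.
// It mimics the refactor above: cache the task id once instead of looking it
// up from the configuration at every call site.
public class TaskIdCachingSketch {

  // Stand-in for a configuration-backed lookup such as Utilities.getTaskId(hconf).
  private static String lookUpTaskIdFromConf() {
    return "000001_0";
  }

  // Cached once per instance, analogous to the taskId field the diff reuses.
  private final String taskId;

  public TaskIdCachingSketch() {
    this.taskId = lookUpTaskIdFromConf();
  }

  // Before the patch (shape-wise): the id was re-derived on every call.
  public String bucketFileNameUncached(int bucketNum) {
    return lookUpTaskIdFromConf().replaceFirst("^\\d+", String.format("%06d", bucketNum));
  }

  // After the patch (shape-wise): the cached field is reused, mirroring
  // Utilities.replaceTaskIdFromFilename(taskId, bucketNum).
  public String bucketFileNameCached(int bucketNum) {
    return taskId.replaceFirst("^\\d+", String.format("%06d", bucketNum));
  }

  public static void main(String[] args) {
    TaskIdCachingSketch sketch = new TaskIdCachingSketch();
    System.out.println(sketch.bucketFileNameCached(3)); // prints 000003_0
  }
}

The same single-lookup idea is what lets the publishStats() hunks drop the method-level taskID variable entirely and compute it from fspKey only inside the PARTITION_BUCKET_SORTED branch, where it is actually needed.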