commit 4b7f6c51966ff8412b169b685dfaf1cbd476e50a
Author: kellyzly
Date:   Mon Jul 3 22:24:11 2017 -0400

    HIVE-17010.1.patch

diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
index c70e1e0..cda0a4f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
@@ -2920,7 +2920,7 @@ public static int estimateNumberOfReducers(HiveConf conf, ContentSummary inputSu
     return estimateReducers(totalInputFileSize, bytesPerReducer, maxReducers, powersOfTwo);
   }
 
-  public static int estimateReducers(long totalInputFileSize, long bytesPerReducer,
+  public static int estimateReducers(double totalInputFileSize, long bytesPerReducer,
       int maxReducers, boolean powersOfTwo) {
     double bytes = Math.max(totalInputFileSize, bytesPerReducer);
     int reducers = (int) Math.ceil(bytes / bytesPerReducer);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SetSparkReducerParallelism.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SetSparkReducerParallelism.java
index 4924df7..16444f2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SetSparkReducerParallelism.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SetSparkReducerParallelism.java
@@ -119,7 +119,7 @@ public Object process(Node nd, Stack stack,
       }
     }
 
-    long numberOfBytes = 0;
+    double numberOfBytes = 0;
     if (useOpStats) {
       // we need to add up all the estimates from the siblings of this reduce sink
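
Reviewer note (not part of the patch): the long -> double widening matters because
SetSparkReducerParallelism accumulates per-sibling data-size estimates into
numberOfBytes, and a long accumulator wraps silently past Long.MAX_VALUE, whereas
a double merely loses precision and stays positive, so the Math.max/Math.ceil
arithmetic in estimateReducers keeps producing sane values. Below is a minimal
standalone sketch of that behavior. It reproduces only the two arithmetic lines
visible in the first hunk (the real Utilities.estimateReducers also clamps to
maxReducers and optionally rounds to a power of two, omitted here), and the class
name and the 256 MB bytesPerReducer figure are invented for illustration.

// Illustrative sketch only -- not Hive code. Class name and constants are
// hypothetical; the arithmetic mirrors the two visible lines of
// Utilities.estimateReducers after this patch.
public class EstimateReducersSketch {

  // Mirrors the patched signature: totalInputFileSize is a double, so a
  // caller that accumulated sibling estimates as a double never has to
  // narrow back to long (the narrowing is where the old overflow risk was).
  static int estimateReducers(double totalInputFileSize, long bytesPerReducer) {
    double bytes = Math.max(totalInputFileSize, bytesPerReducer);
    return (int) Math.ceil(bytes / bytesPerReducer);
  }

  public static void main(String[] args) {
    long bytesPerReducer = 256L * 1024 * 1024; // illustrative 256 MB

    // Accumulating as long wraps past Long.MAX_VALUE; the same sum held
    // in a double merely loses precision and stays positive.
    long wrapped = Long.MAX_VALUE + Long.MAX_VALUE;           // -2 (overflowed)
    double summed = (double) Long.MAX_VALUE + Long.MAX_VALUE; // ~1.84e19

    System.out.println("long accumulator:   " + wrapped);
    System.out.println("double accumulator: " + summed);
    System.out.println("reducers for 1 GB:  "
        + estimateReducers(1024.0 * 1024 * 1024, bytesPerReducer)); // 4
  }
}

A side note on the narrowing cast: in Java, (int) of a double larger than
Integer.MAX_VALUE saturates at Integer.MAX_VALUE rather than wrapping, and the
real method then clamps the result to maxReducers, so an oversized double
estimate degrades gracefully instead of going negative the way the old long
accumulator could.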