diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/CountDistinctRewriteProc.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/CountDistinctRewriteProc.java
index a50ad78e8f..f0cf2f1c30 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/CountDistinctRewriteProc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/CountDistinctRewriteProc.java
@@ -25,6 +25,7 @@
 import java.util.Map;
 import java.util.Stack;
 
+import org.apache.hadoop.hive.ql.util.NullOrdering;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
@@ -286,7 +287,7 @@ private ReduceSinkOperator genReducesink1(GroupByOperator mGby1,
     List<List<Integer>> distinctColIndices = new ArrayList<>();
     rs1.setConf(PlanUtils.getReduceSinkDesc(reduceKeys, 1, reduceValues, distinctColIndices,
         outputKeyColumnNames, outputValueColumnNames, true, -1, 1, -1,
-        AcidUtils.Operation.NOT_ACID));
+        AcidUtils.Operation.NOT_ACID, NullOrdering.defaultNullOrder(pGraphContext.getConf())));
     rs1.setColumnExprMap(colExprMap);
     rs1.setSchema(new RowSchema(rowSchema));
 
@@ -449,7 +450,7 @@ private ReduceSinkOperator genReducesink2(GroupByOperator mGby2,
     ArrayList<ExprNodeDesc> reduceKeys = new ArrayList<>();
     rs2.setConf(PlanUtils.getReduceSinkDesc(reduceKeys, 0, reduceValues, distinctColIndices,
         outputKeyColumnNames, outputValueColumnNames, false, -1, 0, 1,
-        AcidUtils.Operation.NOT_ACID));
+        AcidUtils.Operation.NOT_ACID, NullOrdering.defaultNullOrder(pGraphContext.getConf())));
     rs2.setColumnExprMap(colExprMap);
     rs2.setSchema(new RowSchema(rowSchema));
     return rs2;
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/DynamicPartitionPruningOptimization.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/DynamicPartitionPruningOptimization.java
index 0f95d7788c..e0ab6ce349 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/DynamicPartitionPruningOptimization.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/DynamicPartitionPruningOptimization.java
@@ -81,6 +81,7 @@
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFBloomFilter.GenericUDAFBloomFilterEvaluator;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator.Mode;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFIn;
+import org.apache.hadoop.hive.ql.util.NullOrdering;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 import org.slf4j.Logger;
@@ -714,7 +715,7 @@ private boolean generateSemiJoinOperatorPlan(DynamicListContext ctx, ParseContex
     // Create the reduce sink operator
     ReduceSinkDesc rsDesc = PlanUtils.getReduceSinkDesc(
         new ArrayList<ExprNodeDesc>(), rsValueCols, gbOutputNames, false,
-        -1, 0, 1, Operation.NOT_ACID);
+        -1, 0, 1, Operation.NOT_ACID, NullOrdering.defaultNullOrder(parseContext.getConf()));
     ReduceSinkOperator rsOp = (ReduceSinkOperator)OperatorFactory.getAndMakeChild(
         rsDesc, new RowSchema(groupByOp.getSchema()), groupByOp);
     Map<String, ExprNodeDesc> columnExprMap = new HashMap<String, ExprNodeDesc>();
@@ -821,7 +822,7 @@ private void createFinalRsForSemiJoinOp(
     // Create the final Reduce Sink Operator
     ReduceSinkDesc rsDescFinal = PlanUtils.getReduceSinkDesc(
         new ArrayList<ExprNodeDesc>(), rsValueCols, gbOutputNames, false,
-        -1, 0, 1, Operation.NOT_ACID);
+        -1, 0, 1, Operation.NOT_ACID, NullOrdering.defaultNullOrder(parseContext.getConf()));
     ReduceSinkOperator rsOpFinal = (ReduceSinkOperator)OperatorFactory.getAndMakeChild(
         rsDescFinal, new RowSchema(gb.getSchema()), gb);
     Map<String, ExprNodeDesc> columnExprMap = new HashMap<>();
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveGBOpConvUtil.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveGBOpConvUtil.java
index 268aca6b58..6e09a17398 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveGBOpConvUtil.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveGBOpConvUtil.java
@@ -59,6 +59,7 @@
 import org.apache.hadoop.hive.ql.plan.PlanUtils;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator.Mode;
+import org.apache.hadoop.hive.ql.util.NullOrdering;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 
@@ -120,6 +121,8 @@
     float minReductionHashAggr;
 
     private HIVEGBPHYSICALMODE gbPhysicalPipelineMode;
+
+    private NullOrdering defaultNullOrder = NullOrdering.NULLS_LAST;
   };
 
   private static HIVEGBPHYSICALMODE getAggOPMode(HiveConf hc, GBInfo gbInfo) {
@@ -282,6 +285,7 @@ private static GBInfo getGBInfo(HiveAggregate aggRel, OpAttr inputOpAf, HiveConf
 
     // 5. Gather GB Physical pipeline (based on user config & Grping Sets size)
     gbInfo.gbPhysicalPipelineMode = getAggOPMode(hc, gbInfo);
+    gbInfo.defaultNullOrder = NullOrdering.defaultNullOrder(hc);
 
     return gbInfo;
   }
@@ -651,7 +655,7 @@ private static OpAttr genReduceGBRS(OpAttr inputOpAf, GBInfo gbInfo) throws Sema
     ReduceSinkOperator rsOp = (ReduceSinkOperator) OperatorFactory.getAndMakeChild(PlanUtils
         .getReduceSinkDesc(reduceKeys, reduceValues, outputColumnNames, true, -1,
             getNumPartFieldsForReduceSideRS(gbInfo), getParallelismForReduceSideRS(gbInfo),
-            AcidUtils.Operation.NOT_ACID), new RowSchema(colInfoLst), reduceSideGB1);
+            AcidUtils.Operation.NOT_ACID, gbInfo.defaultNullOrder), new RowSchema(colInfoLst), reduceSideGB1);
 
     rsOp.setColumnExprMap(colExprMap);
 
@@ -690,7 +694,7 @@ private static OpAttr genMapSideGBRS(OpAttr inputOpAf, GBInfo gbInfo) throws Sem
     ReduceSinkOperator rsOp = (ReduceSinkOperator) OperatorFactory.getAndMakeChild(PlanUtils
         .getReduceSinkDesc(reduceKeys, keyLength, reduceValues, gbInfo.distColIndices,
             outputKeyColumnNames, outputValueColumnNames, true, -1, getNumPartFieldsForMapSideRS(
-            gbInfo), getParallelismForMapSideRS(gbInfo), AcidUtils.Operation.NOT_ACID),
+            gbInfo), getParallelismForMapSideRS(gbInfo), AcidUtils.Operation.NOT_ACID, gbInfo.defaultNullOrder),
         new RowSchema(colInfoLst), mapGB);
 
     rsOp.setColumnExprMap(colExprMap);
@@ -757,7 +761,7 @@ private static OpAttr genMapSideRS(OpAttr inputOpAf, GBInfo gbInfo) throws Seman
         .getReduceSinkDesc(reduceKeys, keyLength, reduceValues, gbInfo.distColIndices,
             outputKeyColumnNames, outputValueColumnNames, true, -1,
             getNumPartFieldsForMapSideRS(gbInfo),
-            getParallelismForMapSideRS(gbInfo), AcidUtils.Operation.NOT_ACID), new RowSchema(
+            getParallelismForMapSideRS(gbInfo), AcidUtils.Operation.NOT_ACID, gbInfo.defaultNullOrder), new RowSchema(
             colInfoLst), inputOpAf.inputs.get(0));
 
     rsOp.setColumnExprMap(colExprMap);
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java
index c11ed59012..ee8c9015fc 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java
@@ -98,6 +98,7 @@
 import org.apache.hadoop.hive.ql.plan.SelectDesc;
 import org.apache.hadoop.hive.ql.plan.TableScanDesc;
 import org.apache.hadoop.hive.ql.plan.UnionDesc;
+import org.apache.hadoop.hive.ql.util.NullOrdering;
 import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -848,12 +849,13 @@ private static ReduceSinkOperator genReduceSink(Operator<?> input, String tableA
     }
 
     ReduceSinkDesc rsDesc;
+    NullOrdering defaultNullOrder = NullOrdering.defaultNullOrder(hiveConf);
     if (order.isEmpty()) {
       rsDesc = PlanUtils.getReduceSinkDesc(reduceKeys, reduceValues, outputColumnNames, false, tag,
-          reduceKeys.size(), numReducers, acidOperation);
+          reduceKeys.size(), numReducers, acidOperation, defaultNullOrder);
     } else {
       rsDesc = PlanUtils.getReduceSinkDesc(reduceKeys, reduceValues, outputColumnNames, false, tag,
-          partitionCols, order, nullOrder, numReducers, acidOperation);
+          partitionCols, order, nullOrder, defaultNullOrder, numReducers, acidOperation);
     }
 
     ReduceSinkOperator rsOp = (ReduceSinkOperator) OperatorFactory.getAndMakeChild(
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index e9b035d3b4..6944502ccd 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -408,6 +408,8 @@
   private String invalidResultCacheReason;
   private String invalidAutomaticRewritingMaterializationReason;
 
+  private final NullOrdering defaultNullOrder;
+
   private static final CommonToken SELECTDI_TOKEN =
       new ImmutableCommonToken(HiveParser.TOK_SELECTDI, "TOK_SELECTDI");
   private static final CommonToken SELEXPR_TOKEN =
@@ -464,6 +466,7 @@ public SemanticAnalyzer(QueryState queryState) throws SemanticException {
     tabNameToTabObject = new HashMap<>();
     defaultJoinMerge = !HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_MERGE_NWAY_JOINS);
     disableJoinMerge = defaultJoinMerge;
+    defaultNullOrder = NullOrdering.defaultNullOrder(conf);
   }
 
   @Override
@@ -5626,7 +5629,7 @@ private ReduceSinkOperator genGroupByPlanReduceSinkOperator(QB qb,
             groupingSetsPresent ? keyLength + 1 : keyLength,
             reduceValues, distinctColIndices, outputKeyColumnNames, outputValueColumnNames,
             true, -1, numPartitionFields,
-            numReducers, AcidUtils.Operation.NOT_ACID),
+            numReducers, AcidUtils.Operation.NOT_ACID, defaultNullOrder),
         new RowSchema(reduceSinkOutputRowResolver.getColumnInfos()), inputOperatorInfo),
         reduceSinkOutputRowResolver);
     rsOp.setColumnExprMap(colExprMap);
@@ -5829,7 +5832,7 @@ private ReduceSinkOperator genCommonGroupByPlanReduceSinkOperator(QB qb, List
[...]
@@ ... @@ private Operator genReduceSinkPlan(Operator<?> input, List<ExprNodeDesc> partiti
     dummy.setParentOperators(null);
 
     ReduceSinkDesc rsdesc = PlanUtils.getReduceSinkDesc(newSortCols, valueCols, outputColumns,
-        false, -1, partitionCols, newSortOrder.toString(), newNullOrder.toString(),
+        false, -1, partitionCols, newSortOrder.toString(), newNullOrder.toString(), defaultNullOrder,
         numReducers, acidOp);
     Operator interim = putOpInsertMap(OperatorFactory.getAndMakeChild(rsdesc,
         new RowSchema(rsRR.getColumnInfos()), input), rsRR);
@@ -9280,7 +9283,7 @@ private Operator genJoinReduceSinkChild(ExprNodeDesc[] joinKeys,
 
     ReduceSinkDesc rsDesc =
         PlanUtils.getReduceSinkDesc(reduceKeys, reduceValues, outputColumns, false, tag,
-            reduceKeys.size(), numReds, AcidUtils.Operation.NOT_ACID);
+            reduceKeys.size(), numReds, AcidUtils.Operation.NOT_ACID, defaultNullOrder);
 
     ReduceSinkOperator rsOp = (ReduceSinkOperator) putOpInsertMap(
         OperatorFactory.getAndMakeChild(rsDesc, new RowSchema(outputRR.getColumnInfos()),
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
index e20f6956b2..701382b587 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
@@ -65,6 +65,7 @@
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.parse.TypeCheckProcFactory;
 import org.apache.hadoop.hive.ql.session.SessionState;
+import org.apache.hadoop.hive.ql.util.NullOrdering;
 import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.serde2.AbstractSerDe;
 import org.apache.hadoop.hive.serde2.Deserializer;
@@ -707,7 +708,7 @@ public int compare(FieldSchema o1, FieldSchema o2) {
   public static ReduceSinkDesc getReduceSinkDesc(
       List<ExprNodeDesc> keyCols, List<ExprNodeDesc> valueCols,
       List<String> outputColumnNames, boolean includeKeyCols, int tag,
-      List<ExprNodeDesc> partitionCols, String order, String nullOrder,
+      List<ExprNodeDesc> partitionCols, String order, String nullOrder, NullOrdering defaultNullOrder,
       int numReducers, AcidUtils.Operation writeType) {
     return getReduceSinkDesc(keyCols, keyCols.size(), valueCols,
         new ArrayList<List<Integer>>(),
@@ -715,7 +716,7 @@ public static ReduceSinkDesc getReduceSinkDesc(
         new ArrayList<String>(), includeKeyCols ?
         outputColumnNames.subList(keyCols.size(), outputColumnNames.size()) :
         outputColumnNames,
-        includeKeyCols, tag, partitionCols, order, nullOrder, numReducers, writeType);
+        includeKeyCols, tag, partitionCols, order, nullOrder, defaultNullOrder, numReducers, writeType);
   }
 
   /**
@@ -752,7 +753,7 @@ public static ReduceSinkDesc getReduceSinkDesc(
       List<String> outputKeyColumnNames,
      List<String> outputValueColumnNames,
       boolean includeKeyCols, int tag,
-      List<ExprNodeDesc> partitionCols, String order, String nullOrder,
+      List<ExprNodeDesc> partitionCols, String order, String nullOrder, NullOrdering defaultNullOrder,
       int numReducers, AcidUtils.Operation writeType) {
     TableDesc keyTable = null;
     TableDesc valueTable = null;
@@ -765,7 +766,7 @@ public static ReduceSinkDesc getReduceSinkDesc(
         order = order + "+";
       }
       if (nullOrder.length() < outputKeyColumnNames.size()) {
-        nullOrder = nullOrder + "a";
+        nullOrder = nullOrder + defaultNullOrder.getSign();
       }
       keyTable = getReduceKeyTableDesc(keySchema, order, nullOrder);
       outputKeyCols.addAll(outputKeyColumnNames);
@@ -809,7 +810,8 @@
   public static ReduceSinkDesc getReduceSinkDesc(
       List<ExprNodeDesc> keyCols, List<ExprNodeDesc> valueCols,
       List<String> outputColumnNames, boolean includeKey, int tag,
-      int numPartitionFields, int numReducers, AcidUtils.Operation writeType)
+      int numPartitionFields, int numReducers, AcidUtils.Operation writeType,
+      NullOrdering defaultNullOrder)
       throws SemanticException {
     return getReduceSinkDesc(keyCols, keyCols.size(), valueCols,
         new ArrayList<List<Integer>>(),
@@ -818,7 +820,7 @@ public static ReduceSinkDesc getReduceSinkDesc(
         includeKey ?
             outputColumnNames.subList(keyCols.size(), outputColumnNames.size()) :
             outputColumnNames,
-        includeKey, tag, numPartitionFields, numReducers, writeType);
+        includeKey, tag, numPartitionFields, numReducers, writeType, defaultNullOrder);
   }
 
   /**
@@ -853,7 +855,8 @@ public static ReduceSinkDesc getReduceSinkDesc(
       List<List<Integer>> distinctColIndices,
       List<String> outputKeyColumnNames, List<String> outputValueColumnNames,
       boolean includeKey, int tag,
-      int numPartitionFields, int numReducers, AcidUtils.Operation writeType)
+      int numPartitionFields, int numReducers, AcidUtils.Operation writeType,
+      NullOrdering defaultNullOrder)
       throws SemanticException {
 
     ArrayList<ExprNodeDesc> partitionCols = new ArrayList<ExprNodeDesc>();
@@ -870,11 +873,11 @@ public static ReduceSinkDesc getReduceSinkDesc(
     StringBuilder nullOrder = new StringBuilder();
     for (int i = 0; i < keyCols.size(); i++) {
       order.append("+");
-      nullOrder.append("a");
+      nullOrder.append(defaultNullOrder.getSign());
     }
     return getReduceSinkDesc(keyCols, numKeys, valueCols, distinctColIndices,
         outputKeyColumnNames, outputValueColumnNames, includeKey, tag,
-        partitionCols, order.toString(), nullOrder.toString(), numReducers, writeType);
+        partitionCols, order.toString(), nullOrder.toString(), defaultNullOrder, numReducers, writeType);
   }
 
   /**
diff --git ql/src/java/org/apache/hadoop/hive/ql/util/NullOrdering.java ql/src/java/org/apache/hadoop/hive/ql/util/NullOrdering.java
index 6bf1db272a..f46a9a6268 100644
--- ql/src/java/org/apache/hadoop/hive/ql/util/NullOrdering.java
+++ ql/src/java/org/apache/hadoop/hive/ql/util/NullOrdering.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hive.ql.util;
 
+import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.parse.HiveParser;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils.NullValueOption;
 
@@ -24,18 +25,20 @@
 * Enum for converting different Null ordering description types.
 */
 public enum NullOrdering {
-  NULLS_FIRST(1, HiveParser.TOK_NULLS_FIRST, NullValueOption.MAXVALUE),
-  NULLS_LAST(0, HiveParser.TOK_NULLS_LAST, NullValueOption.MINVALUE);
+  NULLS_FIRST(1, HiveParser.TOK_NULLS_FIRST, NullValueOption.MAXVALUE, 'a'),
+  NULLS_LAST(0, HiveParser.TOK_NULLS_LAST, NullValueOption.MINVALUE, 'z');
 
-  NullOrdering(int code, int token, NullValueOption nullValueOption) {
+  NullOrdering(int code, int token, NullValueOption nullValueOption, char sign) {
     this.code = code;
     this.token = token;
     this.nullValueOption = nullValueOption;
+    this.sign = sign;
   }
 
   private final int code;
   private final int token;
   private final NullValueOption nullValueOption;
+  private final char sign;
 
   public static NullOrdering fromToken(int token) {
     for (NullOrdering nullOrdering : NullOrdering.values()) {
@@ -55,6 +58,11 @@ public static NullOrdering fromCode(int code) {
     throw new EnumConstantNotPresentException(NullOrdering.class, "No enum constant present with code " + code);
   }
 
+  public static NullOrdering defaultNullOrder(HiveConf hiveConf) {
+    return HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_DEFAULT_NULLS_LAST) ?
+        NullOrdering.NULLS_LAST : NullOrdering.NULLS_FIRST;
+  }
+
   public int getCode() {
     return code;
   }
@@ -66,4 +74,8 @@ public int getToken() {
   public NullValueOption getNullValueOption() {
     return nullValueOption;
   }
+
+  public char getSign() {
+    return sign;
+  }
 }
diff --git ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java
index a78fdfc394..3507ce9184 100644
--- ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java
+++ ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java
@@ -27,6 +27,7 @@
 
 import org.apache.hadoop.hive.metastore.Warehouse;
 
+import org.apache.hadoop.hive.ql.util.NullOrdering;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -252,7 +253,7 @@ private void populateMapRedPlan1(Table src) throws SemanticException {
     Operator<ReduceSinkDesc> op1 = OperatorFactory.get(ctx, PlanUtils
         .getReduceSinkDesc(Utilities.makeList(getStringColumn("key")),
             Utilities.makeList(getStringColumn("value")), outputColumns, true,
-            -1, 1, -1, AcidUtils.Operation.NOT_ACID));
+            -1, 1, -1, AcidUtils.Operation.NOT_ACID, NullOrdering.NULLS_LAST));
 
     addMapWork(mr, src, "a", op1);
     ReduceWork rWork = new ReduceWork();
@@ -285,7 +286,7 @@ private void populateMapRedPlan2(Table src) throws Exception {
         .getReduceSinkDesc(Utilities.makeList(getStringColumn("key")),
             Utilities
                 .makeList(getStringColumn("key"), getStringColumn("value")),
-            outputColumns, false, -1, 1, -1, AcidUtils.Operation.NOT_ACID));
+            outputColumns, false, -1, 1, -1, AcidUtils.Operation.NOT_ACID, NullOrdering.NULLS_LAST));
 
     addMapWork(mr, src, "a", op1);
     ReduceWork rWork = new ReduceWork();
@@ -321,14 +322,14 @@ private void populateMapRedPlan3(Table src, Table src2) throws SemanticException
     Operator<ReduceSinkDesc> op1 = OperatorFactory.get(ctx, PlanUtils
         .getReduceSinkDesc(Utilities.makeList(getStringColumn("key")),
             Utilities.makeList(getStringColumn("value")), outputColumns, true,
-            Byte.valueOf((byte) 0), 1, -1, AcidUtils.Operation.NOT_ACID));
+            Byte.valueOf((byte) 0), 1, -1, AcidUtils.Operation.NOT_ACID, NullOrdering.NULLS_LAST));
 
     addMapWork(mr, src, "a", op1);
 
     Operator<ReduceSinkDesc> op2 = OperatorFactory.get(ctx, PlanUtils
         .getReduceSinkDesc(Utilities.makeList(getStringColumn("key")),
             Utilities.makeList(getStringColumn("key")), outputColumns, true,
-            Byte.valueOf((byte) 1), Integer.MAX_VALUE, -1, AcidUtils.Operation.NOT_ACID));
+            Byte.valueOf((byte) 1), Integer.MAX_VALUE, -1, AcidUtils.Operation.NOT_ACID, NullOrdering.NULLS_LAST));
 
     addMapWork(mr, src2, "b", op2);
     ReduceWork rWork = new ReduceWork();
@@ -364,7 +365,8 @@ private void populateMapRedPlan4(Table src) throws SemanticException {
     Operator<ReduceSinkDesc> op1 = OperatorFactory.get(ctx, PlanUtils
         .getReduceSinkDesc(Utilities.makeList(getStringColumn("tkey")),
             Utilities.makeList(getStringColumn("tkey"),
-                getStringColumn("tvalue")), outputColumns, false, -1, 1, -1, AcidUtils.Operation.NOT_ACID));
+                getStringColumn("tvalue")), outputColumns, false, -1, 1, -1,
+                AcidUtils.Operation.NOT_ACID, NullOrdering.NULLS_LAST));
 
     Operator<ScriptDesc> op0 = OperatorFactory.get(new ScriptDesc("cat",
         PlanUtils.getDefaultTableDesc("" + Utilities.tabCode, "key,value"),
@@ -409,7 +411,7 @@ private void populateMapRedPlan5(Table src) throws SemanticException {
     Operator<ReduceSinkDesc> op0 = OperatorFactory.get(ctx, PlanUtils
         .getReduceSinkDesc(Utilities.makeList(getStringColumn("0")), Utilities
             .makeList(getStringColumn("0"), getStringColumn("1")),
-            outputColumns, false, -1, 1, -1, AcidUtils.Operation.NOT_ACID));
+            outputColumns, false, -1, 1, -1, AcidUtils.Operation.NOT_ACID, NullOrdering.NULLS_LAST));
 
     Operator<SelectDesc> op4 = OperatorFactory.get(new SelectDesc(Utilities
         .makeList(getStringColumn("key"), getStringColumn("value")),
@@ -444,7 +446,8 @@ private void populateMapRedPlan6(Table src) throws Exception {
     Operator<ReduceSinkDesc> op1 = OperatorFactory.get(ctx, PlanUtils
         .getReduceSinkDesc(Utilities.makeList(getStringColumn("tkey")),
             Utilities.makeList(getStringColumn("tkey"),
-                getStringColumn("tvalue")), outputColumns, false, -1, 1, -1, AcidUtils.Operation.NOT_ACID));
+                getStringColumn("tvalue")), outputColumns, false, -1, 1, -1,
+                AcidUtils.Operation.NOT_ACID, NullOrdering.NULLS_LAST));
 
     Operator<ScriptDesc> op0 = OperatorFactory.get(new ScriptDesc(
         "\'cat\'", PlanUtils.getDefaultTableDesc("" + Utilities.tabCode,
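
Note for reviewers: a minimal sketch of how the new API added by this patch fits together. NullOrdering.defaultNullOrder(HiveConf), getSign(), and HiveConf.ConfVars.HIVE_DEFAULT_NULLS_LAST come from the patch itself; the harness class below is hypothetical and not part of the change.

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.util.NullOrdering;

// Hypothetical harness, not part of this patch.
public class DefaultNullOrderExample {
  public static void main(String[] args) {
    HiveConf conf = new HiveConf();
    // hive.default.nulls.last is the flag read by defaultNullOrder().
    conf.setBoolVar(HiveConf.ConfVars.HIVE_DEFAULT_NULLS_LAST, true);

    // Resolves to NULLS_LAST when the flag is true, NULLS_FIRST otherwise.
    NullOrdering defaultNullOrder = NullOrdering.defaultNullOrder(conf);

    // PlanUtils now pads the ReduceSink null-order string from the configured
    // default instead of the hard-coded 'a': one sign character per sort key,
    // where 'a' means nulls sort first and 'z' means nulls sort last.
    StringBuilder nullOrder = new StringBuilder();
    for (int i = 0; i < 3; i++) {
      nullOrder.append(defaultNullOrder.getSign());
    }
    System.out.println(nullOrder); // prints "zzz"
  }
}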