commit bdde692a5054cf52808e5fe5f6b0c21ca536ec94
Author: Ivan Suller
Date:   Fri Jul 5 15:18:12 2019 +0200

    HIVE-21962

    Change-Id: I0220f26d5f196ed839712c86212d3c43e8ed1390

diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java
index d5f51bfc9c..697d3b2374 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java
@@ -575,7 +575,7 @@ public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx ctx,
       ReduceSinkDesc conf = op.getConf();
       List<FieldNode> colLists = new ArrayList<>();
-      ArrayList<ExprNodeDesc> keys = conf.getKeyCols();
+      List<ExprNodeDesc> keys = conf.getKeyCols();
       LOG.debug("Reduce Sink Operator " + op.getIdentifier() + " key:" + keys);
       for (ExprNodeDesc key : keys) {
         colLists = mergeFieldNodesWithDesc(colLists, key);
@@ -874,8 +874,7 @@ private void handleChildren(SelectOperator op,
   private static boolean[] getPruneReduceSinkOpRetainFlags(
       List<String> retainedParentOpOutputCols, ReduceSinkOperator reduce) {
     ReduceSinkDesc reduceConf = reduce.getConf();
-    java.util.ArrayList<ExprNodeDesc> originalValueEval = reduceConf
-        .getValueCols();
+    List<ExprNodeDesc> originalValueEval = reduceConf.getValueCols();
     boolean[] flags = new boolean[originalValueEval.size()];
     for (int i = 0; i < originalValueEval.size(); i++) {
       flags[i] = false;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java
index b4cc76ac49..de61be83b0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java
@@ -639,7 +639,7 @@ private boolean convertJoinBucketMapJoin(JoinOperator joinOp, OptimizeTezProcCon
     ReduceSinkOperator bigTableRS = (ReduceSinkOperator)joinOp.getParentOperators().get(bigTablePosition);
     OpTraits opTraits = bigTableRS.getOpTraits();
     List<List<String>> listBucketCols = opTraits.getBucketColNames();
-    ArrayList<ExprNodeDesc> bigTablePartitionCols = bigTableRS.getConf().getPartitionCols();
+    List<ExprNodeDesc> bigTablePartitionCols = bigTableRS.getConf().getPartitionCols();
     boolean updatePartitionCols = false;
     List<Integer> positions = new ArrayList<>();
@@ -691,8 +691,8 @@ private boolean convertJoinBucketMapJoin(JoinOperator joinOp, OptimizeTezProcCon
       }
       ReduceSinkOperator rsOp = (ReduceSinkOperator) op;
-      ArrayList<ExprNodeDesc> newPartitionCols = new ArrayList<>();
-      ArrayList<ExprNodeDesc> partitionCols = rsOp.getConf().getPartitionCols();
+      List<ExprNodeDesc> newPartitionCols = new ArrayList<>();
+      List<ExprNodeDesc> partitionCols = rsOp.getConf().getPartitionCols();
       for (Integer position : positions) {
         newPartitionCols.add(partitionCols.get(position));
       }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ReduceSinkMapJoinProc.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ReduceSinkMapJoinProc.java
index 81684be9c4..89b55001f0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ReduceSinkMapJoinProc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ReduceSinkMapJoinProc.java
@@ -177,7 +177,7 @@ public static Object processReduceSinkToHashJoin(ReduceSinkOperator parentRS, Ma
           keyCount = rowCount = Long.MAX_VALUE;
         }
         tableSize = stats.getDataSize();
-        ArrayList<String> keyCols = parentRS.getConf().getOutputKeyColumnNames();
+        List<String> keyCols = parentRS.getConf().getOutputKeyColumnNames();
         if (keyCols != null && !keyCols.isEmpty()) {
          // See if we can arrive at a smaller number using distinct stats from key columns.
           long maxKeyCount = 1;
@@ -334,7 +334,6 @@ public static Object processReduceSinkToHashJoin(ReduceSinkOperator parentRS, Ma
     // create an new operator: HashTableDummyOperator, which share the table desc
     HashTableDummyDesc desc = new HashTableDummyDesc();
-    @SuppressWarnings("unchecked")
     HashTableDummyOperator dummyOp = (HashTableDummyOperator) OperatorFactory.get(
         parentRS.getCompilationOpContext(), desc);
     TableDesc tbl;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplicationUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplicationUtils.java
index 6919da8586..28ddecca9a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplicationUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplicationUtils.java
@@ -144,7 +144,7 @@ public static boolean merge(ReduceSinkOperator cRS, ReduceSinkOperator pRS, int
       if (parentPCs == null || parentPCs.isEmpty()) {
         // If partitioning columns of the parent RS are not assigned,
         // assign partitioning columns of the child RS to the parent RS.
-        ArrayList<ExprNodeDesc> childPCs = cRS.getConf().getPartitionCols();
+        List<ExprNodeDesc> childPCs = cRS.getConf().getPartitionCols();
         pRS.getConf().setPartitionCols(ExprNodeDescUtils.backtrack(childPCs, cRS, pRS));
       }
     }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/OpProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/OpProcFactory.java
index 825ece6a18..119baea95f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/OpProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/OpProcFactory.java
@@ -609,7 +609,7 @@ public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
       RowSchema schema = rop.getSchema();
       ReduceSinkDesc desc = rop.getConf();
       List<ExprNodeDesc> keyCols = desc.getKeyCols();
-      ArrayList<String> keyColNames = desc.getOutputKeyColumnNames();
+      List<String> keyColNames = desc.getOutputKeyColumnNames();
       for (int i = 0; i < keyCols.size(); i++) {
         // order-bys, joins
         ColumnInfo column = schema.getColumnInfo(Utilities.ReduceField.KEY + "." + keyColNames.get(i));
@@ -620,7 +620,7 @@ public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
           ExprProcFactory.getExprDependency(lCtx, inpOp, keyCols.get(i), outputMap));
       }
       List<ExprNodeDesc> valCols = desc.getValueCols();
-      ArrayList<String> valColNames = desc.getOutputValueColumnNames();
+      List<String> valColNames = desc.getOutputValueColumnNames();
       for (int i = 0; i < valCols.size(); i++) {
         // todo: currently, bucketing,etc. makes RS differently with those for order-bys or joins
         ColumnInfo column = schema.getColumnInfo(valColNames.get(i));
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingOpProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingOpProcFactory.java
index e7776e595c..01fb734f73 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingOpProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingOpProcFactory.java
@@ -545,7 +545,7 @@ static void extractTraits(BucketingSortingCtx bctx, ReduceSinkOperator rop, Oper
   static List<SortCol> extractSortCols(ReduceSinkOperator rop, List<ColumnInfo> outputValues) {
     String sortOrder = rop.getConf().getOrder();
     List<SortCol> sortCols = new ArrayList<SortCol>();
-    ArrayList<ExprNodeDesc> keyCols = rop.getConf().getKeyCols();
+    List<ExprNodeDesc> keyCols = rop.getConf().getKeyCols();
     for (int i = 0; i < keyCols.size(); i++) {
       ExprNodeDesc keyCol = keyCols.get(i);
       if (!(keyCol instanceof ExprNodeColumnDesc)) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
index 52e8dcb090..8e45c6502e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
@@ -26,6 +26,7 @@
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.IdentityHashMap;
@@ -1375,7 +1376,7 @@ private boolean verifyAndSetVectorPartDesc(
       List<TypeInfo> allTypeInfoList,
       Set<String> inputFileFormatClassNameSet,
       Map<VectorPartitionDesc, VectorPartitionDesc> vectorPartitionDescMap,
-      Set<String> enabledConditionsMetSet, ArrayList<String> enabledConditionsNotMetList,
+      Set<String> enabledConditionsMetSet, List<String> enabledConditionsNotMetList,
       Set<Support> newSupportSet, List<TypeInfo> dataTypeInfoList) {

     Class<? extends InputFormat> inputFileFormatClass = pd.getInputFileFormatClass();
@@ -1672,12 +1673,12 @@ private boolean hasUnsupportedVectorizedParquetDataType(

   private void setValidateInputFormatAndSchemaEvolutionExplain(MapWork mapWork,
       Set<String> inputFileFormatClassNameSet,
       Map<VectorPartitionDesc, VectorPartitionDesc> vectorPartitionDescMap,
-      Set<String> enabledConditionsMetSet, ArrayList<String> enabledConditionsNotMetList) {
+      Collection<String> enabledConditionsMetSet, Collection<String> enabledConditionsNotMetList) {
     mapWork.setVectorizationInputFileFormatClassNameSet(inputFileFormatClassNameSet);
     ArrayList<VectorPartitionDesc> vectorPartitionDescList = new ArrayList<VectorPartitionDesc>();
     vectorPartitionDescList.addAll(vectorPartitionDescMap.keySet());
     mapWork.setVectorPartitionDescList(vectorPartitionDescList);
-    mapWork.setVectorizationEnabledConditionsMet(new ArrayList<String>(enabledConditionsMetSet));
+    mapWork.setVectorizationEnabledConditionsMet(enabledConditionsMetSet);
     mapWork.setVectorizationEnabledConditionsNotMet(enabledConditionsNotMetList);
   }
@@ -1721,7 +1722,7 @@ private void setValidateInputFormatAndSchemaEvolutionExplain(MapWork mapWork,
     Map<VectorPartitionDesc, VectorPartitionDesc> vectorPartitionDescMap =
         new LinkedHashMap<VectorPartitionDesc, VectorPartitionDesc>();
     Set<String> enabledConditionsMetSet = new HashSet<String>();
-    ArrayList<String> enabledConditionsNotMetList = new ArrayList<String>();
+    List<String> enabledConditionsNotMetList = new ArrayList<String>();
     Set<Support> inputFormatSupportSet = new TreeSet<Support>();
     boolean outsideLoopIsFirstPartition = true;
@@ -1933,7 +1934,7 @@ private void setValidateInputFormatAndSchemaEvolutionExplain(MapWork mapWork,
     ArrayList<VectorPartitionDesc> vectorPartitionDescList = new ArrayList<VectorPartitionDesc>();
     vectorPartitionDescList.addAll(vectorPartitionDescMap.keySet());
     mapWork.setVectorPartitionDescList(vectorPartitionDescList);
-    mapWork.setVectorizationEnabledConditionsMet(new ArrayList<String>(enabledConditionsMetSet));
+    mapWork.setVectorizationEnabledConditionsMet(enabledConditionsMetSet);
     mapWork.setVectorizationEnabledConditionsNotMet(enabledConditionsNotMetList);

     return new ImmutablePair<Boolean, Boolean>(true, false);
@@ -1950,7 +1951,7 @@ private void validateAndVectorizeMapWork(MapWork mapWork, VectorTaskColumnInfo v
     if (onlyOneTableScanPair == null) {
       VectorizerReason notVectorizedReason = currentBaseWork.getNotVectorizedReason();
       Preconditions.checkState(notVectorizedReason != null);
-      mapWork.setVectorizationEnabledConditionsNotMet(Arrays.asList(new String[] {notVectorizedReason.toString()}));
+      mapWork.setVectorizationEnabledConditionsNotMet(Collections.singleton(notVectorizedReason.toString()));
       return;
     }
     String alias = onlyOneTableScanPair.left;
@@ -1967,7 +1968,7 @@ private void validateAndVectorizeMapWork(MapWork mapWork, VectorTaskColumnInfo v
       if (!validateInputFormatAndSchemaEvolutionPair.right) {
         VectorizerReason notVectorizedReason = currentBaseWork.getNotVectorizedReason();
         Preconditions.checkState(notVectorizedReason != null);
-        mapWork.setVectorizationEnabledConditionsNotMet(Arrays.asList(new String[] {notVectorizedReason.toString()}));
+        mapWork.setVectorizationEnabledConditionsNotMet(Collections.singleton(notVectorizedReason.toString()));
       }
       return;
     }
@@ -4139,7 +4140,7 @@ private boolean canSpecializeReduceSink(ReduceSinkDesc desc,
       vectorReduceSinkInfo.setReduceSinkKeyExpressions(reduceSinkKeyExpressions);
     }

-    ArrayList<ExprNodeDesc> valueDescs = desc.getValueCols();
+    List<ExprNodeDesc> valueDescs = desc.getValueCols();
     final boolean isEmptyValue = (valueDescs.size() == 0);
     if (!isEmptyValue) {
       VectorExpression[] allValueExpressions = vContext.getVectorExpressions(valueDescs);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
index 61ea28a5f5..eb6aa7c41f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
@@ -18,7 +18,6 @@

 package org.apache.hadoop.hive.ql.plan;

-import java.util.ArrayList;
 import java.util.List;
 import java.util.Objects;
 import java.util.Set;
@@ -60,7 +59,7 @@
   // bucketed or sorted table/partition they cannot be merged.
   private boolean canBeMerged;
   private int totalFiles;
-  private ArrayList<ExprNodeDesc> partitionCols;
+  private List<ExprNodeDesc> partitionCols;
   private int numFiles;
   private DynamicPartitionCtx dpCtx;
   private String staticSpec; // static partition spec ends with a '/'
@@ -124,7 +123,7 @@ public FileSinkDesc() {
   public FileSinkDesc(final Path dirName, final TableDesc tableInfo,
       final boolean compressed, final int destTableId, final boolean multiFileSpray,
       final boolean canBeMerged, final int numFiles, final int totalFiles,
-      final ArrayList<ExprNodeDesc> partitionCols, final DynamicPartitionCtx dpCtx, Path destPath,
+      final List<ExprNodeDesc> partitionCols, final DynamicPartitionCtx dpCtx, Path destPath,
       Long mmWriteId, boolean isMmCtas, boolean isInsertOverwrite, boolean isQuery) {

     this.dirName = dirName;
@@ -371,14 +370,14 @@ public void setTotalFiles(int totalFiles) {
   /**
    * @return the partitionCols
    */
-  public ArrayList<ExprNodeDesc> getPartitionCols() {
+  public List<ExprNodeDesc> getPartitionCols() {
     return partitionCols;
   }

   /**
    * @param partitionCols the partitionCols to set
    */
-  public void setPartitionCols(ArrayList<ExprNodeDesc> partitionCols) {
+  public void setPartitionCols(List<ExprNodeDesc> partitionCols) {
     this.partitionCols = partitionCols;
   }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
index bb063c52be..b63c295d61 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
@@ -829,7 +829,7 @@ public void setVectorPartitionDescList(List<VectorPartitionDesc> vectorPartition
     return vectorPartitionDescList;
   }

-  public void setVectorizationEnabledConditionsMet(ArrayList<String> vectorizationEnabledConditionsMet) {
+  public void setVectorizationEnabledConditionsMet(Collection<String> vectorizationEnabledConditionsMet) {
     this.vectorizationEnabledConditionsMet = vectorizationEnabledConditionsMet == null ? null : VectorizationCondition.addBooleans(
         vectorizationEnabledConditionsMet, true);
   }
@@ -838,7 +838,7 @@ public void setVectorizationEnabledConditionsMet(ArrayList<String> vectorization
     return vectorizationEnabledConditionsMet;
   }

-  public void setVectorizationEnabledConditionsNotMet(List<String> vectorizationEnabledConditionsNotMet) {
+  public void setVectorizationEnabledConditionsNotMet(Collection<String> vectorizationEnabledConditionsNotMet) {
     this.vectorizationEnabledConditionsNotMet = vectorizationEnabledConditionsNotMet == null ?
         null : VectorizationCondition.addBooleans(
             vectorizationEnabledConditionsNotMet, false);
   }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
index b1a04b3a50..217a7633bc 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
@@ -18,6 +18,8 @@

 package org.apache.hadoop.hive.ql.plan;

+import static org.apache.hive.common.util.HiveStringUtils.quoteComments;
+
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -67,7 +69,6 @@
 import org.apache.hadoop.hive.serde2.AbstractSerDe;
 import org.apache.hadoop.hive.serde2.DelimitedJSONSerDe;
 import org.apache.hadoop.hive.serde2.Deserializer;
-import org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe;
 import org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe;
 import org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe;
 import org.apache.hadoop.hive.serde2.lazy.LazySerDeParameters;
@@ -84,7 +85,6 @@
 import org.apache.hadoop.mapred.TextInputFormat;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import static org.apache.hive.common.util.HiveStringUtils.quoteComments;

 /**
  * PlanUtils.
@@ -723,9 +723,9 @@ public int compare(FieldSchema o1, FieldSchema o2) {
    * @return The reduceSinkDesc object.
    */
   public static ReduceSinkDesc getReduceSinkDesc(
-      ArrayList<ExprNodeDesc> keyCols, ArrayList<ExprNodeDesc> valueCols,
+      List<ExprNodeDesc> keyCols, List<ExprNodeDesc> valueCols,
       List<String> outputColumnNames, boolean includeKeyCols, int tag,
-      ArrayList<ExprNodeDesc> partitionCols, String order, String nullOrder,
+      List<ExprNodeDesc> partitionCols, String order, String nullOrder,
       int numReducers, AcidUtils.Operation writeType) {
     return getReduceSinkDesc(keyCols, keyCols.size(), valueCols,
         new ArrayList<List<Integer>>(),
@@ -764,18 +764,18 @@ public static ReduceSinkDesc getReduceSinkDesc(
    * @return The reduceSinkDesc object.
    */
   public static ReduceSinkDesc getReduceSinkDesc(
-      final ArrayList<ExprNodeDesc> keyCols, int numKeys,
-      ArrayList<ExprNodeDesc> valueCols,
+      final List<ExprNodeDesc> keyCols, int numKeys,
+      List<ExprNodeDesc> valueCols,
       List<List<Integer>> distinctColIndices,
       List<String> outputKeyColumnNames,
       List<String> outputValueColumnNames,
       boolean includeKeyCols, int tag,
-      ArrayList<ExprNodeDesc> partitionCols, String order, String nullOrder,
+      List<ExprNodeDesc> partitionCols, String order, String nullOrder,
       int numReducers, AcidUtils.Operation writeType) {
     TableDesc keyTable = null;
     TableDesc valueTable = null;
-    ArrayList<String> outputKeyCols = new ArrayList<String>();
-    ArrayList<String> outputValCols = new ArrayList<String>();
+    List<String> outputKeyCols = new ArrayList<String>();
+    List<String> outputValCols = new ArrayList<String>();
     if (includeKeyCols) {
       List<FieldSchema> keySchema = getFieldSchemasFromColumnListWithLength(
           keyCols, distinctColIndices, outputKeyColumnNames, numKeys, "");
@@ -825,7 +825,7 @@ public static ReduceSinkDesc getReduceSinkDesc(
    * @return The reduceSinkDesc object.
    */
   public static ReduceSinkDesc getReduceSinkDesc(
-      ArrayList<ExprNodeDesc> keyCols, ArrayList<ExprNodeDesc> valueCols,
+      List<ExprNodeDesc> keyCols, List<ExprNodeDesc> valueCols,
       List<String> outputColumnNames, boolean includeKey, int tag,
       int numPartitionFields, int numReducers, AcidUtils.Operation writeType)
       throws SemanticException {
@@ -867,8 +867,7 @@ public static ReduceSinkDesc getReduceSinkDesc(
    * @return The reduceSinkDesc object.
    */
   public static ReduceSinkDesc getReduceSinkDesc(
-      ArrayList<ExprNodeDesc> keyCols, int numKeys,
-      ArrayList<ExprNodeDesc> valueCols,
+      List<ExprNodeDesc> keyCols, int numKeys, List<ExprNodeDesc> valueCols,
       List<List<Integer>> distinctColIndices,
       List<String> outputKeyColumnNames, List<String> outputValueColumnNames,
       boolean includeKey, int tag,
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceSinkDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceSinkDesc.java
index b1d8e1feb1..9257375786 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceSinkDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceSinkDesc.java
@@ -48,14 +48,14 @@
   /**
    * Key columns are passed to reducer in the "key".
    */
-  private java.util.ArrayList<ExprNodeDesc> keyCols;
-  private java.util.ArrayList<String> outputKeyColumnNames;
+  private List<ExprNodeDesc> keyCols;
+  private List<String> outputKeyColumnNames;
   private List<List<Integer>> distinctColumnIndices;
   /**
    * Value columns are passed to reducer in the "value".
    */
-  private java.util.ArrayList<ExprNodeDesc> valueCols;
-  private java.util.ArrayList<String> outputValueColumnNames;
+  private List<ExprNodeDesc> valueCols;
+  private List<String> outputValueColumnNames;
   /**
    * Describe how to serialize the key.
    */
@@ -86,7 +86,7 @@
    * Partition columns decide the reducer that the current row goes to.
    * Partition columns are not passed to reducer.
    */
-  private java.util.ArrayList<ExprNodeDesc> partitionCols;
+  private List<ExprNodeDesc> partitionCols;

   private int numReducers;
@@ -134,13 +134,11 @@ private ReducerTraits(int trait) {
   public ReduceSinkDesc() {
   }

-  public ReduceSinkDesc(ArrayList<ExprNodeDesc> keyCols,
+  public ReduceSinkDesc(List<ExprNodeDesc> keyCols,
       int numDistributionKeys,
-      ArrayList<ExprNodeDesc> valueCols,
-      ArrayList<String> outputKeyColumnNames,
+      List<ExprNodeDesc> valueCols, List<String> outputKeyColumnNames,
       List<List<Integer>> distinctColumnIndices,
-      ArrayList<String> outputValueColumnNames, int tag,
-      ArrayList<ExprNodeDesc> partitionCols, int numReducers,
+      List<String> outputValueColumnNames, int tag, List<ExprNodeDesc> partitionCols, int numReducers,
       final TableDesc keySerializeInfo, final TableDesc valueSerializeInfo,
       AcidUtils.Operation writeType) {
     this.keyCols = keyCols;
@@ -162,9 +160,9 @@ public ReduceSinkDesc(ArrayList<ExprNodeDesc> keyCols,
   @Override
   public Object clone() {
     ReduceSinkDesc desc = new ReduceSinkDesc();
-    desc.setKeyCols((ArrayList<ExprNodeDesc>) getKeyCols().clone());
-    desc.setValueCols((ArrayList<ExprNodeDesc>) getValueCols().clone());
-    desc.setOutputKeyColumnNames((ArrayList<String>) getOutputKeyColumnNames().clone());
+    desc.setKeyCols(new ArrayList<ExprNodeDesc>(getKeyCols()));
+    desc.setValueCols(new ArrayList<ExprNodeDesc>(getValueCols()));
+    desc.setOutputKeyColumnNames(new ArrayList<String>(getOutputKeyColumnNames()));
     List<List<Integer>> distinctColumnIndicesClone = new ArrayList<List<Integer>>();
     for (List<Integer> distinctColumnIndex : getDistinctColumnIndices()) {
       List<Integer> tmp = new ArrayList<Integer>();
@@ -172,11 +170,11 @@ public Object clone() {
       distinctColumnIndicesClone.add(tmp);
     }
     desc.setDistinctColumnIndices(distinctColumnIndicesClone);
-    desc.setOutputValueColumnNames((ArrayList<String>) getOutputValueColumnNames().clone());
+    desc.setOutputValueColumnNames(new ArrayList<String>(getOutputValueColumnNames()));
     desc.setNumDistributionKeys(getNumDistributionKeys());
     desc.setTag(getTag());
     desc.setNumReducers(getNumReducers());
-    desc.setPartitionCols((ArrayList<ExprNodeDesc>) getPartitionCols().clone());
+    desc.setPartitionCols(new ArrayList<ExprNodeDesc>(getPartitionCols()));
     desc.setKeySerializeInfo((TableDesc) getKeySerializeInfo().clone());
     desc.setValueSerializeInfo((TableDesc) getValueSerializeInfo().clone());
     desc.setNumBuckets(numBuckets);
@@ -190,7 +188,7 @@ public Object clone() {
     return desc;
   }

-  public java.util.ArrayList<String> getOutputKeyColumnNames() {
+  public List<String> getOutputKeyColumnNames() {
     return outputKeyColumnNames;
   }
@@ -210,7 +208,7 @@ public void setOutputKeyColumnNames(
     this.outputKeyColumnNames = outputKeyColumnNames;
   }

-  public java.util.ArrayList<String> getOutputValueColumnNames() {
+  public List<String> getOutputValueColumnNames() {
     return outputValueColumnNames;
   }
@@ -235,7 +233,7 @@ public String getKeyColString() {
     return PlanUtils.getExprListString(keyCols);
   }

-  public java.util.ArrayList<ExprNodeDesc> getKeyCols() {
+  public List<ExprNodeDesc> getKeyCols() {
     return keyCols;
   }
@@ -257,11 +255,11 @@ public String getValueColsString() {
     return PlanUtils.getExprListString(valueCols);
   }

-  public java.util.ArrayList<ExprNodeDesc> getValueCols() {
+  public List<ExprNodeDesc> getValueCols() {
     return valueCols;
   }

-  public void setValueCols(final java.util.ArrayList<ExprNodeDesc> valueCols) {
+  public void setValueCols(List<ExprNodeDesc> valueCols) {
     this.valueCols = valueCols;
   }
@@ -276,12 +274,12 @@ public String getUserLevelExplainParitionColsString() {
     return PlanUtils.getExprListString(partitionCols, true);
   }

-  public java.util.ArrayList<ExprNodeDesc> getPartitionCols() {
+  public List<ExprNodeDesc> getPartitionCols() {
     return partitionCols;
   }

   public void setPartitionCols(
-      final java.util.ArrayList<ExprNodeDesc> partitionCols) {
+      final List<ExprNodeDesc> partitionCols) {
     this.partitionCols = partitionCols;
   }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorizationCondition.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorizationCondition.java
index a419195616..c9b574b0f3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorizationCondition.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorizationCondition.java
@@ -20,6 +20,7 @@

 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.List;

 public class VectorizationCondition {
@@ -60,7 +61,7 @@ public String getConditionName() {
     return notMetList;
   }

-  public static List<String> addBooleans(List<String> conditions, boolean flag) {
+  public static List<String> addBooleans(Collection<String> conditions, boolean flag) {
     ArrayList<String> result = new ArrayList<String>(conditions.size());
     for (String condition : conditions) {
       result.add(condition + " IS " + flag);
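
The recurring pattern in this patch is to declare fields, parameters, and return types against the java.util.List and java.util.Collection interfaces instead of the concrete ArrayList class. That in turn lets the unchecked (ArrayList<T>) ... .clone() casts be replaced with copy constructors, and single-element Arrays.asList(new String[] {...}) wrappers with Collections.singleton. A minimal, self-contained sketch of the pattern follows; the class and member names are illustrative only and are not part of the Hive code base.

import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;

// Illustrative sketch only; InterfaceTypedDesc is a hypothetical class, not Hive code.
public class InterfaceTypedDesc {

  // Field and accessors are declared against the List interface, not ArrayList.
  private List<String> keyCols;

  public InterfaceTypedDesc(List<String> keyCols) {
    this.keyCols = keyCols;
  }

  public List<String> getKeyCols() {
    return keyCols;
  }

  // Copy constructor replaces the unchecked (ArrayList<String>) getKeyCols().clone() cast.
  public InterfaceTypedDesc copy() {
    return new InterfaceTypedDesc(new ArrayList<>(getKeyCols()));
  }

  // Accepting Collection lets callers pass a Set or a List without converting first.
  public static List<String> addBooleans(Collection<String> conditions, boolean flag) {
    List<String> result = new ArrayList<>(conditions.size());
    for (String condition : conditions) {
      result.add(condition + " IS " + flag);
    }
    return result;
  }

  public static void main(String[] args) {
    // Collections.singleton(x) replaces Arrays.asList(new String[] {x}) for one element.
    System.out.println(addBooleans(Collections.singleton("Vectorization enabled"), true));
  }
}

One caveat: Collections.singleton returns an immutable Set rather than a List, which is safe in the setVectorizationEnabledConditionsNotMet call sites above because the widened Collection<String> parameter is only iterated by addBooleans.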