diff --git itests/src/test/resources/testconfiguration.properties itests/src/test/resources/testconfiguration.properties
index c910712..ddabd72 100644
--- itests/src/test/resources/testconfiguration.properties
+++ itests/src/test/resources/testconfiguration.properties
@@ -381,6 +381,7 @@ minillaplocal.shared.query.files=alter_merge_2_orc.q,\
  vector_reduce2.q,\
  vector_reduce3.q,\
  vector_reduce_groupby_decimal.q,\
+  vector_reduce_groupby_duplicate_cols.q,\
  vector_row__id.q,\
  vector_string_concat.q,\
  vector_struct_in.q,\
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupKeyHelper.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupKeyHelper.java
index 13a929b..d0acbba 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupKeyHelper.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupKeyHelper.java
@@ -32,7 +32,7 @@
  */
 public class VectorGroupKeyHelper extends VectorColumnSetInfo {
 
-  private int[] outputColumnNums;
+  private int[] inputColumnNums;
 
   public VectorGroupKeyHelper(int keyCount) {
     super(keyCount);
@@ -44,13 +44,15 @@ void init(VectorExpression[] keyExpressions) throws HiveException {
     // case, we use the keyCount passed to the constructor and not keyExpressions.length.
 
     // Inspect the output type of each key expression. And, remember the output columns.
-    outputColumnNums = new int[keyCount];
+    inputColumnNums = new int[keyCount];
     for(int i = 0; i < keyCount; ++i) {
       VectorExpression keyExpression = keyExpressions[i];
       TypeInfo typeInfo = keyExpression.getOutputTypeInfo();
       Type columnVectorType = VectorizationContext.getColumnVectorTypeFromTypeInfo(typeInfo);
       addKey(columnVectorType);
-      outputColumnNums[i] = keyExpression.getOutputColumnNum();
+
+      // The input column is the output of the key expression.
+      inputColumnNums[i] = keyExpression.getOutputColumnNum();
     }
     finishAdding();
   }
@@ -64,10 +66,13 @@ void init(VectorExpression[] keyExpressions) throws HiveException {
    */
   public void copyGroupKey(VectorizedRowBatch inputBatch, VectorizedRowBatch outputBatch,
           DataOutputBuffer buffer) throws HiveException {
+
+    int outputColumnNum = 0;
+
     for(int i = 0; i< longIndices.length; ++i) {
-      final int columnIndex = outputColumnNums[longIndices[i]];
-      LongColumnVector inputColumnVector = (LongColumnVector) inputBatch.cols[columnIndex];
-      LongColumnVector outputColumnVector = (LongColumnVector) outputBatch.cols[columnIndex];
+      final int inputColumnNum = inputColumnNums[longIndices[i]];
+      LongColumnVector inputColumnVector = (LongColumnVector) inputBatch.cols[inputColumnNum];
+      LongColumnVector outputColumnVector = (LongColumnVector) outputBatch.cols[outputColumnNum++];
 
       // This vectorized code pattern says:
       //    If the input batch has no nulls at all (noNulls is true) OR
@@ -91,9 +96,9 @@ public void copyGroupKey(VectorizedRowBatch inputBatch, VectorizedRowBatch outpu
       }
     }
     for(int i=0;i
[...]
+                  Filter Operator
+                    Filter Vectorization:
+                        className: VectorFilterOperator
+                        native: true
+                        predicateExpression: FilterExprAndExpr(children: SelectColumnIsNotNull(col 0:int), SelectColumnIsNotNull(col 1:int))
+                    predicate: (one is not null and two is not null) (type: boolean)
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Inner Join 0 to 1
+                      keys:
+                        0 one (type: int), two (type: int)
+                        1 1 (type: int), 2 (type: int)
+                      Map Join Vectorization:
+                          bigTableKeyColumnNums: [0, 1]
+                          bigTableRetainedColumnNums: [0, 1]
+                          bigTableValueColumnNums: [0, 1]
+                          className: VectorMapJoinInnerBigOnlyMultiKeyOperator
+                          native: true
+                          nativeConditionsMet: hive.mapjoin.optimized.hashtable IS true, hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Small table vectorizes IS true, Optimized Table and Supports Key Types IS true
+                          projectedOutputColumnNums: [0, 1]
+                      outputColumnNames: _col0, _col1
+                      input vertices:
+                        1 Map 1
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        Group By Vectorization:
+                            className: VectorGroupByOperator
+                            groupByMode: HASH
+                            keyExpressions: col 0:int, col 1:int
+                            native: false
+                            vectorProcessingMode: HASH
+                            projectedOutputColumnNums: []
+                        keys: _col0 (type: int), _col1 (type: int)
+                        mode: hash
+                        outputColumnNames: _col0, _col1
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int), _col1 (type: int)
+                          sort order: ++
+                          Map-reduce partition columns: _col0 (type: int), _col1 (type: int)
+                          Reduce Sink Vectorization:
+                              className: VectorReduceSinkOperator
+                              native: false
+                              nativeConditionsMet: hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsNotMet: hive.vectorized.execution.reducesink.new.enabled IS false
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: vectorized, llap
+            LLAP IO: no inputs
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
+                inputFormatFeatureSupport: [DECIMAL_64]
+                vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
+                featureSupportInUse: []
+                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 2
+                    includeColumns: [0, 1]
+                    dataColumns: one:int, two:int
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: []
+        Reducer 3
+            Execution mode: vectorized, llap
+            Reduce Vectorization:
+                enabled: true
+                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
+                reduceColumnNullOrder: aa
+                reduceColumnSortOrder: ++
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 2
+                    dataColumns: KEY._col0:int, KEY._col1:int
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: []
+            Reduce Operator Tree:
+              Group By Operator
+                Group By Vectorization:
+                    className: VectorGroupByOperator
+                    groupByMode: MERGEPARTIAL
+                    keyExpressions: col 0:int, col 1:int, col 0:int
+                    native: false
+                    vectorProcessingMode: MERGE_PARTIAL
+                    projectedOutputColumnNums: []
+                keys: KEY._col0 (type: int), KEY._col1 (type: int), KEY._col0 (type: int)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col2 (type: int), _col1 (type: int), _col2 (type: int)
+                  outputColumnNames: _col0, _col1, _col2
+                  Select Vectorization:
+                      className: VectorSelectOperator
+                      native: true
+                      projectedOutputColumnNums: [2, 1, 2]
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    File Sink Vectorization:
+                        className: VectorFileSinkOperator
+                        native: false
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select one as one_0, two, one as one_1
+from demo a
+join (select 1 as one, 2 as two) b
+on a.one = b.one and a.two = b.two
+group by a.one, a.two, a.one
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Input: default@demo
+#### A masked pattern was here ####
+POSTHOOK: query: select one as one_0, two, one as one_1
+from demo a
+join (select 1 as one, 2 as two) b
+on a.one = b.one and a.two = b.two
+group by a.one, a.two, a.one
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Input: default@demo
+#### A masked pattern was here ####
+one_0	two	one_1
+1	2	1
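
Note on the fix: copyGroupKey used to index the output batch with the key expression's output column number. With duplicate grouping keys, as in "group by a.one, a.two, a.one" above, the first and third key expressions report the same column number, so two keys collapsed onto one output column and the last key column of the merge batch was never written. The patch instead assigns output columns with the running outputColumnNum counter. The following is a minimal standalone sketch of that indexing change, not Hive code: plain int arrays stand in for VectorizedRowBatch/LongColumnVector, the per-type copy loops are collapsed into one loop, and all column numbers and values are made up.

public class DuplicateKeyCopySketch {
  public static void main(String[] args) {
    // Three grouping keys over two physical columns: keys 0 and 2 are the
    // same expression, so both report input column 0 (cols: 0, 1, 0).
    int[] inputColumnNums = {0, 1, 0};

    // One "row" of the input batch: column 0 holds 1, column 1 holds 2.
    int[] inputBatch = {1, 2};

    // The merge-partial output batch has one column per key: three columns.
    int[] buggyOutput = new int[3];
    int[] fixedOutput = new int[3];

    // Old indexing: output column == key expression's output column, so keys
    // 0 and 2 both write output column 0, and output column 2 stays unwritten.
    for (int i = 0; i < inputColumnNums.length; ++i) {
      buggyOutput[inputColumnNums[i]] = inputBatch[inputColumnNums[i]];
    }

    // Fixed indexing: a running counter gives every key its own output column.
    int outputColumnNum = 0;
    for (int i = 0; i < inputColumnNums.length; ++i) {
      fixedOutput[outputColumnNum++] = inputBatch[inputColumnNums[i]];
    }

    System.out.println(java.util.Arrays.toString(buggyOutput)); // [1, 2, 0]
    System.out.println(java.util.Arrays.toString(fixedOutput)); // [1, 2, 1]
  }
}

In the test query all three keys are ints, so they all flow through the longIndices loop and the counter assigns output columns 0, 1, 2 in key order; the expected "1 2 1" row in the .q.out above corresponds to the fixedOutput case.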