diff --git itests/hive-blobstore/src/test/results/clientpositive/insert_into_dynamic_partitions.q.out itests/hive-blobstore/src/test/results/clientpositive/insert_into_dynamic_partitions.q.out index 82cf417..3783c15 100644 --- itests/hive-blobstore/src/test/results/clientpositive/insert_into_dynamic_partitions.q.out +++ itests/hive-blobstore/src/test/results/clientpositive/insert_into_dynamic_partitions.q.out @@ -107,8 +107,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col1 (type: string), _bucket_number (type: string), _col0 (type: int) null sort order: aaa + numBuckets: 2 sort order: +++ Map-reduce partition columns: _col1 (type: string) Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE @@ -161,6 +163,7 @@ expressions: KEY._col0 (type: int), KEY._col1 (type: string), KEY._bucket_number (type: string) outputColumnNames: _col0, _col1, _bucket_number File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 directory: ### BLOBSTORE_STAGING_PATH ### diff --git itests/hive-blobstore/src/test/results/clientpositive/insert_into_table.q.out itests/hive-blobstore/src/test/results/clientpositive/insert_into_table.q.out index bab8942..92c785c 100644 --- itests/hive-blobstore/src/test/results/clientpositive/insert_into_table.q.out +++ itests/hive-blobstore/src/test/results/clientpositive/insert_into_table.q.out @@ -79,6 +79,7 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 directory: ### BLOBSTORE_STAGING_PATH ### @@ -123,7 +124,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 424 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 
1 Data size: 424 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -178,6 +181,7 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 440 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -188,6 +192,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0 columns.types struct escape.delim \ @@ -249,6 +254,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 directory: ### BLOBSTORE_STAGING_PATH ### @@ -372,6 +378,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 directory: ### BLOBSTORE_STAGING_PATH ### diff --git itests/hive-blobstore/src/test/results/clientpositive/insert_overwrite_dynamic_partitions.q.out itests/hive-blobstore/src/test/results/clientpositive/insert_overwrite_dynamic_partitions.q.out index fc8f3d0..91e95c4 100644 --- itests/hive-blobstore/src/test/results/clientpositive/insert_overwrite_dynamic_partitions.q.out +++ itests/hive-blobstore/src/test/results/clientpositive/insert_overwrite_dynamic_partitions.q.out @@ -125,8 +125,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col1 (type: string), _bucket_number (type: string), _col0 (type: int) null sort order: aaa + numBuckets: 2 sort order: +++ Map-reduce partition columns: _col1 (type: string) Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE @@ -179,6 +181,7 @@ expressions: KEY._col0 (type: int), KEY._col1 (type: string), KEY._bucket_number (type: string) outputColumnNames: _col0, _col1, _bucket_number File Output Operator + bucketingVersion: 2 compressed: 
false GlobalTableId: 1 directory: ### BLOBSTORE_STAGING_PATH ### diff --git itests/hive-blobstore/src/test/results/clientpositive/insert_overwrite_table.q.out itests/hive-blobstore/src/test/results/clientpositive/insert_overwrite_table.q.out index 9903d69..96e77ed 100644 --- itests/hive-blobstore/src/test/results/clientpositive/insert_overwrite_table.q.out +++ itests/hive-blobstore/src/test/results/clientpositive/insert_overwrite_table.q.out @@ -87,6 +87,7 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 directory: ### BLOBSTORE_STAGING_PATH ### @@ -131,7 +132,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 424 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 424 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -186,6 +189,7 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 440 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -196,6 +200,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0 columns.types struct escape.delim \ @@ -257,6 +262,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 directory: ### BLOBSTORE_STAGING_PATH ### @@ -380,6 +386,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 directory: ### BLOBSTORE_STAGING_PATH ### diff --git itests/hive-blobstore/src/test/results/clientpositive/write_final_output_blobstore.q.out 
itests/hive-blobstore/src/test/results/clientpositive/write_final_output_blobstore.q.out index 2addf92..2dd98ef 100644 --- itests/hive-blobstore/src/test/results/clientpositive/write_final_output_blobstore.q.out +++ itests/hive-blobstore/src/test/results/clientpositive/write_final_output_blobstore.q.out @@ -61,8 +61,10 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE @@ -130,6 +132,7 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -154,8 +157,10 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -195,6 +200,7 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 directory: ### BLOBSTORE_STAGING_PATH ### @@ -242,6 +248,7 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 428 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -252,6 +259,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0 columns.types struct escape.delim \ @@ -341,8 +349,10 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 4 Basic 
stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE @@ -410,6 +420,7 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -434,8 +445,10 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -475,6 +488,7 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 directory: ### BLOBSTORE_STAGING_PATH ### @@ -522,6 +536,7 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 428 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -532,6 +547,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0 columns.types struct escape.delim \ diff --git itests/src/test/resources/testconfiguration.properties itests/src/test/resources/testconfiguration.properties index 3e89071..d4025ab 100644 --- itests/src/test/resources/testconfiguration.properties +++ itests/src/test/resources/testconfiguration.properties @@ -683,6 +683,7 @@ multiMapJoin2.q,\ multi_in_clause.q,\ murmur_hash_migration.q,\ + murmur_hash_migration2.q,\ non_native_window_udf.q,\ optimize_join_ptp.q,\ orc_analyze.q,\ diff --git 
ql/src/java/org/apache/hadoop/hive/ql/exec/JoinUtil.java ql/src/java/org/apache/hadoop/hive/ql/exec/JoinUtil.java index 3e1100c..4dcd59b 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/JoinUtil.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/JoinUtil.java @@ -23,6 +23,7 @@ import java.util.Map.Entry; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import org.apache.hadoop.hive.ql.exec.persistence.RowContainer; import org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat; import org.apache.hadoop.hive.ql.metadata.HiveException; @@ -215,7 +216,7 @@ // Compute the values int reserve = hasFilter ? valueFields.size() + 1 : valueFields.size(); - List nr = new ArrayList(reserve); + List nr = new ArrayList(reserve); for (int i = 0; i < valueFields.size(); i++) { nr.add(ObjectInspectorUtils.copyToStandardObject(valueFields.get(i) .evaluate(row), valueFieldsOI.get(i), @@ -350,6 +351,7 @@ + Utilities.ctrlaCode, org.apache.hadoop.hive.serde.serdeConstants.LIST_COLUMNS, colNames .toString(), + hive_metastoreConstants.TABLE_BUCKETING_VERSION, "-1", org.apache.hadoop.hive.serde.serdeConstants.LIST_COLUMN_TYPES, colTypes.toString(), serdeConstants.SERIALIZATION_LIB,LazyBinarySerDe.class.getName())); diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java index 753f25b..a11cabf 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java @@ -92,7 +92,6 @@ protected final transient Collection> asyncInitOperations = new HashSet<>(); private String marker; - protected int bucketingVersion = -1; // It can be optimized later so that an operator operator (init/close) is performed // only after that operation has been performed on all the parents. 
This will require // initializing the whole tree in all the mappers (which might be required for mappers @@ -1544,12 +1543,4 @@ } return true; } - - public void setBucketingVersion(int bucketingVersion) { - this.bucketingVersion = bucketingVersion; - } - - public int getBucketingVersion() { - return bucketingVersion; - } } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java index e97fcef..da26e4f 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java @@ -40,13 +40,10 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorSparkPartitionPruningSinkOperator; import org.apache.hadoop.hive.ql.exec.vector.VectorTopNKeyOperator; import org.apache.hadoop.hive.ql.exec.vector.VectorizationContext; -import org.apache.hadoop.hive.ql.exec.vector.reducesink.VectorReduceSinkCommonOperator; import org.apache.hadoop.hive.ql.exec.vector.ptf.VectorPTFOperator; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.optimizer.spark.SparkPartitionPruningSinkDesc; import org.apache.hadoop.hive.ql.parse.spark.SparkPartitionPruningSinkOperator; -import org.apache.hadoop.hive.ql.plan.AbstractOperatorDesc; -import org.apache.hadoop.hive.ql.plan.AbstractVectorDesc; import org.apache.hadoop.hive.ql.plan.AppMasterEventDesc; import org.apache.hadoop.hive.ql.plan.CollectDesc; import org.apache.hadoop.hive.ql.plan.CommonMergeJoinDesc; @@ -266,9 +263,6 @@ Operator ret = get(oplist0.getCompilationOpContext(), (Class) conf.getClass()); ret.setConf(conf); - // Set the bucketing Version - ret.setBucketingVersion(oplist0.getBucketingVersion()); - // Add the new operator as child of each of the passed in operators List children = oplist0.getChildOperators(); children.add(ret); @@ -340,7 +334,9 @@ Operator ret = get(ctx, (Class) conf.getClass()); ret.setConf(conf); ret.setSchema(rwsch); - if 
(oplist.length == 0) return ret; + if (oplist.length == 0) { + return ret; + } // Add the new operator as child of each of the passed in operators for (Operator op : oplist) { diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java index ce0f08d..964c98d 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java @@ -233,7 +233,7 @@ // incase of ACID updates/deletes. boolean acidOp = conf.getWriteType() == AcidUtils.Operation.UPDATE || conf.getWriteType() == AcidUtils.Operation.DELETE; - hashFunc = bucketingVersion == 2 && !acidOp ? + hashFunc = getConf().getBucketingVersion() == 2 && !acidOp ? ObjectInspectorUtils::getBucketHashCode : ObjectInspectorUtils::getBucketHashCodeOld; } catch (Exception e) { @@ -430,7 +430,7 @@ * For Acid Update/Delete case, we expect a single partitionEval of the form * UDFToInteger(ROW__ID) and buckNum == -1 so that the result of this method * is to return the bucketId extracted from ROW__ID unless it optimized by - * {@link org.apache.hadoop.hive.ql.optimizer.SortedDynPartitionOptimizer} + * {@link org.apache.hadoop.hive.ql.optimizer.SortedDynPartitionOptimizer} */ private int computeHashCode(Object row, int buckNum) throws HiveException { // Evaluate the HashCode diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java index 900642e..d8c46c2 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java @@ -800,6 +800,7 @@ serdeConstants.SERIALIZATION_FORMAT, "" + Utilities.ctrlaCode, serdeConstants.LIST_COLUMNS, cols, serdeConstants.LIST_COLUMN_TYPES, colTypes, + hive_metastoreConstants.TABLE_BUCKETING_VERSION, "-1", serdeConstants.SERIALIZATION_LIB,LazySimpleSerDe.class.getName()))); } diff --git 
ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/PTFRowContainer.java ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/PTFRowContainer.java index ca5f585..156f84d 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/PTFRowContainer.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/PTFRowContainer.java @@ -29,6 +29,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter; +import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat; import org.apache.hadoop.hive.ql.metadata.HiveException; @@ -237,6 +238,7 @@ serdeConstants.SERIALIZATION_FORMAT, ""+ Utilities.ctrlaCode, serdeConstants.LIST_COLUMNS, colNames.toString(), serdeConstants.LIST_COLUMN_TYPES,colTypes.toString(), + hive_metastoreConstants.TABLE_BUCKETING_VERSION, "-1", serdeConstants.SERIALIZATION_LIB,LazyBinarySerDe.class.getName())); return tblDesc; } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkObjectHashOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkObjectHashOperator.java index 2192274..bf86b48 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkObjectHashOperator.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkObjectHashOperator.java @@ -184,7 +184,7 @@ } // Set hashFunc - hashFunc = bucketingVersion == 2 && !vectorDesc.getIsAcidChange() ? + hashFunc = getConf().getBucketingVersion() == 2 && !vectorDesc.getIsAcidChange() ? ObjectInspectorUtils::getBucketHashCode : ObjectInspectorUtils::getBucketHashCodeOld; @@ -232,21 +232,21 @@ ve.evaluate(batch); } } - + // Perform any value expressions. Results will go into scratch columns. 
if (reduceSinkValueExpressions != null) { for (VectorExpression ve : reduceSinkValueExpressions) { ve.evaluate(batch); } } - + // Perform any bucket expressions. Results will go into scratch columns. if (reduceSinkBucketExpressions != null) { for (VectorExpression ve : reduceSinkBucketExpressions) { ve.evaluate(batch); } } - + // Perform any partition expressions. Results will go into scratch columns. if (reduceSinkPartitionExpressions != null) { for (VectorExpression ve : reduceSinkPartitionExpressions) { @@ -296,7 +296,9 @@ private void processKey(VectorizedRowBatch batch, int batchIndex, int tag) throws HiveException{ - if (isEmptyKey) return; + if (isEmptyKey) { + return; + } try { keyBinarySortableSerializeWrite.reset(); @@ -318,7 +320,9 @@ } private void processValue(VectorizedRowBatch batch, int batchIndex) throws HiveException { - if (isEmptyValue) return; + if (isEmptyValue) { + return; + } try { valueLazyBinarySerializeWrite.reset(); diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketVersionPopulator.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketVersionPopulator.java new file mode 100644 index 0000000..3c6e681 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketVersionPopulator.java @@ -0,0 +1,213 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.optimizer; + +import java.util.ArrayList; +import java.util.HashSet; +import java.util.IdentityHashMap; +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.Set; +import java.util.Stack; + +import org.apache.hadoop.hive.ql.exec.FileSinkOperator; +import org.apache.hadoop.hive.ql.exec.Operator; +import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator; +import org.apache.hadoop.hive.ql.exec.TableScanOperator; +import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker; +import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher; +import org.apache.hadoop.hive.ql.lib.Node; +import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; +import org.apache.hadoop.hive.ql.lib.SemanticDispatcher; +import org.apache.hadoop.hive.ql.lib.SemanticGraphWalker; +import org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor; +import org.apache.hadoop.hive.ql.lib.SemanticRule; +import org.apache.hadoop.hive.ql.parse.ParseContext; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.Sets; + +public class BucketVersionPopulator extends Transform { + + protected static final Logger LOG = LoggerFactory.getLogger(BucketVersionPopulator.class); + + @Deprecated + + protected ParseContext pGraphContext; + + static class BucketingVersionResult { + Integer bucketingVersion; + + public BucketingVersionResult(Integer version) { + bucketingVersion = version; + } + + public BucketingVersionResult merge(BucketingVersionResult r) throws SemanticException { + if (bucketingVersion == r.bucketingVersion || r.bucketingVersion == -1) { + return new BucketingVersionResult(bucketingVersion); + } + if (bucketingVersion == -1) { + return new BucketingVersionResult(r.bucketingVersion); + } + throw new SemanticException("invalid state; can't set 
bucketingVersion correctly"); + } + + public BucketingVersionResult merge2(BucketingVersionResult r) { + if (bucketingVersion == r.bucketingVersion || r.bucketingVersion == -1) { + return new BucketingVersionResult(bucketingVersion); + } + return new BucketingVersionResult(2); + } + } + + @Deprecated + Set groups = new HashSet(); + + Map, OpGroup> b = new IdentityHashMap<>(); + + @Override + public ParseContext transform(ParseContext pctx) throws SemanticException { + pGraphContext = pctx; + findOpGroups(); + assignGroupVersions(); + return pctx; + } + + private void assignGroupVersions() { + Set g = groups; + for (OpGroup opGroup : g) { + opGroup.analyzeBucketVersion(); + opGroup.setBucketVersion(); + } + + } + + private ParseContext findOpGroups() throws SemanticException { + + NodeProcessorCtx ctx = new NodeProcessorCtx() { + }; + + Map opRules = new LinkedHashMap(); + + SemanticDispatcher disp = new DefaultRuleDispatcher(new SetPreferredBucketingVersionRule(), opRules, ctx); + // SemanticGraphWalker ogw = new PreOrderWalker(disp); + SemanticGraphWalker ogw = new DefaultGraphWalker(disp); + + ArrayList topNodes = new ArrayList(); + topNodes.addAll(pGraphContext.getTopOps().values()); + ogw.startWalking(topNodes, null); + return pGraphContext; + } + + class OpGroup { + Set> members = Sets.newIdentityHashSet(); + int version = -1; + + public OpGroup() { + groups.add(this); + } + + public void add(Operator o) { + members.add(o); + b.put(o, this); + } + + public void setBucketVersion() { + for (Operator operator : members) { + operator.getConf().setBucketingVersion(version); + } + } + + public void analyzeBucketVersion() { + for (Operator operator : members) { + if (operator instanceof TableScanOperator) { + TableScanOperator tso = (TableScanOperator) operator; + setVersion(tso.getConf().getTableMetadata().getBucketingVersion()); + } + if (operator instanceof FileSinkOperator) { + FileSinkOperator fso = (FileSinkOperator) operator; + int bucketingVersion = 
fso.getConf().getTableInfo().getBucketingVersion(); + setVersion(bucketingVersion); + } + } + if (version == -1) { + // use version 2 if possible + version = 2; + } + + } + + private void setVersion(int newVersion) { + if (version == newVersion || newVersion == -1) { + return; + } + if (version == -1) { + version = newVersion; + return; + } + throw new RuntimeException("Unable to set version"); + } + + public void merge(OpGroup opGroup) { + for (Operator operator : opGroup.members) { + add(operator); + } + opGroup.members.clear(); + } + + } + + + class SetPreferredBucketingVersionRule implements SemanticNodeProcessor { + + + @Override + public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, Object... nodeOutputs) + throws SemanticException { + Operator o = (Operator) nd; + OpGroup g; + if (nodeOutputs.length == 0) { + g = new OpGroup(); + } else { + g = (OpGroup) nodeOutputs[0]; + } + for (int i = 1; i < nodeOutputs.length; i++) { + g.merge((OpGroup) nodeOutputs[i]); + } + g.add(o); + if (o instanceof ReduceSinkOperator) { + // start a new group before the reduceSinkOperator + return new OpGroup(); + } else { + return g; + } + } + + private OpGroup getGroupFor(Operator o) { + OpGroup g = b.get(o.getParentOperators().get(0)); + for (int i = 1; i < o.getNumParent(); i++) { + g.merge(b.get(o.getParentOperators().get(i))); + } + return g; + } + + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java index d8d8cae..2bcc7bd 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java @@ -78,7 +78,6 @@ import org.slf4j.LoggerFactory; import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; import com.google.common.math.DoubleMath; /** @@ -207,7 +206,7 @@ // map join operator by default has no bucket cols and num 
of reduce sinks // reduced by 1 mapJoinOp.setOpTraits(new OpTraits(null, -1, null, - joinOp.getOpTraits().getNumReduceSinks(), joinOp.getOpTraits().getBucketingVersion())); + joinOp.getOpTraits().getNumReduceSinks())); preserveOperatorInfos(mapJoinOp, joinOp, context); // propagate this change till the next RS for (Operator childOp : mapJoinOp.getChildOperators()) { @@ -543,9 +542,9 @@ context.parseContext.getContext().getPlanMapper().link(joinOp, mergeJoinOp); int numReduceSinks = joinOp.getOpTraits().getNumReduceSinks(); OpTraits opTraits = new OpTraits(joinOp.getOpTraits().getBucketColNames(), numBuckets, - joinOp.getOpTraits().getSortCols(), numReduceSinks, - joinOp.getOpTraits().getBucketingVersion()); + joinOp.getOpTraits().getSortCols(), numReduceSinks); mergeJoinOp.setOpTraits(opTraits); + mergeJoinOp.getConf().setBucketingVersion(joinOp.getConf().getBucketingVersion()); preserveOperatorInfos(mergeJoinOp, joinOp, context); for (Operator parentOp : joinOp.getParentOperators()) { @@ -611,8 +610,7 @@ return; } currentOp.setOpTraits(new OpTraits(opTraits.getBucketColNames(), - opTraits.getNumBuckets(), opTraits.getSortCols(), opTraits.getNumReduceSinks(), - opTraits.getBucketingVersion())); + opTraits.getNumBuckets(), opTraits.getSortCols(), opTraits.getNumReduceSinks())); for (Operator childOp : currentOp.getChildOperators()) { if ((childOp instanceof ReduceSinkOperator) || (childOp instanceof GroupByOperator)) { break; @@ -670,8 +668,7 @@ // we can set the traits for this join operator opTraits = new OpTraits(joinOp.getOpTraits().getBucketColNames(), - tezBucketJoinProcCtx.getNumBuckets(), null, joinOp.getOpTraits().getNumReduceSinks(), - joinOp.getOpTraits().getBucketingVersion()); + tezBucketJoinProcCtx.getNumBuckets(), null, joinOp.getOpTraits().getNumReduceSinks()); mapJoinOp.setOpTraits(opTraits); preserveOperatorInfos(mapJoinOp, joinOp, context); setNumberOfBucketsOnChildren(mapJoinOp); @@ -811,32 +808,32 @@ // tables and version 2 for new tables. 
All the inputs to the SMB must be // from same version. This only applies to tables read directly and not // intermediate outputs of joins/groupbys - int bucketingVersion = -1; - for (Operator parentOp : joinOp.getParentOperators()) { - // Check if the parent is coming from a table scan, if so, what is the version of it. - assert parentOp.getParentOperators() != null && parentOp.getParentOperators().size() == 1; - Operator op = parentOp.getParentOperators().get(0); - while(op != null && !(op instanceof TableScanOperator - || op instanceof ReduceSinkOperator - || op instanceof CommonJoinOperator)) { - // If op has parents it is guaranteed to be 1. - List> parents = op.getParentOperators(); - Preconditions.checkState(parents.size() == 0 || parents.size() == 1); - op = parents.size() == 1 ? parents.get(0) : null; - } - - if (op instanceof TableScanOperator) { - int localVersion = ((TableScanOperator)op).getConf(). - getTableMetadata().getBucketingVersion(); - if (bucketingVersion == -1) { - bucketingVersion = localVersion; - } else if (bucketingVersion != localVersion) { - // versions dont match, return false. - LOG.debug("SMB Join can't be performed due to bucketing version mismatch"); - return false; - } - } - } + // int bucketingVersion = -1; + // for (Operator parentOp : joinOp.getParentOperators()) { + // // Check if the parent is coming from a table scan, if so, what is the version of it. + // assert parentOp.getParentOperators() != null && parentOp.getParentOperators().size() == 1; + // Operator op = parentOp; + // while(op != null && !(op instanceof TableScanOperator + // || op instanceof ReduceSinkOperator + // || op instanceof CommonJoinOperator)) { + // // If op has parents it is guaranteed to be 1. + // List> parents = op.getParentOperators(); + // Preconditions.checkState(parents.size() == 0 || parents.size() == 1); + // op = parents.size() == 1 ? 
parents.get(0) : null; + // } + // + // if (op instanceof TableScanOperator) { + // int localVersion = ((TableScanOperator)op).getConf(). + // getTableMetadata().getBucketingVersion(); + // if (bucketingVersion == -1) { + // bucketingVersion = localVersion; + // } else if (bucketingVersion != localVersion) { + // // versions dont match, return false. + // LOG.debug("SMB Join can't be performed due to bucketing version mismatch"); + // return false; + // } + // } + // } LOG.info("We can convert the join to an SMB join."); return true; @@ -1537,8 +1534,7 @@ joinOp.getOpTraits().getBucketColNames(), numReducers, null, - joinOp.getOpTraits().getNumReduceSinks(), - joinOp.getOpTraits().getBucketingVersion()); + joinOp.getOpTraits().getNumReduceSinks()); mapJoinOp.setOpTraits(opTraits); preserveOperatorInfos(mapJoinOp, joinOp, context); // propagate this change till the next RS diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java index da277d0..cb2680d 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java @@ -191,7 +191,9 @@ transformations.add(new FixedBucketPruningOptimizer(compatMode)); } - if(HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTREDUCEDEDUPLICATION)) { + transformations.add(new BucketVersionPopulator()); + + if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTREDUCEDEDUPLICATION)) { transformations.add(new ReduceSinkDeDuplication()); } transformations.add(new NonBlockingOpDeDupProc()); diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java index c98417a..21f6e21 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java @@ -45,7 +45,6 @@ 
import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.exec.Utilities.ReduceField; import org.apache.hadoop.hive.ql.io.AcidUtils; -import org.apache.hadoop.hive.ql.io.RecordIdentifier; import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker; import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher; import org.apache.hadoop.hive.ql.lib.SemanticDispatcher; @@ -222,7 +221,7 @@ /** * ROW__ID is always the 1st column of Insert representing Update/Delete operation * (set up in {@link org.apache.hadoop.hive.ql.parse.UpdateDeleteSemanticAnalyzer}) - * and we wrap it in UDFToInteger + * and we wrap it in UDFToInteger * (in {@link org.apache.hadoop.hive.ql.parse.SemanticAnalyzer#getPartitionColsFromBucketColsForUpdateDelete(Operator, boolean)}) * which extracts bucketId from it * see {@link org.apache.hadoop.hive.ql.udf.UDFToInteger#evaluate(RecordIdentifier)}*/ @@ -285,6 +284,7 @@ // Create ReduceSink operator ReduceSinkOperator rsOp = getReduceSinkOp(partitionPositions, sortPositions, sortOrder, sortNullOrder, allRSCols, bucketColumns, numBuckets, fsParent, fsOp.getConf().getWriteType()); + rsOp.getConf().setBucketingVersion(fsOp.getConf().getBucketingVersion()); List descs = new ArrayList(allRSCols.size()); List colNames = new ArrayList(); diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionTimeGranularityOptimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionTimeGranularityOptimizer.java index d458ebb..9d80f08 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionTimeGranularityOptimizer.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionTimeGranularityOptimizer.java @@ -211,6 +211,7 @@ } ReduceSinkOperator rsOp = getReduceSinkOp(keyPositions, sortOrder, sortNullOrder, allRSCols, granularitySelOp, fsOp.getConf().getWriteType()); + rsOp.getConf().setBucketingVersion(fsOp.getConf().getBucketingVersion()); // Create backtrack SelectOp final List 
descs = new ArrayList<>(allRSCols.size()); diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/opconventer/HiveTableScanVisitor.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/opconventer/HiveTableScanVisitor.java index 72411ec..14958aa 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/opconventer/HiveTableScanVisitor.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/opconventer/HiveTableScanVisitor.java @@ -108,7 +108,6 @@ // 2. Setup TableScan TableScanOperator ts = (TableScanOperator) OperatorFactory.get( hiveOpConverter.getSemanticAnalyzer().getOpContext(), tsd, new RowSchema(colInfos)); - ts.setBucketingVersion(tsd.getTableMetadata().getBucketingVersion()); //now that we let Calcite process subqueries we might have more than one // tablescan with same alias. diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplicationUtils.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplicationUtils.java index 28ddecc..4681ba7 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplicationUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplicationUtils.java @@ -208,7 +208,7 @@ throws SemanticException { return strictMerge(cRS, ImmutableList.of(pRS)); } - + public static boolean strictMerge(ReduceSinkOperator cRS, List pRSs) throws SemanticException { ReduceSinkDesc cRSc = cRS.getConf(); @@ -226,7 +226,7 @@ if (moveRSOrderTo == null) { return false; } - + int cKeySize = cRSc.getKeyCols().size(); for (int i = 0; i < cKeySize; i++) { ExprNodeDesc cExpr = cRSc.getKeyCols().get(i); @@ -240,7 +240,7 @@ return false; } } - + int cPartSize = cRSc.getPartitionCols().size(); for (int i = 0; i < cPartSize; i++) { ExprNodeDesc cExpr = cRSc.getPartitionCols().get(i); @@ -309,6 +309,9 @@ if (cConf.getDistinctColumnIndices().size() >= 2) { return null; } 
+ if (cConf.getBucketingVersion() != pConf.getBucketingVersion()) { + return null; + } Integer moveReducerNumTo = checkNumReducer(cConf.getNumReducers(), pConf.getNumReducers()); if (moveReducerNumTo == null || moveReducerNumTo > 0 && cConf.getNumReducers() < minReducer) { @@ -480,6 +483,9 @@ // ensure SEL does not branch protected static boolean checkSelectSingleBranchOnly(ReduceSinkOperator cRS, ReduceSinkOperator pRS) { Operator parent = cRS.getParentOperators().get(0); + if (cRS.getConf().getBucketingVersion() != pRS.getConf().getBucketingVersion()) { + return false; + } while (parent != pRS) { assert parent.getNumParent() == 1; if (!(parent instanceof SelectOperator)) { diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/metainfo/annotation/OpTraitsRulesProcFactory.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/metainfo/annotation/OpTraitsRulesProcFactory.java index c935b74..ffaccaf 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/metainfo/annotation/OpTraitsRulesProcFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/metainfo/annotation/OpTraitsRulesProcFactory.java @@ -97,12 +97,10 @@ List> listBucketCols = new ArrayList>(); int numBuckets = -1; int numReduceSinks = 1; - int bucketingVersion = -1; OpTraits parentOpTraits = rs.getParentOperators().get(0).getOpTraits(); if (parentOpTraits != null) { numBuckets = parentOpTraits.getNumBuckets(); numReduceSinks += parentOpTraits.getNumReduceSinks(); - bucketingVersion = parentOpTraits.getBucketingVersion(); } List bucketCols = new ArrayList<>(); @@ -162,9 +160,9 @@ listBucketCols.add(bucketCols); OpTraits opTraits = new OpTraits(listBucketCols, numBuckets, - listBucketCols, numReduceSinks, bucketingVersion); + listBucketCols, numReduceSinks); rs.setOpTraits(opTraits); - rs.setBucketingVersion(bucketingVersion); +// rs.getConf().setBucketingVersion(bucketingVersion); return null; } } @@ -243,7 +241,7 @@ } // num reduce sinks hardcoded to 0 because TS has no parents OpTraits 
opTraits = new OpTraits(bucketColsList, numBuckets, - sortedColsList, 0, table.getBucketingVersion()); + sortedColsList, 0); ts.setOpTraits(opTraits); return null; } @@ -269,15 +267,13 @@ List> listBucketCols = new ArrayList<>(); int numReduceSinks = 0; - int bucketingVersion = -1; OpTraits parentOpTraits = gbyOp.getParentOperators().get(0).getOpTraits(); if (parentOpTraits != null) { numReduceSinks = parentOpTraits.getNumReduceSinks(); - bucketingVersion = parentOpTraits.getBucketingVersion(); } listBucketCols.add(gbyKeys); OpTraits opTraits = new OpTraits(listBucketCols, -1, listBucketCols, - numReduceSinks, bucketingVersion); + numReduceSinks); gbyOp.setOpTraits(opTraits); return null; } @@ -313,16 +309,14 @@ List> listBucketCols = new ArrayList<>(); int numReduceSinks = 0; - int bucketingVersion = -1; OpTraits parentOptraits = ptfOp.getParentOperators().get(0).getOpTraits(); if (parentOptraits != null) { numReduceSinks = parentOptraits.getNumReduceSinks(); - bucketingVersion = parentOptraits.getBucketingVersion(); } listBucketCols.add(partitionKeys); OpTraits opTraits = new OpTraits(listBucketCols, -1, listBucketCols, - numReduceSinks, bucketingVersion); + numReduceSinks); ptfOp.setOpTraits(opTraits); return null; } @@ -392,7 +386,6 @@ int numBuckets = -1; int numReduceSinks = 0; - int bucketingVersion = -1; OpTraits parentOpTraits = selOp.getParentOperators().get(0).getOpTraits(); if (parentOpTraits != null) { // if bucket columns are empty, then numbuckets must be set to -1. @@ -401,10 +394,9 @@ numBuckets = parentOpTraits.getNumBuckets(); } numReduceSinks = parentOpTraits.getNumReduceSinks(); - bucketingVersion = parentOpTraits.getBucketingVersion(); } OpTraits opTraits = new OpTraits(listBucketCols, numBuckets, listSortCols, - numReduceSinks, bucketingVersion); + numReduceSinks); selOp.setOpTraits(opTraits); return null; } @@ -442,7 +434,7 @@ // The bucketingVersion is not relevant here as it is never used. 
// For SMB, we look at the parent tables' bucketing versions and for // bucket map join the big table's bucketing version is considered. - joinOp.setOpTraits(new OpTraits(bucketColsList, -1, bucketColsList, numReduceSinks, 2)); + joinOp.setOpTraits(new OpTraits(bucketColsList, -1, bucketColsList, numReduceSinks)); return null; } @@ -496,8 +488,6 @@ Operator operator = (Operator) nd; int numReduceSinks = 0; - int bucketingVersion = -1; - boolean bucketingVersionSeen = false; for (Operator parentOp : operator.getParentOperators()) { if (parentOp.getOpTraits() == null) { continue; @@ -505,17 +495,9 @@ if (parentOp.getOpTraits().getNumReduceSinks() > numReduceSinks) { numReduceSinks = parentOp.getOpTraits().getNumReduceSinks(); } - // If there is mismatch in bucketingVersion, then it should be set to - // -1, that way SMB will be disabled. - if (bucketingVersion == -1 && !bucketingVersionSeen) { - bucketingVersion = parentOp.getOpTraits().getBucketingVersion(); - bucketingVersionSeen = true; - } else if (bucketingVersion != parentOp.getOpTraits().getBucketingVersion()) { - bucketingVersion = -1; - } } OpTraits opTraits = new OpTraits(null, -1, - null, numReduceSinks, bucketingVersion); + null, numReduceSinks); operator.setOpTraits(opTraits); return null; } diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java index 9882916..990cd36 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java @@ -4010,9 +4010,6 @@ LOG.info("Vectorizer vectorizeOperator reduce sink class " + opClass.getSimpleName()); - // Get the bucketing version - int bucketingVersion = ((ReduceSinkOperator)op).getBucketingVersion(); - Operator vectorOp = null; try { vectorOp = OperatorFactory.getVectorOperator( @@ -4024,9 +4021,7 @@ throw new HiveException(e); } - // Set the bucketing version 
Preconditions.checkArgument(vectorOp instanceof VectorReduceSinkCommonOperator); - vectorOp.setBucketingVersion(bucketingVersion); return vectorOp; } diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkMapJoinOptimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkMapJoinOptimizer.java index 0638caf..bbfb853 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkMapJoinOptimizer.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkMapJoinOptimizer.java @@ -121,7 +121,7 @@ // we can set the traits for this join operator OpTraits opTraits = new OpTraits(bucketColNames, numBuckets, null, - joinOp.getOpTraits().getNumReduceSinks(), joinOp.getOpTraits().getBucketingVersion()); + joinOp.getOpTraits().getNumReduceSinks()); mapJoinOp.setOpTraits(opTraits); mapJoinOp.setStatistics(joinOp.getStatistics()); setNumberOfBucketsOnChildren(mapJoinOp); diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java index 4f1e23d..19eb1df 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java @@ -54,6 +54,7 @@ import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; +import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.CompilationOpContext; import org.apache.hadoop.hive.ql.Context; @@ -1758,6 +1759,7 @@ prop.setProperty("columns", colTypes[0]); prop.setProperty("columns.types", colTypes[1]); prop.setProperty(serdeConstants.SERIALIZATION_LIB, LazySimpleSerDe.class.getName()); + prop.setProperty(hive_metastoreConstants.TABLE_BUCKETING_VERSION, "-1"); FetchWork fetch = new 
FetchWork(ctx.getResFile(), new TableDesc(TextInputFormat.class, IgnoreKeyTextOutputFormat.class, prop), -1); diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index c87f2d2..f48cc71 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -11462,9 +11462,6 @@ if (properties != null) { tsDesc.setOpProps(properties); } - - // Set the bucketing Version - top.setBucketingVersion(tsDesc.getTableMetadata().getBucketingVersion()); } else { rwsch = opParseCtx.get(top).getRowResolver(); top.setChildOperators(null); diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java index caab056..b87d50b 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java @@ -25,6 +25,7 @@ import java.util.Deque; import java.util.HashMap; import java.util.HashSet; +import java.util.IdentityHashMap; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.LinkedHashSet; @@ -81,6 +82,7 @@ import org.apache.hadoop.hive.ql.lib.RuleRegExp; import org.apache.hadoop.hive.ql.log.PerfLogger; import org.apache.hadoop.hive.ql.metadata.Hive; +import org.apache.hadoop.hive.ql.optimizer.BucketVersionPopulator; import org.apache.hadoop.hive.ql.optimizer.ConstantPropagate; import org.apache.hadoop.hive.ql.optimizer.ConstantPropagateProcCtx.ConstantPropagateOption; import org.apache.hadoop.hive.ql.optimizer.ConvertJoinMapJoin; @@ -213,6 +215,9 @@ runStatsDependentOptimizations(procCtx, inputs, outputs); perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Run the optimizations that use stats for optimization"); + // repopulate bucket versions; join conversion may have created some new reducesinks + new 
BucketVersionPopulator().transform(pCtx); + perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER); if(procCtx.conf.getBoolVar(ConfVars.HIVEOPTJOINREDUCEDEDUPLICATION)) { new ReduceSinkJoinDeDuplication().transform(procCtx.parseContext); @@ -240,15 +245,7 @@ markOperatorsWithUnstableRuntimeStats(procCtx); perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "markOperatorsWithUnstableRuntimeStats"); - // ATTENTION : DO NOT, I REPEAT, DO NOT WRITE ANYTHING AFTER updateBucketingVersionForUpgrade() - // ANYTHING WHICH NEEDS TO BE ADDED MUST BE ADDED ABOVE - // This call updates the bucketing version of final ReduceSinkOp based on - // the bucketing version of FileSinkOp. This operation must happen at the - // end to ensure there is no further rewrite of plan which may end up - // removing/updating the ReduceSinkOp as was the case with SortedDynPartitionOptimizer - // Update bucketing version of ReduceSinkOp if needed - updateBucketingVersionForUpgrade(procCtx); - + bucketingVersionSanityCheck(procCtx); } private void runCycleAnalysisForPartitionPruning(OptimizeTezProcContext procCtx, @@ -1969,7 +1966,7 @@ } } - private void updateBucketingVersionForUpgrade(OptimizeTezProcContext procCtx) { + private void bucketingVersionSanityCheck(OptimizeTezProcContext procCtx) throws SemanticException { // Fetch all the FileSinkOperators. Set fsOpsAll = new HashSet<>(); for (TableScanOperator ts : procCtx.parseContext.getTopOps().values()) { @@ -1978,7 +1975,7 @@ fsOpsAll.addAll(fsOps); } - + Map, Integer> processedOperators = new IdentityHashMap<>(); for (FileSinkOperator fsOp : fsOpsAll) { // Look for direct parent ReduceSinkOp // If there are more than 1 parent, bail out. 
@@ -1991,8 +1988,22 @@ continue; } - // Found the target RSOp - parent.setBucketingVersion(fsOp.getConf().getTableInfo().getBucketingVersion()); + // Found the target RSOp 0 + int bucketingVersion = fsOp.getConf().getTableInfo().getBucketingVersion(); + if (fsOp.getConf().getTableInfo().getBucketingVersion() == -1) { + break; + } + if (fsOp.getConf().getTableInfo().getBucketingVersion() != fsOp.getConf().getBucketingVersion()) { + throw new RuntimeException("FsOp bucketingVersions is inconsistent with its tableinfo"); + } + if (processedOperators.containsKey(parent) && processedOperators.get(parent) != bucketingVersion) { + throw new SemanticException(String.format( + "Operator (%s) is already processed and is using bucketingVersion(%d); so it can't be changed to %d ", + parent, processedOperators.get(parent), bucketingVersion)); + } + processedOperators.put(parent, bucketingVersion); + + //parent.getConf().setBucketingVersion(bucketingVersion); break; } } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/spark/SparkCompiler.java ql/src/java/org/apache/hadoop/hive/ql/parse/spark/SparkCompiler.java index 09fa145..7e0c9d0 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/spark/SparkCompiler.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/spark/SparkCompiler.java @@ -145,17 +145,6 @@ new ConstantPropagate(ConstantPropagateProcCtx.ConstantPropagateOption.SHORTCUT).transform(pCtx); } - // ATTENTION : DO NOT, I REPEAT, DO NOT WRITE ANYTHING AFTER updateBucketingVersionForUpgrade() - // ANYTHING WHICH NEEDS TO BE ADDED MUST BE ADDED ABOVE - // This call updates the bucketing version of final ReduceSinkOp based on - // the bucketing version of FileSinkOp. 
This operation must happen at the - // end to ensure there is no further rewrite of plan which may end up - // removing/updating the ReduceSinkOp as was the case with SortedDynPartitionOptimizer - // Update bucketing version of ReduceSinkOp if needed - // Note: This has been copied here from TezCompiler, change seems needed for bucketing to work - // properly moving forward. - updateBucketingVersionForUpgrade(procCtx); - PERF_LOGGER.PerfLogEnd(CLASS_NAME, PerfLogger.SPARK_OPTIMIZE_OPERATOR_TREE); } @@ -636,36 +625,4 @@ PERF_LOGGER.PerfLogEnd(CLASS_NAME, PerfLogger.SPARK_OPTIMIZE_TASK_TREE); return; } - - private void updateBucketingVersionForUpgrade(OptimizeSparkProcContext procCtx) { - // Fetch all the FileSinkOperators. - Set fsOpsAll = new HashSet<>(); - for (TableScanOperator ts : procCtx.getParseContext().getTopOps().values()) { - Set fsOps = OperatorUtils.findOperators( - ts, FileSinkOperator.class); - fsOpsAll.addAll(fsOps); - } - - - for (FileSinkOperator fsOp : fsOpsAll) { - if (!fsOp.getConf().getTableInfo().isSetBucketingVersion()) { - continue; - } - // Look for direct parent ReduceSinkOp - // If there are more than 1 parent, bail out. 
- Operator parent = fsOp; - List> parentOps = parent.getParentOperators(); - while (parentOps != null && parentOps.size() == 1) { - parent = parentOps.get(0); - if (!(parent instanceof ReduceSinkOperator)) { - parentOps = parent.getParentOperators(); - continue; - } - - // Found the target RSOp - parent.setBucketingVersion(fsOp.getConf().getTableInfo().getBucketingVersion()); - break; - } - } - } } diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/AbstractOperatorDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/AbstractOperatorDesc.java index 65a107e..661da27 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/AbstractOperatorDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/AbstractOperatorDesc.java @@ -38,6 +38,7 @@ protected long memNeeded = 0; protected long memAvailable = 0; protected String runtimeStatsTmpDir; + protected int bucketingVersion = -2; /** * A map of output column name to input expression map. This is used by @@ -46,6 +47,8 @@ */ protected Map colExprMap; + private String myName = "N/A"; + @Override @Explain(skipHeader = true, displayName = "Statistics") public Statistics getStatistics() { @@ -171,4 +174,13 @@ throw new RuntimeException(); } + @Override + public int getBucketingVersion() { + return bucketingVersion; + } + + @Override + public void setBucketingVersion(int bucketingVersion) { + this.bucketingVersion = bucketingVersion; + } } diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java index f55c6ae..619f68e 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java @@ -129,7 +129,7 @@ final List partitionCols, final DynamicPartitionCtx dpCtx, Path destPath, Long mmWriteId, boolean isMmCtas, boolean isInsertOverwrite, boolean isQuery, boolean isCTASorCM, boolean isDirectInsert) { this.dirName = dirName; - this.tableInfo = tableInfo; + setTableInfo(tableInfo); 
this.compressed = compressed; this.destTableId = destTableId; this.multiFileSpray = multiFileSpray; @@ -152,7 +152,7 @@ final boolean compressed) { this.dirName = dirName; - this.tableInfo = tableInfo; + setTableInfo(tableInfo); this.compressed = compressed; destTableId = 0; this.multiFileSpray = false; @@ -268,6 +268,7 @@ public void setTableInfo(final TableDesc tableInfo) { this.tableInfo = tableInfo; + bucketingVersion = tableInfo.getBucketingVersion(); } @Explain(displayName = "compressed") @@ -616,6 +617,10 @@ return isMmCtas; } + @Explain(displayName = "bucketingVersion", explainLevels = { Level.EXTENDED }) + public int getBucketingVersionForExplain() { + return getBucketingVersion(); + } /** * Whether this is CREATE TABLE SELECT or CREATE MATERIALIZED VIEW statemet * Set by semantic analyzer this is required because CTAS/CM requires some special logic diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/OpTraits.java ql/src/java/org/apache/hadoop/hive/ql/plan/OpTraits.java index d3b62ce..246c089 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/OpTraits.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/OpTraits.java @@ -26,16 +26,13 @@ private List> sortColNames; private int numBuckets; private int numReduceSinks; - private int bucketingVersion; public OpTraits(List> bucketColNames, int numBuckets, - List> sortColNames, int numReduceSinks, - int bucketingVersion) { + List> sortColNames, int numReduceSinks) { this.bucketColNames = bucketColNames; this.numBuckets = numBuckets; this.sortColNames = sortColNames; this.numReduceSinks = numReduceSinks; - this.bucketingVersion = bucketingVersion; } public List> getBucketColNames() { @@ -71,17 +68,9 @@ return this.numReduceSinks; } - public void setBucketingVersion(int bucketingVersion) { - this.bucketingVersion = bucketingVersion; - } - - public int getBucketingVersion() { - return bucketingVersion; - } - @Override public String toString() { return "{ bucket column names: " + bucketColNames + "; sort column 
names: " - + sortColNames + "; bucket count: " + numBuckets + "; bucketing version: " + bucketingVersion + " }"; + + sortColNames + "; bucket count: " + numBuckets + "}"; } } diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/OperatorDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/OperatorDesc.java index e8a5827..276c4a3 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/OperatorDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/OperatorDesc.java @@ -40,4 +40,8 @@ public void setColumnExprMap(Map colExprMap); void fillSignature(Map ret); + + public void setBucketingVersion(int bucketingVersion); + + public int getBucketingVersion(); } diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java index 980f39b..6282c8e 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java @@ -286,6 +286,7 @@ outputFormat = IgnoreKeyTextOutputFormat.class; } properties.setProperty(serdeConstants.SERIALIZATION_LIB, serdeClass.getName()); + properties.setProperty(hive_metastoreConstants.TABLE_BUCKETING_VERSION, "-1"); return new TableDesc(inputFormat, outputFormat, properties); } diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceSinkDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceSinkDesc.java index 32715c9..a807fa9 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceSinkDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceSinkDesc.java @@ -437,6 +437,7 @@ this.outputName = outputName; } + @Explain(displayName = "numBuckets", explainLevels = { Level.EXTENDED }) public int getNumBuckets() { return numBuckets; } @@ -445,6 +446,11 @@ this.numBuckets = numBuckets; } + @Explain(displayName = "bucketingVersion", explainLevels = { Level.EXTENDED }) + public int getBucketingVersionForExplain() { + return getBucketingVersion(); + } + public List getBucketCols() { return bucketCols; } diff --git 
ql/src/test/queries/clientpositive/infer_bucket_sort_num_buckets.q ql/src/test/queries/clientpositive/infer_bucket_sort_num_buckets.q index a8f5e17..c7167e7 100644 --- ql/src/test/queries/clientpositive/infer_bucket_sort_num_buckets.q +++ ql/src/test/queries/clientpositive/infer_bucket_sort_num_buckets.q @@ -19,7 +19,7 @@ -- and the partition for 1 will get written in one reducer. So hr=0 should be bucketed by key -- and hr=1 should not. -EXPLAIN +EXPLAIN EXTENDED INSERT OVERWRITE TABLE test_table_n0 PARTITION (ds = '2008-04-08', hr) SELECT key2, value, cast(hr as int) FROM (SELECT if ((key % 3) < 2, 0, 1) as key2, value, (key % 2) as hr diff --git ql/src/test/queries/clientpositive/murmur_hash_migration.q ql/src/test/queries/clientpositive/murmur_hash_migration.q index 54207a7..c114ef6 100644 --- ql/src/test/queries/clientpositive/murmur_hash_migration.q +++ ql/src/test/queries/clientpositive/murmur_hash_migration.q @@ -36,14 +36,14 @@ CREATE TABLE tab_part_n11 (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; -explain +explain extended insert overwrite table tab_part_n11 partition (ds='2008-04-08') select key,value from srcbucket_mapjoin_part_n20; insert overwrite table tab_part_n11 partition (ds='2008-04-08') select key,value from srcbucket_mapjoin_part_n20; CREATE TABLE tab_n10(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; -explain +explain extended insert overwrite table tab_n10 partition (ds='2008-04-08') select key,value from srcbucket_mapjoin_n18; insert overwrite table tab_n10 partition (ds='2008-04-08') @@ -52,44 +52,14 @@ analyze table tab_part_n11 compute statistics for columns; analyze table tab_n10 compute statistics for columns; -explain +explain extended select t1.key, t1.value, t2.key, t2.value from srcbucket_mapjoin_n18 t1, srcbucket_mapjoin_part_n20 t2 where t1.key = t2.key order by t1.key, t1.value, t2.key, t2.value; select t1.key, 
t1.value, t2.key, t2.value from srcbucket_mapjoin_n18 t1, srcbucket_mapjoin_part_n20 t2 where t1.key = t2.key order by t1.key, t1.value, t2.key, t2.value; -explain +set hive.auto.convert.join=true; + +explain extended select t1.key, t1.value, t2.key, t2.value from tab_part_n11 t1, tab_n10 t2 where t1.key = t2.key order by t1.key, t1.value, t2.key, t2.value; select t1.key, t1.value, t2.key, t2.value from tab_part_n11 t1, tab_n10 t2 where t1.key = t2.key order by t1.key, t1.value, t2.key, t2.value; -set hive.optimize.ppd=true; -set hive.optimize.index.filter=true; -set hive.tez.bucket.pruning=true; -set hive.fetch.task.conversion=none; -set hive.support.concurrency=true; -set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; - - -create transactional table acid_ptn_bucket1 (a int, b int) partitioned by(ds string) -clustered by (a) into 2 buckets stored as ORC -TBLPROPERTIES('bucketing_version'='1', 'transactional'='true', 'transactional_properties'='default'); - -explain extended insert into acid_ptn_bucket1 partition (ds) values(1,2,'today'),(1,3,'today'),(1,4,'yesterday'),(2,2,'yesterday'),(2,3,'today'),(2,4,'today'); -insert into acid_ptn_bucket1 partition (ds) values(1,2,'today'),(1,3,'today'),(1,4,'yesterday'),(2,2,'yesterday'),(2,3,'today'),(2,4,'today'); - -alter table acid_ptn_bucket1 add columns(c int); - -insert into acid_ptn_bucket1 partition (ds) values(3,2,1000,'yesterday'),(3,3,1001,'today'),(3,4,1002,'yesterday'),(4,2,1003,'today'), (4,3,1004,'yesterday'),(4,4,1005,'today'); -select ROW__ID, * from acid_ptn_bucket1 where ROW__ID.bucketid = 536870912 and ds='today'; -select ROW__ID, * from acid_ptn_bucket1 where ROW__ID.bucketid = 536936448 and ds='today'; - ---create table s1 as select key, value from src where value > 2 group by key, value limit 10; ---create table s2 as select key, '45' from src s2 where key > 1 group by key limit 10; - -create table s1 (key int, value int) stored as ORC; -create table s2 (key int, value int) stored as 
ORC; - -insert into s1 values(111, 33), (10, 45), (103, 44), (129, 34), (128, 11); -insert into s2 values(10, 45), (100, 45), (103, 44), (110, 12), (128, 34), (117, 71); -insert into table acid_ptn_bucket1 partition(ds='today') select key, count(value), key from (select * from s1 union all select * from s2) sub group by key; -select ROW__ID, * from acid_ptn_bucket1 where ROW__ID.bucketid = 536870912 and ds='today'; -select ROW__ID, * from acid_ptn_bucket1 where ROW__ID.bucketid = 536936448 and ds='today'; diff --git ql/src/test/queries/clientpositive/murmur_hash_migration2.q ql/src/test/queries/clientpositive/murmur_hash_migration2.q new file mode 100644 index 0000000..362ead7 --- /dev/null +++ ql/src/test/queries/clientpositive/murmur_hash_migration2.q @@ -0,0 +1,44 @@ +--! qt:dataset:src +set hive.stats.column.autogather=false; +set hive.strict.checks.bucketing=false; + +set hive.mapred.mode=nonstrict; +set hive.explain.user=false; +set hive.auto.convert.join=true; +set hive.auto.convert.join.noconditionaltask=true; +set hive.auto.convert.join.noconditionaltask.size=30000; + +set hive.optimize.bucketingsorting=false; + +set hive.optimize.ppd=true; +set hive.optimize.index.filter=true; +set hive.tez.bucket.pruning=true; +set hive.fetch.task.conversion=none; +set hive.support.concurrency=true; +set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; + + +create transactional table acid_ptn_bucket1 (a int, b int) partitioned by(ds string) +clustered by (a) into 2 buckets stored as ORC +TBLPROPERTIES('bucketing_version'='1', 'transactional'='true', 'transactional_properties'='default'); + +explain extended insert into acid_ptn_bucket1 partition (ds) values(1,2,'today'),(1,3,'today'),(1,4,'yesterday'),(2,2,'yesterday'),(2,3,'today'),(2,4,'today'); +insert into acid_ptn_bucket1 partition (ds) values(1,2,'today'),(1,3,'today'),(1,4,'yesterday'),(2,2,'yesterday'),(2,3,'today'),(2,4,'today'); + +alter table acid_ptn_bucket1 add columns(c int); + +insert into 
acid_ptn_bucket1 partition (ds) values(3,2,1000,'yesterday'),(3,3,1001,'today'),(3,4,1002,'yesterday'),(4,2,1003,'today'), (4,3,1004,'yesterday'),(4,4,1005,'today'); +select ROW__ID, * from acid_ptn_bucket1 where ROW__ID.bucketid = 536870912 and ds='today'; +select ROW__ID, * from acid_ptn_bucket1 where ROW__ID.bucketid = 536936448 and ds='today'; + +--create table s1 as select key, value from src where value > 2 group by key, value limit 10; +--create table s2 as select key, '45' from src s2 where key > 1 group by key limit 10; + +create table s1 (key int, value int) stored as ORC; +create table s2 (key int, value int) stored as ORC; + +insert into s1 values(111, 33), (10, 45), (103, 44), (129, 34), (128, 11); +insert into s2 values(10, 45), (100, 45), (103, 44), (110, 12), (128, 34), (117, 71); +insert into table acid_ptn_bucket1 partition(ds='today') select key, count(value), key from (select * from s1 union all select * from s2) sub group by key; +select ROW__ID, * from acid_ptn_bucket1 where ROW__ID.bucketid = 536870912 and ds='today'; +select ROW__ID, * from acid_ptn_bucket1 where ROW__ID.bucketid = 536936448 and ds='today'; diff --git ql/src/test/queries/clientpositive/topnkey_grouping_sets.q ql/src/test/queries/clientpositive/topnkey_grouping_sets.q index 3efd0b9..0bcf08c 100644 --- ql/src/test/queries/clientpositive/topnkey_grouping_sets.q +++ ql/src/test/queries/clientpositive/topnkey_grouping_sets.q @@ -20,6 +20,45 @@ set hive.optimize.topnkey=true; EXPLAIN +<<<<<<< HEAD +SELECT a, b, grouping(a), grouping(b), grouping(a, b) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY a,b LIMIT 3; +SELECT a, b, grouping(a), grouping(b), grouping(a, b) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY a,b LIMIT 3; + +set hive.optimize.topnkey=false; +SELECT a, b, grouping(a), grouping(b), grouping(a, b) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY a,b LIMIT 3; 
+ +set hive.optimize.topnkey=true; +EXPLAIN +SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY a,b LIMIT 10; +SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY a,b LIMIT 10; + +set hive.optimize.topnkey=false; +SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY a,b LIMIT 10; + +set hive.optimize.topnkey=true; +EXPLAIN +SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY a,b LIMIT 3; +SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY a,b LIMIT 3; + +set hive.optimize.topnkey=false; +SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY a,b LIMIT 3; + +set hive.optimize.topnkey=true; +EXPLAIN +SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY a,b LIMIT 1; +SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY a,b LIMIT 1; + +set hive.optimize.topnkey=false; +SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY a,b LIMIT 1; + +set hive.optimize.topnkey=true; +EXPLAIN +SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a), (b)) ORDER BY a,b LIMIT 7; +SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a), (b)) ORDER BY a,b LIMIT 7; + +set hive.optimize.topnkey=false; +SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a), (b)) ORDER BY a,b LIMIT 7; +======= SELECT a, b, grouping(a), grouping(b), grouping(a, b) FROM t_test_grouping_sets GROUP BY a, b GROUPING SETS ((a, b), (a), (b), ()) ORDER BY a, b LIMIT 3; SELECT a, b, grouping(a), grouping(b), grouping(a, b) FROM t_test_grouping_sets GROUP BY a, b GROUPING SETS ((a, b), (a), (b), ()) ORDER BY a, b LIMIT 3; @@ -57,6 +96,7 @@ set hive.optimize.topnkey=false; SELECT a, b FROM 
t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a), (b)) ORDER BY b, a LIMIT 7; +>>>>>>> apache/master set hive.optimize.topnkey=true; EXPLAIN diff --git ql/src/test/results/clientpositive/acid_nullscan.q.out ql/src/test/results/clientpositive/acid_nullscan.q.out index 0e5c241..5795e3e 100644 --- ql/src/test/results/clientpositive/acid_nullscan.q.out +++ ql/src/test/results/clientpositive/acid_nullscan.q.out @@ -65,7 +65,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -96,7 +98,7 @@ serialization.ddl struct acid_vectorized_n1 { i32 a, string b} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.NullStructSerDe - totalSize 2602 + totalSize 2583 transactional true transactional_properties default #### A masked pattern was here #### @@ -121,7 +123,7 @@ serialization.ddl struct acid_vectorized_n1 { i32 a, string b} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2602 + totalSize 2583 transactional true transactional_properties default #### A masked pattern was here #### @@ -138,6 +140,7 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -148,6 +151,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0 columns.types bigint escape.delim \ diff --git ql/src/test/results/clientpositive/acid_table_stats.q.out ql/src/test/results/clientpositive/acid_table_stats.q.out index c4bc80e..4128adb 100644 --- ql/src/test/results/clientpositive/acid_table_stats.q.out 
+++ ql/src/test/results/clientpositive/acid_table_stats.q.out @@ -97,7 +97,7 @@ numFiles 2 numRows 1000 rawDataSize 0 - totalSize 4384 + totalSize 4361 #### A masked pattern was here #### # Storage Information @@ -184,7 +184,7 @@ numFiles 2 numRows 1000 rawDataSize 0 - totalSize 4384 + totalSize 4361 #### A masked pattern was here #### # Storage Information @@ -235,7 +235,7 @@ numFiles 2 numRows 1000 rawDataSize 0 - totalSize 4384 + totalSize 4361 #### A masked pattern was here #### # Storage Information @@ -331,7 +331,7 @@ numFiles 4 numRows 2000 rawDataSize 0 - totalSize 8769 + totalSize 8722 #### A masked pattern was here #### # Storage Information @@ -380,7 +380,7 @@ numFiles 4 numRows 2000 rawDataSize 0 - totalSize 8769 + totalSize 8722 #### A masked pattern was here #### # Storage Information @@ -593,7 +593,7 @@ numFiles 2 numRows 1000 rawDataSize 176000 - totalSize 3326 + totalSize 3325 #### A masked pattern was here #### # Storage Information diff --git ql/src/test/results/clientpositive/autoColumnStats_4.q.out ql/src/test/results/clientpositive/autoColumnStats_4.q.out index a9b5ad0..7ef13b3 100644 --- ql/src/test/results/clientpositive/autoColumnStats_4.q.out +++ ql/src/test/results/clientpositive/autoColumnStats_4.q.out @@ -214,7 +214,7 @@ numFiles 2 numRows 10 rawDataSize 0 - totalSize 1903 + totalSize 1884 transactional true transactional_properties default #### A masked pattern was here #### @@ -256,10 +256,10 @@ Table Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} bucketing_version 2 - numFiles 4 + numFiles 3 numRows 8 rawDataSize 0 - totalSize 3293 + totalSize 2596 transactional true transactional_properties default #### A masked pattern was here #### diff --git ql/src/test/results/clientpositive/autoColumnStats_5a.q.out ql/src/test/results/clientpositive/autoColumnStats_5a.q.out index 4bc9df4..1fd961d 100644 --- ql/src/test/results/clientpositive/autoColumnStats_5a.q.out +++ ql/src/test/results/clientpositive/autoColumnStats_5a.q.out 
@@ -48,6 +48,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -89,8 +90,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 868 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 868 Basic stats: COMPLETE Column stats: COMPLETE @@ -151,6 +154,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 884 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -161,6 +165,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types struct:struct:int escape.delim \ @@ -225,6 +230,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -304,6 +310,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -474,6 +481,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -515,8 +523,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 868 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + 
numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 868 Basic stats: COMPLETE Column stats: COMPLETE @@ -577,6 +587,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 884 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -587,6 +598,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types struct:struct:int escape.delim \ @@ -651,6 +663,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -730,6 +743,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### diff --git ql/src/test/results/clientpositive/autoColumnStats_8.q.out ql/src/test/results/clientpositive/autoColumnStats_8.q.out index de7352c..43893cb 100644 --- ql/src/test/results/clientpositive/autoColumnStats_8.q.out +++ ql/src/test/results/clientpositive/autoColumnStats_8.q.out @@ -80,6 +80,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 666 Data size: 363636 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -121,8 +122,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 2 Data size: 2496 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) Statistics: Num rows: 2 Data size: 2496 Basic stats: 
COMPLETE Column stats: COMPLETE @@ -138,6 +141,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 666 Data size: 241092 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 2 #### A masked pattern was here #### @@ -180,6 +184,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 2 Data size: 2316 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -414,6 +419,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 2 Data size: 2496 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -424,6 +430,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types struct:struct:string:string escape.delim \ @@ -515,8 +522,10 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 key expressions: '2008-12-31' (type: string), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: '2008-12-31' (type: string), _col1 (type: string) Statistics: Num rows: 2 Data size: 2316 Basic stats: COMPLETE Column stats: COMPLETE @@ -564,6 +573,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 2 Data size: 2316 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -574,6 +584,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types 
struct:struct:string:string escape.delim \ diff --git ql/src/test/results/clientpositive/auto_join_reordering_values.q.out ql/src/test/results/clientpositive/auto_join_reordering_values.q.out index 0378c65..b81418c 100644 --- ql/src/test/results/clientpositive/auto_join_reordering_values.q.out +++ ql/src/test/results/clientpositive/auto_join_reordering_values.q.out @@ -128,8 +128,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 106 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col1 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col1 (type: string) Statistics: Num rows: 1 Data size: 106 Basic stats: COMPLETE Column stats: COMPLETE @@ -150,8 +152,10 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: COMPLETE @@ -221,6 +225,7 @@ outputColumnNames: _col0, _col2, _col3, _col4 Statistics: Num rows: 1 Data size: 106 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -245,8 +250,10 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 106 Basic stats: COMPLETE Column stats: COMPLETE @@ -267,8 +274,10 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: 
-1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE @@ -361,6 +370,7 @@ outputColumnNames: _col2, _col3, _col4, _col5 Statistics: Num rows: 1 Data size: 106 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -385,8 +395,10 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 key expressions: _col2 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col2 (type: int) Statistics: Num rows: 1 Data size: 106 Basic stats: COMPLETE Column stats: COMPLETE @@ -407,8 +419,10 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE @@ -501,6 +515,7 @@ outputColumnNames: _col3, _col4, _col5 Statistics: Num rows: 1 Data size: 102 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -525,8 +540,10 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 key expressions: _col3 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col3 (type: int) Statistics: Num rows: 1 Data size: 102 Basic stats: COMPLETE Column stats: COMPLETE @@ -547,8 +564,10 @@ outputColumnNames: _col0 Statistics: Num rows: 100 Data size: 400 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) 
Statistics: Num rows: 100 Data size: 400 Basic stats: COMPLETE Column stats: COMPLETE @@ -648,6 +667,7 @@ Number of rows: 5 Statistics: Num rows: 1 Data size: 98 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -658,6 +678,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types string:int escape.delim \ diff --git ql/src/test/results/clientpositive/beeline/smb_mapjoin_11.q.out ql/src/test/results/clientpositive/beeline/smb_mapjoin_11.q.out index 4731629..4faba94 100644 --- ql/src/test/results/clientpositive/beeline/smb_mapjoin_11.q.out +++ ql/src/test/results/clientpositive/beeline/smb_mapjoin_11.q.out @@ -88,8 +88,10 @@ expressions: _col0 (type: int), _col7 (type: string) outputColumnNames: _col0, _col1 Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: a + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) tag: -1 @@ -260,8 +262,10 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 key expressions: '1' (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: '1' (type: string) tag: -1 diff --git ql/src/test/results/clientpositive/beeline/smb_mapjoin_12.q.out ql/src/test/results/clientpositive/beeline/smb_mapjoin_12.q.out index 1f301e8..f6c2f9d 100644 --- ql/src/test/results/clientpositive/beeline/smb_mapjoin_12.q.out +++ ql/src/test/results/clientpositive/beeline/smb_mapjoin_12.q.out @@ -316,8 +316,10 @@ expressions: _col0 (type: int), concat(_col1, _col7) (type: string) outputColumnNames: _col0, _col1 Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: a + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 
(type: int) tag: -1 @@ -490,8 +492,10 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 key expressions: '2' (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: '2' (type: string) tag: -1 diff --git ql/src/test/results/clientpositive/beeline/smb_mapjoin_13.q.out ql/src/test/results/clientpositive/beeline/smb_mapjoin_13.q.out index 71ea244..fe2561b 100644 --- ql/src/test/results/clientpositive/beeline/smb_mapjoin_13.q.out +++ ql/src/test/results/clientpositive/beeline/smb_mapjoin_13.q.out @@ -102,8 +102,10 @@ expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + tag: -1 TopN: 10 @@ -289,8 +291,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 550 Data size: 52250 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Statistics: Num rows: 550 Data size: 52250 Basic stats: COMPLETE Column stats: NONE tag: -1 diff --git ql/src/test/results/clientpositive/binary_output_format.q.out ql/src/test/results/clientpositive/binary_output_format.q.out index ec6a3a2..b414360 100644 --- ql/src/test/results/clientpositive/binary_output_format.q.out +++ ql/src/test/results/clientpositive/binary_output_format.q.out @@ -80,6 +80,7 @@ input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: + bucketing_version -1 columns _col0 columns.types string field.delim 9 @@ -89,6 +90,7 @@ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false 
GlobalTableId: 1 #### A masked pattern was here #### @@ -133,7 +135,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 440 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 440 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -200,6 +204,7 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 440 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -210,6 +215,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0 columns.types struct escape.delim \ @@ -277,6 +283,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -370,6 +377,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### diff --git ql/src/test/results/clientpositive/bucket1.q.out ql/src/test/results/clientpositive/bucket1.q.out index 6e8965f..0a6f785 100644 --- ql/src/test/results/clientpositive/bucket1.q.out +++ ql/src/test/results/clientpositive/bucket1.q.out @@ -39,8 +39,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: a + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE @@ -108,6 +110,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + 
bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -152,6 +155,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -216,7 +220,9 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -258,6 +264,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -268,6 +275,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types struct:struct escape.delim \ diff --git ql/src/test/results/clientpositive/bucket2.q.out ql/src/test/results/clientpositive/bucket2.q.out index e21515e..cbada47 100644 --- ql/src/test/results/clientpositive/bucket2.q.out +++ ql/src/test/results/clientpositive/bucket2.q.out @@ -38,8 +38,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: a + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE @@ -107,6 +109,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -154,6 +157,7 
@@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -164,6 +168,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types struct:struct escape.delim \ diff --git ql/src/test/results/clientpositive/bucket3.q.out ql/src/test/results/clientpositive/bucket3.q.out index a494c21..4448ab6 100644 --- ql/src/test/results/clientpositive/bucket3.q.out +++ ql/src/test/results/clientpositive/bucket3.q.out @@ -39,8 +39,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: a + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE @@ -108,6 +110,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -151,6 +154,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 949 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -214,8 +218,10 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 1 Data size: 949 Basic stats: COMPLETE Column stats: COMPLETE @@ -263,6 +269,7 @@ 
outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 965 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -273,6 +280,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types struct:struct:string escape.delim \ diff --git ql/src/test/results/clientpositive/bucket_map_join_1.q.out ql/src/test/results/clientpositive/bucket_map_join_1.q.out index 98c0aa4..440345f 100644 --- ql/src/test/results/clientpositive/bucket_map_join_1.q.out +++ ql/src/test/results/clientpositive/bucket_map_join_1.q.out @@ -110,7 +110,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -182,6 +184,7 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -192,6 +195,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0 columns.types bigint escape.delim \ diff --git ql/src/test/results/clientpositive/bucket_map_join_2.q.out ql/src/test/results/clientpositive/bucket_map_join_2.q.out index 01d0999..20b2703 100644 --- ql/src/test/results/clientpositive/bucket_map_join_2.q.out +++ ql/src/test/results/clientpositive/bucket_map_join_2.q.out @@ -110,7 +110,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + 
bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -182,6 +184,7 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -192,6 +195,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0 columns.types bigint escape.delim \ diff --git ql/src/test/results/clientpositive/bucket_map_join_spark1.q.out ql/src/test/results/clientpositive/bucket_map_join_spark1.q.out index 8006d5c..98a45ee 100644 --- ql/src/test/results/clientpositive/bucket_map_join_spark1.q.out +++ ql/src/test/results/clientpositive/bucket_map_join_spark1.q.out @@ -247,6 +247,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 134 Data size: 20826 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -290,6 +291,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -458,7 +460,9 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -499,6 +503,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -509,6 +514,7 @@ input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types struct:struct:struct escape.delim \ @@ -709,6 +715,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 134 Data size: 20826 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -752,6 +759,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -920,7 +928,9 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -961,6 +971,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -971,6 +982,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types struct:struct:struct escape.delim \ diff --git ql/src/test/results/clientpositive/bucket_map_join_spark2.q.out ql/src/test/results/clientpositive/bucket_map_join_spark2.q.out index 23d704a..902c129 100644 --- ql/src/test/results/clientpositive/bucket_map_join_spark2.q.out +++ ql/src/test/results/clientpositive/bucket_map_join_spark2.q.out @@ -231,6 +231,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 134 Data size: 20826 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false 
GlobalTableId: 1 #### A masked pattern was here #### @@ -274,6 +275,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -442,7 +444,9 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -483,6 +487,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -493,6 +498,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types struct:struct:struct escape.delim \ @@ -693,6 +699,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 134 Data size: 20826 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -736,6 +743,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -904,7 +912,9 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -945,6 +955,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false 
GlobalTableId: 0 #### A masked pattern was here #### @@ -955,6 +966,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types struct:struct:struct escape.delim \ diff --git ql/src/test/results/clientpositive/bucket_map_join_spark3.q.out ql/src/test/results/clientpositive/bucket_map_join_spark3.q.out index 5d59a53..42a6998 100644 --- ql/src/test/results/clientpositive/bucket_map_join_spark3.q.out +++ ql/src/test/results/clientpositive/bucket_map_join_spark3.q.out @@ -231,6 +231,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 134 Data size: 20826 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -274,6 +275,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -442,7 +444,9 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -483,6 +487,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -493,6 +498,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types struct:struct:struct escape.delim \ @@ -693,6 +699,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 134 Data size: 20826 Basic stats: 
PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -736,6 +743,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -904,7 +912,9 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -945,6 +955,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -955,6 +966,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types struct:struct:struct escape.delim \ diff --git ql/src/test/results/clientpositive/bucketcontext_1.q.out ql/src/test/results/clientpositive/bucketcontext_1.q.out index b136f29..3df511a 100644 --- ql/src/test/results/clientpositive/bucketcontext_1.q.out +++ ql/src/test/results/clientpositive/bucketcontext_1.q.out @@ -230,7 +230,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 1 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -445,7 +447,9 @@ mode: hash outputColumnNames: _col0 Reduce Output Operator + bucketingVersion: 1 null sort order: + numBuckets: -1 sort order: tag: -1 value expressions: _col0 (type: bigint) diff --git ql/src/test/results/clientpositive/bucketcontext_2.q.out 
ql/src/test/results/clientpositive/bucketcontext_2.q.out index 139b04b..0d6610d 100644 --- ql/src/test/results/clientpositive/bucketcontext_2.q.out +++ ql/src/test/results/clientpositive/bucketcontext_2.q.out @@ -214,7 +214,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 1 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -429,7 +431,9 @@ mode: hash outputColumnNames: _col0 Reduce Output Operator + bucketingVersion: 1 null sort order: + numBuckets: -1 sort order: tag: -1 value expressions: _col0 (type: bigint) diff --git ql/src/test/results/clientpositive/bucketcontext_3.q.out ql/src/test/results/clientpositive/bucketcontext_3.q.out index dbe68ee..63c44d8 100644 --- ql/src/test/results/clientpositive/bucketcontext_3.q.out +++ ql/src/test/results/clientpositive/bucketcontext_3.q.out @@ -262,7 +262,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 1 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -426,7 +428,9 @@ mode: hash outputColumnNames: _col0 Reduce Output Operator + bucketingVersion: 1 null sort order: + numBuckets: -1 sort order: tag: -1 value expressions: _col0 (type: bigint) diff --git ql/src/test/results/clientpositive/bucketcontext_4.q.out ql/src/test/results/clientpositive/bucketcontext_4.q.out index 0cc4d7e..0e265a6 100644 --- ql/src/test/results/clientpositive/bucketcontext_4.q.out +++ ql/src/test/results/clientpositive/bucketcontext_4.q.out @@ -278,7 +278,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 1 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 
Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -442,7 +444,9 @@ mode: hash outputColumnNames: _col0 Reduce Output Operator + bucketingVersion: 1 null sort order: + numBuckets: -1 sort order: tag: -1 value expressions: _col0 (type: bigint) diff --git ql/src/test/results/clientpositive/bucketcontext_5.q.out ql/src/test/results/clientpositive/bucketcontext_5.q.out index 25f89a8..c8acdb5 100644 --- ql/src/test/results/clientpositive/bucketcontext_5.q.out +++ ql/src/test/results/clientpositive/bucketcontext_5.q.out @@ -140,7 +140,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 1 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -292,7 +294,9 @@ mode: hash outputColumnNames: _col0 Reduce Output Operator + bucketingVersion: 1 null sort order: + numBuckets: -1 sort order: tag: -1 value expressions: _col0 (type: bigint) diff --git ql/src/test/results/clientpositive/bucketcontext_6.q.out ql/src/test/results/clientpositive/bucketcontext_6.q.out index 0a5baeb..f871196 100644 --- ql/src/test/results/clientpositive/bucketcontext_6.q.out +++ ql/src/test/results/clientpositive/bucketcontext_6.q.out @@ -162,7 +162,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 1 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -373,7 +375,9 @@ mode: hash outputColumnNames: _col0 Reduce Output Operator + bucketingVersion: 1 null sort order: + numBuckets: -1 sort order: tag: -1 value expressions: _col0 (type: bigint) diff --git ql/src/test/results/clientpositive/bucketcontext_7.q.out ql/src/test/results/clientpositive/bucketcontext_7.q.out index 734dd0f..4a3788d 100644 --- 
ql/src/test/results/clientpositive/bucketcontext_7.q.out +++ ql/src/test/results/clientpositive/bucketcontext_7.q.out @@ -297,7 +297,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 1 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -516,7 +518,9 @@ mode: hash outputColumnNames: _col0 Reduce Output Operator + bucketingVersion: 1 null sort order: + numBuckets: -1 sort order: tag: -1 value expressions: _col0 (type: bigint) diff --git ql/src/test/results/clientpositive/bucketcontext_8.q.out ql/src/test/results/clientpositive/bucketcontext_8.q.out index 8260858..bdc12a8 100644 --- ql/src/test/results/clientpositive/bucketcontext_8.q.out +++ ql/src/test/results/clientpositive/bucketcontext_8.q.out @@ -297,7 +297,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 1 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -516,7 +518,9 @@ mode: hash outputColumnNames: _col0 Reduce Output Operator + bucketingVersion: 1 null sort order: + numBuckets: -1 sort order: tag: -1 value expressions: _col0 (type: bigint) diff --git ql/src/test/results/clientpositive/bucketmapjoin10.q.out ql/src/test/results/clientpositive/bucketmapjoin10.q.out index fcf056d..c4d6070 100644 --- ql/src/test/results/clientpositive/bucketmapjoin10.q.out +++ ql/src/test/results/clientpositive/bucketmapjoin10.q.out @@ -301,7 +301,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -421,6 +423,7 @@ outputColumnNames: 
_col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -431,6 +434,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0 columns.types bigint escape.delim \ diff --git ql/src/test/results/clientpositive/bucketmapjoin11.q.out ql/src/test/results/clientpositive/bucketmapjoin11.q.out index 7032fb5..16114c9 100644 --- ql/src/test/results/clientpositive/bucketmapjoin11.q.out +++ ql/src/test/results/clientpositive/bucketmapjoin11.q.out @@ -317,7 +317,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -437,6 +439,7 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -447,6 +450,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0 columns.types bigint escape.delim \ @@ -671,7 +675,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -791,6 +797,7 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A 
masked pattern was here #### @@ -801,6 +808,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0 columns.types bigint escape.delim \ diff --git ql/src/test/results/clientpositive/bucketmapjoin12.q.out ql/src/test/results/clientpositive/bucketmapjoin12.q.out index 2ef7cb7..5c45394 100644 --- ql/src/test/results/clientpositive/bucketmapjoin12.q.out +++ ql/src/test/results/clientpositive/bucketmapjoin12.q.out @@ -226,7 +226,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -296,6 +298,7 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -306,6 +309,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0 columns.types bigint escape.delim \ @@ -466,7 +470,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -536,6 +542,7 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -546,6 +553,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0 columns.types bigint escape.delim \ diff --git ql/src/test/results/clientpositive/bucketmapjoin13.q.out ql/src/test/results/clientpositive/bucketmapjoin13.q.out index b6e55b3..693377f 100644 --- ql/src/test/results/clientpositive/bucketmapjoin13.q.out +++ ql/src/test/results/clientpositive/bucketmapjoin13.q.out @@ -189,7 +189,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -311,6 +313,7 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -321,6 +324,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0 columns.types bigint escape.delim \ @@ -493,7 +497,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -564,6 +570,7 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -574,6 +581,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0 columns.types bigint escape.delim \ @@ -752,7 +760,9 @@ 
outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -823,6 +833,7 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -833,6 +844,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0 columns.types bigint escape.delim \ @@ -1011,7 +1023,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -1082,6 +1096,7 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1092,6 +1107,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0 columns.types bigint escape.delim \ diff --git ql/src/test/results/clientpositive/bucketmapjoin5.q.out ql/src/test/results/clientpositive/bucketmapjoin5.q.out index 5ada6e7..b0e2931 100644 --- ql/src/test/results/clientpositive/bucketmapjoin5.q.out +++ ql/src/test/results/clientpositive/bucketmapjoin5.q.out @@ -253,6 +253,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 312 Data size: 178025 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 
2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -296,7 +297,9 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -416,6 +419,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -426,6 +430,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types struct:struct:struct escape.delim \ @@ -492,6 +497,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -582,6 +588,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -868,6 +875,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 163 Data size: 93968 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -911,7 +919,9 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -1031,6 +1041,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 2 
compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1041,6 +1052,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types struct:struct:struct escape.delim \ @@ -1107,6 +1119,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1197,6 +1210,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### diff --git ql/src/test/results/clientpositive/bucketmapjoin8.q.out ql/src/test/results/clientpositive/bucketmapjoin8.q.out index 5c0ac98..e1b658e 100644 --- ql/src/test/results/clientpositive/bucketmapjoin8.q.out +++ ql/src/test/results/clientpositive/bucketmapjoin8.q.out @@ -192,7 +192,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -262,6 +264,7 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -272,6 +275,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0 columns.types bigint escape.delim \ @@ -449,7 +453,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column 
stats: NONE tag: -1 @@ -519,6 +525,7 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -529,6 +536,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0 columns.types bigint escape.delim \ diff --git ql/src/test/results/clientpositive/bucketmapjoin9.q.out ql/src/test/results/clientpositive/bucketmapjoin9.q.out index 61be706..709c780 100644 --- ql/src/test/results/clientpositive/bucketmapjoin9.q.out +++ ql/src/test/results/clientpositive/bucketmapjoin9.q.out @@ -192,7 +192,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -262,6 +264,7 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -272,6 +275,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0 columns.types bigint escape.delim \ @@ -474,7 +478,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -544,6 +550,7 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE File Output Operator + 
bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -554,6 +561,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0 columns.types bigint escape.delim \ diff --git ql/src/test/results/clientpositive/bucketmapjoin_negative.q.out ql/src/test/results/clientpositive/bucketmapjoin_negative.q.out index cb9d5d0..c90fa59 100644 --- ql/src/test/results/clientpositive/bucketmapjoin_negative.q.out +++ ql/src/test/results/clientpositive/bucketmapjoin_negative.q.out @@ -192,6 +192,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 146 Data size: 70215 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -235,7 +236,9 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -304,6 +307,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -314,6 +318,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types struct:struct:struct escape.delim \ @@ -380,6 +385,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -470,6 +476,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: 
false GlobalTableId: 0 #### A masked pattern was here #### diff --git ql/src/test/results/clientpositive/bucketmapjoin_negative2.q.out ql/src/test/results/clientpositive/bucketmapjoin_negative2.q.out index b5aceed..1adac25 100644 --- ql/src/test/results/clientpositive/bucketmapjoin_negative2.q.out +++ ql/src/test/results/clientpositive/bucketmapjoin_negative2.q.out @@ -258,6 +258,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 163 Data size: 93968 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -301,7 +302,9 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -370,6 +373,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -380,6 +384,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types struct:struct:struct escape.delim \ @@ -446,6 +451,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -536,6 +542,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### diff --git ql/src/test/results/clientpositive/cbo_rp_outer_join_ppr.q.out ql/src/test/results/clientpositive/cbo_rp_outer_join_ppr.q.out index 5bb0f03..13dc04c 100644 --- 
ql/src/test/results/clientpositive/cbo_rp_outer_join_ppr.q.out +++ ql/src/test/results/clientpositive/cbo_rp_outer_join_ppr.q.out @@ -48,8 +48,10 @@ outputColumnNames: key, value Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: key (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: key (type: string) Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE @@ -70,8 +72,10 @@ outputColumnNames: key, value Statistics: Num rows: 111 Data size: 30192 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: key (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: key (type: string) Statistics: Num rows: 111 Data size: 30192 Basic stats: COMPLETE Column stats: COMPLETE @@ -246,6 +250,7 @@ outputColumnNames: key, value, key0, value0 Statistics: Num rows: 55 Data size: 19580 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -256,6 +261,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns key,value,key0,value0 columns.types string:string:string:string escape.delim \ @@ -362,8 +368,10 @@ outputColumnNames: key, value Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: key (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: key (type: string) Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE @@ -384,8 +392,10 @@ outputColumnNames: key, value Statistics: Num rows: 111 Data size: 30192 Basic stats: COMPLETE 
Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: key (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: key (type: string) Statistics: Num rows: 111 Data size: 30192 Basic stats: COMPLETE Column stats: COMPLETE @@ -560,6 +570,7 @@ outputColumnNames: key, value, key0, value0 Statistics: Num rows: 55 Data size: 19580 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -570,6 +581,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns key,value,key0,value0 columns.types string:string:string:string escape.delim \ diff --git ql/src/test/results/clientpositive/cbo_stats_estimation.q.out ql/src/test/results/clientpositive/cbo_stats_estimation.q.out index 389a9bc..55793a1 100644 --- ql/src/test/results/clientpositive/cbo_stats_estimation.q.out +++ ql/src/test/results/clientpositive/cbo_stats_estimation.q.out @@ -51,7 +51,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -117,6 +119,7 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -127,6 +130,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0 columns.types bigint escape.delim \ @@ -182,7 +186,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic 
stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -248,6 +254,7 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -258,6 +265,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0 columns.types bigint escape.delim \ diff --git ql/src/test/results/clientpositive/columnstats_partlvl.q.out ql/src/test/results/clientpositive/columnstats_partlvl.q.out index f12577c..8f7c0c1 100644 --- ql/src/test/results/clientpositive/columnstats_partlvl.q.out +++ ql/src/test/results/clientpositive/columnstats_partlvl.q.out @@ -145,8 +145,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 3 Data size: 1062 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: 2000.0D (type: double) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: 2000.0D (type: double) Statistics: Num rows: 3 Data size: 1062 Basic stats: PARTIAL Column stats: NONE @@ -220,6 +222,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 354 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -230,6 +233,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types struct:double escape.delim \ @@ -419,8 +423,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 3 Data size: 1062 Basic stats: PARTIAL 
Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: 4000.0D (type: double) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: 4000.0D (type: double) Statistics: Num rows: 3 Data size: 1062 Basic stats: PARTIAL Column stats: NONE @@ -494,6 +500,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 354 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -504,6 +511,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types struct:double escape.delim \ diff --git ql/src/test/results/clientpositive/columnstats_tbllvl.q.out ql/src/test/results/clientpositive/columnstats_tbllvl.q.out index f22d15c..bc57e56 100644 --- ql/src/test/results/clientpositive/columnstats_tbllvl.q.out +++ ql/src/test/results/clientpositive/columnstats_tbllvl.q.out @@ -134,7 +134,9 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1480 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1480 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -201,6 +203,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1512 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -211,6 +214,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types struct:struct:struct escape.delim \ @@ -639,7 +643,9 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 
Data size: 1480 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1480 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -706,6 +712,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1512 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -716,6 +723,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types struct:struct:struct escape.delim \ diff --git ql/src/test/results/clientpositive/comments.q.out ql/src/test/results/clientpositive/comments.q.out index b94e2ce..09f1e7c 100644 --- ql/src/test/results/clientpositive/comments.q.out +++ ql/src/test/results/clientpositive/comments.q.out @@ -150,7 +150,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -220,6 +222,7 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -230,6 +233,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0 columns.types bigint escape.delim \ diff --git ql/src/test/results/clientpositive/constantPropagateForSubQuery.q.out ql/src/test/results/clientpositive/constantPropagateForSubQuery.q.out index 3450294..67b8160 100644 --- 
ql/src/test/results/clientpositive/constantPropagateForSubQuery.q.out +++ ql/src/test/results/clientpositive/constantPropagateForSubQuery.q.out @@ -39,7 +39,9 @@ outputColumnNames: _col0 Statistics: Num rows: 2 Data size: 182 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 2 Data size: 182 Basic stats: COMPLETE Column stats: COMPLETE tag: 0 @@ -54,7 +56,9 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 25 Data size: 4375 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 25 Data size: 4375 Basic stats: COMPLETE Column stats: COMPLETE tag: 1 @@ -177,6 +181,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 50 Data size: 17650 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -187,6 +192,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types string:string:string:string escape.delim \ diff --git ql/src/test/results/clientpositive/cp_sel.q.out ql/src/test/results/clientpositive/cp_sel.q.out index 471ff80..477069f 100644 --- ql/src/test/results/clientpositive/cp_sel.q.out +++ ql/src/test/results/clientpositive/cp_sel.q.out @@ -229,9 +229,9 @@ POSTHOOK: Input: default@testpartbucket POSTHOOK: Input: default@testpartbucket@ds=hello/hr=world #### A masked pattern was here #### -0 val_0 hello world -0 val_0 hello world -0 val_0 hello world +10 val_10 hello world +10 val_10 hello world +104 val_104 hello world PREHOOK: query: drop table testpartbucket PREHOOK: type: DROPTABLE PREHOOK: Input: default@testpartbucket diff --git 
ql/src/test/results/clientpositive/display_colstats_tbllvl.q.out ql/src/test/results/clientpositive/display_colstats_tbllvl.q.out index ca7ce6f..bb06109 100644 --- ql/src/test/results/clientpositive/display_colstats_tbllvl.q.out +++ ql/src/test/results/clientpositive/display_colstats_tbllvl.q.out @@ -159,7 +159,9 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1480 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1480 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -226,6 +228,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1512 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -236,6 +239,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types struct:struct:struct escape.delim \ diff --git ql/src/test/results/clientpositive/druid/druidkafkamini_basic.q.out ql/src/test/results/clientpositive/druid/druidkafkamini_basic.q.out index f57b6b3..13f16c6 100644 --- ql/src/test/results/clientpositive/druid/druidkafkamini_basic.q.out +++ ql/src/test/results/clientpositive/druid/druidkafkamini_basic.q.out @@ -251,8 +251,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 2 Data size: 354 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col1 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col1 (type: string) Statistics: Num rows: 2 Data size: 354 Basic stats: COMPLETE Column stats: COMPLETE @@ -328,8 +330,10 @@ Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE GatherStats: false Reduce Output 
Operator + bucketingVersion: 2 key expressions: language (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: language (type: string) Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE @@ -447,6 +451,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 2 Data size: 389 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 directory: hdfs://### HDFS PATH ### @@ -457,6 +462,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types string:string escape.delim \ diff --git ql/src/test/results/clientpositive/druid/druidmini_expressions.q.out ql/src/test/results/clientpositive/druid/druidmini_expressions.q.out index a993c7b..a8af291 100644 --- ql/src/test/results/clientpositive/druid/druidmini_expressions.q.out +++ ql/src/test/results/clientpositive/druid/druidmini_expressions.q.out @@ -221,7 +221,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: COMPLETE tag: -1 @@ -312,6 +314,7 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 directory: hdfs://### HDFS PATH ### @@ -322,6 +325,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0 columns.types bigint escape.delim \ diff --git ql/src/test/results/clientpositive/encrypted/encryption_join_unencrypted_tbl.q.out 
ql/src/test/results/clientpositive/encrypted/encryption_join_unencrypted_tbl.q.out index 005e711..d6115d8 100644 --- ql/src/test/results/clientpositive/encrypted/encryption_join_unencrypted_tbl.q.out +++ ql/src/test/results/clientpositive/encrypted/encryption_join_unencrypted_tbl.q.out @@ -571,8 +571,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col2 (type: double) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col2 (type: double) Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE @@ -593,8 +595,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 500 Data size: 51500 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col2 (type: double) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col2 (type: double) Statistics: Num rows: 500 Data size: 51500 Basic stats: COMPLETE Column stats: COMPLETE @@ -723,6 +727,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 791 Data size: 215943 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 directory: hdfs://### HDFS PATH ### @@ -733,6 +738,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types string:string:int:string escape.delim \ diff --git ql/src/test/results/clientpositive/encrypted/encryption_join_with_different_encryption_keys.q.out ql/src/test/results/clientpositive/encrypted/encryption_join_with_different_encryption_keys.q.out index fb7b0bc..f6c7d50 100644 --- 
ql/src/test/results/clientpositive/encrypted/encryption_join_with_different_encryption_keys.q.out +++ ql/src/test/results/clientpositive/encrypted/encryption_join_with_different_encryption_keys.q.out @@ -89,8 +89,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 475 Data size: 85013 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 475 Data size: 85013 Basic stats: COMPLETE Column stats: NONE @@ -111,8 +113,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 475 Data size: 85013 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 475 Data size: 85013 Basic stats: COMPLETE Column stats: NONE @@ -237,6 +241,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 522 Data size: 93514 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 directory: hdfs://### HDFS PATH ### @@ -247,6 +252,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types int:string:int:string escape.delim \ diff --git ql/src/test/results/clientpositive/filter_aggr.q.out ql/src/test/results/clientpositive/filter_aggr.q.out index e3fe160..76206df 100644 --- ql/src/test/results/clientpositive/filter_aggr.q.out +++ ql/src/test/results/clientpositive/filter_aggr.q.out @@ -47,8 +47,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: 
_col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE @@ -122,6 +124,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 250 Data size: 24750 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -132,6 +135,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types string:bigint:int escape.delim \ diff --git ql/src/test/results/clientpositive/filter_join_breaktask.q.out ql/src/test/results/clientpositive/filter_join_breaktask.q.out index 565d628..37ab727 100644 --- ql/src/test/results/clientpositive/filter_join_breaktask.q.out +++ ql/src/test/results/clientpositive/filter_join_breaktask.q.out @@ -67,8 +67,10 @@ outputColumnNames: _col0 Statistics: Num rows: 15 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 15 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE @@ -88,8 +90,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 15 Data size: 1375 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 15 Data size: 1375 Basic stats: COMPLETE Column stats: COMPLETE @@ -160,6 +164,7 @@ outputColumnNames: _col0, _col2 Statistics: Num rows: 25 Data size: 2305 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + 
bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -184,8 +189,10 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 key expressions: _col2 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col2 (type: string) Statistics: Num rows: 25 Data size: 2305 Basic stats: COMPLETE Column stats: COMPLETE @@ -206,8 +213,10 @@ outputColumnNames: _col0 Statistics: Num rows: 25 Data size: 2225 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 25 Data size: 2225 Basic stats: COMPLETE Column stats: COMPLETE @@ -304,6 +313,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 32 Data size: 2956 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -314,6 +324,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types int:string escape.delim \ diff --git ql/src/test/results/clientpositive/filter_union.q.out ql/src/test/results/clientpositive/filter_union.q.out index be6ed20..48adbbc 100644 --- ql/src/test/results/clientpositive/filter_union.q.out +++ ql/src/test/results/clientpositive/filter_union.q.out @@ -61,8 +61,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE @@ 
-136,6 +138,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 250 Data size: 24750 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -162,6 +165,7 @@ Union Statistics: Num rows: 500 Data size: 49500 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -172,6 +176,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types string:bigint:int escape.delim \ @@ -188,6 +193,7 @@ Union Statistics: Num rows: 500 Data size: 49500 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -198,6 +204,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types string:bigint:int escape.delim \ @@ -278,8 +285,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE @@ -353,6 +362,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 250 Data size: 24750 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### diff --git ql/src/test/results/clientpositive/groupby_map_ppr.q.out 
ql/src/test/results/clientpositive/groupby_map_ppr.q.out index 952f310..621a80a 100644 --- ql/src/test/results/clientpositive/groupby_map_ppr.q.out +++ ql/src/test/results/clientpositive/groupby_map_ppr.q.out @@ -59,8 +59,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 500 Data size: 93500 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 500 Data size: 93500 Basic stats: COMPLETE Column stats: COMPLETE @@ -184,6 +186,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 316 Data size: 86268 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -227,6 +230,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1304 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -290,7 +294,9 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1304 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -332,6 +338,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -342,6 +349,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types struct:struct:struct escape.delim \ diff --git 
ql/src/test/results/clientpositive/groupby_map_ppr_multi_distinct.q.out ql/src/test/results/clientpositive/groupby_map_ppr_multi_distinct.q.out index bd43f54..4db2458 100644 --- ql/src/test/results/clientpositive/groupby_map_ppr_multi_distinct.q.out +++ ql/src/test/results/clientpositive/groupby_map_ppr_multi_distinct.q.out @@ -59,8 +59,10 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 Statistics: Num rows: 1000 Data size: 294000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) null sort order: zzz + numBuckets: -1 sort order: +++ Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 1000 Data size: 294000 Basic stats: COMPLETE Column stats: COMPLETE @@ -184,6 +186,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 316 Data size: 88796 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -227,6 +230,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 1 Data size: 2152 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -290,7 +294,9 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 2152 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -332,6 +338,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 1 Data size: 2200 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -342,6 +349,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3,_col4 columns.types struct:struct:struct:struct:struct escape.delim \ diff --git ql/src/test/results/clientpositive/groupby_ppr.q.out ql/src/test/results/clientpositive/groupby_ppr.q.out index d7549d9..bb5e7e6 100644 --- ql/src/test/results/clientpositive/groupby_ppr.q.out +++ ql/src/test/results/clientpositive/groupby_ppr.q.out @@ -52,8 +52,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1000 Data size: 178000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 1000 Data size: 178000 Basic stats: COMPLETE Column stats: COMPLETE @@ -177,6 +179,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 316 Data size: 86268 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -214,6 +217,7 @@ outputColumnNames: key, c1, c2 Statistics: Num rows: 316 Data size: 86268 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -277,7 +281,9 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 316 Data size: 86268 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -319,6 +325,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -329,6 +336,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output 
format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types struct:struct:struct escape.delim \ diff --git ql/src/test/results/clientpositive/groupby_ppr_multi_distinct.q.out ql/src/test/results/clientpositive/groupby_ppr_multi_distinct.q.out index 95f95b0..43f1ac8 100644 --- ql/src/test/results/clientpositive/groupby_ppr_multi_distinct.q.out +++ ql/src/test/results/clientpositive/groupby_ppr_multi_distinct.q.out @@ -52,8 +52,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1000 Data size: 178000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) null sort order: zzz + numBuckets: -1 sort order: +++ Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 1000 Data size: 178000 Basic stats: COMPLETE Column stats: COMPLETE @@ -177,6 +179,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 316 Data size: 88796 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -214,6 +217,7 @@ outputColumnNames: key, c1, c2, c3, c4 Statistics: Num rows: 316 Data size: 88796 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -277,7 +281,9 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 316 Data size: 88796 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -319,6 +325,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 1 Data size: 2200 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern 
was here #### @@ -329,6 +336,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3,_col4 columns.types struct:struct:struct:struct:struct escape.delim \ @@ -426,8 +434,10 @@ outputColumnNames: $f0, $f1, $f2 Statistics: Num rows: 1000 Data size: 459000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: $f0 (type: string), $f1 (type: string), $f2 (type: string) null sort order: zzz + numBuckets: -1 sort order: +++ Map-reduce partition columns: $f0 (type: string) Statistics: Num rows: 1000 Data size: 459000 Basic stats: COMPLETE Column stats: COMPLETE @@ -551,6 +561,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 1 Data size: 196 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -588,6 +599,7 @@ outputColumnNames: key, c1, c2, c3, c4 Statistics: Num rows: 1 Data size: 196 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -651,7 +663,9 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 196 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -697,6 +711,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 1 Data size: 2200 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -707,6 +722,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns 
_col0,_col1,_col2,_col3,_col4 columns.types struct:struct:struct:struct:struct escape.delim \ diff --git ql/src/test/results/clientpositive/groupby_sort_1_23.q.out ql/src/test/results/clientpositive/groupby_sort_1_23.q.out index 6498e24..8061ffa 100644 --- ql/src/test/results/clientpositive/groupby_sort_1_23.q.out +++ ql/src/test/results/clientpositive/groupby_sort_1_23.q.out @@ -82,6 +82,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 3 Data size: 552 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -125,7 +126,9 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 1032 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1032 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -196,6 +199,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 1064 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -206,6 +210,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types struct:struct escape.delim \ @@ -272,6 +277,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -362,6 +368,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -526,8 +533,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 
(type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE @@ -605,6 +614,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 3 Data size: 1104 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -648,6 +658,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1656 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -711,7 +722,9 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1656 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -753,6 +766,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1688 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -763,6 +777,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types struct:struct:struct escape.delim \ @@ -850,6 +865,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 3 Data size: 552 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -893,7 +909,9 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 1032 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1032 Basic 
stats: COMPLETE Column stats: NONE tag: -1 @@ -964,6 +982,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 1064 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -974,6 +993,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types struct:struct escape.delim \ @@ -1040,6 +1060,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1130,6 +1151,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1293,6 +1315,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 3 Data size: 552 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -1336,7 +1359,9 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 1032 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1032 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -1407,6 +1432,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 1064 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1417,6 +1443,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types struct:struct escape.delim \ @@ -1483,6 +1510,7 @@ TableScan GatherStats: false File 
Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1573,6 +1601,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1744,6 +1773,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 3 Data size: 552 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -1787,7 +1817,9 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1456 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1456 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -1858,6 +1890,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1504 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1868,6 +1901,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types struct:struct:struct escape.delim \ @@ -1934,6 +1968,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -2024,6 +2059,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -2189,8 +2225,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string) null sort order: zz + numBuckets: -1 sort 
order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE @@ -2268,6 +2306,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 3 Data size: 1104 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -2311,6 +2350,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 2080 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -2374,7 +2414,9 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 2080 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -2416,6 +2458,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 2128 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -2426,6 +2469,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types struct:struct:struct:struct escape.delim \ @@ -2507,8 +2551,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 6 Data size: 1104 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: double) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: double) Statistics: Num rows: 6 Data size: 1104 Basic stats: COMPLETE Column stats: NONE @@ -2586,6 +2632,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num 
rows: 3 Data size: 552 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -2629,6 +2676,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1456 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -2692,7 +2740,9 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1456 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -2734,6 +2784,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1504 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -2744,6 +2795,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types struct:struct:struct escape.delim \ @@ -2837,8 +2889,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 3 Data size: 552 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: double) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: double) Statistics: Num rows: 3 Data size: 552 Basic stats: COMPLETE Column stats: NONE @@ -2916,6 +2970,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -2959,6 +3014,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 1032 Basic stats: COMPLETE Column stats: NONE File Output Operator + 
bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -3022,7 +3078,9 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1032 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -3064,6 +3122,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 1064 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -3074,6 +3133,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types struct:struct escape.delim \ @@ -3177,6 +3237,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 6 Data size: 1104 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -3220,7 +3281,9 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 1032 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1032 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -3247,6 +3310,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 6 Data size: 1104 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -3290,7 +3354,9 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 1032 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1032 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -3361,6 +3427,7 @@ outputColumnNames: 
_col0, _col1 Statistics: Num rows: 1 Data size: 1064 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -3371,6 +3438,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types struct:struct escape.delim \ @@ -3437,6 +3505,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -3527,6 +3596,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -3713,8 +3783,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 6 Data size: 1104 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: double) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: double) Statistics: Num rows: 6 Data size: 1104 Basic stats: COMPLETE Column stats: NONE @@ -3792,6 +3864,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 3 Data size: 552 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -3834,6 +3907,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 6 Data size: 1104 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -3877,7 +3951,9 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 1032 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1032 Basic stats: COMPLETE Column stats: NONE 
tag: -1 @@ -3892,6 +3968,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 6 Data size: 1104 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -3935,7 +4012,9 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 1032 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1032 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -4029,6 +4108,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 1064 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -4039,6 +4119,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types struct:struct escape.delim \ @@ -4105,6 +4186,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -4195,6 +4277,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -4378,8 +4461,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 3 Data size: 552 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 3 Data size: 552 Basic stats: COMPLETE Column stats: NONE @@ -4402,8 +4487,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 3 Data size: 552 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key 
expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 3 Data size: 552 Basic stats: COMPLETE Column stats: NONE @@ -4482,6 +4569,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 3 Data size: 607 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -4525,6 +4613,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 848 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -4588,7 +4677,9 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 848 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -4630,6 +4721,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -4640,6 +4732,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types struct:struct escape.delim \ @@ -4738,8 +4831,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE @@ -4813,6 +4908,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 3 Data size: 1104 
Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -4850,8 +4946,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 3 Data size: 552 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 3 Data size: 552 Basic stats: COMPLETE Column stats: NONE @@ -4861,8 +4959,10 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 3 Data size: 1104 Basic stats: COMPLETE Column stats: NONE @@ -4960,6 +5060,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 3 Data size: 607 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -4970,6 +5071,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3,_col4 columns.types string:bigint:string:string:bigint escape.delim \ @@ -5050,8 +5152,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: COMPLETE @@ -5129,6 +5233,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 3 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator 
+ bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -5172,6 +5277,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 848 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -5235,7 +5341,9 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 848 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -5277,6 +5385,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -5287,6 +5396,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types struct:struct escape.delim \ @@ -5372,6 +5482,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 6 Data size: 588 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -5415,7 +5526,9 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1712 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1712 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -5486,6 +5599,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1760 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -5496,6 +5610,7 @@ input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types struct:struct:struct:struct escape.delim \ @@ -5562,6 +5677,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -5652,6 +5768,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -5826,6 +5943,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 6 Data size: 612 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -5869,7 +5987,9 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 1 Data size: 2136 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 2136 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -5940,6 +6060,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 1 Data size: 2200 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -5950,6 +6071,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3,_col4 columns.types struct:struct:struct:struct:struct escape.delim \ @@ -6016,6 +6138,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -6106,6 +6229,7 @@ TableScan GatherStats: false File Output Operator 
+ bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -6279,6 +6403,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 6 Data size: 588 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -6322,7 +6447,9 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1712 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1712 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -6393,6 +6520,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1760 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -6403,6 +6531,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types struct:struct:struct:struct escape.delim \ @@ -6469,6 +6598,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -6559,6 +6689,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -6739,6 +6870,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 6 Data size: 588 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -6782,7 +6914,9 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1712 Basic stats: COMPLETE Column stats: COMPLETE 
Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1712 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -6853,6 +6987,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1760 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -6863,6 +6998,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types struct:struct:struct:struct escape.delim \ @@ -6929,6 +7065,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -7019,6 +7156,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### diff --git ql/src/test/results/clientpositive/groupby_sort_6.q.out ql/src/test/results/clientpositive/groupby_sort_6.q.out index 6930641..6bf1057 100644 --- ql/src/test/results/clientpositive/groupby_sort_6.q.out +++ ql/src/test/results/clientpositive/groupby_sort_6.q.out @@ -61,8 +61,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 92 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 1 Data size: 92 Basic stats: COMPLETE Column stats: COMPLETE @@ -83,6 +85,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -126,6 +129,7 
@@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 848 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -189,7 +193,9 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 848 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -231,6 +237,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -241,6 +248,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types struct:struct escape.delim \ @@ -329,8 +337,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 92 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 1 Data size: 92 Basic stats: COMPLETE Column stats: COMPLETE @@ -351,6 +361,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -394,6 +405,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 848 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -457,7 +469,9 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 
sort order: Statistics: Num rows: 1 Data size: 848 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -499,6 +513,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -509,6 +524,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types struct:struct escape.delim \ @@ -586,8 +602,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 184 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 1 Data size: 184 Basic stats: PARTIAL Column stats: NONE @@ -660,6 +678,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 184 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -703,6 +722,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 1032 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -766,7 +786,9 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1032 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -808,6 +830,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 1064 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -818,6 +841,7 @@ input 
format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types struct:struct escape.delim \ diff --git ql/src/test/results/clientpositive/groupby_sort_skew_1_23.q.out ql/src/test/results/clientpositive/groupby_sort_skew_1_23.q.out index 38826ef..0588f5f 100644 --- ql/src/test/results/clientpositive/groupby_sort_skew_1_23.q.out +++ ql/src/test/results/clientpositive/groupby_sort_skew_1_23.q.out @@ -82,6 +82,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 3 Data size: 552 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -125,7 +126,9 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 1032 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1032 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -196,6 +199,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 1064 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -206,6 +210,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types struct:struct escape.delim \ @@ -272,6 +277,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -362,6 +368,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -527,8 +534,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 
6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: rand() (type: double) Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE @@ -602,6 +611,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -626,8 +636,10 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE @@ -675,6 +687,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 3 Data size: 1104 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -718,6 +731,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1656 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -781,7 +795,9 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1656 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -823,6 +839,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1688 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -833,6 +850,7 @@ input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types struct:struct:struct escape.delim \ @@ -920,6 +938,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 3 Data size: 552 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -963,7 +982,9 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 1032 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1032 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -1034,6 +1055,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 1064 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1044,6 +1066,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types struct:struct escape.delim \ @@ -1110,6 +1133,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1200,6 +1224,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1363,6 +1388,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 3 Data size: 552 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -1406,7 +1432,9 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 1032 Basic stats: COMPLETE Column 
stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1032 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -1477,6 +1505,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 1064 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1487,6 +1516,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types struct:struct escape.delim \ @@ -1553,6 +1583,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1643,6 +1674,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1814,6 +1846,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 3 Data size: 552 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -1857,7 +1890,9 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1456 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1456 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -1928,6 +1963,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1504 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1938,6 +1974,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types struct:struct:struct escape.delim \ @@ -2004,6 +2041,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -2094,6 +2132,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -2260,8 +2299,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: rand() (type: double) Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE @@ -2335,6 +2376,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -2359,8 +2401,10 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE @@ -2408,6 +2452,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 3 Data size: 1104 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -2451,6 +2496,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 2080 Basic stats: COMPLETE Column stats: NONE File Output Operator + 
bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -2514,7 +2560,9 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 2080 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -2556,6 +2604,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 2128 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -2566,6 +2615,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types struct:struct:struct:struct escape.delim \ @@ -2648,8 +2698,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 6 Data size: 1104 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: double) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: rand() (type: double) Statistics: Num rows: 6 Data size: 1104 Basic stats: COMPLETE Column stats: NONE @@ -2723,6 +2775,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 6 Data size: 1104 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -2747,8 +2800,10 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: double) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: double) Statistics: Num rows: 6 Data size: 1104 Basic stats: COMPLETE Column stats: NONE @@ -2796,6 +2851,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num 
rows: 3 Data size: 552 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -2839,6 +2895,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1456 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -2902,7 +2959,9 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1456 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -2944,6 +3003,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1504 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -2954,6 +3014,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types struct:struct:struct escape.delim \ @@ -3048,8 +3109,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 3 Data size: 552 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: double) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: rand() (type: double) Statistics: Num rows: 3 Data size: 552 Basic stats: COMPLETE Column stats: NONE @@ -3123,6 +3186,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 3 Data size: 552 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -3147,8 +3211,10 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: double) null sort order: z + 
numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: double) Statistics: Num rows: 3 Data size: 552 Basic stats: COMPLETE Column stats: NONE @@ -3196,6 +3262,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -3239,6 +3306,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 1032 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -3302,7 +3370,9 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1032 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -3344,6 +3414,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 1064 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -3354,6 +3425,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types struct:struct escape.delim \ @@ -3457,6 +3529,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 6 Data size: 1104 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -3500,7 +3573,9 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 1032 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1032 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -3527,6 +3602,7 @@ outputColumnNames: 
_col0, _col1 Statistics: Num rows: 6 Data size: 1104 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -3570,7 +3646,9 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 1032 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1032 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -3641,6 +3719,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 1064 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -3651,6 +3730,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types struct:struct escape.delim \ @@ -3717,6 +3797,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -3807,6 +3888,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -3994,8 +4076,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 6 Data size: 1104 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: double) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: rand() (type: double) Statistics: Num rows: 6 Data size: 1104 Basic stats: COMPLETE Column stats: NONE @@ -4069,6 +4153,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 6 Data size: 1104 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was 
here #### @@ -4093,8 +4178,10 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: double) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: double) Statistics: Num rows: 6 Data size: 1104 Basic stats: COMPLETE Column stats: NONE @@ -4142,6 +4229,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 3 Data size: 552 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -4184,6 +4272,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 6 Data size: 1104 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -4227,7 +4316,9 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 1032 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1032 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -4242,6 +4333,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 6 Data size: 1104 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -4285,7 +4377,9 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 1032 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1032 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -4379,6 +4473,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 1064 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -4389,6 +4484,7 @@ input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types struct:struct escape.delim \ @@ -4455,6 +4551,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -4545,6 +4642,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -4728,8 +4826,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 3 Data size: 552 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 3 Data size: 552 Basic stats: COMPLETE Column stats: NONE @@ -4752,8 +4852,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 3 Data size: 552 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 3 Data size: 552 Basic stats: COMPLETE Column stats: NONE @@ -4832,6 +4934,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 3 Data size: 607 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -4875,6 +4978,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 848 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -4938,7 +5042,9 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: 
Num rows: 1 Data size: 848 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -4980,6 +5086,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -4990,6 +5097,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types struct:struct escape.delim \ @@ -5089,8 +5197,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: rand() (type: double) Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE @@ -5164,6 +5274,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -5188,8 +5299,10 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE @@ -5233,6 +5346,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 3 Data size: 1104 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -5270,8 +5384,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 3 Data size: 552 Basic stats: 
COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 3 Data size: 552 Basic stats: COMPLETE Column stats: NONE @@ -5281,8 +5397,10 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 3 Data size: 1104 Basic stats: COMPLETE Column stats: NONE @@ -5380,6 +5498,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 3 Data size: 607 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -5390,6 +5509,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3,_col4 columns.types string:bigint:string:string:bigint escape.delim \ @@ -5471,8 +5591,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: rand() (type: double) Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: COMPLETE @@ -5546,6 +5668,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -5570,8 +5693,10 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + 
numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: COMPLETE @@ -5619,6 +5744,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 3 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -5662,6 +5788,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 848 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -5725,7 +5852,9 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 848 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -5767,6 +5896,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -5777,6 +5907,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types struct:struct escape.delim \ @@ -5862,6 +5993,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 6 Data size: 588 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -5905,7 +6037,9 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1712 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1712 Basic stats: COMPLETE Column stats: 
COMPLETE tag: -1 @@ -5976,6 +6110,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1760 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -5986,6 +6121,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types struct:struct:struct:struct escape.delim \ @@ -6052,6 +6188,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -6142,6 +6279,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -6316,6 +6454,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 6 Data size: 612 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -6359,7 +6498,9 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 1 Data size: 2136 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 2136 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -6430,6 +6571,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 1 Data size: 2200 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -6440,6 +6582,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns 
_col0,_col1,_col2,_col3,_col4 columns.types struct:struct:struct:struct:struct escape.delim \ @@ -6506,6 +6649,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -6596,6 +6740,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -6769,6 +6914,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 6 Data size: 588 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -6812,7 +6958,9 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1712 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1712 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -6883,6 +7031,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1760 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -6893,6 +7042,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types struct:struct:struct:struct escape.delim \ @@ -6959,6 +7109,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -7049,6 +7200,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -7229,6 +7381,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 6 
Data size: 588 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -7272,7 +7425,9 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1712 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1712 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -7343,6 +7498,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1760 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -7353,6 +7509,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types struct:struct:struct:struct escape.delim \ @@ -7419,6 +7576,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -7509,6 +7667,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### diff --git ql/src/test/results/clientpositive/infer_bucket_sort_dyn_part.q.out ql/src/test/results/clientpositive/infer_bucket_sort_dyn_part.q.out index 9b869a1..02c40a2 100644 --- ql/src/test/results/clientpositive/infer_bucket_sort_dyn_part.q.out +++ ql/src/test/results/clientpositive/infer_bucket_sort_dyn_part.q.out @@ -661,7 +661,7 @@ numFiles 1 numRows 305 rawDataSize 1163 - totalSize 1346 + totalSize 1347 #### A masked pattern was here #### # Storage Information diff --git ql/src/test/results/clientpositive/infer_bucket_sort_num_buckets.q.out 
ql/src/test/results/clientpositive/infer_bucket_sort_num_buckets.q.out index 52fd083..cd19964 100644 --- ql/src/test/results/clientpositive/infer_bucket_sort_num_buckets.q.out +++ ql/src/test/results/clientpositive/infer_bucket_sort_num_buckets.q.out @@ -6,7 +6,7 @@ POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_table_n0 -PREHOOK: query: EXPLAIN +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE test_table_n0 PARTITION (ds = '2008-04-08', hr) SELECT key2, value, cast(hr as int) FROM (SELECT if ((key % 3) < 2, 0, 1) as key2, value, (key % 2) as hr @@ -18,7 +18,7 @@ PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 PREHOOK: Output: default@test_table_n0@ds=2008-04-08 -POSTHOOK: query: EXPLAIN +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE test_table_n0 PARTITION (ds = '2008-04-08', hr) SELECT key2, value, cast(hr as int) FROM (SELECT if ((key % 3) < 2, 0, 1) as key2, value, (key % 2) as hr @@ -44,17 +44,127 @@ alias: srcpart filterExpr: (ds = '2008-04-08') (type: boolean) Statistics: Num rows: 1000 Data size: 178000 Basic stats: COMPLETE Column stats: COMPLETE + GatherStats: false Select Operator expressions: if(((key % 3) < 2), 0, 1) (type: int), value (type: string), UDFToInteger((key % 2)) (type: int) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1000 Data size: 99000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1000 Data size: 99000 Basic stats: COMPLETE Column stats: COMPLETE + tag: -1 value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int) + auto parallelism: false Execution mode: vectorized + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: hr=11 + input 
format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-08 + hr 11 + properties: + COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} + bucket_count -1 + column.name.delimiter , + columns key,value + columns.comments 'default','default' + columns.types string:string +#### A masked pattern was here #### + name default.srcpart + numFiles 1 + numRows 500 + partition_columns ds/hr + partition_columns.types string:string + rawDataSize 5312 + serialization.ddl struct srcpart { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count -1 + bucketing_version 2 + column.name.delimiter , + columns key,value + columns.comments 'default','default' + columns.types string:string +#### A masked pattern was here #### + name default.srcpart + partition_columns ds/hr + partition_columns.types string:string + serialization.ddl struct srcpart { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.srcpart + name: default.srcpart +#### A masked pattern was here #### + Partition + base file name: hr=12 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-08 + hr 12 + properties: + COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} + bucket_count -1 + column.name.delimiter , + columns 
key,value + columns.comments 'default','default' + columns.types string:string +#### A masked pattern was here #### + name default.srcpart + numFiles 1 + numRows 500 + partition_columns ds/hr + partition_columns.types string:string + rawDataSize 5312 + serialization.ddl struct srcpart { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count -1 + bucketing_version 2 + column.name.delimiter , + columns key,value + columns.comments 'default','default' + columns.types string:string +#### A masked pattern was here #### + name default.srcpart + partition_columns ds/hr + partition_columns.types string:string + serialization.ddl struct srcpart { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.srcpart + name: default.srcpart + Truncated Path -> Alias: + /srcpart/ds=2008-04-08/hr=11 [a:srcpart] + /srcpart/ds=2008-04-08/hr=12 [a:srcpart] + Needs Tagging: false Reduce Operator Tree: Select Operator expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), CAST( VALUE._col2 AS STRING) (type: string) @@ -72,30 +182,89 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 316 Data size: 360872 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + properties: + 
column.name.delimiter , + columns _col0,_col1,_col2,_col3 + columns.types string,string,struct,struct + escape.delim \ + serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false File Output Operator + bucketingVersion: 1 compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + properties: + column.name.delimiter , + columns _col0,_col1,_col2 + columns.types int,string,string + escape.delim \ + serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false Stage: Stage-2 Map Reduce Map Operator Tree: TableScan + GatherStats: false Reduce Output Operator + bucketingVersion: 2 key expressions: '2008-04-08' (type: string), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: '2008-04-08' (type: string), _col1 (type: string) Statistics: Num rows: 316 Data size: 360872 Basic stats: COMPLETE Column stats: COMPLETE + tag: -1 value expressions: _col2 (type: struct), _col3 (type: struct) + auto parallelism: false Execution mode: vectorized + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: -mr-10002 + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + properties: + column.name.delimiter , + columns _col0,_col1,_col2,_col3 + columns.types string,string,struct,struct + escape.delim \ + serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + serde: 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + properties: + column.name.delimiter , + columns _col0,_col1,_col2,_col3 + columns.types string,string,struct,struct + escape.delim \ + serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + Truncated Path -> Alias: +#### A masked pattern was here #### + Needs Tagging: false Reduce Operator Tree: Group By Operator aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1) @@ -108,46 +277,126 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 316 Data size: 365928 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 Statistics: Num rows: 316 Data size: 365928 Basic stats: COMPLETE Column stats: COMPLETE +#### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + properties: + bucketing_version -1 + columns _col0,_col1,_col2,_col3 + columns.types struct:struct:string:string + escape.delim \ + hive.serialization.extend.additional.nesting.levels true + serialization.escape.crlf true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false Stage: Stage-4 Stats Work Basic Stats Work: +#### A masked pattern was here #### Column Stats Desc: Columns: key, value Column Types: int, string Table: default.test_table_n0 + Is Table Level Stats: false Stage: Stage-3 Map Reduce Map Operator Tree: TableScan + GatherStats: false Reduce Output Operator + 
bucketingVersion: 2 key expressions: _col2 (type: string) null sort order: a + numBuckets: -1 sort order: + Map-reduce partition columns: _col2 (type: string) Statistics: Num rows: 1000 Data size: 279000 Basic stats: COMPLETE Column stats: COMPLETE + tag: -1 value expressions: _col0 (type: int), _col1 (type: string) + auto parallelism: false + Path -> Bucketed Columns: +#### A masked pattern was here #### Execution mode: vectorized + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: -mr-10003 + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + properties: + column.name.delimiter , + columns _col0,_col1,_col2 + columns.types int,string,string + escape.delim \ + serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + properties: + column.name.delimiter , + columns _col0,_col1,_col2 + columns.types int,string,string + escape.delim \ + serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + Path -> Sorted Columns: +#### A masked pattern was here #### + Truncated Path -> Alias: +#### A masked pattern was here #### + Needs Tagging: false Reduce Operator Tree: Select Operator expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), KEY._col2 (type: string) outputColumnNames: _col0, _col1, _col2 File Output Operator + bucketingVersion: 2 compressed: false + GlobalTableId: 1 +#### A masked pattern was here #### Dp Sort State: PARTITION_SORTED + NumFilesPerFileSink: 1 + Static Partition Specification: ds=2008-04-08/ Statistics: Num rows: 1000 Data size: 279000 Basic stats: 
COMPLETE Column stats: COMPLETE +#### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count -1 + bucketing_version 2 + column.name.delimiter , + columns key,value + columns.comments + columns.types int:string +#### A masked pattern was here #### + name default.test_table_n0 + partition_columns ds/hr + partition_columns.types string:string + serialization.ddl struct test_table_n0 { i32 key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.test_table_n0 + TotalFiles: 1 + GatherStats: true + MultiFileSpray: false Stage: Stage-0 Move Operator @@ -156,9 +405,25 @@ ds 2008-04-08 hr replace: true +#### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count -1 + bucketing_version 2 + column.name.delimiter , + columns key,value + columns.comments + columns.types int:string +#### A masked pattern was here #### + name default.test_table_n0 + partition_columns ds/hr + partition_columns.types string:string + serialization.ddl struct test_table_n0 { i32 key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.test_table_n0 diff --git ql/src/test/results/clientpositive/input23.q.out ql/src/test/results/clientpositive/input23.q.out index 396f2c1..cd79dd3 100644 --- ql/src/test/results/clientpositive/input23.q.out +++ ql/src/test/results/clientpositive/input23.q.out @@ -37,7 +37,9 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 89000 Basic stats: 
COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE tag: 0 @@ -57,7 +59,9 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 168 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 168 Basic stats: COMPLETE Column stats: COMPLETE tag: 1 @@ -135,6 +139,7 @@ Number of rows: 5 Statistics: Num rows: 5 Data size: 3530 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -145,6 +150,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7 columns.types string:string:string:string:string:string:string:string escape.delim \ diff --git ql/src/test/results/clientpositive/input_part1.q.out ql/src/test/results/clientpositive/input_part1.q.out index 195f52c..3ac6602 100644 --- ql/src/test/results/clientpositive/input_part1.q.out +++ ql/src/test/results/clientpositive/input_part1.q.out @@ -51,6 +51,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 166 Data size: 45650 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -94,7 +95,9 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1744 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1744 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -162,6 +165,7 @@ 
outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1760 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -172,6 +176,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types struct:struct:struct:struct escape.delim \ @@ -238,6 +243,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -328,6 +334,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### diff --git ql/src/test/results/clientpositive/input_part2.q.out ql/src/test/results/clientpositive/input_part2.q.out index b187bc2..9d56af7 100644 --- ql/src/test/results/clientpositive/input_part2.q.out +++ ql/src/test/results/clientpositive/input_part2.q.out @@ -64,6 +64,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 333 Data size: 91575 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -107,7 +108,9 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1744 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1744 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -122,6 +125,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 333 Data size: 91575 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 2 #### A masked pattern was here #### @@ -165,6 +169,7 
@@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1744 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -294,6 +299,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1760 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -304,6 +310,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types struct:struct:struct:struct escape.delim \ @@ -370,6 +377,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -460,6 +468,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -595,7 +604,9 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1744 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -637,6 +648,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1760 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -647,6 +659,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types struct:struct:struct:struct escape.delim \ diff --git ql/src/test/results/clientpositive/input_part7.q.out 
ql/src/test/results/clientpositive/input_part7.q.out index 5ac50a4..f040d60 100644 --- ql/src/test/results/clientpositive/input_part7.q.out +++ ql/src/test/results/clientpositive/input_part7.q.out @@ -50,8 +50,10 @@ outputColumnNames: _col0, _col1, _col3 Statistics: Num rows: 666 Data size: 303696 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string) null sort order: zzz + numBuckets: -1 sort order: +++ Statistics: Num rows: 666 Data size: 303696 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -76,8 +78,10 @@ outputColumnNames: _col0, _col1, _col3 Statistics: Num rows: 666 Data size: 303696 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string) null sort order: zzz + numBuckets: -1 sort order: +++ Statistics: Num rows: 666 Data size: 303696 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -193,6 +197,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 666 Data size: 303696 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -203,6 +208,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types string:string:string:string escape.delim \ diff --git ql/src/test/results/clientpositive/join17.q.out ql/src/test/results/clientpositive/join17.q.out index e8bd76d..8d28f35 100644 --- ql/src/test/results/clientpositive/join17.q.out +++ ql/src/test/results/clientpositive/join17.q.out @@ -49,8 +49,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator 
+ bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE @@ -71,8 +73,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE @@ -147,6 +151,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 791 Data size: 150290 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -190,6 +195,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1728 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -253,7 +259,9 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1728 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -295,6 +303,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1760 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -305,6 +314,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types struct:struct:struct:struct escape.delim \ diff --git 
ql/src/test/results/clientpositive/join26.q.out ql/src/test/results/clientpositive/join26.q.out index aaa6bf2..2031b58 100644 --- ql/src/test/results/clientpositive/join26.q.out +++ ql/src/test/results/clientpositive/join26.q.out @@ -183,6 +183,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 61 Data size: 16348 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -226,6 +227,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -441,7 +443,9 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -483,6 +487,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -493,6 +498,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types struct:struct:struct escape.delim \ diff --git ql/src/test/results/clientpositive/join32.q.out ql/src/test/results/clientpositive/join32.q.out index d45e6b9..a8b9ae9 100644 --- ql/src/test/results/clientpositive/join32.q.out +++ ql/src/test/results/clientpositive/join32.q.out @@ -183,6 +183,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 63 Data size: 16884 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was 
here #### @@ -226,6 +227,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -441,7 +443,9 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -483,6 +487,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -493,6 +498,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types struct:struct:struct escape.delim \ diff --git ql/src/test/results/clientpositive/join33.q.out ql/src/test/results/clientpositive/join33.q.out index 04d9951..b53a32a 100644 --- ql/src/test/results/clientpositive/join33.q.out +++ ql/src/test/results/clientpositive/join33.q.out @@ -183,6 +183,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 63 Data size: 16884 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -226,6 +227,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -441,7 +443,9 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column 
stats: COMPLETE tag: -1 @@ -483,6 +487,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -493,6 +498,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types struct:struct:struct escape.delim \ diff --git ql/src/test/results/clientpositive/join34.q.out ql/src/test/results/clientpositive/join34.q.out index b8fd984..fdeb022 100644 --- ql/src/test/results/clientpositive/join34.q.out +++ ql/src/test/results/clientpositive/join34.q.out @@ -110,6 +110,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 46 Data size: 12236 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -153,6 +154,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -199,6 +201,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 46 Data size: 12236 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -242,6 +245,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -408,7 +412,9 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data 
size: 1320 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -450,6 +456,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -460,6 +467,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types struct:struct:struct escape.delim \ diff --git ql/src/test/results/clientpositive/join35.q.out ql/src/test/results/clientpositive/join35.q.out index f524ff4..f4d2c0c 100644 --- ql/src/test/results/clientpositive/join35.q.out +++ ql/src/test/results/clientpositive/join35.q.out @@ -75,8 +75,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 74 Data size: 7030 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 74 Data size: 7030 Basic stats: COMPLETE Column stats: COMPLETE @@ -146,6 +148,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 74 Data size: 7030 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -212,6 +215,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 46 Data size: 8234 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -255,6 +259,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1304 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was 
here #### @@ -290,6 +295,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 46 Data size: 8234 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -333,6 +339,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1304 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -495,7 +502,9 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1304 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -537,6 +546,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -547,6 +557,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types struct:struct:struct escape.delim \ @@ -579,8 +590,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 74 Data size: 7030 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 74 Data size: 7030 Basic stats: COMPLETE Column stats: COMPLETE @@ -650,6 +663,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 74 Data size: 7030 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### diff --git 
ql/src/test/results/clientpositive/join9.q.out ql/src/test/results/clientpositive/join9.q.out index 5c11241..0479d64 100644 --- ql/src/test/results/clientpositive/join9.q.out +++ ql/src/test/results/clientpositive/join9.q.out @@ -53,8 +53,10 @@ outputColumnNames: _col0 Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE @@ -74,8 +76,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE @@ -200,6 +204,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 791 Data size: 75145 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -243,6 +248,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -306,7 +312,9 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -348,6 +356,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A 
masked pattern was here #### @@ -358,6 +367,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types struct:struct escape.delim \ diff --git ql/src/test/results/clientpositive/join_filters_overlap.q.out ql/src/test/results/clientpositive/join_filters_overlap.q.out index 6bb6de1..90f441c 100644 --- ql/src/test/results/clientpositive/join_filters_overlap.q.out +++ ql/src/test/results/clientpositive/join_filters_overlap.q.out @@ -45,8 +45,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE @@ -67,8 +69,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE @@ -144,6 +148,7 @@ outputColumnNames: _col0, _col1, _col2, _col4, _col5 Statistics: Num rows: 3 Data size: 60 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -168,8 +173,10 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 3 Data size: 60 Basic stats: COMPLETE Column stats: COMPLETE @@ -190,8 +197,10 @@ 
outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE @@ -294,6 +303,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 3 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -304,6 +314,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3,_col4,_col5 columns.types int:int:int:int:int:int escape.delim \ @@ -384,8 +395,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE @@ -401,8 +414,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE @@ -478,6 +493,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 3 Data size: 60 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked 
pattern was here #### @@ -502,8 +518,10 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 key expressions: _col2 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col2 (type: int) Statistics: Num rows: 3 Data size: 60 Basic stats: COMPLETE Column stats: COMPLETE @@ -524,8 +542,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE @@ -628,6 +648,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 3 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -638,6 +659,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3,_col4,_col5 columns.types int:int:int:int:int:int escape.delim \ @@ -718,8 +740,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE @@ -735,8 +759,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: 
_col0 (type: int) Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE @@ -812,6 +838,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 3 Data size: 60 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -836,8 +863,10 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 key expressions: _col2 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col2 (type: int) Statistics: Num rows: 3 Data size: 60 Basic stats: COMPLETE Column stats: COMPLETE @@ -858,8 +887,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE @@ -962,6 +993,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 3 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -972,6 +1004,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3,_col4,_col5 columns.types int:int:int:int:int:int escape.delim \ @@ -1050,8 +1083,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 3 
Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE @@ -1067,8 +1102,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE @@ -1145,6 +1182,7 @@ outputColumnNames: _col0, _col1, _col2, _col4, _col5, _col6 Statistics: Num rows: 9 Data size: 216 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1169,8 +1207,10 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 key expressions: _col4 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col4 (type: int) Statistics: Num rows: 9 Data size: 216 Basic stats: COMPLETE Column stats: COMPLETE @@ -1191,8 +1231,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE @@ -1291,6 +1333,7 @@ outputColumnNames: _col0, _col1, _col2, _col4, _col5, _col8, _col9 Statistics: Num rows: 9 Data size: 252 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1315,8 +1358,10 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num 
rows: 9 Data size: 252 Basic stats: COMPLETE Column stats: COMPLETE @@ -1337,8 +1382,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE @@ -1441,6 +1488,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 Statistics: Num rows: 9 Data size: 288 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1451,6 +1499,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7 columns.types int:int:int:int:int:int:int:int escape.delim \ @@ -1521,8 +1570,10 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 3 Data size: 60 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 3 Data size: 60 Basic stats: COMPLETE Column stats: COMPLETE @@ -1543,8 +1594,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE @@ -1620,6 +1673,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col5, _col6 Statistics: Num rows: 3 Data 
size: 72 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1644,8 +1698,10 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 3 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE @@ -1666,8 +1722,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE @@ -1766,6 +1824,7 @@ outputColumnNames: _col0, _col1, _col2, _col5, _col6, _col7, _col8 Statistics: Num rows: 3 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1790,8 +1849,10 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 3 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE @@ -1812,8 +1873,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE @@ -1916,6 +1979,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 Statistics: Num rows: 3 Data 
size: 96 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1926,6 +1990,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7 columns.types int:int:int:int:int:int:int:int escape.delim \ diff --git ql/src/test/results/clientpositive/join_map_ppr.q.out ql/src/test/results/clientpositive/join_map_ppr.q.out index 0e5cc5f..e3d8212 100644 --- ql/src/test/results/clientpositive/join_map_ppr.q.out +++ ql/src/test/results/clientpositive/join_map_ppr.q.out @@ -113,6 +113,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1100 Data size: 195800 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -156,7 +157,9 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -226,6 +229,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -236,6 +240,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types struct:struct:struct escape.delim \ @@ -302,6 +307,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was 
here #### @@ -392,6 +398,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -765,6 +772,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1100 Data size: 104500 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -808,7 +816,9 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -878,6 +888,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -888,6 +899,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types struct:struct:struct escape.delim \ @@ -954,6 +966,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1044,6 +1057,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### diff --git ql/src/test/results/clientpositive/kafka/kafka_storage_handler.q.out ql/src/test/results/clientpositive/kafka/kafka_storage_handler.q.out index 68ea97d..71af39c 100644 --- ql/src/test/results/clientpositive/kafka/kafka_storage_handler.q.out +++ ql/src/test/results/clientpositive/kafka/kafka_storage_handler.q.out @@ -1109,8 +1109,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: 
Num rows: 1 Data size: 160 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: bigint), _col1 (type: timestamp), _col2 (type: binary) null sort order: zzz + numBuckets: -1 sort order: +++ Map-reduce partition columns: _col0 (type: bigint), _col1 (type: timestamp), _col2 (type: binary) Statistics: Num rows: 1 Data size: 160 Basic stats: COMPLETE Column stats: NONE @@ -1334,6 +1336,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 160 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 directory: hdfs://### HDFS PATH ### @@ -1344,6 +1347,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types bigint:timestamp:binary escape.delim \ @@ -1432,8 +1436,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 160 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: bigint), _col1 (type: timestamp), _col2 (type: binary) null sort order: zzz + numBuckets: -1 sort order: +++ Map-reduce partition columns: _col0 (type: bigint), _col1 (type: timestamp), _col2 (type: binary) Statistics: Num rows: 1 Data size: 160 Basic stats: COMPLETE Column stats: NONE @@ -1657,6 +1663,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 160 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 directory: hdfs://### HDFS PATH ### @@ -1667,6 +1674,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types bigint:timestamp:binary escape.delim \ diff --git 
ql/src/test/results/clientpositive/list_bucket_dml_1.q.out ql/src/test/results/clientpositive/list_bucket_dml_1.q.out index 7c2d558..57546be 100644 --- ql/src/test/results/clientpositive/list_bucket_dml_1.q.out +++ ql/src/test/results/clientpositive/list_bucket_dml_1.q.out @@ -47,6 +47,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1000 Data size: 362000 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -89,8 +90,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 2 Data size: 2316 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) Statistics: Num rows: 2 Data size: 2316 Basic stats: COMPLETE Column stats: COMPLETE @@ -214,6 +217,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 2 Data size: 2316 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -224,6 +228,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types struct:struct:string:string escape.delim \ @@ -444,6 +449,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -454,6 +460,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns 
_col0,_col1 columns.types string:string escape.delim \ diff --git ql/src/test/results/clientpositive/list_bucket_dml_11.q.out ql/src/test/results/clientpositive/list_bucket_dml_11.q.out index 63f3c83..79a47df 100644 --- ql/src/test/results/clientpositive/list_bucket_dml_11.q.out +++ ql/src/test/results/clientpositive/list_bucket_dml_11.q.out @@ -46,6 +46,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -88,8 +89,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) Statistics: Num rows: 1 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE @@ -162,6 +165,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -172,6 +176,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types struct:struct:string:string escape.delim \ @@ -322,6 +327,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -332,6 +338,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types string:string escape.delim \ diff --git ql/src/test/results/clientpositive/list_bucket_dml_12.q.out ql/src/test/results/clientpositive/list_bucket_dml_12.q.out index 5bcd456..012cad2 100644 --- ql/src/test/results/clientpositive/list_bucket_dml_12.q.out +++ ql/src/test/results/clientpositive/list_bucket_dml_12.q.out @@ -46,6 +46,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 500 Data size: 216500 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -88,8 +89,10 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 Statistics: Num rows: 1 Data size: 2380 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) Statistics: Num rows: 1 Data size: 2380 Basic stats: COMPLETE Column stats: COMPLETE @@ -162,6 +165,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 Statistics: Num rows: 1 Data size: 2380 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -172,6 +176,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3,_col4,_col5,_col6 columns.types struct:struct:struct:struct:struct:string:string escape.delim \ @@ -330,6 +335,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 Statistics: Num rows: 1 Data size: 613 Basic stats: COMPLETE Column stats: COMPLETE File Output 
Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -340,6 +346,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3,_col4,_col5,_col6 columns.types string:string:string:string:string:string:string escape.delim \ @@ -467,6 +474,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 Statistics: Num rows: 1 Data size: 613 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -477,6 +485,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3,_col4,_col5,_col6 columns.types string:string:string:string:string:string:string escape.delim \ diff --git ql/src/test/results/clientpositive/list_bucket_dml_13.q.out ql/src/test/results/clientpositive/list_bucket_dml_13.q.out index ce1dbf9..09e0eaa 100644 --- ql/src/test/results/clientpositive/list_bucket_dml_13.q.out +++ ql/src/test/results/clientpositive/list_bucket_dml_13.q.out @@ -46,6 +46,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 500 Data size: 216500 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -88,8 +89,10 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 Statistics: Num rows: 1 Data size: 2397 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) 
Statistics: Num rows: 1 Data size: 2397 Basic stats: COMPLETE Column stats: COMPLETE @@ -162,6 +165,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 Statistics: Num rows: 1 Data size: 2397 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -172,6 +176,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3,_col4,_col5,_col6 columns.types struct:struct:struct:struct:struct:string:string escape.delim \ @@ -330,6 +335,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 Statistics: Num rows: 1 Data size: 630 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -340,6 +346,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3,_col4,_col5,_col6 columns.types string:string:string:string:string:string:string escape.delim \ diff --git ql/src/test/results/clientpositive/list_bucket_dml_14.q.out ql/src/test/results/clientpositive/list_bucket_dml_14.q.out index 92b2e68..3e3e25a 100644 --- ql/src/test/results/clientpositive/list_bucket_dml_14.q.out +++ ql/src/test/results/clientpositive/list_bucket_dml_14.q.out @@ -40,6 +40,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -83,7 +84,9 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + 
bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -150,6 +153,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -160,6 +164,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types struct:struct escape.delim \ @@ -323,6 +328,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -333,6 +339,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types string:string escape.delim \ diff --git ql/src/test/results/clientpositive/list_bucket_dml_2.q.out ql/src/test/results/clientpositive/list_bucket_dml_2.q.out index e8df860..f39cadc 100644 --- ql/src/test/results/clientpositive/list_bucket_dml_2.q.out +++ ql/src/test/results/clientpositive/list_bucket_dml_2.q.out @@ -52,6 +52,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1000 Data size: 178000 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -94,8 +95,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string) null sort order: zz + numBuckets: -1 
sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) Statistics: Num rows: 1 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE @@ -219,6 +222,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -229,6 +233,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types struct:struct:string:string escape.delim \ @@ -401,6 +406,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 358 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -411,6 +417,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types string:string:string:string escape.delim \ diff --git ql/src/test/results/clientpositive/list_bucket_dml_3.q.out ql/src/test/results/clientpositive/list_bucket_dml_3.q.out index 60d595e..1944074 100644 --- ql/src/test/results/clientpositive/list_bucket_dml_3.q.out +++ ql/src/test/results/clientpositive/list_bucket_dml_3.q.out @@ -42,6 +42,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1000 Data size: 178000 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -84,8 +85,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key 
expressions: _col0 (type: string), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) Statistics: Num rows: 1 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE @@ -209,6 +212,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -219,6 +223,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types struct:struct:string:string escape.delim \ @@ -393,6 +398,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 3 Data size: 534 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -403,6 +409,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types string:string escape.delim \ diff --git ql/src/test/results/clientpositive/list_bucket_dml_4.q.out ql/src/test/results/clientpositive/list_bucket_dml_4.q.out index 7cef24f..a0334a9 100644 --- ql/src/test/results/clientpositive/list_bucket_dml_4.q.out +++ ql/src/test/results/clientpositive/list_bucket_dml_4.q.out @@ -52,6 +52,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1000 Data size: 178000 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -94,8 +95,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1060 Basic stats: COMPLETE Column stats: 
COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) Statistics: Num rows: 1 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE @@ -219,6 +222,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -229,6 +233,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types struct:struct:string:string escape.delim \ @@ -389,6 +394,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1000 Data size: 178000 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -430,8 +436,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) Statistics: Num rows: 1 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE @@ -555,6 +563,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -565,6 +574,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types struct:struct:string:string escape.delim \ @@ -853,6 +863,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 358 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -863,6 +874,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types string:string:string:string escape.delim \ diff --git ql/src/test/results/clientpositive/list_bucket_dml_5.q.out ql/src/test/results/clientpositive/list_bucket_dml_5.q.out index 1d3b7b9..11c57fd 100644 --- ql/src/test/results/clientpositive/list_bucket_dml_5.q.out +++ ql/src/test/results/clientpositive/list_bucket_dml_5.q.out @@ -47,6 +47,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1000 Data size: 362000 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -89,8 +90,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 2 Data size: 2316 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) Statistics: Num rows: 2 Data size: 2316 Basic stats: COMPLETE Column stats: COMPLETE @@ -214,6 +217,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 2 Data size: 2316 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here 
#### @@ -224,6 +228,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types struct:struct:string:string escape.delim \ @@ -451,6 +456,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 456 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -461,6 +467,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types string:string:string:string escape.delim \ diff --git ql/src/test/results/clientpositive/list_bucket_dml_6.q.out ql/src/test/results/clientpositive/list_bucket_dml_6.q.out index 3ec1d32..f7d7f2c 100644 --- ql/src/test/results/clientpositive/list_bucket_dml_6.q.out +++ ql/src/test/results/clientpositive/list_bucket_dml_6.q.out @@ -51,6 +51,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1000 Data size: 264000 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -93,8 +94,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) Statistics: Num rows: 1 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE @@ -218,6 +221,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE File 
Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -228,6 +232,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types struct:struct:string:string escape.delim \ @@ -432,6 +437,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1000 Data size: 264000 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -474,8 +480,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) Statistics: Num rows: 1 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE @@ -599,6 +607,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -609,6 +618,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types struct:struct:string:string escape.delim \ @@ -949,6 +959,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 546 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -959,6 +970,7 @@ input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types string:string:string:string escape.delim \ diff --git ql/src/test/results/clientpositive/list_bucket_dml_7.q.out ql/src/test/results/clientpositive/list_bucket_dml_7.q.out index c41ae71..b877e81 100644 --- ql/src/test/results/clientpositive/list_bucket_dml_7.q.out +++ ql/src/test/results/clientpositive/list_bucket_dml_7.q.out @@ -51,6 +51,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1000 Data size: 264000 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -93,8 +94,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) Statistics: Num rows: 1 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE @@ -218,6 +221,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -228,6 +232,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types struct:struct:string:string escape.delim \ @@ -432,6 +437,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1000 Data size: 264000 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 
compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -474,8 +480,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) Statistics: Num rows: 1 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE @@ -599,6 +607,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -609,6 +618,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types struct:struct:string:string escape.delim \ @@ -949,6 +959,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 546 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -959,6 +970,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types string:string:string:string escape.delim \ diff --git ql/src/test/results/clientpositive/list_bucket_dml_8.q.out ql/src/test/results/clientpositive/list_bucket_dml_8.q.out index 2b82c86..9851958 100644 --- ql/src/test/results/clientpositive/list_bucket_dml_8.q.out +++ ql/src/test/results/clientpositive/list_bucket_dml_8.q.out @@ -51,6 +51,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1000 
Data size: 264000 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -93,8 +94,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) Statistics: Num rows: 1 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE @@ -218,6 +221,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -228,6 +232,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types struct:struct:string:string escape.delim \ @@ -501,6 +506,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 546 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -511,6 +517,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types string:string:string:string escape.delim \ diff --git ql/src/test/results/clientpositive/list_bucket_dml_9.q.out ql/src/test/results/clientpositive/list_bucket_dml_9.q.out index 32c098b..c16fa5d 100644 --- ql/src/test/results/clientpositive/list_bucket_dml_9.q.out +++ 
ql/src/test/results/clientpositive/list_bucket_dml_9.q.out @@ -52,6 +52,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1000 Data size: 178000 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -94,8 +95,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) Statistics: Num rows: 1 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE @@ -219,6 +222,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -229,6 +233,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types struct:struct:string:string escape.delim \ @@ -389,6 +394,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1000 Data size: 178000 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -430,8 +436,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) Statistics: Num rows: 1 Data size: 
1060 Basic stats: COMPLETE Column stats: COMPLETE @@ -555,6 +563,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -565,6 +574,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types struct:struct:string:string escape.delim \ @@ -853,6 +863,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 358 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -863,6 +874,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types string:string:string:string escape.delim \ diff --git ql/src/test/results/clientpositive/list_bucket_query_oneskew_2.q.out ql/src/test/results/clientpositive/list_bucket_query_oneskew_2.q.out index bfc9def..972152d 100644 --- ql/src/test/results/clientpositive/list_bucket_query_oneskew_2.q.out +++ ql/src/test/results/clientpositive/list_bucket_query_oneskew_2.q.out @@ -165,6 +165,7 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 4 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -175,6 +176,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0 columns.types int escape.delim \ @@ -291,6 +293,7 @@ outputColumnNames: _col0, _col1 
Statistics: Num rows: 1 Data size: 188 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -301,6 +304,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types int:string escape.delim \ @@ -425,8 +429,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 188 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 1 Data size: 188 Basic stats: PARTIAL Column stats: NONE @@ -494,6 +500,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 188 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -504,6 +511,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types string:bigint escape.delim \ @@ -574,8 +582,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 4 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: boolean) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: boolean) Statistics: Num rows: 1 Data size: 4 Basic stats: PARTIAL Column stats: NONE @@ -647,6 +657,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 4 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -657,6 +668,7 @@ input 
format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types int:bigint escape.delim \ diff --git ql/src/test/results/clientpositive/llap/auto_sortmerge_join_1.q.out ql/src/test/results/clientpositive/llap/auto_sortmerge_join_1.q.out index 30914df..866f3a6 100644 --- ql/src/test/results/clientpositive/llap/auto_sortmerge_join_1.q.out +++ ql/src/test/results/clientpositive/llap/auto_sortmerge_join_1.q.out @@ -236,7 +236,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 1 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -535,7 +537,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 1 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -834,7 +838,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 1 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 diff --git ql/src/test/results/clientpositive/llap/auto_sortmerge_join_11.q.out ql/src/test/results/clientpositive/llap/auto_sortmerge_join_11.q.out index 3dd0b23..fe88a32 100644 --- ql/src/test/results/clientpositive/llap/auto_sortmerge_join_11.q.out +++ ql/src/test/results/clientpositive/llap/auto_sortmerge_join_11.q.out @@ -153,8 +153,10 @@ outputColumnNames: _col0 Statistics: Num rows: 2 Data size: 368 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 1 key expressions: _col0 (type: string) null sort order: z + 
numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 2 Data size: 368 Basic stats: PARTIAL Column stats: NONE @@ -233,8 +235,10 @@ outputColumnNames: _col0 Statistics: Num rows: 211 Data size: 34294 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 1 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 211 Data size: 34294 Basic stats: PARTIAL Column stats: NONE @@ -367,7 +371,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 1 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -561,7 +567,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 1 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -845,7 +853,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 1 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -1125,8 +1135,10 @@ Position of Big Table: 1 Statistics: Num rows: 250 Data size: 165502 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 250 Data size: 165502 Basic stats: PARTIAL Column stats: NONE @@ -1249,8 +1261,10 @@ predicate: key is not null (type: boolean) Statistics: Num rows: 228 Data size: 150457 Basic stats: PARTIAL Column stats: NONE 
Reduce Output Operator + bucketingVersion: 1 key expressions: key (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: key (type: string) Statistics: Num rows: 228 Data size: 150457 Basic stats: PARTIAL Column stats: NONE @@ -1383,7 +1397,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 1 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 diff --git ql/src/test/results/clientpositive/llap/auto_sortmerge_join_12.q.out ql/src/test/results/clientpositive/llap/auto_sortmerge_join_12.q.out index fe891c8..6be7e46 100644 --- ql/src/test/results/clientpositive/llap/auto_sortmerge_join_12.q.out +++ ql/src/test/results/clientpositive/llap/auto_sortmerge_join_12.q.out @@ -199,7 +199,9 @@ Select Operator Statistics: Num rows: 3 Data size: 1724 Basic stats: PARTIAL Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 1 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 3 Data size: 1724 Basic stats: PARTIAL Column stats: COMPLETE tag: 0 @@ -354,8 +356,10 @@ Position of Big Table: 0 Statistics: Num rows: 232 Data size: 37723 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col1 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col1 (type: string) Statistics: Num rows: 232 Data size: 37723 Basic stats: PARTIAL Column stats: NONE @@ -483,8 +487,10 @@ outputColumnNames: _col0 Statistics: Num rows: 2 Data size: 368 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 1 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 2 Data size: 368 Basic stats: PARTIAL Column stats: NONE @@ -566,7 +572,9 @@ 
outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 1 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -627,7 +635,9 @@ Select Operator Statistics: Num rows: 127 Data size: 20666 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 127 Data size: 20666 Basic stats: PARTIAL Column stats: NONE tag: 1 diff --git ql/src/test/results/clientpositive/llap/auto_sortmerge_join_2.q.out ql/src/test/results/clientpositive/llap/auto_sortmerge_join_2.q.out index 8ef914b..9959c05 100644 --- ql/src/test/results/clientpositive/llap/auto_sortmerge_join_2.q.out +++ ql/src/test/results/clientpositive/llap/auto_sortmerge_join_2.q.out @@ -218,7 +218,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 1 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -517,7 +519,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 1 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 diff --git ql/src/test/results/clientpositive/llap/auto_sortmerge_join_3.q.out ql/src/test/results/clientpositive/llap/auto_sortmerge_join_3.q.out index f322641..707e51a 100644 --- ql/src/test/results/clientpositive/llap/auto_sortmerge_join_3.q.out +++ ql/src/test/results/clientpositive/llap/auto_sortmerge_join_3.q.out @@ -268,7 +268,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 1 null sort order: 
+ numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -567,7 +569,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 1 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -866,7 +870,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 1 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 diff --git ql/src/test/results/clientpositive/llap/auto_sortmerge_join_4.q.out ql/src/test/results/clientpositive/llap/auto_sortmerge_join_4.q.out index 90186c7..dfe907e 100644 --- ql/src/test/results/clientpositive/llap/auto_sortmerge_join_4.q.out +++ ql/src/test/results/clientpositive/llap/auto_sortmerge_join_4.q.out @@ -284,7 +284,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 1 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -583,7 +585,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 1 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -882,7 +886,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 1 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 diff --git 
ql/src/test/results/clientpositive/llap/auto_sortmerge_join_5.q.out ql/src/test/results/clientpositive/llap/auto_sortmerge_join_5.q.out index 4bb3bb8..50adce9 100644 --- ql/src/test/results/clientpositive/llap/auto_sortmerge_join_5.q.out +++ ql/src/test/results/clientpositive/llap/auto_sortmerge_join_5.q.out @@ -193,7 +193,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 1 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -430,7 +432,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 1 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -667,7 +671,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 1 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE tag: -1 diff --git ql/src/test/results/clientpositive/llap/auto_sortmerge_join_7.q.out ql/src/test/results/clientpositive/llap/auto_sortmerge_join_7.q.out index 16a25de..75a37b0 100644 --- ql/src/test/results/clientpositive/llap/auto_sortmerge_join_7.q.out +++ ql/src/test/results/clientpositive/llap/auto_sortmerge_join_7.q.out @@ -303,7 +303,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 1 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -656,7 +658,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 1 
null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -1009,7 +1013,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 1 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 diff --git ql/src/test/results/clientpositive/llap/auto_sortmerge_join_8.q.out ql/src/test/results/clientpositive/llap/auto_sortmerge_join_8.q.out index 9d0f2ff..d6f15ed 100644 --- ql/src/test/results/clientpositive/llap/auto_sortmerge_join_8.q.out +++ ql/src/test/results/clientpositive/llap/auto_sortmerge_join_8.q.out @@ -303,7 +303,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 1 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -656,7 +658,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 1 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -1009,7 +1013,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 1 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 diff --git ql/src/test/results/clientpositive/llap/bucket2.q.out ql/src/test/results/clientpositive/llap/bucket2.q.out index 9b82a96..9a5decd 100644 --- ql/src/test/results/clientpositive/llap/bucket2.q.out +++ ql/src/test/results/clientpositive/llap/bucket2.q.out @@ -45,8 +45,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data 
size: 47500 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: a + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE @@ -117,6 +119,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -164,6 +167,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -174,6 +178,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types struct:struct escape.delim \ diff --git ql/src/test/results/clientpositive/llap/bucket3.q.out ql/src/test/results/clientpositive/llap/bucket3.q.out index e8cd3b9..3b303bd 100644 --- ql/src/test/results/clientpositive/llap/bucket3.q.out +++ ql/src/test/results/clientpositive/llap/bucket3.q.out @@ -46,8 +46,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: a + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE @@ -118,6 +120,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here 
#### @@ -161,8 +164,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 949 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 1 Data size: 949 Basic stats: COMPLETE Column stats: COMPLETE @@ -184,6 +189,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 965 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -194,6 +200,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types struct:struct:string escape.delim \ diff --git ql/src/test/results/clientpositive/llap/bucket4.q.out ql/src/test/results/clientpositive/llap/bucket4.q.out index ea9dc76..3d40bb6 100644 --- ql/src/test/results/clientpositive/llap/bucket4.q.out +++ ql/src/test/results/clientpositive/llap/bucket4.q.out @@ -45,8 +45,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: a + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE @@ -117,6 +119,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -165,6 +168,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: 
COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -175,6 +179,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types struct:struct escape.delim \ diff --git ql/src/test/results/clientpositive/llap/bucket5.q.out ql/src/test/results/clientpositive/llap/bucket5.q.out index 814c2e7..acb968d 100644 --- ql/src/test/results/clientpositive/llap/bucket5.q.out +++ ql/src/test/results/clientpositive/llap/bucket5.q.out @@ -65,8 +65,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: a + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE @@ -78,8 +80,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: a + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE @@ -152,6 +156,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 directory: hdfs://### HDFS PATH ### @@ -198,7 +203,9 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE 
Column stats: COMPLETE tag: -1 @@ -214,6 +221,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 directory: hdfs://### HDFS PATH ### @@ -224,6 +232,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types struct:struct escape.delim \ @@ -244,6 +253,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 2 directory: hdfs://### HDFS PATH ### @@ -288,7 +298,9 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -304,6 +316,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 directory: hdfs://### HDFS PATH ### @@ -314,6 +327,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types struct:struct escape.delim \ @@ -430,6 +444,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 directory: hdfs://### HDFS PATH ### @@ -526,6 +541,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 directory: hdfs://### HDFS PATH ### diff --git 
ql/src/test/results/clientpositive/llap/bucket_many.q.out ql/src/test/results/clientpositive/llap/bucket_many.q.out index 92482db..b478b64 100644 --- ql/src/test/results/clientpositive/llap/bucket_many.q.out +++ ql/src/test/results/clientpositive/llap/bucket_many.q.out @@ -46,8 +46,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: a + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE @@ -118,6 +120,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -162,7 +165,9 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -178,6 +183,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -188,6 +194,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types struct:struct escape.delim \ diff --git ql/src/test/results/clientpositive/llap/bucket_map_join_tez2.q.out ql/src/test/results/clientpositive/llap/bucket_map_join_tez2.q.out index 85d2e19..414b143 100644 --- ql/src/test/results/clientpositive/llap/bucket_map_join_tez2.q.out 
+++ ql/src/test/results/clientpositive/llap/bucket_map_join_tez2.q.out @@ -2446,8 +2446,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 352 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col1 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col1 (type: string) Statistics: Num rows: 1 Data size: 352 Basic stats: COMPLETE Column stats: COMPLETE @@ -2488,6 +2490,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 4 Data size: 1619 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -2498,6 +2501,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types string:string:string escape.delim \ diff --git ql/src/test/results/clientpositive/llap/bucket_num_reducers.q.out ql/src/test/results/clientpositive/llap/bucket_num_reducers.q.out index 40d1661..ccc177b 100644 --- ql/src/test/results/clientpositive/llap/bucket_num_reducers.q.out +++ ql/src/test/results/clientpositive/llap/bucket_num_reducers.q.out @@ -43,8 +43,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: a + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE @@ -115,6 +117,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### diff --git 
ql/src/test/results/clientpositive/llap/bucket_num_reducers2.q.out ql/src/test/results/clientpositive/llap/bucket_num_reducers2.q.out index 17f30f9..a35c4e6 100644 --- ql/src/test/results/clientpositive/llap/bucket_num_reducers2.q.out +++ ql/src/test/results/clientpositive/llap/bucket_num_reducers2.q.out @@ -43,8 +43,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: a + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE @@ -115,6 +117,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -162,6 +165,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -172,6 +176,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types struct:struct escape.delim \ diff --git ql/src/test/results/clientpositive/llap/bucketmapjoin1.q.out ql/src/test/results/clientpositive/llap/bucketmapjoin1.q.out index fca31cb..a5b97e1 100644 --- ql/src/test/results/clientpositive/llap/bucketmapjoin1.q.out +++ ql/src/test/results/clientpositive/llap/bucketmapjoin1.q.out @@ -73,8 +73,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + 
numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE @@ -99,8 +101,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE @@ -127,6 +131,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 168 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -137,6 +142,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types int:string:string escape.delim \ @@ -220,8 +226,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE @@ -246,8 +254,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE @@ -274,6 +284,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 168 Basic stats: COMPLETE Column 
stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -284,6 +295,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types int:string:string escape.delim \ @@ -464,8 +476,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE @@ -543,8 +557,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 122 Data size: 18933 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 122 Data size: 18933 Basic stats: PARTIAL Column stats: NONE @@ -625,6 +641,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 134 Data size: 20826 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -668,7 +685,9 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -684,6 +703,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was 
here #### @@ -694,6 +714,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types struct:struct:struct escape.delim \ @@ -907,8 +928,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE @@ -986,8 +1009,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 122 Data size: 18933 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 122 Data size: 18933 Basic stats: PARTIAL Column stats: NONE @@ -1068,6 +1093,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 134 Data size: 20826 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -1111,7 +1137,9 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -1127,6 +1155,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1137,6 +1166,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types struct:struct:struct escape.delim \ diff --git ql/src/test/results/clientpositive/llap/bucketmapjoin2.q.out ql/src/test/results/clientpositive/llap/bucketmapjoin2.q.out index bc200ed..f5a8dc2 100644 --- ql/src/test/results/clientpositive/llap/bucketmapjoin2.q.out +++ ql/src/test/results/clientpositive/llap/bucketmapjoin2.q.out @@ -148,8 +148,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 122 Data size: 18933 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 122 Data size: 18933 Basic stats: PARTIAL Column stats: NONE @@ -228,8 +230,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 64 Data size: 10026 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 64 Data size: 10026 Basic stats: PARTIAL Column stats: NONE @@ -310,6 +314,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 134 Data size: 20826 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -353,7 +358,9 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -369,6 +376,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE File Output Operator + 
bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -379,6 +387,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types struct:struct:struct escape.delim \ @@ -598,8 +607,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 122 Data size: 18933 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 122 Data size: 18933 Basic stats: PARTIAL Column stats: NONE @@ -678,8 +689,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 64 Data size: 10026 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 64 Data size: 10026 Basic stats: PARTIAL Column stats: NONE @@ -760,6 +773,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 134 Data size: 20826 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -803,7 +817,9 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -819,6 +835,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -829,6 +846,7 @@ 
input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types struct:struct:struct escape.delim \ @@ -1067,8 +1085,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 122 Data size: 18933 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 122 Data size: 18933 Basic stats: PARTIAL Column stats: NONE @@ -1147,8 +1167,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 127 Data size: 19590 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 127 Data size: 19590 Basic stats: PARTIAL Column stats: NONE @@ -1279,6 +1301,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 139 Data size: 21549 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -1322,7 +1345,9 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -1338,6 +1363,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1348,6 +1374,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types struct:struct:struct escape.delim \ diff --git ql/src/test/results/clientpositive/llap/bucketmapjoin3.q.out ql/src/test/results/clientpositive/llap/bucketmapjoin3.q.out index 74b3fd6..e11bb74 100644 --- ql/src/test/results/clientpositive/llap/bucketmapjoin3.q.out +++ ql/src/test/results/clientpositive/llap/bucketmapjoin3.q.out @@ -172,8 +172,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 64 Data size: 10026 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 64 Data size: 10026 Basic stats: PARTIAL Column stats: NONE @@ -252,8 +254,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 122 Data size: 18933 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 122 Data size: 18933 Basic stats: PARTIAL Column stats: NONE @@ -334,6 +338,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 134 Data size: 20826 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -377,7 +382,9 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -393,6 +400,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE File Output Operator + 
bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -403,6 +411,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types struct:struct:struct escape.delim \ @@ -622,8 +631,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 64 Data size: 10026 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 64 Data size: 10026 Basic stats: PARTIAL Column stats: NONE @@ -702,8 +713,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 122 Data size: 18933 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 122 Data size: 18933 Basic stats: PARTIAL Column stats: NONE @@ -784,6 +797,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 134 Data size: 20826 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -827,7 +841,9 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -843,6 +859,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -853,6 +870,7 @@ 
input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types struct:struct:struct escape.delim \ diff --git ql/src/test/results/clientpositive/llap/bucketmapjoin4.q.out ql/src/test/results/clientpositive/llap/bucketmapjoin4.q.out index 3bf5bf6..b4d9d3d 100644 --- ql/src/test/results/clientpositive/llap/bucketmapjoin4.q.out +++ ql/src/test/results/clientpositive/llap/bucketmapjoin4.q.out @@ -166,8 +166,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE @@ -245,8 +247,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE @@ -326,6 +330,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -369,7 +374,9 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -385,6 +392,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic 
stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -395,6 +403,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types struct:struct:struct escape.delim \ @@ -596,8 +605,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE @@ -675,8 +686,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE @@ -756,6 +769,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -799,7 +813,9 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -815,6 +831,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A 
masked pattern was here #### @@ -825,6 +842,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types struct:struct:struct escape.delim \ diff --git ql/src/test/results/clientpositive/llap/bucketmapjoin7.q.out ql/src/test/results/clientpositive/llap/bucketmapjoin7.q.out index 6b47eb2..5b476ed 100644 --- ql/src/test/results/clientpositive/llap/bucketmapjoin7.q.out +++ ql/src/test/results/clientpositive/llap/bucketmapjoin7.q.out @@ -99,8 +99,10 @@ predicate: key is not null (type: boolean) Statistics: Num rows: 66 Data size: 26560 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: key (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: key (type: int) Statistics: Num rows: 66 Data size: 26560 Basic stats: PARTIAL Column stats: NONE @@ -175,8 +177,10 @@ predicate: key is not null (type: boolean) Statistics: Num rows: 66 Data size: 38352 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: key (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: key (type: int) Statistics: Num rows: 66 Data size: 38352 Basic stats: PARTIAL Column stats: NONE @@ -264,8 +268,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 72 Data size: 29216 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Statistics: Num rows: 72 Data size: 29216 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -284,6 +290,7 @@ Number of rows: 1 Statistics: Num rows: 1 Data size: 405 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here 
#### @@ -294,6 +301,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types int:string escape.delim \ diff --git ql/src/test/results/clientpositive/llap/column_table_stats.q.out ql/src/test/results/clientpositive/llap/column_table_stats.q.out index a898426..b6d3bb4 100644 --- ql/src/test/results/clientpositive/llap/column_table_stats.q.out +++ ql/src/test/results/clientpositive/llap/column_table_stats.q.out @@ -92,7 +92,9 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 1248 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1248 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -161,6 +163,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 1248 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -171,6 +174,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types struct:struct escape.delim \ @@ -365,8 +369,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 196 Data size: 313792 Basic stats: PARTIAL Column stats: PARTIAL Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) Statistics: Num rows: 196 Data size: 313792 Basic stats: PARTIAL Column stats: PARTIAL @@ -492,6 +498,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 98 Data size: 155424 Basic stats: PARTIAL Column stats: PARTIAL 
File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -502,6 +509,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types struct:struct:string:string escape.delim \ @@ -790,8 +798,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 196 Data size: 313792 Basic stats: PARTIAL Column stats: PARTIAL Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) Statistics: Num rows: 196 Data size: 313792 Basic stats: PARTIAL Column stats: PARTIAL @@ -917,6 +927,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 98 Data size: 155424 Basic stats: PARTIAL Column stats: PARTIAL File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -927,6 +938,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types struct:struct:string:string escape.delim \ @@ -1212,8 +1224,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 117 Data size: 166072 Basic stats: PARTIAL Column stats: PARTIAL Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), '11' (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string), '11' (type: string) Statistics: Num rows: 117 Data size: 166072 Basic stats: PARTIAL Column stats: PARTIAL @@ -1290,6 +1304,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 58 Data size: 81584 Basic 
stats: PARTIAL Column stats: PARTIAL File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1300,6 +1315,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types struct:struct:string:string escape.delim \ diff --git ql/src/test/results/clientpositive/llap/column_table_stats_orc.q.out ql/src/test/results/clientpositive/llap/column_table_stats_orc.q.out index 5266770..7ca6622 100644 --- ql/src/test/results/clientpositive/llap/column_table_stats_orc.q.out +++ ql/src/test/results/clientpositive/llap/column_table_stats_orc.q.out @@ -95,7 +95,9 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -166,6 +168,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -176,6 +179,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types struct:struct escape.delim \ @@ -368,8 +372,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1248 Basic stats: COMPLETE Column stats: PARTIAL Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) Statistics: Num rows: 1 Data size: 1248 
Basic stats: COMPLETE Column stats: PARTIAL @@ -489,6 +495,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1248 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -499,6 +506,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types struct:struct:string:string escape.delim \ @@ -782,8 +790,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1150 Basic stats: COMPLETE Column stats: PARTIAL Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), '11' (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string), '11' (type: string) Statistics: Num rows: 1 Data size: 1150 Basic stats: COMPLETE Column stats: PARTIAL @@ -857,6 +867,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1150 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -867,6 +878,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types struct:struct:string:string escape.delim \ diff --git ql/src/test/results/clientpositive/llap/disable_merge_for_bucketing.q.out ql/src/test/results/clientpositive/llap/disable_merge_for_bucketing.q.out index 389a5f2..5da6244 100644 --- ql/src/test/results/clientpositive/llap/disable_merge_for_bucketing.q.out +++ ql/src/test/results/clientpositive/llap/disable_merge_for_bucketing.q.out @@ -45,8 +45,10 @@ outputColumnNames: _col0, _col1 
Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: a + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE @@ -117,6 +119,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -164,6 +167,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -174,6 +178,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types struct:struct escape.delim \ diff --git ql/src/test/results/clientpositive/llap/dynamic_semijoin_reduction.q.out ql/src/test/results/clientpositive/llap/dynamic_semijoin_reduction.q.out index 7076388..e379b14 100644 --- ql/src/test/results/clientpositive/llap/dynamic_semijoin_reduction.q.out +++ ql/src/test/results/clientpositive/llap/dynamic_semijoin_reduction.q.out @@ -1762,8 +1762,10 @@ outputColumnNames: _col0 Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE @@ -1881,8 +1883,10 @@ outputColumnNames: _col0 Statistics: Num rows: 20 Data size: 1740 Basic stats: PARTIAL Column stats: PARTIAL 
Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 20 Data size: 1740 Basic stats: PARTIAL Column stats: PARTIAL @@ -1899,7 +1903,9 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 639 Basic stats: PARTIAL Column stats: PARTIAL Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 639 Basic stats: PARTIAL Column stats: PARTIAL tag: -1 @@ -2021,7 +2027,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -2037,6 +2045,7 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -2047,6 +2056,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0 columns.types bigint escape.delim \ @@ -2068,7 +2078,9 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 639 Basic stats: PARTIAL Column stats: PARTIAL Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 639 Basic stats: PARTIAL Column stats: PARTIAL tag: -1 diff --git ql/src/test/results/clientpositive/llap/dynamic_semijoin_user_level.q.out ql/src/test/results/clientpositive/llap/dynamic_semijoin_user_level.q.out index c8fa625..fec7de2 100644 --- ql/src/test/results/clientpositive/llap/dynamic_semijoin_user_level.q.out +++ 
ql/src/test/results/clientpositive/llap/dynamic_semijoin_user_level.q.out @@ -961,8 +961,10 @@ outputColumnNames: _col0 Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE @@ -1085,8 +1087,10 @@ outputColumnNames: _col0 Statistics: Num rows: 20 Data size: 1740 Basic stats: PARTIAL Column stats: PARTIAL Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 20 Data size: 1740 Basic stats: PARTIAL Column stats: PARTIAL @@ -1103,7 +1107,9 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 639 Basic stats: PARTIAL Column stats: PARTIAL Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 639 Basic stats: PARTIAL Column stats: PARTIAL tag: -1 @@ -1230,7 +1236,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -1246,6 +1254,7 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 directory: hdfs://### HDFS PATH ### @@ -1256,6 +1265,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0 columns.types bigint escape.delim \ @@ -1277,7 
+1287,9 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 639 Basic stats: PARTIAL Column stats: PARTIAL Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 639 Basic stats: PARTIAL Column stats: PARTIAL tag: -1 diff --git ql/src/test/results/clientpositive/llap/filter_join_breaktask.q.out ql/src/test/results/clientpositive/llap/filter_join_breaktask.q.out index fc7b671..fc2f7f8 100644 --- ql/src/test/results/clientpositive/llap/filter_join_breaktask.q.out +++ ql/src/test/results/clientpositive/llap/filter_join_breaktask.q.out @@ -73,8 +73,10 @@ outputColumnNames: _col0 Statistics: Num rows: 15 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 15 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE @@ -151,8 +153,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 15 Data size: 1375 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 15 Data size: 1375 Basic stats: COMPLETE Column stats: COMPLETE @@ -230,8 +234,10 @@ outputColumnNames: _col0 Statistics: Num rows: 25 Data size: 2225 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 25 Data size: 2225 Basic stats: COMPLETE Column stats: COMPLETE @@ -306,8 +312,10 @@ Position of Big Table: 1 Statistics: Num rows: 25 Data size: 2305 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + 
bucketingVersion: 2 key expressions: _col2 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col2 (type: string) Statistics: Num rows: 25 Data size: 2305 Basic stats: COMPLETE Column stats: COMPLETE @@ -332,6 +340,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 32 Data size: 2956 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -342,6 +351,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types int:string escape.delim \ diff --git ql/src/test/results/clientpositive/llap/filter_union.q.out ql/src/test/results/clientpositive/llap/filter_union.q.out index 30cbf9b..50761eb 100644 --- ql/src/test/results/clientpositive/llap/filter_union.q.out +++ ql/src/test/results/clientpositive/llap/filter_union.q.out @@ -66,8 +66,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE @@ -147,8 +149,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE @@ -225,6 +229,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 250 Data size: 24750 Basic stats: COMPLETE Column stats: 
COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -235,6 +240,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types string:bigint:int escape.delim \ @@ -261,6 +267,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 250 Data size: 24750 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -271,6 +278,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types string:bigint:int escape.delim \ diff --git ql/src/test/results/clientpositive/llap/join32_lessSize.q.out ql/src/test/results/clientpositive/llap/join32_lessSize.q.out index 8072a8f..c7b8bf6 100644 --- ql/src/test/results/clientpositive/llap/join32_lessSize.q.out +++ ql/src/test/results/clientpositive/llap/join32_lessSize.q.out @@ -90,8 +90,10 @@ Position of Big Table: 0 Statistics: Num rows: 39 Data size: 10374 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col3 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col3 (type: string) Statistics: Num rows: 39 Data size: 10374 Basic stats: COMPLETE Column stats: COMPLETE @@ -169,8 +171,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 25 Data size: 4375 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 25 Data size: 4375 Basic stats: COMPLETE 
Column stats: COMPLETE @@ -264,6 +268,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 63 Data size: 16884 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -307,7 +312,9 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -379,6 +386,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -389,6 +397,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types struct:struct:struct escape.delim \ @@ -637,8 +646,10 @@ Position of Big Table: 0 Statistics: Num rows: 39 Data size: 10296 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 39 Data size: 10296 Basic stats: COMPLETE Column stats: COMPLETE @@ -716,8 +727,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 25 Data size: 4375 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 25 Data size: 4375 Basic stats: COMPLETE Column stats: COMPLETE @@ -807,8 +820,10 @@ Position of Big Table: 1 
Statistics: Num rows: 61 Data size: 21655 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col1 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col1 (type: string) Statistics: Num rows: 61 Data size: 21655 Basic stats: COMPLETE Column stats: COMPLETE @@ -902,6 +917,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 99 Data size: 26334 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -945,7 +961,9 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -1016,6 +1034,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1026,6 +1045,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types struct:struct:struct escape.delim \ @@ -1270,8 +1290,10 @@ Position of Big Table: 0 Statistics: Num rows: 39 Data size: 6825 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col2 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col2 (type: string) Statistics: Num rows: 39 Data size: 6825 Basic stats: COMPLETE Column stats: COMPLETE @@ -1349,8 +1371,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 25 Data size: 4375 Basic stats: COMPLETE 
Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 25 Data size: 4375 Basic stats: COMPLETE Column stats: COMPLETE @@ -1444,6 +1468,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 63 Data size: 16758 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -1487,7 +1512,9 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -1559,6 +1586,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1569,6 +1597,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types struct:struct:struct escape.delim \ @@ -1803,8 +1832,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 25 Data size: 4375 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 25 Data size: 4375 Basic stats: COMPLETE Column stats: COMPLETE @@ -1882,8 +1913,10 @@ outputColumnNames: _col0 Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + 
bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE @@ -1976,6 +2009,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 104 Data size: 27664 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -2019,7 +2053,9 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -2095,8 +2131,10 @@ Position of Big Table: 0 Statistics: Num rows: 64 Data size: 11200 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col1 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col1 (type: string) Statistics: Num rows: 64 Data size: 11200 Basic stats: COMPLETE Column stats: COMPLETE @@ -2113,6 +2151,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -2123,6 +2162,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types struct:struct:struct escape.delim \ diff --git ql/src/test/results/clientpositive/llap/list_bucket_dml_10.q.out ql/src/test/results/clientpositive/llap/list_bucket_dml_10.q.out index ba65d48..3ce63ff 100644 --- 
ql/src/test/results/clientpositive/llap/list_bucket_dml_10.q.out +++ ql/src/test/results/clientpositive/llap/list_bucket_dml_10.q.out @@ -53,6 +53,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -95,8 +96,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) Statistics: Num rows: 1 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE @@ -173,6 +176,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -183,6 +187,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types struct:struct:string:string escape.delim \ diff --git ql/src/test/results/clientpositive/llap/mapjoin_mapjoin.q.out ql/src/test/results/clientpositive/llap/mapjoin_mapjoin.q.out index 8f4c9bf..f69a950 100644 --- ql/src/test/results/clientpositive/llap/mapjoin_mapjoin.q.out +++ ql/src/test/results/clientpositive/llap/mapjoin_mapjoin.q.out @@ -80,6 +80,7 @@ Position of Big Table: 0 Statistics: Num rows: 2420 Data size: 25709 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -90,6 +91,7 @@ input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0 columns.types string escape.delim \ @@ -323,8 +325,10 @@ outputColumnNames: _col0 Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE @@ -401,8 +405,10 @@ outputColumnNames: _col0 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE diff --git ql/src/test/results/clientpositive/llap/metadataonly1.q.out ql/src/test/results/clientpositive/llap/metadataonly1.q.out index 54cd83d..38a244d 100644 --- ql/src/test/results/clientpositive/llap/metadataonly1.q.out +++ ql/src/test/results/clientpositive/llap/metadataonly1.q.out @@ -45,7 +45,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -63,6 +65,7 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -73,6 +76,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat 
properties: + bucketing_version -1 columns _col0 columns.types string escape.delim \ @@ -148,7 +152,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 368 Basic stats: PARTIAL Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 368 Basic stats: PARTIAL Column stats: COMPLETE tag: -1 @@ -218,6 +224,7 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 368 Basic stats: PARTIAL Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -228,6 +235,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0 columns.types string escape.delim \ @@ -299,8 +307,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 192 Basic stats: PARTIAL Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Statistics: Num rows: 1 Data size: 192 Basic stats: PARTIAL Column stats: COMPLETE tag: -1 @@ -369,6 +379,7 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 200 Basic stats: PARTIAL Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -379,6 +390,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0 columns.types bigint escape.delim \ @@ -449,7 +461,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 192 Basic stats: PARTIAL Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 192 Basic stats: PARTIAL 
Column stats: COMPLETE tag: -1 @@ -520,6 +534,7 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 192 Basic stats: PARTIAL Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -530,6 +545,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0 columns.types bigint escape.delim \ @@ -614,8 +630,10 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 184 Basic stats: PARTIAL Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 1 Data size: 184 Basic stats: PARTIAL Column stats: COMPLETE @@ -742,7 +760,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 368 Basic stats: PARTIAL Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 368 Basic stats: PARTIAL Column stats: COMPLETE tag: -1 @@ -871,7 +891,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -887,6 +909,7 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -897,6 +920,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0 columns.types bigint escape.delim \ @@ 
-922,8 +946,10 @@ predicate: _col0 is not null (type: boolean) Statistics: Num rows: 1 Data size: 368 Basic stats: PARTIAL Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 1 Data size: 368 Basic stats: PARTIAL Column stats: COMPLETE @@ -1041,8 +1067,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 376 Basic stats: PARTIAL Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 1 Data size: 376 Basic stats: PARTIAL Column stats: COMPLETE @@ -1214,6 +1242,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 384 Basic stats: PARTIAL Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1224,6 +1253,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types string:bigint escape.delim \ @@ -1303,8 +1333,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 376 Basic stats: PARTIAL Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 1 Data size: 376 Basic stats: PARTIAL Column stats: COMPLETE @@ -1478,6 +1510,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 376 Basic stats: PARTIAL Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern 
was here #### @@ -1488,6 +1521,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types string:bigint escape.delim \ @@ -1563,7 +1597,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 368 Basic stats: PARTIAL Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 368 Basic stats: PARTIAL Column stats: COMPLETE tag: -1 @@ -1682,6 +1718,7 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 368 Basic stats: PARTIAL Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1692,6 +1729,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0 columns.types string escape.delim \ @@ -1823,8 +1861,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 376 Basic stats: PARTIAL Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 1 Data size: 376 Basic stats: PARTIAL Column stats: COMPLETE @@ -2096,6 +2136,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 384 Basic stats: PARTIAL Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -2106,6 +2147,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types string:bigint 
escape.delim \ diff --git ql/src/test/results/clientpositive/llap/murmur_hash_migration.q.out ql/src/test/results/clientpositive/llap/murmur_hash_migration.q.out index f847a30..10de289 100644 --- ql/src/test/results/clientpositive/llap/murmur_hash_migration.q.out +++ ql/src/test/results/clientpositive/llap/murmur_hash_migration.q.out @@ -144,20 +144,22 @@ POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tab_part_n11 -PREHOOK: query: explain +PREHOOK: query: explain extended insert overwrite table tab_part_n11 partition (ds='2008-04-08') select key,value from srcbucket_mapjoin_part_n20 PREHOOK: type: QUERY PREHOOK: Input: default@srcbucket_mapjoin_part_n20 PREHOOK: Input: default@srcbucket_mapjoin_part_n20@ds=2008-04-08 PREHOOK: Output: default@tab_part_n11@ds=2008-04-08 -POSTHOOK: query: explain +POSTHOOK: query: explain extended insert overwrite table tab_part_n11 partition (ds='2008-04-08') select key,value from srcbucket_mapjoin_part_n20 POSTHOOK: type: QUERY POSTHOOK: Input: default@srcbucket_mapjoin_part_n20 POSTHOOK: Input: default@srcbucket_mapjoin_part_n20@ds=2008-04-08 POSTHOOK: Output: default@tab_part_n11@ds=2008-04-08 +OPTIMIZED SQL: SELECT `key`, `value` +FROM `default`.`srcbucket_mapjoin_part_n20` STAGE DEPENDENCIES: Stage-1 is a root stage Stage-2 depends on stages: Stage-1 @@ -177,34 +179,120 @@ TableScan alias: srcbucket_mapjoin_part_n20 Statistics: Num rows: 150 Data size: 14250 Basic stats: COMPLETE Column stats: COMPLETE + GatherStats: false Select Operator expressions: key (type: int), value (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 150 Data size: 14250 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: a + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 150 Data size: 14250 Basic stats: COMPLETE Column stats: COMPLETE + tag: -1 value 
expressions: _col1 (type: string) + auto parallelism: false Execution mode: vectorized, llap LLAP IO: no inputs + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-08 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-08 + properties: + COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} + bucket_count 4 + bucket_field_name key + column.name.delimiter , + columns key,value + columns.comments + columns.types int:string +#### A masked pattern was here #### + name default.srcbucket_mapjoin_part_n20 + numFiles 4 + numRows 150 + partition_columns ds + partition_columns.types string + rawDataSize 1602 + serialization.ddl struct srcbucket_mapjoin_part_n20 { i32 key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 1752 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count 4 + bucket_field_name key + bucketing_version 1 + column.name.delimiter , + columns key,value + columns.comments + columns.types int:string +#### A masked pattern was here #### + name default.srcbucket_mapjoin_part_n20 + partition_columns ds + partition_columns.types string + serialization.ddl struct srcbucket_mapjoin_part_n20 { i32 key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.srcbucket_mapjoin_part_n20 + name: default.srcbucket_mapjoin_part_n20 + Truncated Path -> Alias: + 
/srcbucket_mapjoin_part_n20/ds=2008-04-08 [srcbucket_mapjoin_part_n20] Reducer 2 Execution mode: vectorized, llap + Needs Tagging: false Reduce Operator Tree: Select Operator expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 150 Data size: 14250 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false + GlobalTableId: 1 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + Static Partition Specification: ds=2008-04-08/ Statistics: Num rows: 150 Data size: 14250 Basic stats: COMPLETE Column stats: COMPLETE +#### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count 4 + bucket_field_name key + bucketing_version 2 + column.name.delimiter , + columns key,value + columns.comments + columns.types int:string +#### A masked pattern was here #### + name default.tab_part_n11 + partition_columns ds + partition_columns.types string + serialization.ddl struct tab_part_n11 { i32 key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.tab_part_n11 + TotalFiles: 1 + GatherStats: true + MultiFileSpray: false Stage: Stage-2 Dependency Collection @@ -215,15 +303,33 @@ partition: ds 2008-04-08 replace: true +#### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count 4 + bucket_field_name key + bucketing_version 2 + column.name.delimiter , + columns key,value + columns.comments + columns.types int:string +#### A masked pattern was here #### + name default.tab_part_n11 + partition_columns ds + 
partition_columns.types string + serialization.ddl struct tab_part_n11 { i32 key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.tab_part_n11 Stage: Stage-3 Stats Work Basic Stats Work: +#### A masked pattern was here #### PREHOOK: query: insert overwrite table tab_part_n11 partition (ds='2008-04-08') select key,value from srcbucket_mapjoin_part_n20 @@ -247,20 +353,22 @@ POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tab_n10 -PREHOOK: query: explain +PREHOOK: query: explain extended insert overwrite table tab_n10 partition (ds='2008-04-08') select key,value from srcbucket_mapjoin_n18 PREHOOK: type: QUERY PREHOOK: Input: default@srcbucket_mapjoin_n18 PREHOOK: Input: default@srcbucket_mapjoin_n18@ds=2008-04-08 PREHOOK: Output: default@tab_n10@ds=2008-04-08 -POSTHOOK: query: explain +POSTHOOK: query: explain extended insert overwrite table tab_n10 partition (ds='2008-04-08') select key,value from srcbucket_mapjoin_n18 POSTHOOK: type: QUERY POSTHOOK: Input: default@srcbucket_mapjoin_n18 POSTHOOK: Input: default@srcbucket_mapjoin_n18@ds=2008-04-08 POSTHOOK: Output: default@tab_n10@ds=2008-04-08 +OPTIMIZED SQL: SELECT `key`, `value` +FROM `default`.`srcbucket_mapjoin_n18` STAGE DEPENDENCIES: Stage-1 is a root stage Stage-2 depends on stages: Stage-1 @@ -280,34 +388,120 @@ TableScan alias: srcbucket_mapjoin_n18 Statistics: Num rows: 150 Data size: 14250 Basic stats: COMPLETE Column stats: COMPLETE + GatherStats: false Select Operator expressions: key (type: int), value (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 150 Data size: 14250 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: a + numBuckets: -1 sort order: + Map-reduce partition columns: 
_col0 (type: int) Statistics: Num rows: 150 Data size: 14250 Basic stats: COMPLETE Column stats: COMPLETE + tag: -1 value expressions: _col1 (type: string) + auto parallelism: false Execution mode: vectorized, llap LLAP IO: no inputs + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-08 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-08 + properties: + COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} + bucket_count 2 + bucket_field_name key + column.name.delimiter , + columns key,value + columns.comments + columns.types int:string +#### A masked pattern was here #### + name default.srcbucket_mapjoin_n18 + numFiles 2 + numRows 150 + partition_columns ds + partition_columns.types string + rawDataSize 1598 + serialization.ddl struct srcbucket_mapjoin_n18 { i32 key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 1748 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count 2 + bucket_field_name key + bucketing_version 1 + column.name.delimiter , + columns key,value + columns.comments + columns.types int:string +#### A masked pattern was here #### + name default.srcbucket_mapjoin_n18 + partition_columns ds + partition_columns.types string + serialization.ddl struct srcbucket_mapjoin_n18 { i32 key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.srcbucket_mapjoin_n18 + 
name: default.srcbucket_mapjoin_n18 + Truncated Path -> Alias: + /srcbucket_mapjoin_n18/ds=2008-04-08 [srcbucket_mapjoin_n18] Reducer 2 Execution mode: vectorized, llap + Needs Tagging: false Reduce Operator Tree: Select Operator expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 150 Data size: 14250 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false + GlobalTableId: 1 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + Static Partition Specification: ds=2008-04-08/ Statistics: Num rows: 150 Data size: 14250 Basic stats: COMPLETE Column stats: COMPLETE +#### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count 2 + bucket_field_name key + bucketing_version 2 + column.name.delimiter , + columns key,value + columns.comments + columns.types int:string +#### A masked pattern was here #### + name default.tab_n10 + partition_columns ds + partition_columns.types string + serialization.ddl struct tab_n10 { i32 key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.tab_n10 + TotalFiles: 1 + GatherStats: true + MultiFileSpray: false Stage: Stage-2 Dependency Collection @@ -318,15 +512,33 @@ partition: ds 2008-04-08 replace: true +#### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count 2 + bucket_field_name key + bucketing_version 2 + column.name.delimiter , + columns key,value + columns.comments + columns.types int:string +#### A masked pattern was here #### + name default.tab_n10 + 
partition_columns ds + partition_columns.types string + serialization.ddl struct tab_n10 { i32 key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.tab_n10 Stage: Stage-3 Stats Work Basic Stats Work: +#### A masked pattern was here #### PREHOOK: query: insert overwrite table tab_n10 partition (ds='2008-04-08') select key,value from srcbucket_mapjoin_n18 @@ -370,7 +582,7 @@ POSTHOOK: Output: default@tab_n10 POSTHOOK: Output: default@tab_n10@ds=2008-04-08 #### A masked pattern was here #### -PREHOOK: query: explain +PREHOOK: query: explain extended select t1.key, t1.value, t2.key, t2.value from srcbucket_mapjoin_n18 t1, srcbucket_mapjoin_part_n20 t2 where t1.key = t2.key order by t1.key, t1.value, t2.key, t2.value PREHOOK: type: QUERY PREHOOK: Input: default@srcbucket_mapjoin_n18 @@ -378,7 +590,7 @@ PREHOOK: Input: default@srcbucket_mapjoin_part_n20 PREHOOK: Input: default@srcbucket_mapjoin_part_n20@ds=2008-04-08 #### A masked pattern was here #### -POSTHOOK: query: explain +POSTHOOK: query: explain extended select t1.key, t1.value, t2.key, t2.value from srcbucket_mapjoin_n18 t1, srcbucket_mapjoin_part_n20 t2 where t1.key = t2.key order by t1.key, t1.value, t2.key, t2.value POSTHOOK: type: QUERY POSTHOOK: Input: default@srcbucket_mapjoin_n18 @@ -386,6 +598,14 @@ POSTHOOK: Input: default@srcbucket_mapjoin_part_n20 POSTHOOK: Input: default@srcbucket_mapjoin_part_n20@ds=2008-04-08 #### A masked pattern was here #### +OPTIMIZED SQL: SELECT * +FROM (SELECT `key`, `value` +FROM `default`.`srcbucket_mapjoin_n18` +WHERE `key` IS NOT NULL) AS `t0` +INNER JOIN (SELECT `key`, `value` +FROM `default`.`srcbucket_mapjoin_part_n20` +WHERE `key` IS NOT NULL) AS `t2` ON `t0`.`key` = `t2`.`key` +ORDER BY `t0`.`key`, `t0`.`value`, `t2`.`key`, `t2`.`value` STAGE DEPENDENCIES: Stage-1 is a root stage Stage-0 depends on 
stages: Stage-1 @@ -405,7 +625,9 @@ alias: t1 filterExpr: key is not null (type: boolean) Statistics: Num rows: 150 Data size: 14250 Basic stats: COMPLETE Column stats: COMPLETE + GatherStats: false Filter Operator + isSamplingPred: false predicate: key is not null (type: boolean) Statistics: Num rows: 150 Data size: 14250 Basic stats: COMPLETE Column stats: COMPLETE Select Operator @@ -415,27 +637,91 @@ Map Join Operator condition map: Inner Join 0 to 1 + Estimated key counts: Map 3 => 75 keys: 0 _col0 (type: int) 1 _col0 (type: int) outputColumnNames: _col0, _col1, _col2, _col3 input vertices: 1 Map 3 + Position of Big Table: 0 Statistics: Num rows: 220 Data size: 41800 Basic stats: COMPLETE Column stats: COMPLETE + BucketMapJoin: true Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: string) null sort order: zzzz + numBuckets: -1 sort order: ++++ Statistics: Num rows: 220 Data size: 41800 Basic stats: COMPLETE Column stats: COMPLETE + tag: -1 + auto parallelism: false Execution mode: vectorized, llap LLAP IO: no inputs + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-08 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-08 + properties: + COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} + bucket_count 2 + bucket_field_name key + column.name.delimiter , + columns key,value + columns.comments + columns.types int:string +#### A masked pattern was here #### + name default.srcbucket_mapjoin_n18 + numFiles 2 + numRows 150 + partition_columns ds + partition_columns.types string + rawDataSize 1598 + serialization.ddl struct srcbucket_mapjoin_n18 { i32 key, string value} + serialization.format 1 + serialization.lib 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 1748 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count 2 + bucket_field_name key + bucketing_version 1 + column.name.delimiter , + columns key,value + columns.comments + columns.types int:string +#### A masked pattern was here #### + name default.srcbucket_mapjoin_n18 + partition_columns ds + partition_columns.types string + serialization.ddl struct srcbucket_mapjoin_n18 { i32 key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.srcbucket_mapjoin_n18 + name: default.srcbucket_mapjoin_n18 + Truncated Path -> Alias: + /srcbucket_mapjoin_n18/ds=2008-04-08 [t1] Map 3 Map Operator Tree: TableScan alias: t2 filterExpr: key is not null (type: boolean) Statistics: Num rows: 150 Data size: 14250 Basic stats: COMPLETE Column stats: COMPLETE + GatherStats: false Filter Operator + isSamplingPred: false predicate: key is not null (type: boolean) Statistics: Num rows: 150 Data size: 14250 Basic stats: COMPLETE Column stats: COMPLETE Select Operator @@ -443,28 +729,105 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 150 Data size: 14250 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 1 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 150 Data size: 14250 Basic stats: COMPLETE Column stats: COMPLETE + tag: 1 value expressions: _col1 (type: string) + auto parallelism: false Execution mode: vectorized, llap LLAP IO: no inputs + Path -> Alias: +#### A masked pattern was here #### + Path -> 
Partition: +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-08 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-08 + properties: + COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} + bucket_count 4 + bucket_field_name key + column.name.delimiter , + columns key,value + columns.comments + columns.types int:string +#### A masked pattern was here #### + name default.srcbucket_mapjoin_part_n20 + numFiles 4 + numRows 150 + partition_columns ds + partition_columns.types string + rawDataSize 1602 + serialization.ddl struct srcbucket_mapjoin_part_n20 { i32 key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 1752 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count 4 + bucket_field_name key + bucketing_version 1 + column.name.delimiter , + columns key,value + columns.comments + columns.types int:string +#### A masked pattern was here #### + name default.srcbucket_mapjoin_part_n20 + partition_columns ds + partition_columns.types string + serialization.ddl struct srcbucket_mapjoin_part_n20 { i32 key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.srcbucket_mapjoin_part_n20 + name: default.srcbucket_mapjoin_part_n20 + Truncated Path -> Alias: + /srcbucket_mapjoin_part_n20/ds=2008-04-08 [t2] Reducer 2 Execution mode: vectorized, llap + Needs Tagging: false Reduce Operator Tree: Select Operator expressions: KEY.reducesinkkey0 (type: int), 
KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: int), KEY.reducesinkkey3 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 220 Data size: 41800 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 Statistics: Num rows: 220 Data size: 41800 Basic stats: COMPLETE Column stats: COMPLETE +#### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + properties: + bucketing_version -1 + columns _col0,_col1,_col2,_col3 + columns.types int:string:int:string + escape.delim \ + hive.serialization.extend.additional.nesting.levels true + serialization.escape.crlf true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false Stage: Stage-0 Fetch Operator @@ -513,7 +876,7 @@ 417 val_417 417 val_417 417 val_417 417 val_417 446 val_446 446 val_446 -PREHOOK: query: explain +PREHOOK: query: explain extended select t1.key, t1.value, t2.key, t2.value from tab_part_n11 t1, tab_n10 t2 where t1.key = t2.key order by t1.key, t1.value, t2.key, t2.value PREHOOK: type: QUERY PREHOOK: Input: default@tab_n10 @@ -521,7 +884,7 @@ PREHOOK: Input: default@tab_part_n11 PREHOOK: Input: default@tab_part_n11@ds=2008-04-08 #### A masked pattern was here #### -POSTHOOK: query: explain +POSTHOOK: query: explain extended select t1.key, t1.value, t2.key, t2.value from tab_part_n11 t1, tab_n10 t2 where t1.key = t2.key order by t1.key, t1.value, t2.key, t2.value POSTHOOK: type: QUERY POSTHOOK: Input: default@tab_n10 @@ -529,6 +892,14 @@ POSTHOOK: Input: default@tab_part_n11 POSTHOOK: Input: default@tab_part_n11@ds=2008-04-08 #### A masked pattern was here 
#### +OPTIMIZED SQL: SELECT * +FROM (SELECT `key`, `value` +FROM `default`.`tab_part_n11` +WHERE `key` IS NOT NULL) AS `t0` +INNER JOIN (SELECT `key`, `value` +FROM `default`.`tab_n10` +WHERE `key` IS NOT NULL) AS `t2` ON `t0`.`key` = `t2`.`key` +ORDER BY `t0`.`key`, `t0`.`value`, `t2`.`key`, `t2`.`value` STAGE DEPENDENCIES: Stage-1 is a root stage Stage-0 depends on stages: Stage-1 @@ -548,7 +919,9 @@ alias: t1 filterExpr: key is not null (type: boolean) Statistics: Num rows: 150 Data size: 14250 Basic stats: COMPLETE Column stats: COMPLETE + GatherStats: false Filter Operator + isSamplingPred: false predicate: key is not null (type: boolean) Statistics: Num rows: 150 Data size: 14250 Basic stats: COMPLETE Column stats: COMPLETE Select Operator @@ -558,27 +931,91 @@ Map Join Operator condition map: Inner Join 0 to 1 + Estimated key counts: Map 3 => 37 keys: 0 _col0 (type: int) 1 _col0 (type: int) outputColumnNames: _col0, _col1, _col2, _col3 input vertices: 1 Map 3 + Position of Big Table: 0 Statistics: Num rows: 220 Data size: 41800 Basic stats: COMPLETE Column stats: COMPLETE + BucketMapJoin: true Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: string) null sort order: zzzz + numBuckets: -1 sort order: ++++ Statistics: Num rows: 220 Data size: 41800 Basic stats: COMPLETE Column stats: COMPLETE + tag: -1 + auto parallelism: false Execution mode: vectorized, llap LLAP IO: no inputs + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-08 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-08 + properties: + COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} + bucket_count 4 + bucket_field_name key + column.name.delimiter , + 
columns key,value + columns.comments + columns.types int:string +#### A masked pattern was here #### + name default.tab_part_n11 + numFiles 4 + numRows 150 + partition_columns ds + partition_columns.types string + rawDataSize 1602 + serialization.ddl struct tab_part_n11 { i32 key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 1752 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count 4 + bucket_field_name key + bucketing_version 2 + column.name.delimiter , + columns key,value + columns.comments + columns.types int:string +#### A masked pattern was here #### + name default.tab_part_n11 + partition_columns ds + partition_columns.types string + serialization.ddl struct tab_part_n11 { i32 key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.tab_part_n11 + name: default.tab_part_n11 + Truncated Path -> Alias: + /tab_part_n11/ds=2008-04-08 [t1] Map 3 Map Operator Tree: TableScan alias: t2 filterExpr: key is not null (type: boolean) Statistics: Num rows: 150 Data size: 14250 Basic stats: COMPLETE Column stats: COMPLETE + GatherStats: false Filter Operator + isSamplingPred: false predicate: key is not null (type: boolean) Statistics: Num rows: 150 Data size: 14250 Basic stats: COMPLETE Column stats: COMPLETE Select Operator @@ -586,28 +1023,105 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 150 Data size: 14250 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: 
_col0 (type: int) Statistics: Num rows: 150 Data size: 14250 Basic stats: COMPLETE Column stats: COMPLETE + tag: 1 value expressions: _col1 (type: string) + auto parallelism: false Execution mode: vectorized, llap LLAP IO: no inputs + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-08 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-08 + properties: + COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} + bucket_count 2 + bucket_field_name key + column.name.delimiter , + columns key,value + columns.comments + columns.types int:string +#### A masked pattern was here #### + name default.tab_n10 + numFiles 2 + numRows 150 + partition_columns ds + partition_columns.types string + rawDataSize 1598 + serialization.ddl struct tab_n10 { i32 key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 1748 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count 2 + bucket_field_name key + bucketing_version 2 + column.name.delimiter , + columns key,value + columns.comments + columns.types int:string +#### A masked pattern was here #### + name default.tab_n10 + partition_columns ds + partition_columns.types string + serialization.ddl struct tab_n10 { i32 key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.tab_n10 + name: default.tab_n10 + Truncated Path -> Alias: + 
/tab_n10/ds=2008-04-08 [t2] Reducer 2 Execution mode: vectorized, llap + Needs Tagging: false Reduce Operator Tree: Select Operator expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: int), KEY.reducesinkkey3 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 220 Data size: 41800 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 Statistics: Num rows: 220 Data size: 41800 Basic stats: COMPLETE Column stats: COMPLETE +#### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + properties: + bucketing_version -1 + columns _col0,_col1,_col2,_col3 + columns.types int:string:int:string + escape.delim \ + hive.serialization.extend.additional.nesting.levels true + serialization.escape.crlf true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false Stage: Stage-0 Fetch Operator @@ -656,336 +1170,3 @@ 417 val_417 417 val_417 417 val_417 417 val_417 446 val_446 446 val_446 -PREHOOK: query: create transactional table acid_ptn_bucket1 (a int, b int) partitioned by(ds string) -clustered by (a) into 2 buckets stored as ORC -TBLPROPERTIES('bucketing_version'='1', 'transactional'='true', 'transactional_properties'='default') -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@acid_ptn_bucket1 -POSTHOOK: query: create transactional table acid_ptn_bucket1 (a int, b int) partitioned by(ds string) -clustered by (a) into 2 buckets stored as ORC -TBLPROPERTIES('bucketing_version'='1', 'transactional'='true', 'transactional_properties'='default') -POSTHOOK: 
type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@acid_ptn_bucket1 -PREHOOK: query: explain extended insert into acid_ptn_bucket1 partition (ds) values(1,2,'today'),(1,3,'today'),(1,4,'yesterday'),(2,2,'yesterday'),(2,3,'today'),(2,4,'today') -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@acid_ptn_bucket1 -POSTHOOK: query: explain extended insert into acid_ptn_bucket1 partition (ds) values(1,2,'today'),(1,3,'today'),(1,4,'yesterday'),(2,2,'yesterday'),(2,3,'today'),(2,4,'today') -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-2 depends on stages: Stage-1 - Stage-0 depends on stages: Stage-2 - Stage-3 depends on stages: Stage-0 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: _dummy_table - Row Limit Per Split: 1 - Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE - GatherStats: false - Select Operator - expressions: array(const struct(1,2,'today'),const struct(1,3,'today'),const struct(1,4,'yesterday'),const struct(2,2,'yesterday'),const struct(2,3,'today'),const struct(2,4,'today')) (type: array>) - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE - UDTF Operator - Statistics: Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE - function name: inline - Select Operator - expressions: col1 (type: int), col2 (type: int), col3 (type: string) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col2 (type: string), _bucket_number (type: string), _col0 (type: int) - null sort order: aaa - sort order: +++ - Map-reduce 
partition columns: _col2 (type: string) - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - tag: -1 - value expressions: _col1 (type: int) - auto parallelism: true - Execution mode: llap - LLAP IO: no inputs - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: dummy_path - input format: org.apache.hadoop.hive.ql.io.NullRowsInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - bucketing_version 2 - column.name.delimiter , - columns - columns.comments - columns.types -#### A masked pattern was here #### - name _dummy_database._dummy_table - serialization.ddl struct _dummy_table { } - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.NullStructSerDe - serde: org.apache.hadoop.hive.serde2.NullStructSerDe - - input format: org.apache.hadoop.hive.ql.io.NullRowsInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - bucketing_version 2 - column.name.delimiter , - columns - columns.comments - columns.types -#### A masked pattern was here #### - name _dummy_database._dummy_table - serialization.ddl struct _dummy_table { } - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.NullStructSerDe - serde: org.apache.hadoop.hive.serde2.NullStructSerDe - name: _dummy_database._dummy_table - name: _dummy_database._dummy_table - Truncated Path -> Alias: -#### A masked pattern was here #### - Reducer 2 - Execution mode: vectorized, llap - Needs Tagging: false - Reduce Operator Tree: - Select Operator - expressions: KEY._col0 (type: int), VALUE._col1 (type: int), KEY._col2 (type: string), KEY._bucket_number (type: string) - outputColumnNames: _col0, _col1, _col2, _bucket_number - File Output Operator - compressed: false - GlobalTableId: 1 -#### A masked pattern was here #### - Dp Sort 
State: PARTITION_BUCKET_SORTED - NumFilesPerFileSink: 1 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat - output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat - properties: - bucket_count 2 - bucket_field_name a - bucketing_version 1 - column.name.delimiter , - columns a,b - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.acid_ptn_bucket1 - partition_columns ds - partition_columns.types string - serialization.ddl struct acid_ptn_bucket1 { i32 a, i32 b} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - transactional true - transactional_properties default -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.acid_ptn_bucket1 - TotalFiles: 1 - Write Type: INSERT - GatherStats: true - MultiFileSpray: false - - Stage: Stage-2 - Dependency Collection - - Stage: Stage-0 - Move Operator - tables: - partition: - ds - replace: false -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat - output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat - properties: - bucket_count 2 - bucket_field_name a - bucketing_version 1 - column.name.delimiter , - columns a,b - columns.comments - columns.types int:int -#### A masked pattern was here #### - name default.acid_ptn_bucket1 - partition_columns ds - partition_columns.types string - serialization.ddl struct acid_ptn_bucket1 { i32 a, i32 b} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - transactional true - transactional_properties default -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.acid_ptn_bucket1 - Write Type: INSERT - - Stage: Stage-3 - Stats Work - Basic Stats Work: -#### A masked 
pattern was here #### - -PREHOOK: query: insert into acid_ptn_bucket1 partition (ds) values(1,2,'today'),(1,3,'today'),(1,4,'yesterday'),(2,2,'yesterday'),(2,3,'today'),(2,4,'today') -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@acid_ptn_bucket1 -POSTHOOK: query: insert into acid_ptn_bucket1 partition (ds) values(1,2,'today'),(1,3,'today'),(1,4,'yesterday'),(2,2,'yesterday'),(2,3,'today'),(2,4,'today') -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@acid_ptn_bucket1@ds=today -POSTHOOK: Output: default@acid_ptn_bucket1@ds=yesterday -POSTHOOK: Lineage: acid_ptn_bucket1 PARTITION(ds=today).a SCRIPT [] -POSTHOOK: Lineage: acid_ptn_bucket1 PARTITION(ds=today).b SCRIPT [] -POSTHOOK: Lineage: acid_ptn_bucket1 PARTITION(ds=yesterday).a SCRIPT [] -POSTHOOK: Lineage: acid_ptn_bucket1 PARTITION(ds=yesterday).b SCRIPT [] -PREHOOK: query: alter table acid_ptn_bucket1 add columns(c int) -PREHOOK: type: ALTERTABLE_ADDCOLS -PREHOOK: Input: default@acid_ptn_bucket1 -PREHOOK: Output: default@acid_ptn_bucket1 -POSTHOOK: query: alter table acid_ptn_bucket1 add columns(c int) -POSTHOOK: type: ALTERTABLE_ADDCOLS -POSTHOOK: Input: default@acid_ptn_bucket1 -POSTHOOK: Output: default@acid_ptn_bucket1 -PREHOOK: query: insert into acid_ptn_bucket1 partition (ds) values(3,2,1000,'yesterday'),(3,3,1001,'today'),(3,4,1002,'yesterday'),(4,2,1003,'today'), (4,3,1004,'yesterday'),(4,4,1005,'today') -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@acid_ptn_bucket1 -POSTHOOK: query: insert into acid_ptn_bucket1 partition (ds) values(3,2,1000,'yesterday'),(3,3,1001,'today'),(3,4,1002,'yesterday'),(4,2,1003,'today'), (4,3,1004,'yesterday'),(4,4,1005,'today') -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@acid_ptn_bucket1@ds=today -POSTHOOK: Output: default@acid_ptn_bucket1@ds=yesterday -POSTHOOK: Lineage: 
acid_ptn_bucket1 PARTITION(ds=today).a SCRIPT [] -POSTHOOK: Lineage: acid_ptn_bucket1 PARTITION(ds=today).b SCRIPT [] -POSTHOOK: Lineage: acid_ptn_bucket1 PARTITION(ds=today).c SCRIPT [] -POSTHOOK: Lineage: acid_ptn_bucket1 PARTITION(ds=yesterday).a SCRIPT [] -POSTHOOK: Lineage: acid_ptn_bucket1 PARTITION(ds=yesterday).b SCRIPT [] -POSTHOOK: Lineage: acid_ptn_bucket1 PARTITION(ds=yesterday).c SCRIPT [] -PREHOOK: query: select ROW__ID, * from acid_ptn_bucket1 where ROW__ID.bucketid = 536870912 and ds='today' -PREHOOK: type: QUERY -PREHOOK: Input: default@acid_ptn_bucket1 -PREHOOK: Input: default@acid_ptn_bucket1@ds=today -#### A masked pattern was here #### -POSTHOOK: query: select ROW__ID, * from acid_ptn_bucket1 where ROW__ID.bucketid = 536870912 and ds='today' -POSTHOOK: type: QUERY -POSTHOOK: Input: default@acid_ptn_bucket1 -POSTHOOK: Input: default@acid_ptn_bucket1@ds=today -#### A masked pattern was here #### -{"writeid":### Masked writeid ###,"bucketid":536870912,"rowid":0} 4 2 1003 today -{"writeid":### Masked writeid ###,"bucketid":536870912,"rowid":1} 4 4 1005 today -{"writeid":### Masked writeid ###,"bucketid":536870912,"rowid":0} 2 3 NULL today -{"writeid":### Masked writeid ###,"bucketid":536870912,"rowid":1} 2 4 NULL today -PREHOOK: query: select ROW__ID, * from acid_ptn_bucket1 where ROW__ID.bucketid = 536936448 and ds='today' -PREHOOK: type: QUERY -PREHOOK: Input: default@acid_ptn_bucket1 -PREHOOK: Input: default@acid_ptn_bucket1@ds=today -#### A masked pattern was here #### -POSTHOOK: query: select ROW__ID, * from acid_ptn_bucket1 where ROW__ID.bucketid = 536936448 and ds='today' -POSTHOOK: type: QUERY -POSTHOOK: Input: default@acid_ptn_bucket1 -POSTHOOK: Input: default@acid_ptn_bucket1@ds=today -#### A masked pattern was here #### -{"writeid":### Masked writeid ###,"bucketid":536936448,"rowid":0} 3 3 1001 today -{"writeid":### Masked writeid ###,"bucketid":536936448,"rowid":0} 1 2 NULL today -{"writeid":### Masked writeid 
###,"bucketid":536936448,"rowid":1} 1 3 NULL today -PREHOOK: query: create table s1 (key int, value int) stored as ORC -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@s1 -POSTHOOK: query: create table s1 (key int, value int) stored as ORC -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@s1 -PREHOOK: query: create table s2 (key int, value int) stored as ORC -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@s2 -POSTHOOK: query: create table s2 (key int, value int) stored as ORC -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@s2 -PREHOOK: query: insert into s1 values(111, 33), (10, 45), (103, 44), (129, 34), (128, 11) -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@s1 -POSTHOOK: query: insert into s1 values(111, 33), (10, 45), (103, 44), (129, 34), (128, 11) -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@s1 -POSTHOOK: Lineage: s1.key SCRIPT [] -POSTHOOK: Lineage: s1.value SCRIPT [] -PREHOOK: query: insert into s2 values(10, 45), (100, 45), (103, 44), (110, 12), (128, 34), (117, 71) -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@s2 -POSTHOOK: query: insert into s2 values(10, 45), (100, 45), (103, 44), (110, 12), (128, 34), (117, 71) -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@s2 -POSTHOOK: Lineage: s2.key SCRIPT [] -POSTHOOK: Lineage: s2.value SCRIPT [] -PREHOOK: query: insert into table acid_ptn_bucket1 partition(ds='today') select key, count(value), key from (select * from s1 union all select * from s2) sub group by key -PREHOOK: type: QUERY -PREHOOK: Input: default@s1 -PREHOOK: Input: default@s2 -PREHOOK: Output: default@acid_ptn_bucket1@ds=today -POSTHOOK: query: insert into table acid_ptn_bucket1 
partition(ds='today') select key, count(value), key from (select * from s1 union all select * from s2) sub group by key -POSTHOOK: type: QUERY -POSTHOOK: Input: default@s1 -POSTHOOK: Input: default@s2 -POSTHOOK: Output: default@acid_ptn_bucket1@ds=today -POSTHOOK: Lineage: acid_ptn_bucket1 PARTITION(ds=today).a EXPRESSION [(s1)s1.FieldSchema(name:key, type:int, comment:null), (s2)s2.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: acid_ptn_bucket1 PARTITION(ds=today).b EXPRESSION [(s1)s1.FieldSchema(name:value, type:int, comment:null), (s2)s2.FieldSchema(name:value, type:int, comment:null), ] -POSTHOOK: Lineage: acid_ptn_bucket1 PARTITION(ds=today).c EXPRESSION [(s1)s1.FieldSchema(name:key, type:int, comment:null), (s2)s2.FieldSchema(name:key, type:int, comment:null), ] -PREHOOK: query: select ROW__ID, * from acid_ptn_bucket1 where ROW__ID.bucketid = 536870912 and ds='today' -PREHOOK: type: QUERY -PREHOOK: Input: default@acid_ptn_bucket1 -PREHOOK: Input: default@acid_ptn_bucket1@ds=today -#### A masked pattern was here #### -POSTHOOK: query: select ROW__ID, * from acid_ptn_bucket1 where ROW__ID.bucketid = 536870912 and ds='today' -POSTHOOK: type: QUERY -POSTHOOK: Input: default@acid_ptn_bucket1 -POSTHOOK: Input: default@acid_ptn_bucket1@ds=today -#### A masked pattern was here #### -{"writeid":### Masked writeid ###,"bucketid":536870912,"rowid":0} 10 2 10 today -{"writeid":### Masked writeid ###,"bucketid":536870912,"rowid":1} 100 1 100 today -{"writeid":### Masked writeid ###,"bucketid":536870912,"rowid":2} 110 1 110 today -{"writeid":### Masked writeid ###,"bucketid":536870912,"rowid":3} 128 2 128 today -{"writeid":### Masked writeid ###,"bucketid":536870912,"rowid":0} 4 2 1003 today -{"writeid":### Masked writeid ###,"bucketid":536870912,"rowid":1} 4 4 1005 today -{"writeid":### Masked writeid ###,"bucketid":536870912,"rowid":0} 2 3 NULL today -{"writeid":### Masked writeid ###,"bucketid":536870912,"rowid":1} 2 4 NULL today -PREHOOK: query: 
select ROW__ID, * from acid_ptn_bucket1 where ROW__ID.bucketid = 536936448 and ds='today' -PREHOOK: type: QUERY -PREHOOK: Input: default@acid_ptn_bucket1 -PREHOOK: Input: default@acid_ptn_bucket1@ds=today -#### A masked pattern was here #### -POSTHOOK: query: select ROW__ID, * from acid_ptn_bucket1 where ROW__ID.bucketid = 536936448 and ds='today' -POSTHOOK: type: QUERY -POSTHOOK: Input: default@acid_ptn_bucket1 -POSTHOOK: Input: default@acid_ptn_bucket1@ds=today -#### A masked pattern was here #### -{"writeid":### Masked writeid ###,"bucketid":536936448,"rowid":0} 103 2 103 today -{"writeid":### Masked writeid ###,"bucketid":536936448,"rowid":1} 111 1 111 today -{"writeid":### Masked writeid ###,"bucketid":536936448,"rowid":2} 117 1 117 today -{"writeid":### Masked writeid ###,"bucketid":536936448,"rowid":3} 129 1 129 today -{"writeid":### Masked writeid ###,"bucketid":536936448,"rowid":0} 3 3 1001 today -{"writeid":### Masked writeid ###,"bucketid":536936448,"rowid":0} 1 2 NULL today -{"writeid":### Masked writeid ###,"bucketid":536936448,"rowid":1} 1 3 NULL today diff --git ql/src/test/results/clientpositive/llap/murmur_hash_migration2.q.out ql/src/test/results/clientpositive/llap/murmur_hash_migration2.q.out new file mode 100644 index 0000000..acbb0ef --- /dev/null +++ ql/src/test/results/clientpositive/llap/murmur_hash_migration2.q.out @@ -0,0 +1,993 @@ +PREHOOK: query: CREATE TABLE srcbucket_mapjoin_n18_stage(key int, value string) partitioned by (ds string) STORED AS TEXTFILE TBLPROPERTIES("bucketing_version" = '1') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@srcbucket_mapjoin_n18_stage +POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_n18_stage(key int, value string) partitioned by (ds string) STORED AS TEXTFILE TBLPROPERTIES("bucketing_version" = '1') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@srcbucket_mapjoin_n18_stage +PREHOOK: query: CREATE TABLE 
srcbucket_mapjoin_part_n20_stage (key int, value string) partitioned by (ds string) STORED AS TEXTFILE TBLPROPERTIES("bucketing_version" = '1') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@srcbucket_mapjoin_part_n20_stage +POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_n20_stage (key int, value string) partitioned by (ds string) STORED AS TEXTFILE TBLPROPERTIES("bucketing_version" = '1') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@srcbucket_mapjoin_part_n20_stage +PREHOOK: query: CREATE TABLE srcbucket_mapjoin_n18(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE TBLPROPERTIES("bucketing_version" = '1') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@srcbucket_mapjoin_n18 +POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_n18(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE TBLPROPERTIES("bucketing_version" = '1') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@srcbucket_mapjoin_n18 +PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_n20 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE TBLPROPERTIES("bucketing_version" = '1') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@srcbucket_mapjoin_part_n20 +POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_n20 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE TBLPROPERTIES("bucketing_version" = '1') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@srcbucket_mapjoin_part_n20 +PREHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_n18_stage partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked 
pattern was here #### +PREHOOK: Output: default@srcbucket_mapjoin_n18_stage +POSTHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_n18_stage partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@srcbucket_mapjoin_n18_stage +POSTHOOK: Output: default@srcbucket_mapjoin_n18_stage@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin_n18_stage partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@srcbucket_mapjoin_n18_stage@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin_n18_stage partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@srcbucket_mapjoin_n18_stage@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_n20_stage partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@srcbucket_mapjoin_part_n20_stage +POSTHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_n20_stage partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@srcbucket_mapjoin_part_n20_stage +POSTHOOK: Output: default@srcbucket_mapjoin_part_n20_stage@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_n20_stage partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@srcbucket_mapjoin_part_n20_stage@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_n20_stage partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here 
#### +POSTHOOK: Output: default@srcbucket_mapjoin_part_n20_stage@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_n20_stage partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@srcbucket_mapjoin_part_n20_stage@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_n20_stage partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@srcbucket_mapjoin_part_n20_stage@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_n20_stage partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@srcbucket_mapjoin_part_n20_stage@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_n20_stage partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@srcbucket_mapjoin_part_n20_stage@ds=2008-04-08 +PREHOOK: query: insert overwrite table srcbucket_mapjoin_n18 partition (ds='2008-04-08') +select key,value from srcbucket_mapjoin_n18_stage limit 150 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcbucket_mapjoin_n18_stage +PREHOOK: Input: default@srcbucket_mapjoin_n18_stage@ds=2008-04-08 +PREHOOK: Output: default@srcbucket_mapjoin_n18@ds=2008-04-08 +POSTHOOK: query: insert overwrite table srcbucket_mapjoin_n18 partition (ds='2008-04-08') +select key,value from srcbucket_mapjoin_n18_stage limit 150 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcbucket_mapjoin_n18_stage +POSTHOOK: Input: default@srcbucket_mapjoin_n18_stage@ds=2008-04-08 +POSTHOOK: Output: default@srcbucket_mapjoin_n18@ds=2008-04-08 +POSTHOOK: Lineage: srcbucket_mapjoin_n18 PARTITION(ds=2008-04-08).key SIMPLE 
[(srcbucket_mapjoin_n18_stage)srcbucket_mapjoin_n18_stage.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: srcbucket_mapjoin_n18 PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin_n18_stage)srcbucket_mapjoin_n18_stage.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: insert overwrite table srcbucket_mapjoin_part_n20 partition (ds='2008-04-08') + select key,value from srcbucket_mapjoin_part_n20_stage limit 150 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcbucket_mapjoin_part_n20_stage +PREHOOK: Input: default@srcbucket_mapjoin_part_n20_stage@ds=2008-04-08 +PREHOOK: Output: default@srcbucket_mapjoin_part_n20@ds=2008-04-08 +POSTHOOK: query: insert overwrite table srcbucket_mapjoin_part_n20 partition (ds='2008-04-08') + select key,value from srcbucket_mapjoin_part_n20_stage limit 150 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcbucket_mapjoin_part_n20_stage +POSTHOOK: Input: default@srcbucket_mapjoin_part_n20_stage@ds=2008-04-08 +POSTHOOK: Output: default@srcbucket_mapjoin_part_n20@ds=2008-04-08 +POSTHOOK: Lineage: srcbucket_mapjoin_part_n20 PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin_part_n20_stage)srcbucket_mapjoin_part_n20_stage.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: srcbucket_mapjoin_part_n20 PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin_part_n20_stage)srcbucket_mapjoin_part_n20_stage.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: analyze table srcbucket_mapjoin_n18 compute statistics for columns +PREHOOK: type: ANALYZE_TABLE +PREHOOK: Input: default@srcbucket_mapjoin_n18 +PREHOOK: Input: default@srcbucket_mapjoin_n18@ds=2008-04-08 +PREHOOK: Output: default@srcbucket_mapjoin_n18 +PREHOOK: Output: default@srcbucket_mapjoin_n18@ds=2008-04-08 +#### A masked pattern was here #### +POSTHOOK: query: analyze table srcbucket_mapjoin_n18 compute statistics for columns +POSTHOOK: type: ANALYZE_TABLE +POSTHOOK: Input: 
default@srcbucket_mapjoin_n18 +POSTHOOK: Input: default@srcbucket_mapjoin_n18@ds=2008-04-08 +POSTHOOK: Output: default@srcbucket_mapjoin_n18 +POSTHOOK: Output: default@srcbucket_mapjoin_n18@ds=2008-04-08 +#### A masked pattern was here #### +PREHOOK: query: analyze table srcbucket_mapjoin_part_n20 compute statistics for columns +PREHOOK: type: ANALYZE_TABLE +PREHOOK: Input: default@srcbucket_mapjoin_part_n20 +PREHOOK: Input: default@srcbucket_mapjoin_part_n20@ds=2008-04-08 +PREHOOK: Output: default@srcbucket_mapjoin_part_n20 +PREHOOK: Output: default@srcbucket_mapjoin_part_n20@ds=2008-04-08 +#### A masked pattern was here #### +POSTHOOK: query: analyze table srcbucket_mapjoin_part_n20 compute statistics for columns +POSTHOOK: type: ANALYZE_TABLE +POSTHOOK: Input: default@srcbucket_mapjoin_part_n20 +POSTHOOK: Input: default@srcbucket_mapjoin_part_n20@ds=2008-04-08 +POSTHOOK: Output: default@srcbucket_mapjoin_part_n20 +POSTHOOK: Output: default@srcbucket_mapjoin_part_n20@ds=2008-04-08 +#### A masked pattern was here #### +PREHOOK: query: CREATE TABLE tab_part_n11 (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@tab_part_n11 +POSTHOOK: query: CREATE TABLE tab_part_n11 (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@tab_part_n11 +PREHOOK: query: explain +insert overwrite table tab_part_n11 partition (ds='2008-04-08') + select key,value from srcbucket_mapjoin_part_n20 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcbucket_mapjoin_part_n20 +PREHOOK: Input: default@srcbucket_mapjoin_part_n20@ds=2008-04-08 +PREHOOK: Output: default@tab_part_n11@ds=2008-04-08 +POSTHOOK: query: explain +insert overwrite table tab_part_n11 partition (ds='2008-04-08') + select key,value from 
srcbucket_mapjoin_part_n20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcbucket_mapjoin_part_n20 +POSTHOOK: Input: default@srcbucket_mapjoin_part_n20@ds=2008-04-08 +POSTHOOK: Output: default@tab_part_n11@ds=2008-04-08 +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcbucket_mapjoin_part_n20 + Statistics: Num rows: 150 Data size: 14250 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: key (type: int), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 150 Data size: 14250 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: int) + null sort order: a + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 150 Data size: 14250 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col1 (type: string) + Execution mode: vectorized, llap + LLAP IO: no inputs + Reducer 2 + Execution mode: vectorized, llap + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 150 Data size: 14250 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 150 Data size: 14250 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.tab_part_n11 + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + 
partition: + ds 2008-04-08 + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.tab_part_n11 + + Stage: Stage-3 + Stats Work + Basic Stats Work: + +PREHOOK: query: insert overwrite table tab_part_n11 partition (ds='2008-04-08') + select key,value from srcbucket_mapjoin_part_n20 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcbucket_mapjoin_part_n20 +PREHOOK: Input: default@srcbucket_mapjoin_part_n20@ds=2008-04-08 +PREHOOK: Output: default@tab_part_n11@ds=2008-04-08 +POSTHOOK: query: insert overwrite table tab_part_n11 partition (ds='2008-04-08') + select key,value from srcbucket_mapjoin_part_n20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcbucket_mapjoin_part_n20 +POSTHOOK: Input: default@srcbucket_mapjoin_part_n20@ds=2008-04-08 +POSTHOOK: Output: default@tab_part_n11@ds=2008-04-08 +POSTHOOK: Lineage: tab_part_n11 PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin_part_n20)srcbucket_mapjoin_part_n20.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: tab_part_n11 PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin_part_n20)srcbucket_mapjoin_part_n20.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: CREATE TABLE tab_n10(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@tab_n10 +POSTHOOK: query: CREATE TABLE tab_n10(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@tab_n10 +PREHOOK: query: explain +insert overwrite table tab_n10 partition (ds='2008-04-08') + select key,value from srcbucket_mapjoin_n18 +PREHOOK: type: QUERY +PREHOOK: Input: 
default@srcbucket_mapjoin_n18 +PREHOOK: Input: default@srcbucket_mapjoin_n18@ds=2008-04-08 +PREHOOK: Output: default@tab_n10@ds=2008-04-08 +POSTHOOK: query: explain +insert overwrite table tab_n10 partition (ds='2008-04-08') + select key,value from srcbucket_mapjoin_n18 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcbucket_mapjoin_n18 +POSTHOOK: Input: default@srcbucket_mapjoin_n18@ds=2008-04-08 +POSTHOOK: Output: default@tab_n10@ds=2008-04-08 +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcbucket_mapjoin_n18 + Statistics: Num rows: 150 Data size: 14250 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: key (type: int), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 150 Data size: 14250 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: int) + null sort order: a + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 150 Data size: 14250 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col1 (type: string) + Execution mode: vectorized, llap + LLAP IO: no inputs + Reducer 2 + Execution mode: vectorized, llap + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 150 Data size: 14250 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 150 Data size: 14250 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.tab_n10 + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + partition: + ds 2008-04-08 + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.tab_n10 + + Stage: Stage-3 + Stats Work + Basic Stats Work: + +PREHOOK: query: insert overwrite table tab_n10 partition (ds='2008-04-08') + select key,value from srcbucket_mapjoin_n18 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcbucket_mapjoin_n18 +PREHOOK: Input: default@srcbucket_mapjoin_n18@ds=2008-04-08 +PREHOOK: Output: default@tab_n10@ds=2008-04-08 +POSTHOOK: query: insert overwrite table tab_n10 partition (ds='2008-04-08') + select key,value from srcbucket_mapjoin_n18 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcbucket_mapjoin_n18 +POSTHOOK: Input: default@srcbucket_mapjoin_n18@ds=2008-04-08 +POSTHOOK: Output: default@tab_n10@ds=2008-04-08 +POSTHOOK: Lineage: tab_n10 PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin_n18)srcbucket_mapjoin_n18.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: tab_n10 PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin_n18)srcbucket_mapjoin_n18.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: analyze table tab_part_n11 compute statistics for columns +PREHOOK: type: ANALYZE_TABLE +PREHOOK: Input: default@tab_part_n11 +PREHOOK: Input: default@tab_part_n11@ds=2008-04-08 +PREHOOK: Output: default@tab_part_n11 +PREHOOK: Output: default@tab_part_n11@ds=2008-04-08 +#### A masked pattern was here #### +POSTHOOK: query: analyze table tab_part_n11 compute statistics for columns +POSTHOOK: type: ANALYZE_TABLE +POSTHOOK: Input: default@tab_part_n11 +POSTHOOK: Input: 
default@tab_part_n11@ds=2008-04-08 +POSTHOOK: Output: default@tab_part_n11 +POSTHOOK: Output: default@tab_part_n11@ds=2008-04-08 +#### A masked pattern was here #### +PREHOOK: query: analyze table tab_n10 compute statistics for columns +PREHOOK: type: ANALYZE_TABLE +PREHOOK: Input: default@tab_n10 +PREHOOK: Input: default@tab_n10@ds=2008-04-08 +PREHOOK: Output: default@tab_n10 +PREHOOK: Output: default@tab_n10@ds=2008-04-08 +#### A masked pattern was here #### +POSTHOOK: query: analyze table tab_n10 compute statistics for columns +POSTHOOK: type: ANALYZE_TABLE +POSTHOOK: Input: default@tab_n10 +POSTHOOK: Input: default@tab_n10@ds=2008-04-08 +POSTHOOK: Output: default@tab_n10 +POSTHOOK: Output: default@tab_n10@ds=2008-04-08 +#### A masked pattern was here #### +PREHOOK: query: explain +select t1.key, t1.value, t2.key, t2.value from srcbucket_mapjoin_n18 t1, srcbucket_mapjoin_part_n20 t2 where t1.key = t2.key order by t1.key, t1.value, t2.key, t2.value +PREHOOK: type: QUERY +PREHOOK: Input: default@srcbucket_mapjoin_n18 +PREHOOK: Input: default@srcbucket_mapjoin_n18@ds=2008-04-08 +PREHOOK: Input: default@srcbucket_mapjoin_part_n20 +PREHOOK: Input: default@srcbucket_mapjoin_part_n20@ds=2008-04-08 +#### A masked pattern was here #### +POSTHOOK: query: explain +select t1.key, t1.value, t2.key, t2.value from srcbucket_mapjoin_n18 t1, srcbucket_mapjoin_part_n20 t2 where t1.key = t2.key order by t1.key, t1.value, t2.key, t2.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcbucket_mapjoin_n18 +POSTHOOK: Input: default@srcbucket_mapjoin_n18@ds=2008-04-08 +POSTHOOK: Input: default@srcbucket_mapjoin_part_n20 +POSTHOOK: Input: default@srcbucket_mapjoin_part_n20@ds=2008-04-08 +#### A masked pattern was here #### +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Edges: + Map 1 <- Map 3 (CUSTOM_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was 
here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: t1 + filterExpr: key is not null (type: boolean) + Statistics: Num rows: 150 Data size: 14250 Basic stats: COMPLETE Column stats: COMPLETE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 150 Data size: 14250 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: key (type: int), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 150 Data size: 14250 Basic stats: COMPLETE Column stats: COMPLETE + Map Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 _col0 (type: int) + 1 _col0 (type: int) + outputColumnNames: _col0, _col1, _col2, _col3 + input vertices: + 1 Map 3 + Statistics: Num rows: 220 Data size: 41800 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: string) + null sort order: zzzz + sort order: ++++ + Statistics: Num rows: 220 Data size: 41800 Basic stats: COMPLETE Column stats: COMPLETE + Execution mode: vectorized, llap + LLAP IO: no inputs + Map 3 + Map Operator Tree: + TableScan + alias: t2 + filterExpr: key is not null (type: boolean) + Statistics: Num rows: 150 Data size: 14250 Basic stats: COMPLETE Column stats: COMPLETE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 150 Data size: 14250 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: key (type: int), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 150 Data size: 14250 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: int) + null sort order: z + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 150 Data size: 14250 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col1 (type: string) + Execution mode: vectorized, 
llap + LLAP IO: no inputs + Reducer 2 + Execution mode: vectorized, llap + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: int), KEY.reducesinkkey3 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 220 Data size: 41800 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 220 Data size: 41800 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select t1.key, t1.value, t2.key, t2.value from srcbucket_mapjoin_n18 t1, srcbucket_mapjoin_part_n20 t2 where t1.key = t2.key order by t1.key, t1.value, t2.key, t2.value +PREHOOK: type: QUERY +PREHOOK: Input: default@srcbucket_mapjoin_n18 +PREHOOK: Input: default@srcbucket_mapjoin_n18@ds=2008-04-08 +PREHOOK: Input: default@srcbucket_mapjoin_part_n20 +PREHOOK: Input: default@srcbucket_mapjoin_part_n20@ds=2008-04-08 +#### A masked pattern was here #### +POSTHOOK: query: select t1.key, t1.value, t2.key, t2.value from srcbucket_mapjoin_n18 t1, srcbucket_mapjoin_part_n20 t2 where t1.key = t2.key order by t1.key, t1.value, t2.key, t2.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcbucket_mapjoin_n18 +POSTHOOK: Input: default@srcbucket_mapjoin_n18@ds=2008-04-08 +POSTHOOK: Input: default@srcbucket_mapjoin_part_n20 +POSTHOOK: Input: default@srcbucket_mapjoin_part_n20@ds=2008-04-08 +#### A masked pattern was here #### +82 val_82 82 val_82 +86 val_86 86 val_86 +145 val_145 145 val_145 +152 val_152 152 val_152 +152 val_152 152 val_152 +219 val_219 219 val_219 +219 val_219 219 val_219 +255 val_255 255 val_255 +255 val_255 255 val_255 +273 val_273 
273 val_273 +273 val_273 273 val_273 +273 val_273 273 val_273 +277 val_277 277 val_277 +277 val_277 277 val_277 +277 val_277 277 val_277 +277 val_277 277 val_277 +369 val_369 369 val_369 +369 val_369 369 val_369 +369 val_369 369 val_369 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +417 val_417 417 val_417 +417 val_417 417 val_417 +417 val_417 417 val_417 +446 val_446 446 val_446 +PREHOOK: query: explain +select t1.key, t1.value, t2.key, t2.value from tab_part_n11 t1, tab_n10 t2 where t1.key = t2.key order by t1.key, t1.value, t2.key, t2.value +PREHOOK: type: QUERY +PREHOOK: Input: default@tab_n10 +PREHOOK: Input: default@tab_n10@ds=2008-04-08 +PREHOOK: Input: default@tab_part_n11 +PREHOOK: Input: default@tab_part_n11@ds=2008-04-08 +#### A masked pattern was here #### +POSTHOOK: query: explain +select t1.key, t1.value, t2.key, t2.value from tab_part_n11 t1, tab_n10 t2 where t1.key = t2.key order by t1.key, t1.value, t2.key, t2.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tab_n10 +POSTHOOK: Input: default@tab_n10@ds=2008-04-08 +POSTHOOK: Input: default@tab_part_n11 +POSTHOOK: Input: default@tab_part_n11@ds=2008-04-08 +#### A masked pattern was here #### +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Edges: + Map 1 <- Map 3 (CUSTOM_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: t1 + filterExpr: key is not null (type: boolean) + Statistics: Num rows: 150 Data size: 14250 Basic stats: COMPLETE Column stats: COMPLETE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 150 Data size: 14250 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: key (type: int), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 150 Data 
size: 14250 Basic stats: COMPLETE Column stats: COMPLETE + Map Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 _col0 (type: int) + 1 _col0 (type: int) + outputColumnNames: _col0, _col1, _col2, _col3 + input vertices: + 1 Map 3 + Statistics: Num rows: 220 Data size: 41800 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: string) + null sort order: zzzz + sort order: ++++ + Statistics: Num rows: 220 Data size: 41800 Basic stats: COMPLETE Column stats: COMPLETE + Execution mode: vectorized, llap + LLAP IO: no inputs + Map 3 + Map Operator Tree: + TableScan + alias: t2 + filterExpr: key is not null (type: boolean) + Statistics: Num rows: 150 Data size: 14250 Basic stats: COMPLETE Column stats: COMPLETE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 150 Data size: 14250 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: key (type: int), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 150 Data size: 14250 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: int) + null sort order: z + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 150 Data size: 14250 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col1 (type: string) + Execution mode: vectorized, llap + LLAP IO: no inputs + Reducer 2 + Execution mode: vectorized, llap + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: int), KEY.reducesinkkey3 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 220 Data size: 41800 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 220 Data size: 41800 Basic stats: 
COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select t1.key, t1.value, t2.key, t2.value from tab_part_n11 t1, tab_n10 t2 where t1.key = t2.key order by t1.key, t1.value, t2.key, t2.value +PREHOOK: type: QUERY +PREHOOK: Input: default@tab_n10 +PREHOOK: Input: default@tab_n10@ds=2008-04-08 +PREHOOK: Input: default@tab_part_n11 +PREHOOK: Input: default@tab_part_n11@ds=2008-04-08 +#### A masked pattern was here #### +POSTHOOK: query: select t1.key, t1.value, t2.key, t2.value from tab_part_n11 t1, tab_n10 t2 where t1.key = t2.key order by t1.key, t1.value, t2.key, t2.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tab_n10 +POSTHOOK: Input: default@tab_n10@ds=2008-04-08 +POSTHOOK: Input: default@tab_part_n11 +POSTHOOK: Input: default@tab_part_n11@ds=2008-04-08 +#### A masked pattern was here #### +82 val_82 82 val_82 +86 val_86 86 val_86 +145 val_145 145 val_145 +152 val_152 152 val_152 +152 val_152 152 val_152 +219 val_219 219 val_219 +219 val_219 219 val_219 +255 val_255 255 val_255 +255 val_255 255 val_255 +273 val_273 273 val_273 +273 val_273 273 val_273 +273 val_273 273 val_273 +277 val_277 277 val_277 +277 val_277 277 val_277 +277 val_277 277 val_277 +277 val_277 277 val_277 +369 val_369 369 val_369 +369 val_369 369 val_369 +369 val_369 369 val_369 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +417 val_417 417 val_417 +417 val_417 417 val_417 +417 val_417 417 val_417 +446 val_446 446 val_446 +PREHOOK: query: create transactional table acid_ptn_bucket1 (a int, b int) partitioned by(ds string) +clustered by (a) into 2 buckets stored as ORC +TBLPROPERTIES('bucketing_version'='1', 'transactional'='true', 
'transactional_properties'='default') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@acid_ptn_bucket1 +POSTHOOK: query: create transactional table acid_ptn_bucket1 (a int, b int) partitioned by(ds string) +clustered by (a) into 2 buckets stored as ORC +TBLPROPERTIES('bucketing_version'='1', 'transactional'='true', 'transactional_properties'='default') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@acid_ptn_bucket1 +PREHOOK: query: explain extended insert into acid_ptn_bucket1 partition (ds) values(1,2,'today'),(1,3,'today'),(1,4,'yesterday'),(2,2,'yesterday'),(2,3,'today'),(2,4,'today') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@acid_ptn_bucket1 +POSTHOOK: query: explain extended insert into acid_ptn_bucket1 partition (ds) values(1,2,'today'),(1,3,'today'),(1,4,'yesterday'),(2,2,'yesterday'),(2,3,'today'),(2,4,'today') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: _dummy_table + Row Limit Per Split: 1 + Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE + GatherStats: false + Select Operator + expressions: array(const struct(1,2,'today'),const struct(1,3,'today'),const struct(1,4,'yesterday'),const struct(2,2,'yesterday'),const struct(2,3,'today'),const struct(2,4,'today')) (type: array>) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE + UDTF Operator + Statistics: Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE + 
function name: inline + Select Operator + expressions: col1 (type: int), col2 (type: int), col3 (type: string) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + bucketingVersion: 1 + key expressions: _col2 (type: string), _bucket_number (type: string), _col0 (type: int) + null sort order: aaa + numBuckets: 2 + sort order: +++ + Map-reduce partition columns: _col2 (type: string) + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + tag: -1 + value expressions: _col1 (type: int) + auto parallelism: true + Execution mode: llap + LLAP IO: no inputs + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: dummy_path + input format: org.apache.hadoop.hive.ql.io.NullRowsInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count -1 + bucketing_version 2 + column.name.delimiter , + columns + columns.comments + columns.types +#### A masked pattern was here #### + name _dummy_database._dummy_table + serialization.ddl struct _dummy_table { } + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.NullStructSerDe + serde: org.apache.hadoop.hive.serde2.NullStructSerDe + + input format: org.apache.hadoop.hive.ql.io.NullRowsInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count -1 + bucketing_version 2 + column.name.delimiter , + columns + columns.comments + columns.types +#### A masked pattern was here #### + name _dummy_database._dummy_table + serialization.ddl struct _dummy_table { } + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.NullStructSerDe + serde: org.apache.hadoop.hive.serde2.NullStructSerDe + name: _dummy_database._dummy_table + name: _dummy_database._dummy_table + Truncated Path -> Alias: 
+#### A masked pattern was here #### + Reducer 2 + Execution mode: vectorized, llap + Needs Tagging: false + Reduce Operator Tree: + Select Operator + expressions: KEY._col0 (type: int), VALUE._col1 (type: int), KEY._col2 (type: string), KEY._bucket_number (type: string) + outputColumnNames: _col0, _col1, _col2, _bucket_number + File Output Operator + compressed: false + GlobalTableId: 1 +#### A masked pattern was here #### + Dp Sort State: PARTITION_BUCKET_SORTED + NumFilesPerFileSink: 1 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count 2 + bucket_field_name a + bucketing_version 1 + column.name.delimiter , + columns a,b + columns.comments + columns.types int:int +#### A masked pattern was here #### + name default.acid_ptn_bucket1 + partition_columns ds + partition_columns.types string + serialization.ddl struct acid_ptn_bucket1 { i32 a, i32 b} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + transactional true + transactional_properties default +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.acid_ptn_bucket1 + TotalFiles: 1 + Write Type: INSERT + GatherStats: true + MultiFileSpray: false + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + partition: + ds + replace: false +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count 2 + bucket_field_name a + bucketing_version 1 + column.name.delimiter , + columns a,b + columns.comments + columns.types int:int +#### A masked pattern was here #### + name default.acid_ptn_bucket1 + partition_columns ds + 
partition_columns.types string + serialization.ddl struct acid_ptn_bucket1 { i32 a, i32 b} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + transactional true + transactional_properties default +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.acid_ptn_bucket1 + Write Type: INSERT + + Stage: Stage-3 + Stats Work + Basic Stats Work: +#### A masked pattern was here #### + +PREHOOK: query: insert into acid_ptn_bucket1 partition (ds) values(1,2,'today'),(1,3,'today'),(1,4,'yesterday'),(2,2,'yesterday'),(2,3,'today'),(2,4,'today') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@acid_ptn_bucket1 +POSTHOOK: query: insert into acid_ptn_bucket1 partition (ds) values(1,2,'today'),(1,3,'today'),(1,4,'yesterday'),(2,2,'yesterday'),(2,3,'today'),(2,4,'today') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@acid_ptn_bucket1@ds=today +POSTHOOK: Output: default@acid_ptn_bucket1@ds=yesterday +POSTHOOK: Lineage: acid_ptn_bucket1 PARTITION(ds=today).a SCRIPT [] +POSTHOOK: Lineage: acid_ptn_bucket1 PARTITION(ds=today).b SCRIPT [] +POSTHOOK: Lineage: acid_ptn_bucket1 PARTITION(ds=yesterday).a SCRIPT [] +POSTHOOK: Lineage: acid_ptn_bucket1 PARTITION(ds=yesterday).b SCRIPT [] +PREHOOK: query: alter table acid_ptn_bucket1 add columns(c int) +PREHOOK: type: ALTERTABLE_ADDCOLS +PREHOOK: Input: default@acid_ptn_bucket1 +PREHOOK: Output: default@acid_ptn_bucket1 +POSTHOOK: query: alter table acid_ptn_bucket1 add columns(c int) +POSTHOOK: type: ALTERTABLE_ADDCOLS +POSTHOOK: Input: default@acid_ptn_bucket1 +POSTHOOK: Output: default@acid_ptn_bucket1 +PREHOOK: query: insert into acid_ptn_bucket1 partition (ds) values(3,2,1000,'yesterday'),(3,3,1001,'today'),(3,4,1002,'yesterday'),(4,2,1003,'today'), (4,3,1004,'yesterday'),(4,4,1005,'today') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table 
+PREHOOK: Output: default@acid_ptn_bucket1 +POSTHOOK: query: insert into acid_ptn_bucket1 partition (ds) values(3,2,1000,'yesterday'),(3,3,1001,'today'),(3,4,1002,'yesterday'),(4,2,1003,'today'), (4,3,1004,'yesterday'),(4,4,1005,'today') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@acid_ptn_bucket1@ds=today +POSTHOOK: Output: default@acid_ptn_bucket1@ds=yesterday +POSTHOOK: Lineage: acid_ptn_bucket1 PARTITION(ds=today).a SCRIPT [] +POSTHOOK: Lineage: acid_ptn_bucket1 PARTITION(ds=today).b SCRIPT [] +POSTHOOK: Lineage: acid_ptn_bucket1 PARTITION(ds=today).c SCRIPT [] +POSTHOOK: Lineage: acid_ptn_bucket1 PARTITION(ds=yesterday).a SCRIPT [] +POSTHOOK: Lineage: acid_ptn_bucket1 PARTITION(ds=yesterday).b SCRIPT [] +POSTHOOK: Lineage: acid_ptn_bucket1 PARTITION(ds=yesterday).c SCRIPT [] +PREHOOK: query: select ROW__ID, * from acid_ptn_bucket1 where ROW__ID.bucketid = 536870912 and ds='today' +PREHOOK: type: QUERY +PREHOOK: Input: default@acid_ptn_bucket1 +PREHOOK: Input: default@acid_ptn_bucket1@ds=today +#### A masked pattern was here #### +POSTHOOK: query: select ROW__ID, * from acid_ptn_bucket1 where ROW__ID.bucketid = 536870912 and ds='today' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@acid_ptn_bucket1 +POSTHOOK: Input: default@acid_ptn_bucket1@ds=today +#### A masked pattern was here #### +{"writeid":### Masked writeid ###,"bucketid":536870912,"rowid":0} 4 2 1003 today +{"writeid":### Masked writeid ###,"bucketid":536870912,"rowid":1} 4 4 1005 today +{"writeid":### Masked writeid ###,"bucketid":536870912,"rowid":0} 2 3 NULL today +{"writeid":### Masked writeid ###,"bucketid":536870912,"rowid":1} 2 4 NULL today +PREHOOK: query: select ROW__ID, * from acid_ptn_bucket1 where ROW__ID.bucketid = 536936448 and ds='today' +PREHOOK: type: QUERY +PREHOOK: Input: default@acid_ptn_bucket1 +PREHOOK: Input: default@acid_ptn_bucket1@ds=today +#### A masked pattern was here #### +POSTHOOK: query: select ROW__ID, * from 
acid_ptn_bucket1 where ROW__ID.bucketid = 536936448 and ds='today' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@acid_ptn_bucket1 +POSTHOOK: Input: default@acid_ptn_bucket1@ds=today +#### A masked pattern was here #### +{"writeid":### Masked writeid ###,"bucketid":536936448,"rowid":0} 3 3 1001 today +{"writeid":### Masked writeid ###,"bucketid":536936448,"rowid":0} 1 2 NULL today +{"writeid":### Masked writeid ###,"bucketid":536936448,"rowid":1} 1 3 NULL today +PREHOOK: query: create table s1 (key int, value int) stored as ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@s1 +POSTHOOK: query: create table s1 (key int, value int) stored as ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@s1 +PREHOOK: query: create table s2 (key int, value int) stored as ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@s2 +POSTHOOK: query: create table s2 (key int, value int) stored as ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@s2 +PREHOOK: query: insert into s1 values(111, 33), (10, 45), (103, 44), (129, 34), (128, 11) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@s1 +POSTHOOK: query: insert into s1 values(111, 33), (10, 45), (103, 44), (129, 34), (128, 11) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@s1 +POSTHOOK: Lineage: s1.key SCRIPT [] +POSTHOOK: Lineage: s1.value SCRIPT [] +PREHOOK: query: insert into s2 values(10, 45), (100, 45), (103, 44), (110, 12), (128, 34), (117, 71) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@s2 +POSTHOOK: query: insert into s2 values(10, 45), (100, 45), (103, 44), (110, 12), (128, 34), (117, 71) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@s2 +POSTHOOK: Lineage: 
s2.key SCRIPT [] +POSTHOOK: Lineage: s2.value SCRIPT [] +PREHOOK: query: insert into table acid_ptn_bucket1 partition(ds='today') select key, count(value), key from (select * from s1 union all select * from s2) sub group by key +PREHOOK: type: QUERY +PREHOOK: Input: default@s1 +PREHOOK: Input: default@s2 +PREHOOK: Output: default@acid_ptn_bucket1@ds=today +POSTHOOK: query: insert into table acid_ptn_bucket1 partition(ds='today') select key, count(value), key from (select * from s1 union all select * from s2) sub group by key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@s1 +POSTHOOK: Input: default@s2 +POSTHOOK: Output: default@acid_ptn_bucket1@ds=today +POSTHOOK: Lineage: acid_ptn_bucket1 PARTITION(ds=today).a EXPRESSION [(s1)s1.FieldSchema(name:key, type:int, comment:null), (s2)s2.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: acid_ptn_bucket1 PARTITION(ds=today).b EXPRESSION [(s1)s1.FieldSchema(name:value, type:int, comment:null), (s2)s2.FieldSchema(name:value, type:int, comment:null), ] +POSTHOOK: Lineage: acid_ptn_bucket1 PARTITION(ds=today).c EXPRESSION [(s1)s1.FieldSchema(name:key, type:int, comment:null), (s2)s2.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select ROW__ID, * from acid_ptn_bucket1 where ROW__ID.bucketid = 536870912 and ds='today' +PREHOOK: type: QUERY +PREHOOK: Input: default@acid_ptn_bucket1 +PREHOOK: Input: default@acid_ptn_bucket1@ds=today +#### A masked pattern was here #### +POSTHOOK: query: select ROW__ID, * from acid_ptn_bucket1 where ROW__ID.bucketid = 536870912 and ds='today' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@acid_ptn_bucket1 +POSTHOOK: Input: default@acid_ptn_bucket1@ds=today +#### A masked pattern was here #### +{"writeid":### Masked writeid ###,"bucketid":536870912,"rowid":0} 10 2 10 today +{"writeid":### Masked writeid ###,"bucketid":536870912,"rowid":1} 100 1 100 today +{"writeid":### Masked writeid ###,"bucketid":536870912,"rowid":2} 110 1 110 today +{"writeid":### 
Masked writeid ###,"bucketid":536870912,"rowid":3} 128 2 128 today +{"writeid":### Masked writeid ###,"bucketid":536870912,"rowid":0} 4 2 1003 today +{"writeid":### Masked writeid ###,"bucketid":536870912,"rowid":1} 4 4 1005 today +{"writeid":### Masked writeid ###,"bucketid":536870912,"rowid":0} 2 3 NULL today +{"writeid":### Masked writeid ###,"bucketid":536870912,"rowid":1} 2 4 NULL today +PREHOOK: query: select ROW__ID, * from acid_ptn_bucket1 where ROW__ID.bucketid = 536936448 and ds='today' +PREHOOK: type: QUERY +PREHOOK: Input: default@acid_ptn_bucket1 +PREHOOK: Input: default@acid_ptn_bucket1@ds=today +#### A masked pattern was here #### +POSTHOOK: query: select ROW__ID, * from acid_ptn_bucket1 where ROW__ID.bucketid = 536936448 and ds='today' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@acid_ptn_bucket1 +POSTHOOK: Input: default@acid_ptn_bucket1@ds=today +#### A masked pattern was here #### +{"writeid":### Masked writeid ###,"bucketid":536936448,"rowid":0} 103 2 103 today +{"writeid":### Masked writeid ###,"bucketid":536936448,"rowid":1} 111 1 111 today +{"writeid":### Masked writeid ###,"bucketid":536936448,"rowid":2} 117 1 117 today +{"writeid":### Masked writeid ###,"bucketid":536936448,"rowid":3} 129 1 129 today +{"writeid":### Masked writeid ###,"bucketid":536936448,"rowid":0} 3 3 1001 today +{"writeid":### Masked writeid ###,"bucketid":536936448,"rowid":0} 1 2 NULL today +{"writeid":### Masked writeid ###,"bucketid":536936448,"rowid":1} 1 3 NULL today diff --git ql/src/test/results/clientpositive/llap/optimize_nullscan.q.out ql/src/test/results/clientpositive/llap/optimize_nullscan.q.out index bd0ec65..77ddaa2 100644 --- ql/src/test/results/clientpositive/llap/optimize_nullscan.q.out +++ ql/src/test/results/clientpositive/llap/optimize_nullscan.q.out @@ -75,8 +75,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 
(type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE @@ -100,6 +102,7 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -110,6 +113,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0 columns.types bigint escape.delim \ @@ -183,8 +187,10 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 87 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 1 Data size: 87 Basic stats: COMPLETE Column stats: COMPLETE @@ -258,8 +264,10 @@ Number of rows: 0 Statistics: Num rows: 1 Data size: 87 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 1 Data size: 87 Basic stats: COMPLETE Column stats: COMPLETE @@ -484,6 +492,7 @@ Position of Big Table: 0 Statistics: Num rows: 1 Data size: 174 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -494,6 +503,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types string:string escape.delim \ @@ -580,7 +590,9 @@ outputColumnNames: 
_col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -657,7 +669,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -879,6 +893,7 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -889,6 +904,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0 columns.types bigint escape.delim \ @@ -910,6 +926,7 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -920,6 +937,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0 columns.types bigint escape.delim \ @@ -1008,7 +1026,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 87 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 87 Basic stats: COMPLETE Column stats: COMPLETE tag: 0 @@ -1082,7 +1102,9 @@ Number of rows: 0 Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + 
bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: COMPLETE tag: 1 @@ -1307,6 +1329,7 @@ Position of Big Table: 0 Statistics: Num rows: 1 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1317,6 +1340,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types string:string escape.delim \ @@ -1394,6 +1418,7 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 87 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1404,6 +1429,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0 columns.types string escape.delim \ @@ -1481,8 +1507,10 @@ predicate: false (type: boolean) Statistics: Num rows: 1 Data size: 87 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: key (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: key (type: string) Statistics: Num rows: 1 Data size: 87 Basic stats: COMPLETE Column stats: COMPLETE @@ -1553,8 +1581,10 @@ predicate: false (type: boolean) Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: key (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: key (type: string) Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE @@ -1576,6 +1606,7 @@ Position of Big Table: 0 
Statistics: Num rows: 1 Data size: 95 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1586,6 +1617,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0 columns.types string escape.delim \ @@ -1649,8 +1681,10 @@ predicate: false (type: boolean) Statistics: Num rows: 1 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: value (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: value (type: string) Statistics: Num rows: 1 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE @@ -1722,8 +1756,10 @@ predicate: false (type: boolean) Statistics: Num rows: 1 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: value (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: value (type: string) Statistics: Num rows: 1 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE @@ -1802,6 +1838,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1812,6 +1849,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types string:string:string:string escape.delim \ @@ -1877,7 +1915,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + 
numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -1947,6 +1987,7 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1957,6 +1998,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0 columns.types bigint escape.delim \ diff --git ql/src/test/results/clientpositive/llap/ppd_union_view.q.out ql/src/test/results/clientpositive/llap/ppd_union_view.q.out index 98b03e2..d16d28b 100644 --- ql/src/test/results/clientpositive/llap/ppd_union_view.q.out +++ ql/src/test/results/clientpositive/llap/ppd_union_view.q.out @@ -195,6 +195,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 2 Data size: 544 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -205,6 +206,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types string:string:string escape.delim \ @@ -236,8 +238,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 181 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 1 Data size: 181 Basic stats: COMPLETE Column stats: COMPLETE @@ -319,8 +323,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 179 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + 
bucketingVersion: 2 key expressions: _col1 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col1 (type: string) Statistics: Num rows: 1 Data size: 179 Basic stats: COMPLETE Column stats: COMPLETE @@ -406,6 +412,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 2 Data size: 544 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -416,6 +423,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types string:string:string escape.delim \ @@ -533,6 +541,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -543,6 +552,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types string:string:string escape.delim \ @@ -629,8 +639,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 168 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 1 Data size: 168 Basic stats: COMPLETE Column stats: COMPLETE @@ -657,8 +669,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: PARTIAL Reduce Output Operator + bucketingVersion: 2 key expressions: _col1 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col1 
(type: string) Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: PARTIAL @@ -689,6 +703,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -699,6 +714,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types string:string:string escape.delim \ diff --git ql/src/test/results/clientpositive/llap/reduce_deduplicate.q.out ql/src/test/results/clientpositive/llap/reduce_deduplicate.q.out index 9df5747..0a977b0 100644 --- ql/src/test/results/clientpositive/llap/reduce_deduplicate.q.out +++ ql/src/test/results/clientpositive/llap/reduce_deduplicate.q.out @@ -43,8 +43,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: a + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE @@ -117,6 +119,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 directory: hdfs://### HDFS PATH ### @@ -165,6 +168,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 directory: hdfs://### HDFS PATH ### @@ -175,6 +179,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types struct:struct escape.delim \ @@ -349,6 +354,7 @@ input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3,_col4,_col5,_col6 columns.types string,string,int,string,bigint,string,string field.delim 9 @@ -357,8 +363,10 @@ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: PARTIAL Reduce Output Operator + bucketingVersion: 2 key expressions: _col1 (type: string) null sort order: a + numBuckets: -1 sort order: + Map-reduce partition columns: _col1 (type: string) Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: PARTIAL @@ -378,6 +386,7 @@ input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3,_col4,_col5,_col6 columns.types string,string,int,string,bigint,string,string field.delim 9 @@ -386,6 +395,7 @@ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 directory: hdfs://### HDFS PATH ### @@ -429,8 +439,10 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 Statistics: Num rows: 1 Data size: 3142 Basic stats: COMPLETE Column stats: PARTIAL Reduce Output Operator + bucketingVersion: 2 key expressions: '2010-03-29' (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: '2010-03-29' (type: string) Statistics: Num rows: 1 Data size: 3142 Basic stats: COMPLETE Column stats: PARTIAL @@ -452,6 +464,7 @@ outputColumnNames: _col0, _col1, _col2, 
_col3, _col4, _col5, _col6, _col7 Statistics: Num rows: 1 Data size: 3174 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 directory: hdfs://### HDFS PATH ### @@ -462,6 +475,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7 columns.types struct:struct:struct:struct:struct:struct:struct:string escape.delim \ diff --git ql/src/test/results/clientpositive/llap/sample1.q.out ql/src/test/results/clientpositive/llap/sample1.q.out index f9c39d9..81a821d 100644 --- ql/src/test/results/clientpositive/llap/sample1.q.out +++ ql/src/test/results/clientpositive/llap/sample1.q.out @@ -52,6 +52,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 250 Data size: 68750 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -95,7 +96,9 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1744 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1744 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -167,6 +170,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1760 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -177,6 +181,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types struct:struct:struct:struct escape.delim \ diff --git 
ql/src/test/results/clientpositive/llap/sample10.q.out ql/src/test/results/clientpositive/llap/sample10.q.out index 4a3e778..e122629 100644 --- ql/src/test/results/clientpositive/llap/sample10.q.out +++ ql/src/test/results/clientpositive/llap/sample10.q.out @@ -88,8 +88,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 2 Data size: 384 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 2 Data size: 384 Basic stats: COMPLETE Column stats: COMPLETE @@ -321,8 +323,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 2 Data size: 384 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Statistics: Num rows: 2 Data size: 384 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -337,6 +341,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 2 Data size: 384 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -347,6 +352,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types string:bigint escape.delim \ diff --git ql/src/test/results/clientpositive/llap/sharedwork.q.out ql/src/test/results/clientpositive/llap/sharedwork.q.out index f8d3b4b..cd98afa 100644 --- ql/src/test/results/clientpositive/llap/sharedwork.q.out +++ ql/src/test/results/clientpositive/llap/sharedwork.q.out @@ -147,8 +147,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 592 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key 
expressions: _col2 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col2 (type: string) Statistics: Num rows: 1 Data size: 592 Basic stats: COMPLETE Column stats: NONE @@ -226,8 +228,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE @@ -243,8 +247,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE @@ -322,8 +328,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 372 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 1 Data size: 372 Basic stats: COMPLETE Column stats: NONE @@ -401,8 +409,10 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE @@ -477,8 +487,10 @@ Position of Big Table: 0 Statistics: Num rows: 1 Data size: 651 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 
(type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 1 Data size: 651 Basic stats: COMPLETE Column stats: NONE @@ -499,8 +511,10 @@ Position of Big Table: 0 Statistics: Num rows: 1 Data size: 716 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col7 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col7 (type: string) Statistics: Num rows: 1 Data size: 716 Basic stats: COMPLETE Column stats: NONE @@ -521,8 +535,10 @@ Position of Big Table: 0 Statistics: Num rows: 1 Data size: 787 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 1 Data size: 787 Basic stats: COMPLETE Column stats: NONE @@ -695,8 +711,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 13 Data size: 260 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 13 Data size: 260 Basic stats: COMPLETE Column stats: COMPLETE @@ -718,8 +736,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 13 Data size: 1404 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: string) Statistics: Num rows: 13 Data size: 1404 Basic stats: COMPLETE Column stats: COMPLETE @@ -787,8 +807,10 @@ Statistics: Num rows: 26 Data size: 5954 Basic stats: COMPLETE Column stats: COMPLETE GatherStats: false Reduce Output Operator + 
bucketingVersion: 2 key expressions: (p_size + 1) (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: (p_size + 1) (type: int) Statistics: Num rows: 26 Data size: 5954 Basic stats: COMPLETE Column stats: COMPLETE @@ -872,16 +894,20 @@ outputColumnNames: _col0 Statistics: Num rows: 13 Data size: 52 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 13 Data size: 52 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 auto parallelism: true Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 13 Data size: 52 Basic stats: COMPLETE Column stats: COMPLETE @@ -973,8 +999,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 13 Data size: 260 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 13 Data size: 260 Basic stats: COMPLETE Column stats: COMPLETE @@ -995,8 +1023,10 @@ Position of Big Table: 0 Statistics: Num rows: 32 Data size: 7600 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col4 (type: string), (_col5 + 1) (type: int) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col4 (type: string), (_col5 + 1) (type: int) Statistics: Num rows: 32 Data size: 7600 Basic stats: COMPLETE Column stats: COMPLETE @@ -1064,8 +1094,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 13 Data size: 1404 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key 
expressions: _col1 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col1 (type: int) Statistics: Num rows: 13 Data size: 1404 Basic stats: COMPLETE Column stats: COMPLETE @@ -1090,8 +1122,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 13 Data size: 1456 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: int) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: int) Statistics: Num rows: 13 Data size: 1456 Basic stats: COMPLETE Column stats: COMPLETE @@ -1108,8 +1142,10 @@ outputColumnNames: _col0 Statistics: Num rows: 13 Data size: 52 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 13 Data size: 52 Basic stats: COMPLETE Column stats: COMPLETE diff --git ql/src/test/results/clientpositive/llap/smb_mapjoin_15.q.out ql/src/test/results/clientpositive/llap/smb_mapjoin_15.q.out index 4a7adb6..a4515eb 100644 --- ql/src/test/results/clientpositive/llap/smb_mapjoin_15.q.out +++ ql/src/test/results/clientpositive/llap/smb_mapjoin_15.q.out @@ -166,8 +166,10 @@ Statistics: Num rows: ###Masked### Data size: ###Masked### Basic stats: COMPLETE Column stats: COMPLETE top n: 10 Reduce Output Operator + bucketingVersion: 1 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Statistics: Num rows: ###Masked### Data size: ###Masked### Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -394,8 +396,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: ###Masked### Data size: ###Masked### Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col1 (type: int) 
null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: int) Statistics: Num rows: ###Masked### Data size: ###Masked### Basic stats: COMPLETE Column stats: COMPLETE @@ -477,8 +481,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: ###Masked### Data size: ###Masked### Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col1 (type: int) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: int) Statistics: Num rows: ###Masked### Data size: ###Masked### Basic stats: COMPLETE Column stats: COMPLETE @@ -564,8 +570,10 @@ Statistics: Num rows: ###Masked### Data size: ###Masked### Basic stats: COMPLETE Column stats: COMPLETE top n: 10 Reduce Output Operator + bucketingVersion: 1 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Statistics: Num rows: ###Masked### Data size: ###Masked### Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -683,8 +691,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: ###Masked### Data size: ###Masked### Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col1 (type: int) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: int) Statistics: Num rows: ###Masked### Data size: ###Masked### Basic stats: COMPLETE Column stats: COMPLETE @@ -766,8 +776,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: ###Masked### Data size: ###Masked### Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col1 (type: int) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: int) Statistics: Num rows: ###Masked### Data size: 
###Masked### Basic stats: COMPLETE Column stats: COMPLETE @@ -853,8 +865,10 @@ Statistics: Num rows: ###Masked### Data size: ###Masked### Basic stats: COMPLETE Column stats: COMPLETE top n: 10 Reduce Output Operator + bucketingVersion: 1 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Statistics: Num rows: ###Masked### Data size: ###Masked### Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -972,8 +986,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: ###Masked### Data size: ###Masked### Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col2 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: int), _col2 (type: string) Statistics: Num rows: ###Masked### Data size: ###Masked### Basic stats: COMPLETE Column stats: COMPLETE @@ -1055,8 +1071,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: ###Masked### Data size: ###Masked### Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col2 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: int), _col2 (type: string) Statistics: Num rows: ###Masked### Data size: ###Masked### Basic stats: COMPLETE Column stats: COMPLETE @@ -1142,8 +1160,10 @@ Statistics: Num rows: ###Masked### Data size: ###Masked### Basic stats: COMPLETE Column stats: COMPLETE top n: 10 Reduce Output Operator + bucketingVersion: 1 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Statistics: Num rows: ###Masked### Data size: ###Masked### Basic stats: COMPLETE Column stats: COMPLETE tag: -1 diff --git ql/src/test/results/clientpositive/llap/stats11.q.out ql/src/test/results/clientpositive/llap/stats11.q.out index 0fd3570..71a1d9d 100644 --- 
ql/src/test/results/clientpositive/llap/stats11.q.out +++ ql/src/test/results/clientpositive/llap/stats11.q.out @@ -345,8 +345,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE @@ -424,8 +426,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 122 Data size: 18933 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 122 Data size: 18933 Basic stats: PARTIAL Column stats: NONE @@ -506,6 +510,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 134 Data size: 20826 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -549,7 +554,9 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -565,6 +572,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -575,6 +583,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types struct:struct:struct escape.delim \ @@ 
-788,8 +797,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE @@ -867,8 +878,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 122 Data size: 18933 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 122 Data size: 18933 Basic stats: PARTIAL Column stats: NONE @@ -949,6 +962,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 134 Data size: 20826 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -992,7 +1006,9 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -1008,6 +1024,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1320 Basic stats: PARTIAL Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1018,6 +1035,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types struct:struct:struct escape.delim \ diff --git ql/src/test/results/clientpositive/llap/tez_fixed_bucket_pruning.q.out 
ql/src/test/results/clientpositive/llap/tez_fixed_bucket_pruning.q.out index 0dc9821..bbb7d37 100644 --- ql/src/test/results/clientpositive/llap/tez_fixed_bucket_pruning.q.out +++ ql/src/test/results/clientpositive/llap/tez_fixed_bucket_pruning.q.out @@ -558,8 +558,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 30 Data size: 248 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: bigint), _col1 (type: bigint) null sort order: zz + numBuckets: -1 sort order: ++ Statistics: Num rows: 30 Data size: 248 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -632,7 +634,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE tag: 1 @@ -709,8 +713,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 90170 Data size: 2164080 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: bigint), _col2 (type: bigint) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: bigint), _col2 (type: bigint) Statistics: Num rows: 90170 Data size: 2164080 Basic stats: COMPLETE Column stats: COMPLETE @@ -790,8 +796,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col1 (type: bigint) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col1 (type: bigint) Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE @@ -869,6 +877,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 5 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + 
bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -879,6 +888,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types bigint:bigint:bigint escape.delim \ @@ -1073,8 +1083,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 30 Data size: 248 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: bigint), _col1 (type: bigint) null sort order: zz + numBuckets: -1 sort order: ++ Statistics: Num rows: 30 Data size: 248 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -1147,7 +1159,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE tag: 1 @@ -1224,8 +1238,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 90170 Data size: 2164080 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: bigint), _col2 (type: bigint) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: bigint), _col2 (type: bigint) Statistics: Num rows: 90170 Data size: 2164080 Basic stats: COMPLETE Column stats: COMPLETE @@ -1305,8 +1321,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col1 (type: bigint) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col1 (type: bigint) Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE @@ -1384,6 +1402,7 @@ outputColumnNames: _col0, _col1, _col2 
Statistics: Num rows: 5 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1394,6 +1413,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types bigint:bigint:bigint escape.delim \ @@ -1527,8 +1547,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 4 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col1 (type: string) null sort order: z + numBuckets: -1 sort order: + Statistics: Num rows: 4 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -1600,6 +1622,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 4 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1610,6 +1633,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types int:string:string escape.delim \ @@ -1681,8 +1705,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 176 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Statistics: Num rows: 1 Data size: 176 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -1754,6 +1780,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 180 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1764,6 
+1791,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types int:string:string escape.delim \ diff --git ql/src/test/results/clientpositive/llap/tez_join_result_complex.q.out ql/src/test/results/clientpositive/llap/tez_join_result_complex.q.out index fe0f101..0b042b9 100644 --- ql/src/test/results/clientpositive/llap/tez_join_result_complex.q.out +++ ql/src/test/results/clientpositive/llap/tez_join_result_complex.q.out @@ -194,8 +194,10 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 Statistics: Num rows: 1 Data size: 3212 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 1 Data size: 3212 Basic stats: COMPLETE Column stats: NONE @@ -295,6 +297,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15 Statistics: Num rows: 1 Data size: 3533 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -305,6 +308,7 @@ input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: + bucketing_version -1 columns contact_event_id,ce_create_dt,ce_end_dt,contact_type,cnctevs_cd,contact_mode,cntvnst_stts_cd,total_transfers,ce_notes,svcrqst_id,svcrqct_cds,svcrtyp_cd,cmpltyp_cd,src,cnctmd_cd,notes columns.types string:string:string:string:string:string:string:int:array:string:array:string:string:string:string:array name default.ct_events1_test @@ -1199,8 +1203,10 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, 
_col7, _col8 Statistics: Num rows: 1 Data size: 3212 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 1 Data size: 3212 Basic stats: COMPLETE Column stats: NONE @@ -1300,6 +1306,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15 Statistics: Num rows: 1 Data size: 3533 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -1310,6 +1317,7 @@ input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: + bucketing_version -1 columns contact_event_id,ce_create_dt,ce_end_dt,contact_type,cnctevs_cd,contact_mode,cntvnst_stts_cd,total_transfers,ce_notes,svcrqst_id,svcrqct_cds,svcrtyp_cd,cmpltyp_cd,src,cnctmd_cd,notes columns.types string:string:string:string:string:string:string:int:array:string:array:string:string:string:string:array name default.ct_events1_test diff --git ql/src/test/results/clientpositive/llap/unionDistinct_1.q.out ql/src/test/results/clientpositive/llap/unionDistinct_1.q.out index aa92f46..93f4e47 100644 --- ql/src/test/results/clientpositive/llap/unionDistinct_1.q.out +++ ql/src/test/results/clientpositive/llap/unionDistinct_1.q.out @@ -3918,8 +3918,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 387 Data size: 108402 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string) null sort order: zzzz + numBuckets: -1 sort order: ++++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 
(type: string) Statistics: Num rows: 387 Data size: 108402 Basic stats: COMPLETE Column stats: COMPLETE @@ -4024,8 +4026,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 387 Data size: 108402 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string) null sort order: zzzz + numBuckets: -1 sort order: ++++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string) Statistics: Num rows: 387 Data size: 108402 Basic stats: COMPLETE Column stats: COMPLETE @@ -4103,8 +4107,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 55 Data size: 14575 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 55 Data size: 14575 Basic stats: COMPLETE Column stats: COMPLETE @@ -4177,6 +4183,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 387 Data size: 108402 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 directory: hdfs://### HDFS PATH ### @@ -4220,8 +4227,10 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 1 Data size: 1845 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 1 Data size: 1845 Basic stats: COMPLETE Column stats: COMPLETE @@ -4243,6 +4252,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 1 Data size: 1845 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 
compressed: false GlobalTableId: 0 directory: hdfs://### HDFS PATH ### @@ -4253,6 +4263,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3,_col4 columns.types struct:struct:struct:struct:string escape.delim \ @@ -5257,8 +5268,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 103 Data size: 9785 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: bigint) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: bigint) Statistics: Num rows: 103 Data size: 9785 Basic stats: COMPLETE Column stats: COMPLETE @@ -5339,8 +5352,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 51 Data size: 4845 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 51 Data size: 4845 Basic stats: COMPLETE Column stats: COMPLETE @@ -5425,8 +5440,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 103 Data size: 9785 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: bigint) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: bigint) Statistics: Num rows: 103 Data size: 9785 Basic stats: COMPLETE Column stats: COMPLETE @@ -5510,8 +5527,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 103 Data size: 9785 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: bigint) null sort order: zz + numBuckets: -1 sort order: ++ 
Map-reduce partition columns: _col0 (type: string), _col1 (type: bigint) Statistics: Num rows: 103 Data size: 9785 Basic stats: COMPLETE Column stats: COMPLETE @@ -5590,8 +5609,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 77 Data size: 7315 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: bigint) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: bigint) Statistics: Num rows: 77 Data size: 7315 Basic stats: COMPLETE Column stats: COMPLETE @@ -5613,8 +5634,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 103 Data size: 9785 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: bigint) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: bigint) Statistics: Num rows: 103 Data size: 9785 Basic stats: COMPLETE Column stats: COMPLETE @@ -5636,8 +5659,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 77 Data size: 7315 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: bigint) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: bigint) Statistics: Num rows: 77 Data size: 7315 Basic stats: COMPLETE Column stats: COMPLETE @@ -5653,6 +5678,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 77 Data size: 7315 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 directory: hdfs://### HDFS PATH ### @@ -5663,6 +5689,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 
columns.types string:bigint escape.delim \ @@ -5810,8 +5837,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 103 Data size: 9785 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: bigint) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: bigint) Statistics: Num rows: 103 Data size: 9785 Basic stats: COMPLETE Column stats: COMPLETE @@ -5895,8 +5924,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 103 Data size: 9785 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: bigint) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: bigint) Statistics: Num rows: 103 Data size: 9785 Basic stats: COMPLETE Column stats: COMPLETE @@ -5974,8 +6005,10 @@ outputColumnNames: _col0 Statistics: Num rows: 103 Data size: 8961 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 103 Data size: 8961 Basic stats: COMPLETE Column stats: COMPLETE @@ -6075,8 +6108,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 103 Data size: 9785 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: bigint) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: bigint) Statistics: Num rows: 103 Data size: 9785 Basic stats: COMPLETE Column stats: COMPLETE @@ -6154,8 +6189,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 103 Data size: 9785 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + 
bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: bigint) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: bigint) Statistics: Num rows: 103 Data size: 9785 Basic stats: COMPLETE Column stats: COMPLETE @@ -6171,6 +6208,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 103 Data size: 9785 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 directory: hdfs://### HDFS PATH ### @@ -6181,6 +6219,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types string:bigint escape.delim \ @@ -6322,8 +6361,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 103 Data size: 9785 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: bigint) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: bigint) Statistics: Num rows: 103 Data size: 9785 Basic stats: COMPLETE Column stats: COMPLETE @@ -6407,8 +6448,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 103 Data size: 9785 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: bigint) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: bigint) Statistics: Num rows: 103 Data size: 9785 Basic stats: COMPLETE Column stats: COMPLETE @@ -6505,8 +6548,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 51 Data size: 4845 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + 
Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 51 Data size: 4845 Basic stats: COMPLETE Column stats: COMPLETE @@ -6585,8 +6630,10 @@ outputColumnNames: _col0 Statistics: Num rows: 103 Data size: 8961 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 103 Data size: 8961 Basic stats: COMPLETE Column stats: COMPLETE @@ -6664,8 +6711,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 77 Data size: 7315 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: bigint) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: bigint) Statistics: Num rows: 77 Data size: 7315 Basic stats: COMPLETE Column stats: COMPLETE @@ -6681,6 +6730,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 77 Data size: 7315 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 directory: hdfs://### HDFS PATH ### @@ -6691,6 +6741,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types string:bigint escape.delim \ @@ -6719,8 +6770,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 77 Data size: 7315 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: bigint) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: bigint) Statistics: Num rows: 77 Data size: 7315 Basic stats: COMPLETE Column stats: COMPLETE diff --git 
ql/src/test/results/clientpositive/llap/union_fast_stats.q.out ql/src/test/results/clientpositive/llap/union_fast_stats.q.out index 40f469b..9b0ac52 100644 --- ql/src/test/results/clientpositive/llap/union_fast_stats.q.out +++ ql/src/test/results/clientpositive/llap/union_fast_stats.q.out @@ -175,12 +175,12 @@ #### A masked pattern was here #### Table Type: MANAGED_TABLE Table Parameters: - COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"cbigint\":\"true\",\"cboolean1\":\"true\",\"cboolean2\":\"true\",\"cdouble\":\"true\",\"cfloat\":\"true\",\"cint\":\"true\",\"csmallint\":\"true\",\"cstring1\":\"true\",\"cstring2\":\"true\",\"ctimestamp1\":\"true\",\"ctimestamp2\":\"true\",\"ctinyint\":\"true\"}} bucketing_version 2 numFiles 3 numRows 15 - rawDataSize 3315 - totalSize 4152 + rawDataSize 3370 + totalSize 4273 #### A masked pattern was here #### # Storage Information @@ -228,12 +228,12 @@ #### A masked pattern was here #### Table Type: MANAGED_TABLE Table Parameters: - COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"cbigint\":\"true\",\"cboolean1\":\"true\",\"cboolean2\":\"true\",\"cdouble\":\"true\",\"cfloat\":\"true\",\"cint\":\"true\",\"csmallint\":\"true\",\"cstring1\":\"true\",\"cstring2\":\"true\",\"ctimestamp1\":\"true\",\"ctimestamp2\":\"true\",\"ctinyint\":\"true\"}} bucketing_version 2 numFiles 3 numRows 15 - rawDataSize 3483 - totalSize 4152 + rawDataSize 3538 + totalSize 4273 #### A masked pattern was here #### # Storage Information @@ -293,12 +293,12 @@ #### A masked pattern was here #### Table Type: MANAGED_TABLE Table Parameters: - COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} + COLUMN_STATS_ACCURATE 
{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"cbigint\":\"true\",\"cboolean1\":\"true\",\"cboolean2\":\"true\",\"cdouble\":\"true\",\"cfloat\":\"true\",\"cint\":\"true\",\"csmallint\":\"true\",\"cstring1\":\"true\",\"cstring2\":\"true\",\"ctimestamp1\":\"true\",\"ctimestamp2\":\"true\",\"ctinyint\":\"true\"}} bucketing_version 2 numFiles 4 numRows 20 - rawDataSize 4468 - totalSize 5569 + rawDataSize 4523 + totalSize 5681 #### A masked pattern was here #### # Storage Information @@ -508,12 +508,12 @@ #### A masked pattern was here #### Table Type: MANAGED_TABLE Table Parameters: - COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"cbigint\":\"true\",\"cboolean1\":\"true\",\"cboolean2\":\"true\",\"cdouble\":\"true\",\"cfloat\":\"true\",\"cint\":\"true\",\"csmallint\":\"true\",\"cstring1\":\"true\",\"cstring2\":\"true\",\"ctimestamp1\":\"true\",\"ctimestamp2\":\"true\",\"ctinyint\":\"true\"}} bucketing_version 2 numFiles 1 numRows 15 - rawDataSize 3315 - totalSize 3318 + rawDataSize 3370 + totalSize 3373 #### A masked pattern was here #### # Storage Information @@ -579,12 +579,12 @@ #### A masked pattern was here #### Table Type: MANAGED_TABLE Table Parameters: - COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"cbigint\":\"true\",\"cboolean1\":\"true\",\"cboolean2\":\"true\",\"cdouble\":\"true\",\"cfloat\":\"true\",\"cint\":\"true\",\"csmallint\":\"true\",\"cstring1\":\"true\",\"cstring2\":\"true\",\"ctimestamp1\":\"true\",\"ctimestamp2\":\"true\",\"ctinyint\":\"true\"}} bucketing_version 2 numFiles 1 numRows 15 - rawDataSize 3320 - totalSize 3318 + rawDataSize 3380 + totalSize 3373 #### A masked pattern was here #### # Storage Information @@ -644,12 +644,12 @@ #### A masked pattern was here #### Table Type: MANAGED_TABLE Table Parameters: - COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} + COLUMN_STATS_ACCURATE 
{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"cbigint\":\"true\",\"cboolean1\":\"true\",\"cboolean2\":\"true\",\"cdouble\":\"true\",\"cfloat\":\"true\",\"cint\":\"true\",\"csmallint\":\"true\",\"cstring1\":\"true\",\"cstring2\":\"true\",\"ctimestamp1\":\"true\",\"ctimestamp2\":\"true\",\"ctinyint\":\"true\"}} bucketing_version 2 numFiles 2 numRows 20 - rawDataSize 4305 - totalSize 4735 + rawDataSize 4365 + totalSize 4781 #### A masked pattern was here #### # Storage Information diff --git ql/src/test/results/clientpositive/llap/vectorization_0.q.out ql/src/test/results/clientpositive/llap/vectorization_0.q.out index af394f5..2c00a79 100644 --- ql/src/test/results/clientpositive/llap/vectorization_0.q.out +++ ql/src/test/results/clientpositive/llap/vectorization_0.q.out @@ -1304,7 +1304,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -1375,6 +1377,7 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1385,6 +1388,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0 columns.types bigint escape.delim \ @@ -30120,6 +30124,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11 Statistics: Num rows: 3 Data size: 930 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -30130,6 +30135,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output 
format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11 columns.types tinyint:smallint:int:bigint:float:double:string:string:timestamp:timestamp:boolean:boolean escape.delim \ @@ -30245,6 +30251,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11 Statistics: Num rows: 1 Data size: 310 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -30255,6 +30262,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11 columns.types tinyint:smallint:int:bigint:float:double:string:string:timestamp:timestamp:boolean:boolean escape.delim \ @@ -30370,6 +30378,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11 Statistics: Num rows: 2 Data size: 620 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -30380,6 +30389,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11 columns.types tinyint:smallint:int:bigint:float:double:string:string:timestamp:timestamp:boolean:boolean escape.delim \ @@ -30502,8 +30512,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 3 Data size: 306 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + 
numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 3 Data size: 306 Basic stats: COMPLETE Column stats: COMPLETE @@ -30580,8 +30592,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 3 Data size: 306 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col1 (type: string) null sort order: z + numBuckets: -1 sort order: + Statistics: Num rows: 3 Data size: 306 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -30596,6 +30610,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 3 Data size: 306 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -30606,6 +30621,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types bigint:string escape.delim \ diff --git ql/src/test/results/clientpositive/load_dyn_part8.q.out ql/src/test/results/clientpositive/load_dyn_part8.q.out index 5342d25..a577b5b 100644 --- ql/src/test/results/clientpositive/load_dyn_part8.q.out +++ ql/src/test/results/clientpositive/load_dyn_part8.q.out @@ -80,6 +80,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 666 Data size: 363636 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -121,8 +122,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 2 Data size: 2496 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) Statistics: Num rows: 2 Data size: 
2496 Basic stats: COMPLETE Column stats: COMPLETE @@ -138,6 +141,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 666 Data size: 241092 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 2 #### A masked pattern was here #### @@ -180,6 +184,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 2 Data size: 2316 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -414,6 +419,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 2 Data size: 2496 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -424,6 +430,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types struct:struct:string:string escape.delim \ @@ -515,8 +522,10 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 key expressions: '2008-12-31' (type: string), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: '2008-12-31' (type: string), _col1 (type: string) Statistics: Num rows: 2 Data size: 2316 Basic stats: COMPLETE Column stats: COMPLETE @@ -564,6 +573,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 2 Data size: 2316 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -574,6 +584,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 
columns.types struct:struct:string:string escape.delim \ diff --git ql/src/test/results/clientpositive/louter_join_ppr.q.out ql/src/test/results/clientpositive/louter_join_ppr.q.out index 3aa6207..a116abe 100644 --- ql/src/test/results/clientpositive/louter_join_ppr.q.out +++ ql/src/test/results/clientpositive/louter_join_ppr.q.out @@ -55,8 +55,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE @@ -77,8 +79,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 111 Data size: 19758 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 111 Data size: 19758 Basic stats: COMPLETE Column stats: COMPLETE @@ -249,6 +253,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 55 Data size: 19580 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -259,6 +264,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types string:string:string:string escape.delim \ @@ -372,8 +378,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 111 Data size: 19758 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition 
columns: _col0 (type: string) Statistics: Num rows: 111 Data size: 19758 Basic stats: COMPLETE Column stats: COMPLETE @@ -394,8 +402,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE @@ -566,6 +576,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 55 Data size: 19580 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -576,6 +587,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types string:string:string:string escape.delim \ @@ -689,8 +701,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE @@ -711,8 +725,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 111 Data size: 19758 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 111 Data size: 19758 Basic stats: COMPLETE Column stats: COMPLETE @@ -883,6 +899,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 55 Data size: 
19580 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -893,6 +910,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types string:string:string:string escape.delim \ @@ -1006,8 +1024,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 111 Data size: 19758 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 111 Data size: 19758 Basic stats: COMPLETE Column stats: COMPLETE @@ -1028,8 +1048,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE @@ -1200,6 +1222,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 55 Data size: 19580 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1210,6 +1233,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types string:string:string:string escape.delim \ diff --git ql/src/test/results/clientpositive/mapjoin_mapjoin.q.out ql/src/test/results/clientpositive/mapjoin_mapjoin.q.out index ea6aa83..2771619 100644 --- 
ql/src/test/results/clientpositive/mapjoin_mapjoin.q.out +++ ql/src/test/results/clientpositive/mapjoin_mapjoin.q.out @@ -118,6 +118,7 @@ Position of Big Table: 0 Statistics: Num rows: 2420 Data size: 25709 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -128,6 +129,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0 columns.types string escape.delim \ diff --git ql/src/test/results/clientpositive/merge3.q.out ql/src/test/results/clientpositive/merge3.q.out index fe83b8d..32c712d 100644 --- ql/src/test/results/clientpositive/merge3.q.out +++ ql/src/test/results/clientpositive/merge3.q.out @@ -89,6 +89,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 2000 Data size: 356000 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -99,6 +100,7 @@ input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: + bucketing_version -1 columns key,value columns.types string:string name default.merge_src2 @@ -120,7 +122,9 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -187,6 +191,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -197,6 +202,7 @@ input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types struct:struct escape.delim \ @@ -248,6 +254,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: -1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -256,6 +263,7 @@ input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: + bucketing_version -1 columns key,value columns.types string:string name default.merge_src2 @@ -275,6 +283,7 @@ input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: + bucketing_version -1 columns key,value columns.types string:string name default.merge_src2 @@ -285,6 +294,7 @@ input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: + bucketing_version -1 columns key,value columns.types string:string name default.merge_src2 @@ -302,6 +312,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: -1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -310,6 +321,7 @@ input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: + bucketing_version -1 columns key,value columns.types string:string name default.merge_src2 @@ -329,6 +341,7 @@ input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: + bucketing_version -1 columns key,value columns.types string:string name default.merge_src2 @@ -339,6 +352,7 @@ input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: + bucketing_version 
-1 columns key,value columns.types string:string name default.merge_src2 @@ -2465,6 +2479,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 2000 Data size: 724000 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -2506,8 +2521,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 2 Data size: 2128 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 2 Data size: 2128 Basic stats: COMPLETE Column stats: COMPLETE @@ -2629,6 +2646,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 2 Data size: 2128 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -2639,6 +2657,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types struct:struct:string escape.delim \ @@ -2704,6 +2723,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -2785,6 +2805,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -4964,8 +4985,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 2000 Data size: 724000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col2 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col2 (type: string) Statistics: Num rows: 2000 Data size: 724000 Basic 
stats: COMPLETE Column stats: COMPLETE @@ -5082,6 +5105,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 2000 Data size: 724000 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -5126,6 +5150,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 2 Data size: 2128 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -5136,6 +5161,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types struct:struct:string escape.delim \ @@ -5201,6 +5227,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -5282,6 +5309,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### diff --git ql/src/test/results/clientpositive/mm_buckets.q.out ql/src/test/results/clientpositive/mm_buckets.q.out index efcfba4..e2c3163 100644 --- ql/src/test/results/clientpositive/mm_buckets.q.out +++ ql/src/test/results/clientpositive/mm_buckets.q.out @@ -92,8 +92,8 @@ POSTHOOK: Input: default@bucket0_mm #### A masked pattern was here #### 10 10 -98 98 97 97 +98 98 PREHOOK: query: select * from bucket0_mm tablesample (bucket 2 out of 2) s PREHOOK: type: QUERY PREHOOK: Input: default@bucket0_mm @@ -150,11 +150,11 @@ POSTHOOK: Input: default@bucket0_mm #### A masked pattern was here #### 10 10 +97 97 98 98 10 10 +97 97 98 98 -97 97 -97 97 PREHOOK: query: select * from bucket0_mm tablesample (bucket 2 out of 2) s PREHOOK: type: QUERY PREHOOK: Input: default@bucket0_mm @@ -165,10 +165,10 @@ #### A masked pattern was here 
#### 0 0 100 100 +103 103 0 0 100 100 103 103 -103 103 PREHOOK: query: drop table bucket0_mm PREHOOK: type: DROPTABLE PREHOOK: Input: default@bucket0_mm diff --git ql/src/test/results/clientpositive/offset_limit_global_optimizer.q.out ql/src/test/results/clientpositive/offset_limit_global_optimizer.q.out index 7e1a2c0..5b20c2b 100644 --- ql/src/test/results/clientpositive/offset_limit_global_optimizer.q.out +++ ql/src/test/results/clientpositive/offset_limit_global_optimizer.q.out @@ -38,8 +38,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 2000 Data size: 1082000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string) null sort order: zzzz + numBuckets: -1 sort order: ++++ Statistics: Num rows: 2000 Data size: 1082000 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -262,6 +264,7 @@ Offset of rows: 400 Statistics: Num rows: 10 Data size: 5410 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -272,6 +275,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types string:string:string:string escape.delim \ @@ -356,8 +360,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 2000 Data size: 1082000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string) null sort order: zzzz + numBuckets: -1 sort order: ++++ Statistics: Num rows: 2000 Data size: 1082000 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -580,6 +586,7 @@ Offset of rows: 490 Statistics: Num rows: 10 Data 
size: 5410 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -590,6 +597,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types string:string:string:string escape.delim \ @@ -674,8 +682,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 2000 Data size: 1082000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string) null sort order: zzzz + numBuckets: -1 sort order: ++++ Statistics: Num rows: 2000 Data size: 1082000 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -898,6 +908,7 @@ Offset of rows: 490 Statistics: Num rows: 20 Data size: 10820 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -908,6 +919,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types string:string:string:string escape.delim \ @@ -1002,8 +1014,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 2000 Data size: 1082000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string) null sort order: zzzz + numBuckets: -1 sort order: ++++ Statistics: Num rows: 2000 Data size: 1082000 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -1226,6 +1240,7 @@ Offset of rows: 490 Statistics: Num rows: 600 Data size: 324600 Basic stats: 
COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1236,6 +1251,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types string:string:string:string escape.delim \ @@ -1905,8 +1921,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 2000 Data size: 1082000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string) null sort order: zzzz + numBuckets: -1 sort order: ++++ Statistics: Num rows: 2000 Data size: 1082000 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -2129,6 +2147,7 @@ Offset of rows: 400 Statistics: Num rows: 10 Data size: 5410 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -2139,6 +2158,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types string:string:string:string escape.delim \ @@ -2218,8 +2238,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 2000 Data size: 1082000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string) null sort order: zzzz + numBuckets: -1 sort order: ++++ Statistics: Num rows: 2000 Data size: 1082000 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -2442,6 +2464,7 @@ Offset of rows: 490 Statistics: Num rows: 10 Data size: 5410 Basic stats: COMPLETE Column stats: 
COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -2452,6 +2475,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types string:string:string:string escape.delim \ @@ -2531,8 +2555,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 2000 Data size: 1082000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string) null sort order: zzzz + numBuckets: -1 sort order: ++++ Statistics: Num rows: 2000 Data size: 1082000 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -2755,6 +2781,7 @@ Offset of rows: 490 Statistics: Num rows: 20 Data size: 10820 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -2765,6 +2792,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types string:string:string:string escape.delim \ @@ -2854,8 +2882,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 2000 Data size: 1082000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string) null sort order: zzzz + numBuckets: -1 sort order: ++++ Statistics: Num rows: 2000 Data size: 1082000 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -3078,6 +3108,7 @@ Offset of rows: 490 Statistics: Num rows: 600 Data size: 324600 Basic stats: COMPLETE Column stats: COMPLETE File 
Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -3088,6 +3119,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types string:string:string:string escape.delim \ diff --git ql/src/test/results/clientpositive/outer_join_ppr.q.out ql/src/test/results/clientpositive/outer_join_ppr.q.out index e73d9b9..8f6841d 100644 --- ql/src/test/results/clientpositive/outer_join_ppr.q.out +++ ql/src/test/results/clientpositive/outer_join_ppr.q.out @@ -55,8 +55,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE @@ -77,8 +79,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 111 Data size: 19758 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 111 Data size: 19758 Basic stats: COMPLETE Column stats: COMPLETE @@ -249,6 +253,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 55 Data size: 19580 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -259,6 +264,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types 
string:string:string:string escape.delim \ @@ -372,8 +378,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE @@ -394,8 +402,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 111 Data size: 19758 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 111 Data size: 19758 Basic stats: COMPLETE Column stats: COMPLETE @@ -566,6 +576,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 55 Data size: 19580 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -576,6 +587,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types string:string:string:string escape.delim \ diff --git ql/src/test/results/clientpositive/parquet_vectorization_0.q.out ql/src/test/results/clientpositive/parquet_vectorization_0.q.out index e7a884d..ca1c640 100644 --- ql/src/test/results/clientpositive/parquet_vectorization_0.q.out +++ ql/src/test/results/clientpositive/parquet_vectorization_0.q.out @@ -1120,7 +1120,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE 
Column stats: COMPLETE tag: -1 @@ -1188,6 +1190,7 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1198,6 +1201,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0 columns.types bigint escape.delim \ @@ -29930,6 +29934,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11 Statistics: Num rows: 3 Data size: 930 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -29940,6 +29945,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11 columns.types tinyint:smallint:int:bigint:float:double:string:string:timestamp:timestamp:boolean:boolean escape.delim \ @@ -30051,6 +30057,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11 Statistics: Num rows: 1 Data size: 310 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -30061,6 +30068,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11 columns.types tinyint:smallint:int:bigint:float:double:string:string:timestamp:timestamp:boolean:boolean escape.delim \ @@ -30171,6 +30179,7 @@ 
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11 Statistics: Num rows: 2 Data size: 620 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -30181,6 +30190,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11 columns.types tinyint:smallint:int:bigint:float:double:string:string:timestamp:timestamp:boolean:boolean escape.delim \ @@ -30296,8 +30306,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 3 Data size: 306 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 3 Data size: 306 Basic stats: COMPLETE Column stats: COMPLETE @@ -30371,6 +30383,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 3 Data size: 306 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -30395,8 +30408,10 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 key expressions: _col1 (type: string) null sort order: z + numBuckets: -1 sort order: + Statistics: Num rows: 3 Data size: 306 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -30437,6 +30452,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 3 Data size: 306 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -30447,6 +30463,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types bigint:string escape.delim \ diff --git ql/src/test/results/clientpositive/pcr.q.out ql/src/test/results/clientpositive/pcr.q.out index 65952e7..cf6f1b0 100644 --- ql/src/test/results/clientpositive/pcr.q.out +++ ql/src/test/results/clientpositive/pcr.q.out @@ -86,8 +86,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 11 Data size: 3058 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col2 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Statistics: Num rows: 11 Data size: 3058 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -203,6 +205,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 11 Data size: 3058 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -213,6 +216,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types int:string:string escape.delim \ @@ -293,8 +297,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 36 Data size: 3384 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Statistics: Num rows: 36 Data size: 3384 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -459,6 +465,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 36 Data size: 3384 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -469,6 +476,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat 
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types int:string escape.delim \ @@ -584,8 +592,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 11 Data size: 3058 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col2 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Statistics: Num rows: 11 Data size: 3058 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -701,6 +711,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 11 Data size: 3058 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -711,6 +722,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types int:string:string escape.delim \ @@ -791,8 +803,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 4 Data size: 1112 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col2 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Statistics: Num rows: 4 Data size: 1112 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -908,6 +922,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 4 Data size: 1112 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -918,6 +933,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types int:string:string escape.delim \ 
@@ -1002,8 +1018,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 7 Data size: 1946 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col2 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Statistics: Num rows: 7 Data size: 1946 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -1168,6 +1186,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 7 Data size: 1946 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1178,6 +1197,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types int:string:string escape.delim \ @@ -1272,8 +1292,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 15 Data size: 4170 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col2 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Statistics: Num rows: 15 Data size: 4170 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -1438,6 +1460,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 15 Data size: 4170 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1448,6 +1471,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types int:string:string escape.delim \ @@ -1549,8 +1573,10 @@ outputColumnNames: _col0 Statistics: Num rows: 3 Data size: 270 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + 
bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Statistics: Num rows: 3 Data size: 270 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -1665,6 +1691,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 3 Data size: 282 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1675,6 +1702,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types int:string escape.delim \ @@ -1739,8 +1767,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 40 Data size: 3760 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Statistics: Num rows: 40 Data size: 3760 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -1855,6 +1885,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 40 Data size: 3760 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1865,6 +1896,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types int:string escape.delim \ @@ -1971,8 +2003,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 60 Data size: 5640 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Statistics: Num rows: 60 Data size: 5640 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ 
-2136,6 +2170,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 60 Data size: 5640 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -2146,6 +2181,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types int:string escape.delim \ @@ -2276,8 +2312,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 20 Data size: 5560 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string) null sort order: zzz + numBuckets: -1 sort order: +++ Statistics: Num rows: 20 Data size: 5560 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -2392,6 +2430,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 20 Data size: 5560 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -2402,6 +2441,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types int:string:string escape.delim \ @@ -2475,8 +2515,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 20 Data size: 1880 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 20 Data size: 1880 Basic stats: COMPLETE Column stats: COMPLETE @@ -2497,8 +2539,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 20 Data size: 1880 Basic stats: COMPLETE Column stats: COMPLETE 
Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 20 Data size: 1880 Basic stats: COMPLETE Column stats: COMPLETE @@ -2569,6 +2613,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 30 Data size: 5640 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -2593,8 +2638,10 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Statistics: Num rows: 30 Data size: 5640 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -2635,6 +2682,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 30 Data size: 11280 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -2645,6 +2693,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3,_col4,_col5 columns.types int:string:string:int:string:string escape.delim \ @@ -2755,8 +2804,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 20 Data size: 1880 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 20 Data size: 1880 Basic stats: COMPLETE Column stats: COMPLETE @@ -2777,8 +2828,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 20 Data size: 1880 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key 
expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 20 Data size: 1880 Basic stats: COMPLETE Column stats: COMPLETE @@ -2898,6 +2951,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 30 Data size: 5640 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -2922,8 +2976,10 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Statistics: Num rows: 30 Data size: 5640 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -2964,6 +3020,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 30 Data size: 11280 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -2974,6 +3031,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3,_col4,_col5 columns.types int:string:string:int:string:string escape.delim \ @@ -3094,8 +3152,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 14 Data size: 3892 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string) null sort order: zzz + numBuckets: -1 sort order: +++ Statistics: Num rows: 14 Data size: 3892 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -3308,6 +3368,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 14 Data size: 3892 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A 
masked pattern was here #### @@ -3318,6 +3379,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types int:string:string escape.delim \ @@ -3434,8 +3496,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 22 Data size: 6116 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string) null sort order: zzz + numBuckets: -1 sort order: +++ Statistics: Num rows: 22 Data size: 6116 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -3599,6 +3663,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 22 Data size: 6116 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -3609,6 +3674,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types int:string:string escape.delim \ @@ -3742,6 +3808,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 20 Data size: 1880 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -3785,7 +3852,9 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -3796,6 +3865,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 20 Data size: 1880 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 
compressed: false GlobalTableId: 2 #### A masked pattern was here #### @@ -3839,6 +3909,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -3917,6 +3988,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -3927,6 +3999,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types struct:struct escape.delim \ @@ -3993,6 +4066,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -4083,6 +4157,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -4218,7 +4293,9 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -4260,6 +4337,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -4270,6 +4348,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types struct:struct escape.delim \ @@ -4350,6 +4429,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 2 Data size: 188 
Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -4393,7 +4473,9 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -4408,6 +4490,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 2 #### A masked pattern was here #### @@ -4451,6 +4534,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -4529,6 +4613,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -4539,6 +4624,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types struct:struct escape.delim \ @@ -4605,6 +4691,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -4695,6 +4782,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -4830,7 +4918,9 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 
864 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -4872,6 +4962,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -4882,6 +4973,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types struct:struct escape.delim \ @@ -4947,8 +5039,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -5021,6 +5115,7 @@ Number of rows: 10 Statistics: Num rows: 10 Data size: 1780 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -5031,6 +5126,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types string:string escape.delim \ @@ -5096,8 +5192,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 500 Data size: 181000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col2 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Statistics: Num rows: 500 Data size: 181000 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -5215,6 +5313,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 500 Data size: 228000 Basic stats: COMPLETE Column stats: COMPLETE 
File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -5225,6 +5324,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types string:string:string:string escape.delim \ @@ -5296,8 +5396,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 500 Data size: 181000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col2 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Statistics: Num rows: 500 Data size: 181000 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -5415,6 +5517,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 500 Data size: 224000 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -5425,6 +5528,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types string:string:string:string escape.delim \ diff --git ql/src/test/results/clientpositive/pcs.q.out ql/src/test/results/clientpositive/pcs.q.out index d10a070..4d1ff15 100644 --- ql/src/test/results/clientpositive/pcs.q.out +++ ql/src/test/results/clientpositive/pcs.q.out @@ -124,8 +124,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 20 Data size: 5560 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string) null sort order: zzz + numBuckets: -1 sort order: +++ Statistics: Num rows: 20 Data size: 5560 Basic stats: COMPLETE Column stats: COMPLETE tag: 
-1 @@ -240,6 +242,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 20 Data size: 5560 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -250,6 +253,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types int:string:string escape.delim \ @@ -318,6 +322,7 @@ outputColumnNames: _col0 Statistics: Num rows: 20 Data size: 3680 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -328,6 +333,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0 columns.types string escape.delim \ @@ -499,6 +505,7 @@ outputColumnNames: _col0 Statistics: Num rows: 20 Data size: 3680 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -509,6 +516,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0 columns.types string escape.delim \ @@ -677,8 +685,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 6 Data size: 1128 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col1 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col1 (type: string) Statistics: Num rows: 6 Data size: 1128 Basic stats: COMPLETE Column stats: COMPLETE @@ -695,8 +705,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 40 Data size: 7520 
Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col1 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col1 (type: string) Statistics: Num rows: 40 Data size: 7520 Basic stats: COMPLETE Column stats: COMPLETE @@ -824,6 +836,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 30 Data size: 5640 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -834,6 +847,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types string:int escape.delim \ @@ -901,6 +915,7 @@ outputColumnNames: _col0 Statistics: Num rows: 20 Data size: 3680 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -911,6 +926,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0 columns.types string escape.delim \ @@ -1185,8 +1201,10 @@ Union Statistics: Num rows: 2 Data size: 168 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Statistics: Num rows: 2 Data size: 168 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -1207,8 +1225,10 @@ Union Statistics: Num rows: 2 Data size: 168 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Statistics: Num rows: 2 Data size: 168 Basic stats: COMPLETE Column 
stats: COMPLETE tag: -1 @@ -1220,6 +1240,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1230,6 +1251,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types int:string:string escape.delim \ @@ -1413,6 +1435,7 @@ outputColumnNames: _col0 Statistics: Num rows: 20 Data size: 3680 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1423,6 +1446,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0 columns.types string escape.delim \ @@ -1583,6 +1607,7 @@ outputColumnNames: _col0 Statistics: Num rows: 10 Data size: 1840 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1593,6 +1618,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0 columns.types string escape.delim \ @@ -1819,6 +1845,7 @@ outputColumnNames: _col0 Statistics: Num rows: 8 Data size: 1472 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1829,6 +1856,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0 columns.types 
string escape.delim \ diff --git ql/src/test/results/clientpositive/pointlookup2.q.out ql/src/test/results/clientpositive/pointlookup2.q.out index ad9839e..b893ecf 100644 --- ql/src/test/results/clientpositive/pointlookup2.q.out +++ ql/src/test/results/clientpositive/pointlookup2.q.out @@ -132,8 +132,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 6 Data size: 1668 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string) null sort order: zzz + numBuckets: -1 sort order: +++ Statistics: Num rows: 6 Data size: 1668 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -248,6 +250,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 6 Data size: 1668 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -258,6 +261,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types int:string:string escape.delim \ @@ -326,8 +330,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 20 Data size: 1880 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 20 Data size: 1880 Basic stats: COMPLETE Column stats: COMPLETE @@ -348,8 +354,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 20 Data size: 1880 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 20 Data size: 1880 Basic stats: 
COMPLETE Column stats: COMPLETE @@ -420,6 +428,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 30 Data size: 5640 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -444,8 +453,10 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Statistics: Num rows: 30 Data size: 5640 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -486,6 +497,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 30 Data size: 11280 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -496,6 +508,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3,_col4,_col5 columns.types int:string:string:int:string:string escape.delim \ @@ -566,8 +579,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 20 Data size: 1880 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 20 Data size: 1880 Basic stats: COMPLETE Column stats: COMPLETE @@ -588,8 +603,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 20 Data size: 1880 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 20 Data size: 1880 Basic stats: COMPLETE Column stats: COMPLETE @@ -709,6 +726,7 @@ outputColumnNames: 
_col0, _col1, _col2, _col3 Statistics: Num rows: 30 Data size: 5640 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -733,8 +751,10 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Statistics: Num rows: 30 Data size: 5640 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -775,6 +795,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 30 Data size: 11280 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -785,6 +806,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3,_col4,_col5 columns.types int:string:string:int:string:string escape.delim \ @@ -853,7 +875,9 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 40 Data size: 11440 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 40 Data size: 11440 Basic stats: COMPLETE Column stats: COMPLETE tag: 0 @@ -873,7 +897,9 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 1 Data size: 195 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 195 Basic stats: COMPLETE Column stats: COMPLETE tag: 1 @@ -1049,6 +1075,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 20 Data size: 9300 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false 
GlobalTableId: 0 #### A masked pattern was here #### @@ -1073,8 +1100,10 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 key expressions: _col4 (type: int), _col5 (type: string), _col2 (type: string) null sort order: zzz + numBuckets: -1 sort order: +++ Statistics: Num rows: 20 Data size: 9300 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -1115,6 +1144,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 20 Data size: 9300 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1125,6 +1155,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3,_col4,_col5 columns.types int:string:string:string:int:string escape.delim \ @@ -1199,7 +1230,9 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 10 Data size: 2860 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 10 Data size: 2860 Basic stats: COMPLETE Column stats: COMPLETE tag: 0 @@ -1219,7 +1252,9 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 1 Data size: 195 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 195 Basic stats: COMPLETE Column stats: COMPLETE tag: 1 @@ -1444,6 +1479,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 4 Data size: 1860 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1468,8 +1504,10 @@ TableScan GatherStats: false Reduce Output 
Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col1 (type: string), _col3 (type: string) null sort order: zzz + numBuckets: -1 sort order: +++ Statistics: Num rows: 4 Data size: 1860 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -1510,6 +1548,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 4 Data size: 1860 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1520,6 +1559,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3,_col4,_col5 columns.types int:string:string:string:int:string escape.delim \ @@ -1762,8 +1802,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 20 Data size: 5560 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string) null sort order: zzz + numBuckets: -1 sort order: +++ Statistics: Num rows: 20 Data size: 5560 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -1878,6 +1920,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 20 Data size: 5560 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1888,6 +1931,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types int:string:string escape.delim \ @@ -1956,8 +2000,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 20 Data size: 1880 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 
(type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 20 Data size: 1880 Basic stats: COMPLETE Column stats: COMPLETE @@ -1978,8 +2024,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 20 Data size: 1880 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 20 Data size: 1880 Basic stats: COMPLETE Column stats: COMPLETE @@ -2050,6 +2098,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 30 Data size: 5640 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -2074,8 +2123,10 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Statistics: Num rows: 30 Data size: 5640 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -2116,6 +2167,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 30 Data size: 11280 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -2126,6 +2178,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3,_col4,_col5 columns.types int:string:string:int:string:string escape.delim \ @@ -2196,8 +2249,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 20 Data size: 1880 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: 
+ Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 20 Data size: 1880 Basic stats: COMPLETE Column stats: COMPLETE @@ -2218,8 +2273,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 20 Data size: 1880 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 20 Data size: 1880 Basic stats: COMPLETE Column stats: COMPLETE @@ -2339,6 +2396,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 30 Data size: 5640 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -2363,8 +2421,10 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Statistics: Num rows: 30 Data size: 5640 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -2405,6 +2465,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 30 Data size: 11280 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -2415,6 +2476,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3,_col4,_col5 columns.types int:string:string:int:string:string escape.delim \ @@ -2475,7 +2537,9 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 40 Data size: 11120 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 40 Data size: 11120 Basic stats: COMPLETE Column stats: COMPLETE 
tag: 0 @@ -2495,7 +2559,9 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 187 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 187 Basic stats: COMPLETE Column stats: COMPLETE tag: 1 @@ -2667,6 +2733,7 @@ predicate: (struct(_col2,_col4)) IN (const struct('2000-04-08',1), const struct('2000-04-09',2)) (type: boolean) Statistics: Num rows: 40 Data size: 18600 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -2691,8 +2758,10 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 key expressions: _col4 (type: int), _col5 (type: string), _col2 (type: string) null sort order: zzz + numBuckets: -1 sort order: +++ Statistics: Num rows: 40 Data size: 18600 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -2733,6 +2802,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 40 Data size: 18600 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -2743,6 +2813,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3,_col4,_col5 columns.types int:string:string:string:int:string escape.delim \ @@ -2809,7 +2880,9 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 9 Data size: 2502 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 9 Data size: 2502 Basic stats: COMPLETE Column stats: COMPLETE tag: 0 @@ -2829,7 +2902,9 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data 
size: 187 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 187 Basic stats: COMPLETE Column stats: COMPLETE tag: 1 @@ -3050,6 +3125,7 @@ predicate: (struct(_col0,_col3)) IN (const struct(1,'2000-04-08'), const struct(2,'2000-04-09')) (type: boolean) Statistics: Num rows: 9 Data size: 4185 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -3074,8 +3150,10 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col1 (type: string), _col3 (type: string) null sort order: zzz + numBuckets: -1 sort order: +++ Statistics: Num rows: 9 Data size: 4185 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -3116,6 +3194,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 9 Data size: 4185 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -3126,6 +3205,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3,_col4,_col5 columns.types int:string:string:string:int:string escape.delim \ diff --git ql/src/test/results/clientpositive/pointlookup3.q.out ql/src/test/results/clientpositive/pointlookup3.q.out index 84e6bbf..6b3a50d 100644 --- ql/src/test/results/clientpositive/pointlookup3.q.out +++ ql/src/test/results/clientpositive/pointlookup3.q.out @@ -86,8 +86,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 6 Data size: 2772 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col1 (type: string), 
_col2 (type: string), _col3 (type: string) null sort order: zzzz + numBuckets: -1 sort order: ++++ Statistics: Num rows: 6 Data size: 2772 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -204,6 +206,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 6 Data size: 2772 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -214,6 +217,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types int:string:string:string escape.delim \ @@ -277,8 +281,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 2 Data size: 556 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string) null sort order: zzz + numBuckets: -1 sort order: +++ Statistics: Num rows: 2 Data size: 556 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -345,6 +351,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 2 Data size: 744 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -355,6 +362,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types int:string:string:string escape.delim \ @@ -423,8 +431,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 20 Data size: 5560 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: 
_col0 (type: int) Statistics: Num rows: 20 Data size: 5560 Basic stats: COMPLETE Column stats: COMPLETE @@ -445,8 +455,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 20 Data size: 5560 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 20 Data size: 5560 Basic stats: COMPLETE Column stats: COMPLETE @@ -518,6 +530,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 30 Data size: 16680 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -542,8 +555,10 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 key expressions: _col3 (type: int), _col4 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Statistics: Num rows: 30 Data size: 16680 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -584,6 +599,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 Statistics: Num rows: 30 Data size: 22320 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -594,6 +610,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7 columns.types int:string:string:string:int:string:string:string escape.delim \ @@ -664,8 +681,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 20 Data size: 5560 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + 
Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 20 Data size: 5560 Basic stats: COMPLETE Column stats: COMPLETE @@ -686,8 +705,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 20 Data size: 5560 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 20 Data size: 5560 Basic stats: COMPLETE Column stats: COMPLETE @@ -809,6 +830,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 30 Data size: 16680 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -833,8 +855,10 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 key expressions: _col3 (type: int), _col4 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Statistics: Num rows: 30 Data size: 16680 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -875,6 +899,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 Statistics: Num rows: 30 Data size: 22320 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -885,6 +910,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7 columns.types int:string:string:string:int:string:string:string escape.delim \ @@ -953,7 +979,9 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 40 Data size: 18800 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort 
order: Statistics: Num rows: 40 Data size: 18800 Basic stats: COMPLETE Column stats: COMPLETE tag: 0 @@ -973,7 +1001,9 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 10 Data size: 4700 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 10 Data size: 4700 Basic stats: COMPLETE Column stats: COMPLETE tag: 1 @@ -1152,6 +1182,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 Statistics: Num rows: 200 Data size: 184800 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1176,8 +1207,10 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 key expressions: _col4 (type: int), _col5 (type: string), _col2 (type: string) null sort order: zzz + numBuckets: -1 sort order: +++ Statistics: Num rows: 200 Data size: 184800 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -1218,6 +1251,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 Statistics: Num rows: 200 Data size: 184800 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1228,6 +1262,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7 columns.types int:string:string:string:int:string:string:string escape.delim \ @@ -1518,8 +1553,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 20 Data size: 9240 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: 
string) null sort order: zzzz + numBuckets: -1 sort order: ++++ Statistics: Num rows: 20 Data size: 9240 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -1636,6 +1673,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 20 Data size: 9240 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1646,6 +1684,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types int:string:string:string escape.delim \ @@ -1709,8 +1748,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 10 Data size: 2780 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string) null sort order: zzz + numBuckets: -1 sort order: +++ Statistics: Num rows: 10 Data size: 2780 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -1777,6 +1818,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 10 Data size: 3720 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1787,6 +1829,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types int:string:string:string escape.delim \ @@ -1855,8 +1898,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 20 Data size: 5560 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) 
Statistics: Num rows: 20 Data size: 5560 Basic stats: COMPLETE Column stats: COMPLETE @@ -1877,8 +1922,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 20 Data size: 5560 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 20 Data size: 5560 Basic stats: COMPLETE Column stats: COMPLETE @@ -1950,6 +1997,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 30 Data size: 16680 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1974,8 +2022,10 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 key expressions: _col3 (type: int), _col4 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Statistics: Num rows: 30 Data size: 16680 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -2016,6 +2066,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 Statistics: Num rows: 30 Data size: 22320 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -2026,6 +2077,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7 columns.types int:string:string:string:int:string:string:string escape.delim \ @@ -2096,8 +2148,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 20 Data size: 5560 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce 
partition columns: _col0 (type: int) Statistics: Num rows: 20 Data size: 5560 Basic stats: COMPLETE Column stats: COMPLETE @@ -2118,8 +2172,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 20 Data size: 5560 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 20 Data size: 5560 Basic stats: COMPLETE Column stats: COMPLETE @@ -2241,6 +2297,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 30 Data size: 16680 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -2265,8 +2322,10 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 key expressions: _col3 (type: int), _col4 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Statistics: Num rows: 30 Data size: 16680 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -2307,6 +2366,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 Statistics: Num rows: 30 Data size: 22320 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -2317,6 +2377,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7 columns.types int:string:string:string:int:string:string:string escape.delim \ @@ -2377,7 +2438,9 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 40 Data size: 18480 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: 
Statistics: Num rows: 40 Data size: 18480 Basic stats: COMPLETE Column stats: COMPLETE tag: 0 @@ -2397,7 +2460,9 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 9 Data size: 4158 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 9 Data size: 4158 Basic stats: COMPLETE Column stats: COMPLETE tag: 1 @@ -2572,6 +2637,7 @@ predicate: (struct(_col2,_col4)) IN (const struct('2000-04-08',1), const struct('2000-04-09',2)) (type: boolean) Statistics: Num rows: 180 Data size: 166320 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -2596,8 +2662,10 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 key expressions: _col4 (type: int), _col5 (type: string), _col2 (type: string) null sort order: zzz + numBuckets: -1 sort order: +++ Statistics: Num rows: 180 Data size: 166320 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -2638,6 +2706,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 Statistics: Num rows: 180 Data size: 166320 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -2648,6 +2717,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7 columns.types int:string:string:string:int:string:string:string escape.delim \ diff --git ql/src/test/results/clientpositive/pointlookup4.q.out ql/src/test/results/clientpositive/pointlookup4.q.out index f95d557..2967c62 100644 --- ql/src/test/results/clientpositive/pointlookup4.q.out +++ ql/src/test/results/clientpositive/pointlookup4.q.out @@ -86,8 
+86,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 6 Data size: 2772 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string) null sort order: zzzz + numBuckets: -1 sort order: ++++ Statistics: Num rows: 6 Data size: 2772 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -204,6 +206,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 6 Data size: 2772 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -214,6 +217,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types int:string:string:string escape.delim \ @@ -297,8 +301,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 20 Data size: 9240 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string) null sort order: zzzz + numBuckets: -1 sort order: ++++ Statistics: Num rows: 20 Data size: 9240 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -415,6 +421,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 20 Data size: 9240 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -425,6 +432,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types int:string:string:string escape.delim \ diff --git 
ql/src/test/results/clientpositive/ppd_join_filter.q.out ql/src/test/results/clientpositive/ppd_join_filter.q.out index 051e676..8f05102 100644 --- ql/src/test/results/clientpositive/ppd_join_filter.q.out +++ ql/src/test/results/clientpositive/ppd_join_filter.q.out @@ -63,8 +63,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 250 Data size: 67750 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 250 Data size: 67750 Basic stats: COMPLETE Column stats: COMPLETE @@ -142,6 +144,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 83 Data size: 8549 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -177,8 +180,10 @@ outputColumnNames: _col0 Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE @@ -187,8 +192,10 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 83 Data size: 8549 Basic stats: COMPLETE Column stats: COMPLETE @@ -286,6 +293,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 131 Data size: 13493 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -296,6 +304,7 @@ input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types string:double:double escape.delim \ @@ -413,8 +422,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 250 Data size: 67750 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 250 Data size: 67750 Basic stats: COMPLETE Column stats: COMPLETE @@ -492,6 +503,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 83 Data size: 8549 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -527,8 +539,10 @@ outputColumnNames: _col0 Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE @@ -537,8 +551,10 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 83 Data size: 8549 Basic stats: COMPLETE Column stats: COMPLETE @@ -636,6 +652,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 131 Data size: 13493 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -646,6 +663,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types string:double:double escape.delim \ @@ -762,8 +780,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 250 Data size: 67750 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 250 Data size: 67750 Basic stats: COMPLETE Column stats: COMPLETE @@ -841,6 +861,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 83 Data size: 8549 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -875,8 +896,10 @@ outputColumnNames: _col0 Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE @@ -885,8 +908,10 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 83 Data size: 8549 Basic stats: COMPLETE Column stats: COMPLETE @@ -984,6 +1009,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 131 Data size: 13493 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -994,6 +1020,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + 
bucketing_version -1 columns _col0,_col1,_col2 columns.types string:double:double escape.delim \ @@ -1111,8 +1138,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 250 Data size: 67750 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 250 Data size: 67750 Basic stats: COMPLETE Column stats: COMPLETE @@ -1190,6 +1219,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 83 Data size: 8549 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1225,8 +1255,10 @@ outputColumnNames: _col0 Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE @@ -1235,8 +1267,10 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 83 Data size: 8549 Basic stats: COMPLETE Column stats: COMPLETE @@ -1334,6 +1368,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 131 Data size: 13493 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1344,6 +1379,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types 
string:double:double escape.delim \ diff --git ql/src/test/results/clientpositive/ppd_vc.q.out ql/src/test/results/clientpositive/ppd_vc.q.out index 2918638..9f24333 100644 --- ql/src/test/results/clientpositive/ppd_vc.q.out +++ ql/src/test/results/clientpositive/ppd_vc.q.out @@ -41,6 +41,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 666 Data size: 363636 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -51,6 +52,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types string:string:string:string escape.delim \ @@ -380,8 +382,10 @@ outputColumnNames: _col0 Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE @@ -401,8 +405,10 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 666 Data size: 368964 Basic stats: COMPLETE Column stats: PARTIAL Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 666 Data size: 368964 Basic stats: COMPLETE Column stats: PARTIAL @@ -677,6 +683,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 1053 Data size: 583362 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -701,8 +708,10 @@ TableScan GatherStats: false Reduce 
Output Operator + bucketingVersion: 2 key expressions: _col2 (type: string), _col3 (type: string), _col4 (type: bigint) null sort order: zzz + numBuckets: -1 sort order: +++ Statistics: Num rows: 1053 Data size: 583362 Basic stats: COMPLETE Column stats: PARTIAL tag: -1 @@ -743,6 +752,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 1053 Data size: 583362 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -753,6 +763,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3,_col4 columns.types string:string:string:string:bigint escape.delim \ diff --git ql/src/test/results/clientpositive/push_or.q.out ql/src/test/results/clientpositive/push_or.q.out index 5cf34ae..f97df8f 100644 --- ql/src/test/results/clientpositive/push_or.q.out +++ ql/src/test/results/clientpositive/push_or.q.out @@ -68,8 +68,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 40 Data size: 11120 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col2 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Statistics: Num rows: 40 Data size: 11120 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -185,6 +187,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 40 Data size: 11120 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -195,6 +198,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types int:string:string 
escape.delim \ diff --git ql/src/test/results/clientpositive/rand_partitionpruner2.q.out ql/src/test/results/clientpositive/rand_partitionpruner2.q.out index 8dc6a63..e80c298 100644 --- ql/src/test/results/clientpositive/rand_partitionpruner2.q.out +++ ql/src/test/results/clientpositive/rand_partitionpruner2.q.out @@ -52,6 +52,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 333 Data size: 151848 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -95,7 +96,9 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1760 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1760 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -213,6 +216,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1760 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -223,6 +227,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types struct:struct:struct:struct escape.delim \ @@ -289,6 +294,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -379,6 +385,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### diff --git ql/src/test/results/clientpositive/regexp_extract.q.out ql/src/test/results/clientpositive/regexp_extract.q.out index 20a5987..95f7c22 100644 --- ql/src/test/results/clientpositive/regexp_extract.q.out 
+++ ql/src/test/results/clientpositive/regexp_extract.q.out @@ -42,6 +42,7 @@ input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types string,string field.delim 9 @@ -55,8 +56,10 @@ predicate: (_col0 < 100) (type: boolean) Statistics: Num rows: 166 Data size: 30876 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: a + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 166 Data size: 30876 Basic stats: COMPLETE Column stats: COMPLETE @@ -123,6 +126,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 166 Data size: 44986 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -133,6 +137,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types string:string escape.delim \ @@ -299,6 +304,7 @@ input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types string,string field.delim 9 @@ -312,8 +318,10 @@ predicate: (_col0 < 100) (type: boolean) Statistics: Num rows: 166 Data size: 30876 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: a + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 166 Data size: 30876 Basic stats: COMPLETE Column stats: COMPLETE @@ -380,6 +388,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 166 Data size: 44986 
Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -390,6 +399,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types string:string escape.delim \ diff --git ql/src/test/results/clientpositive/router_join_ppr.q.out ql/src/test/results/clientpositive/router_join_ppr.q.out index 5fc39ec..832612f 100644 --- ql/src/test/results/clientpositive/router_join_ppr.q.out +++ ql/src/test/results/clientpositive/router_join_ppr.q.out @@ -55,8 +55,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE @@ -77,8 +79,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 111 Data size: 19758 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 111 Data size: 19758 Basic stats: COMPLETE Column stats: COMPLETE @@ -249,6 +253,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 55 Data size: 19580 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -259,6 +264,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 
columns.types string:string:string:string escape.delim \ @@ -372,8 +378,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 111 Data size: 19758 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 111 Data size: 19758 Basic stats: COMPLETE Column stats: COMPLETE @@ -394,8 +402,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE @@ -566,6 +576,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 55 Data size: 19580 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -576,6 +587,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types string:string:string:string escape.delim \ @@ -689,8 +701,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE @@ -711,8 +725,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 111 Data size: 19758 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator 
+ bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 111 Data size: 19758 Basic stats: COMPLETE Column stats: COMPLETE @@ -883,6 +899,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 55 Data size: 19580 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -893,6 +910,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types string:string:string:string escape.delim \ @@ -1006,8 +1024,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 111 Data size: 19758 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 111 Data size: 19758 Basic stats: COMPLETE Column stats: COMPLETE @@ -1028,8 +1048,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE @@ -1200,6 +1222,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 55 Data size: 19580 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1210,6 +1233,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types string:string:string:string escape.delim \ diff --git ql/src/test/results/clientpositive/sample1.q.out ql/src/test/results/clientpositive/sample1.q.out index 522dd98..ae1c056 100644 --- ql/src/test/results/clientpositive/sample1.q.out +++ ql/src/test/results/clientpositive/sample1.q.out @@ -50,6 +50,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 250 Data size: 68750 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -93,7 +94,9 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1744 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1744 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -161,6 +164,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 1760 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -171,6 +175,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types struct:struct:struct:struct escape.delim \ @@ -237,6 +242,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -327,6 +333,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### diff --git ql/src/test/results/clientpositive/sample5.q.out 
ql/src/test/results/clientpositive/sample5.q.out index 6c633e2..798392c 100644 --- ql/src/test/results/clientpositive/sample5.q.out +++ ql/src/test/results/clientpositive/sample5.q.out @@ -47,6 +47,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -90,7 +91,9 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -159,6 +162,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -169,6 +173,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types struct:struct escape.delim \ @@ -235,6 +240,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -325,6 +331,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### diff --git ql/src/test/results/clientpositive/sample6.q.out ql/src/test/results/clientpositive/sample6.q.out index bf00e65..ab20f27 100644 --- ql/src/test/results/clientpositive/sample6.q.out +++ ql/src/test/results/clientpositive/sample6.q.out @@ -46,6 +46,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + 
bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -89,7 +90,9 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -158,6 +161,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -168,6 +172,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types struct:struct escape.delim \ @@ -234,6 +239,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -324,6 +330,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -737,8 +744,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -806,6 +815,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -816,6 +826,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types int:string escape.delim \ @@ -1148,8 +1159,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -1217,6 +1230,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1227,6 +1241,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types int:string escape.delim \ @@ -1782,8 +1797,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -1851,6 +1868,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1861,6 +1879,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types int:string escape.delim \ @@ -2298,8 +2317,10 @@ outputColumnNames: 
_col0, _col1 Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -2367,6 +2388,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -2377,6 +2399,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types int:string escape.delim \ @@ -2754,8 +2777,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -2823,6 +2848,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -2833,6 +2859,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types int:string escape.delim \ @@ -3136,8 +3163,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col1 (type: 
string) null sort order: zz + numBuckets: -1 sort order: ++ Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -3205,6 +3234,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -3215,6 +3245,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types int:string escape.delim \ @@ -3409,8 +3440,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -3478,6 +3511,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -3488,6 +3522,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types int:string escape.delim \ diff --git ql/src/test/results/clientpositive/sample7.q.out ql/src/test/results/clientpositive/sample7.q.out index 723b607..9cec21c 100644 --- ql/src/test/results/clientpositive/sample7.q.out +++ ql/src/test/results/clientpositive/sample7.q.out @@ -48,6 +48,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 400 Data size: 38000 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false 
GlobalTableId: 1 #### A masked pattern was here #### @@ -91,7 +92,9 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -160,6 +163,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -170,6 +174,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types struct:struct escape.delim \ @@ -236,6 +241,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -326,6 +332,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### diff --git ql/src/test/results/clientpositive/sample8.q.out ql/src/test/results/clientpositive/sample8.q.out index e6050aa..0431aa7 100644 --- ql/src/test/results/clientpositive/sample8.q.out +++ ql/src/test/results/clientpositive/sample8.q.out @@ -40,8 +40,10 @@ predicate: ((((hash(key) & 2147483647) % 10) = 0) and value is not null and (((hash(key) & 2147483647) % 1) = 0)) (type: boolean) Statistics: Num rows: 125 Data size: 22250 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: key (type: string), value (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: key (type: string), value (type: string) Statistics: Num rows: 125 Data size: 22250 Basic stats: COMPLETE Column stats: COMPLETE @@ 
-56,8 +58,10 @@ predicate: ((((hash(key) & 2147483647) % 1) = 0) and value is not null and (((hash(key) & 2147483647) % 10) = 0)) (type: boolean) Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: key (type: string), value (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: key (type: string), value (type: string) Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE @@ -286,6 +290,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 49 Data size: 17444 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -296,6 +301,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types string:string:string:string escape.delim \ diff --git ql/src/test/results/clientpositive/smb_mapjoin_11.q.out ql/src/test/results/clientpositive/smb_mapjoin_11.q.out index 8ebebc7..d6efbbb 100644 --- ql/src/test/results/clientpositive/smb_mapjoin_11.q.out +++ ql/src/test/results/clientpositive/smb_mapjoin_11.q.out @@ -88,8 +88,10 @@ expressions: _col0 (type: int), _col7 (type: string) outputColumnNames: _col0, _col1 Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: a + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) tag: -1 @@ -260,8 +262,10 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 key expressions: '1' (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: '1' (type: string) tag: -1 diff --git ql/src/test/results/clientpositive/smb_mapjoin_12.q.out 
ql/src/test/results/clientpositive/smb_mapjoin_12.q.out index 5199f19..3cfb7a2 100644 --- ql/src/test/results/clientpositive/smb_mapjoin_12.q.out +++ ql/src/test/results/clientpositive/smb_mapjoin_12.q.out @@ -316,8 +316,10 @@ expressions: _col0 (type: int), concat(_col1, _col7) (type: string) outputColumnNames: _col0, _col1 Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: a + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) tag: -1 @@ -490,8 +492,10 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 key expressions: '2' (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: '2' (type: string) tag: -1 diff --git ql/src/test/results/clientpositive/smb_mapjoin_13.q.out ql/src/test/results/clientpositive/smb_mapjoin_13.q.out index 71ea244..fe2561b 100644 --- ql/src/test/results/clientpositive/smb_mapjoin_13.q.out +++ ql/src/test/results/clientpositive/smb_mapjoin_13.q.out @@ -102,8 +102,10 @@ expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + tag: -1 TopN: 10 @@ -289,8 +291,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 550 Data size: 52250 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Statistics: Num rows: 550 Data size: 52250 Basic stats: COMPLETE Column stats: NONE tag: -1 diff --git ql/src/test/results/clientpositive/sort_merge_join_desc_5.q.out ql/src/test/results/clientpositive/sort_merge_join_desc_5.q.out index 9432dcf..ea30d3d 100644 --- ql/src/test/results/clientpositive/sort_merge_join_desc_5.q.out +++ 
ql/src/test/results/clientpositive/sort_merge_join_desc_5.q.out @@ -97,7 +97,9 @@ mode: hash outputColumnNames: _col0 Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: tag: -1 value expressions: _col0 (type: bigint) diff --git ql/src/test/results/clientpositive/sort_merge_join_desc_6.q.out ql/src/test/results/clientpositive/sort_merge_join_desc_6.q.out index 1ac6c43..17f3b0b 100644 --- ql/src/test/results/clientpositive/sort_merge_join_desc_6.q.out +++ ql/src/test/results/clientpositive/sort_merge_join_desc_6.q.out @@ -179,7 +179,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -252,6 +254,7 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -262,6 +265,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0 columns.types bigint escape.delim \ diff --git ql/src/test/results/clientpositive/sort_merge_join_desc_7.q.out ql/src/test/results/clientpositive/sort_merge_join_desc_7.q.out index 6f062f8..51bb46b 100644 --- ql/src/test/results/clientpositive/sort_merge_join_desc_7.q.out +++ ql/src/test/results/clientpositive/sort_merge_join_desc_7.q.out @@ -259,7 +259,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -382,6 +384,7 @@ outputColumnNames: _col0 
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -392,6 +395,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0 columns.types bigint escape.delim \ diff --git ql/src/test/results/clientpositive/spark/auto_join_reordering_values.q.out ql/src/test/results/clientpositive/spark/auto_join_reordering_values.q.out index b8c467f..7b5f92e 100644 --- ql/src/test/results/clientpositive/spark/auto_join_reordering_values.q.out +++ ql/src/test/results/clientpositive/spark/auto_join_reordering_values.q.out @@ -133,8 +133,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col1 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col1 (type: string) Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE @@ -211,8 +213,10 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE @@ -288,8 +292,10 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE @@ -365,8 +371,10 @@ 
outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE @@ -442,8 +450,10 @@ outputColumnNames: _col0 Statistics: Num rows: 100 Data size: 288 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 100 Data size: 288 Basic stats: COMPLETE Column stats: NONE @@ -515,8 +525,10 @@ outputColumnNames: _col0, _col2, _col3, _col4 Statistics: Num rows: 1 Data size: 39 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 39 Basic stats: COMPLETE Column stats: NONE @@ -535,8 +547,10 @@ outputColumnNames: _col2, _col3, _col4, _col5 Statistics: Num rows: 1 Data size: 42 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col2 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col2 (type: int) Statistics: Num rows: 1 Data size: 42 Basic stats: COMPLETE Column stats: NONE @@ -555,8 +569,10 @@ outputColumnNames: _col3, _col4, _col5 Statistics: Num rows: 1 Data size: 46 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col3 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col3 (type: int) Statistics: Num rows: 1 Data size: 46 Basic stats: COMPLETE Column stats: NONE diff --git 
ql/src/test/results/clientpositive/spark/auto_sortmerge_join_1.q.out ql/src/test/results/clientpositive/spark/auto_sortmerge_join_1.q.out index e6112b2..4221707 100644 --- ql/src/test/results/clientpositive/spark/auto_sortmerge_join_1.q.out +++ ql/src/test/results/clientpositive/spark/auto_sortmerge_join_1.q.out @@ -168,7 +168,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -400,7 +402,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -632,7 +636,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 diff --git ql/src/test/results/clientpositive/spark/auto_sortmerge_join_12.q.out ql/src/test/results/clientpositive/spark/auto_sortmerge_join_12.q.out index b36a3a2..c74acfe 100644 --- ql/src/test/results/clientpositive/spark/auto_sortmerge_join_12.q.out +++ ql/src/test/results/clientpositive/spark/auto_sortmerge_join_12.q.out @@ -198,7 +198,9 @@ Select Operator Statistics: Num rows: 3 Data size: 1700 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 1 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 3 Data size: 1700 Basic stats: PARTIAL Column stats: NONE tag: 0 @@ -275,8 +277,10 @@ outputColumnNames: _col0 Statistics: Num rows: 240 Data size: 116240 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + 
bucketingVersion: 1 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 240 Data size: 116240 Basic stats: PARTIAL Column stats: NONE @@ -405,8 +409,10 @@ outputColumnNames: _col0 Statistics: Num rows: 3 Data size: 1700 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 1 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 3 Data size: 1700 Basic stats: PARTIAL Column stats: NONE @@ -484,8 +490,10 @@ outputColumnNames: _col0 Statistics: Num rows: 2 Data size: 1140 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 1 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 2 Data size: 1140 Basic stats: PARTIAL Column stats: NONE @@ -564,7 +572,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -625,7 +635,9 @@ Select Operator Statistics: Num rows: 264 Data size: 127864 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 264 Data size: 127864 Basic stats: PARTIAL Column stats: NONE tag: 1 diff --git ql/src/test/results/clientpositive/spark/auto_sortmerge_join_2.q.out ql/src/test/results/clientpositive/spark/auto_sortmerge_join_2.q.out index 7071892..fc55bb0 100644 --- ql/src/test/results/clientpositive/spark/auto_sortmerge_join_2.q.out +++ ql/src/test/results/clientpositive/spark/auto_sortmerge_join_2.q.out @@ -150,7 +150,9 @@ outputColumnNames: _col0 
Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -382,7 +384,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 diff --git ql/src/test/results/clientpositive/spark/auto_sortmerge_join_3.q.out ql/src/test/results/clientpositive/spark/auto_sortmerge_join_3.q.out index 095c796..8b5e8d45 100644 --- ql/src/test/results/clientpositive/spark/auto_sortmerge_join_3.q.out +++ ql/src/test/results/clientpositive/spark/auto_sortmerge_join_3.q.out @@ -150,7 +150,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -331,7 +333,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -512,7 +516,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 diff --git ql/src/test/results/clientpositive/spark/auto_sortmerge_join_4.q.out ql/src/test/results/clientpositive/spark/auto_sortmerge_join_4.q.out index 52323b1..1c6ba4a 100644 --- 
ql/src/test/results/clientpositive/spark/auto_sortmerge_join_4.q.out +++ ql/src/test/results/clientpositive/spark/auto_sortmerge_join_4.q.out @@ -166,7 +166,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -347,7 +349,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -528,7 +532,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 diff --git ql/src/test/results/clientpositive/spark/auto_sortmerge_join_5.q.out ql/src/test/results/clientpositive/spark/auto_sortmerge_join_5.q.out index 99f061a..2b88368 100644 --- ql/src/test/results/clientpositive/spark/auto_sortmerge_join_5.q.out +++ ql/src/test/results/clientpositive/spark/auto_sortmerge_join_5.q.out @@ -125,7 +125,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -294,7 +296,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -463,7 +467,9 @@ outputColumnNames: 
_col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE tag: -1 diff --git ql/src/test/results/clientpositive/spark/auto_sortmerge_join_7.q.out ql/src/test/results/clientpositive/spark/auto_sortmerge_join_7.q.out index b831189..40f28b5 100644 --- ql/src/test/results/clientpositive/spark/auto_sortmerge_join_7.q.out +++ ql/src/test/results/clientpositive/spark/auto_sortmerge_join_7.q.out @@ -185,7 +185,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -421,7 +423,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -657,7 +661,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 diff --git ql/src/test/results/clientpositive/spark/auto_sortmerge_join_8.q.out ql/src/test/results/clientpositive/spark/auto_sortmerge_join_8.q.out index 42a4cec..2b85c75 100644 --- ql/src/test/results/clientpositive/spark/auto_sortmerge_join_8.q.out +++ ql/src/test/results/clientpositive/spark/auto_sortmerge_join_8.q.out @@ -185,7 +185,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + 
numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -421,7 +423,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -657,7 +661,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 diff --git ql/src/test/results/clientpositive/spark/bucket2.q.out ql/src/test/results/clientpositive/spark/bucket2.q.out index b4b8f2f..3c252ff 100644 --- ql/src/test/results/clientpositive/spark/bucket2.q.out +++ ql/src/test/results/clientpositive/spark/bucket2.q.out @@ -43,8 +43,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: a + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE diff --git ql/src/test/results/clientpositive/spark/bucket3.q.out ql/src/test/results/clientpositive/spark/bucket3.q.out index 56590d7..9c01cbe8 100644 --- ql/src/test/results/clientpositive/spark/bucket3.q.out +++ ql/src/test/results/clientpositive/spark/bucket3.q.out @@ -43,8 +43,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: a + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 500 Data size: 5312 
Basic stats: COMPLETE Column stats: NONE diff --git ql/src/test/results/clientpositive/spark/bucket4.q.out ql/src/test/results/clientpositive/spark/bucket4.q.out index 29485df..4424480 100644 --- ql/src/test/results/clientpositive/spark/bucket4.q.out +++ ql/src/test/results/clientpositive/spark/bucket4.q.out @@ -43,8 +43,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: a + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE diff --git ql/src/test/results/clientpositive/spark/bucket4.q.out_spark ql/src/test/results/clientpositive/spark/bucket4.q.out_spark index 8f4de0a..d9037a6 100644 --- ql/src/test/results/clientpositive/spark/bucket4.q.out_spark +++ ql/src/test/results/clientpositive/spark/bucket4.q.out_spark @@ -43,8 +43,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: a + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE diff --git ql/src/test/results/clientpositive/spark/bucket5.q.out ql/src/test/results/clientpositive/spark/bucket5.q.out index 784e959..c9a69ce 100644 --- ql/src/test/results/clientpositive/spark/bucket5.q.out +++ ql/src/test/results/clientpositive/spark/bucket5.q.out @@ -56,8 +56,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: a + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 500 
Data size: 5312 Basic stats: COMPLETE Column stats: NONE @@ -131,8 +133,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: a + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE diff --git ql/src/test/results/clientpositive/spark/bucket_map_join_1.q.out ql/src/test/results/clientpositive/spark/bucket_map_join_1.q.out index fd6956a..406c530 100644 --- ql/src/test/results/clientpositive/spark/bucket_map_join_1.q.out +++ ql/src/test/results/clientpositive/spark/bucket_map_join_1.q.out @@ -173,7 +173,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE tag: -1 diff --git ql/src/test/results/clientpositive/spark/bucket_map_join_2.q.out ql/src/test/results/clientpositive/spark/bucket_map_join_2.q.out index 5fc4aea..215babb 100644 --- ql/src/test/results/clientpositive/spark/bucket_map_join_2.q.out +++ ql/src/test/results/clientpositive/spark/bucket_map_join_2.q.out @@ -173,7 +173,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE tag: -1 diff --git ql/src/test/results/clientpositive/spark/bucketmapjoin1.q.out ql/src/test/results/clientpositive/spark/bucketmapjoin1.q.out index fe61e1f..1029459 100644 --- ql/src/test/results/clientpositive/spark/bucketmapjoin1.q.out +++ ql/src/test/results/clientpositive/spark/bucketmapjoin1.q.out @@ -375,8 +375,10 @@ 
outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 27500 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 27500 Basic stats: COMPLETE Column stats: NONE @@ -452,8 +454,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 149 Data size: 58120 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 149 Data size: 58120 Basic stats: PARTIAL Column stats: NONE @@ -753,8 +757,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 27500 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 27500 Basic stats: COMPLETE Column stats: NONE @@ -830,8 +836,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 149 Data size: 58120 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 149 Data size: 58120 Basic stats: PARTIAL Column stats: NONE diff --git ql/src/test/results/clientpositive/spark/bucketmapjoin10.q.out ql/src/test/results/clientpositive/spark/bucketmapjoin10.q.out index b77eb49..996fe05 100644 --- ql/src/test/results/clientpositive/spark/bucketmapjoin10.q.out +++ ql/src/test/results/clientpositive/spark/bucketmapjoin10.q.out @@ -317,7 +317,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output 
Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 diff --git ql/src/test/results/clientpositive/spark/bucketmapjoin11.q.out ql/src/test/results/clientpositive/spark/bucketmapjoin11.q.out index 51e1fe5..90e04c6 100644 --- ql/src/test/results/clientpositive/spark/bucketmapjoin11.q.out +++ ql/src/test/results/clientpositive/spark/bucketmapjoin11.q.out @@ -331,7 +331,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -705,7 +707,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 diff --git ql/src/test/results/clientpositive/spark/bucketmapjoin12.q.out ql/src/test/results/clientpositive/spark/bucketmapjoin12.q.out index b101a0d..6433447 100644 --- ql/src/test/results/clientpositive/spark/bucketmapjoin12.q.out +++ ql/src/test/results/clientpositive/spark/bucketmapjoin12.q.out @@ -237,7 +237,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -496,7 +498,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 diff --git 
ql/src/test/results/clientpositive/spark/bucketmapjoin13.q.out ql/src/test/results/clientpositive/spark/bucketmapjoin13.q.out index e17e04c..4113c63 100644 --- ql/src/test/results/clientpositive/spark/bucketmapjoin13.q.out +++ ql/src/test/results/clientpositive/spark/bucketmapjoin13.q.out @@ -202,7 +202,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -518,7 +520,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -794,7 +798,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -1070,7 +1076,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE tag: -1 diff --git ql/src/test/results/clientpositive/spark/bucketmapjoin2.q.out ql/src/test/results/clientpositive/spark/bucketmapjoin2.q.out index cf975b2..e2def51 100644 --- ql/src/test/results/clientpositive/spark/bucketmapjoin2.q.out +++ ql/src/test/results/clientpositive/spark/bucketmapjoin2.q.out @@ -145,8 +145,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 149 Data size: 58120 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: 
int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 149 Data size: 58120 Basic stats: PARTIAL Column stats: NONE @@ -223,8 +225,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 78 Data size: 30620 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 78 Data size: 30620 Basic stats: PARTIAL Column stats: NONE @@ -530,8 +534,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 149 Data size: 58120 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 149 Data size: 58120 Basic stats: PARTIAL Column stats: NONE @@ -608,8 +614,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 78 Data size: 30620 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 78 Data size: 30620 Basic stats: PARTIAL Column stats: NONE @@ -934,8 +942,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 149 Data size: 58120 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 149 Data size: 58120 Basic stats: PARTIAL Column stats: NONE @@ -1012,8 +1022,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 156 Data size: 61240 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort 
order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 156 Data size: 61240 Basic stats: PARTIAL Column stats: NONE diff --git ql/src/test/results/clientpositive/spark/bucketmapjoin3.q.out ql/src/test/results/clientpositive/spark/bucketmapjoin3.q.out index 7bf2668..e379457 100644 --- ql/src/test/results/clientpositive/spark/bucketmapjoin3.q.out +++ ql/src/test/results/clientpositive/spark/bucketmapjoin3.q.out @@ -169,8 +169,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 78 Data size: 30620 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 78 Data size: 30620 Basic stats: PARTIAL Column stats: NONE @@ -247,8 +249,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 149 Data size: 58120 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 149 Data size: 58120 Basic stats: PARTIAL Column stats: NONE @@ -554,8 +558,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 78 Data size: 30620 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 78 Data size: 30620 Basic stats: PARTIAL Column stats: NONE @@ -632,8 +638,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 149 Data size: 58120 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 149 Data 
size: 58120 Basic stats: PARTIAL Column stats: NONE diff --git ql/src/test/results/clientpositive/spark/bucketmapjoin4.q.out ql/src/test/results/clientpositive/spark/bucketmapjoin4.q.out index 110cceb..a693448 100644 --- ql/src/test/results/clientpositive/spark/bucketmapjoin4.q.out +++ ql/src/test/results/clientpositive/spark/bucketmapjoin4.q.out @@ -163,8 +163,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 27500 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 27500 Basic stats: COMPLETE Column stats: NONE @@ -240,8 +242,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 27500 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 27500 Basic stats: COMPLETE Column stats: NONE @@ -528,8 +532,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 27500 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 27500 Basic stats: COMPLETE Column stats: NONE @@ -605,8 +611,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 27500 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 27500 Basic stats: COMPLETE Column stats: NONE diff --git 
ql/src/test/results/clientpositive/spark/bucketmapjoin7.q.out ql/src/test/results/clientpositive/spark/bucketmapjoin7.q.out index 3a797e1..f79684b 100644 --- ql/src/test/results/clientpositive/spark/bucketmapjoin7.q.out +++ ql/src/test/results/clientpositive/spark/bucketmapjoin7.q.out @@ -200,8 +200,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 75 Data size: 30250 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Statistics: Num rows: 75 Data size: 30250 Basic stats: PARTIAL Column stats: NONE tag: -1 diff --git ql/src/test/results/clientpositive/spark/bucketmapjoin7.q.out_spark ql/src/test/results/clientpositive/spark/bucketmapjoin7.q.out_spark index e46a0a3..90e6730 100644 --- ql/src/test/results/clientpositive/spark/bucketmapjoin7.q.out_spark +++ ql/src/test/results/clientpositive/spark/bucketmapjoin7.q.out_spark @@ -197,8 +197,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 75 Data size: 30250 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Statistics: Num rows: 75 Data size: 30250 Basic stats: PARTIAL Column stats: NONE tag: -1 diff --git ql/src/test/results/clientpositive/spark/bucketmapjoin8.q.out ql/src/test/results/clientpositive/spark/bucketmapjoin8.q.out index 604c561..1d58060 100644 --- ql/src/test/results/clientpositive/spark/bucketmapjoin8.q.out +++ ql/src/test/results/clientpositive/spark/bucketmapjoin8.q.out @@ -203,7 +203,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -477,7 +479,9 @@ outputColumnNames: 
_col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 diff --git ql/src/test/results/clientpositive/spark/bucketmapjoin9.q.out ql/src/test/results/clientpositive/spark/bucketmapjoin9.q.out index 262ece1..6060891 100644 --- ql/src/test/results/clientpositive/spark/bucketmapjoin9.q.out +++ ql/src/test/results/clientpositive/spark/bucketmapjoin9.q.out @@ -205,7 +205,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 @@ -501,7 +503,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE tag: -1 diff --git ql/src/test/results/clientpositive/spark/disable_merge_for_bucketing.q.out ql/src/test/results/clientpositive/spark/disable_merge_for_bucketing.q.out index 51fe882..5171c64 100644 --- ql/src/test/results/clientpositive/spark/disable_merge_for_bucketing.q.out +++ ql/src/test/results/clientpositive/spark/disable_merge_for_bucketing.q.out @@ -43,8 +43,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: a + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE diff --git ql/src/test/results/clientpositive/spark/disable_merge_for_bucketing.q.out_spark 
ql/src/test/results/clientpositive/spark/disable_merge_for_bucketing.q.out_spark index 5506079..0c0f02a 100644 --- ql/src/test/results/clientpositive/spark/disable_merge_for_bucketing.q.out_spark +++ ql/src/test/results/clientpositive/spark/disable_merge_for_bucketing.q.out_spark @@ -43,8 +43,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: a + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE diff --git ql/src/test/results/clientpositive/spark/filter_join_breaktask.q.out ql/src/test/results/clientpositive/spark/filter_join_breaktask.q.out index e46192a..c91f544 100644 --- ql/src/test/results/clientpositive/spark/filter_join_breaktask.q.out +++ ql/src/test/results/clientpositive/spark/filter_join_breaktask.q.out @@ -72,8 +72,10 @@ outputColumnNames: _col0 Statistics: Num rows: 25 Data size: 211 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 25 Data size: 211 Basic stats: COMPLETE Column stats: NONE @@ -149,8 +151,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 25 Data size: 211 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col1 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col1 (type: string) Statistics: Num rows: 25 Data size: 211 Basic stats: COMPLETE Column stats: NONE @@ -227,8 +231,10 @@ outputColumnNames: _col0 Statistics: Num rows: 25 Data size: 211 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) 
null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 25 Data size: 211 Basic stats: COMPLETE Column stats: NONE @@ -300,8 +306,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 27 Data size: 232 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col1 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col1 (type: int) Statistics: Num rows: 27 Data size: 232 Basic stats: COMPLETE Column stats: NONE diff --git ql/src/test/results/clientpositive/spark/groupby_map_ppr.q.out ql/src/test/results/clientpositive/spark/groupby_map_ppr.q.out index 9e6da9d..1139cf8 100644 --- ql/src/test/results/clientpositive/spark/groupby_map_ppr.q.out +++ ql/src/test/results/clientpositive/spark/groupby_map_ppr.q.out @@ -63,8 +63,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE diff --git ql/src/test/results/clientpositive/spark/groupby_map_ppr_multi_distinct.q.out ql/src/test/results/clientpositive/spark/groupby_map_ppr_multi_distinct.q.out index 95323d8..df882b8 100644 --- ql/src/test/results/clientpositive/spark/groupby_map_ppr_multi_distinct.q.out +++ ql/src/test/results/clientpositive/spark/groupby_map_ppr_multi_distinct.q.out @@ -63,8 +63,10 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) null sort order: zzz + 
numBuckets: -1 sort order: +++ Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE diff --git ql/src/test/results/clientpositive/spark/groupby_ppr.q.out ql/src/test/results/clientpositive/spark/groupby_ppr.q.out index 96d44d8..beec5a3 100644 --- ql/src/test/results/clientpositive/spark/groupby_ppr.q.out +++ ql/src/test/results/clientpositive/spark/groupby_ppr.q.out @@ -56,8 +56,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE diff --git ql/src/test/results/clientpositive/spark/groupby_ppr_multi_distinct.q.out ql/src/test/results/clientpositive/spark/groupby_ppr_multi_distinct.q.out index 9cfe52a..dd8f29d 100644 --- ql/src/test/results/clientpositive/spark/groupby_ppr_multi_distinct.q.out +++ ql/src/test/results/clientpositive/spark/groupby_ppr_multi_distinct.q.out @@ -56,8 +56,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) null sort order: zzz + numBuckets: -1 sort order: +++ Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE @@ -338,8 +340,10 @@ outputColumnNames: $f0, $f1, $f2 Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: $f0 (type: string), $f1 (type: string), $f2 (type: string) null sort order: zzz + numBuckets: -1 sort order: +++ 
Map-reduce partition columns: $f0 (type: string) Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE diff --git ql/src/test/results/clientpositive/spark/groupby_sort_1_23.q.out ql/src/test/results/clientpositive/spark/groupby_sort_1_23.q.out index 6339517..cd8cb71 100644 --- ql/src/test/results/clientpositive/spark/groupby_sort_1_23.q.out +++ ql/src/test/results/clientpositive/spark/groupby_sort_1_23.q.out @@ -283,8 +283,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE @@ -1108,8 +1110,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE @@ -1329,8 +1333,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: double) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: double) Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE @@ -1562,8 +1568,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: 
double) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: double) Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE @@ -2239,8 +2247,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: double) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: double) Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE @@ -2488,8 +2498,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE @@ -2572,8 +2584,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE @@ -2811,8 +2825,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE @@ -2896,8 +2912,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: 
string), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE @@ -3006,8 +3024,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE @@ -3087,8 +3107,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE diff --git ql/src/test/results/clientpositive/spark/groupby_sort_skew_1_23.q.out ql/src/test/results/clientpositive/spark/groupby_sort_skew_1_23.q.out index 00f4d53..69f8165 100644 --- ql/src/test/results/clientpositive/spark/groupby_sort_skew_1_23.q.out +++ ql/src/test/results/clientpositive/spark/groupby_sort_skew_1_23.q.out @@ -284,8 +284,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: rand() (type: double) Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE @@ -361,8 +363,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 
(type: string), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE @@ -1129,8 +1133,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: rand() (type: double) Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE @@ -1206,8 +1212,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE @@ -1370,8 +1378,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: double) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: rand() (type: double) Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE @@ -1447,8 +1457,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: double) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: double) Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE 
@@ -1623,8 +1635,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: double) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: rand() (type: double) Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE @@ -1700,8 +1714,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: double) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: double) Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE @@ -2320,8 +2336,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: double) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: rand() (type: double) Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE @@ -2397,8 +2415,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: double) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: double) Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE @@ -2588,8 +2608,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE @@ -2672,8 
+2694,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE @@ -2912,8 +2936,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE @@ -2997,8 +3023,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: rand() (type: double) Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE @@ -3107,8 +3135,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE @@ -3126,8 +3156,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: 
Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE @@ -3208,8 +3240,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: rand() (type: double) Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE @@ -3285,8 +3319,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE diff --git ql/src/test/results/clientpositive/spark/infer_bucket_sort_num_buckets.q.out ql/src/test/results/clientpositive/spark/infer_bucket_sort_num_buckets.q.out index 9efcf98..ea8d7b5 100644 --- ql/src/test/results/clientpositive/spark/infer_bucket_sort_num_buckets.q.out +++ ql/src/test/results/clientpositive/spark/infer_bucket_sort_num_buckets.q.out @@ -6,7 +6,7 @@ POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_table_n0 -PREHOOK: query: EXPLAIN +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE test_table_n0 PARTITION (ds = '2008-04-08', hr) SELECT key2, value, cast(hr as int) FROM (SELECT if ((key % 3) < 2, 0, 1) as key2, value, (key % 2) as hr @@ -18,7 +18,7 @@ PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 PREHOOK: Output: default@test_table_n0@ds=2008-04-08 -POSTHOOK: query: EXPLAIN +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE test_table_n0 PARTITION (ds = '2008-04-08', hr) SELECT key2, value, cast(hr as int) FROM (SELECT if ((key % 3) < 2, 0, 1) as key2, value, (key % 2) as hr @@ -48,46 
+48,190 @@ alias: srcpart filterExpr: (ds = '2008-04-08') (type: boolean) Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + GatherStats: false Select Operator expressions: if(((key % 3) < 2), 0, 1) (type: int), value (type: string), UDFToInteger((key % 2)) (type: int) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + tag: -1 value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int) + auto parallelism: false Execution mode: vectorized + Path -> Alias: + hdfs://### HDFS PATH ### [a:srcpart] + hdfs://### HDFS PATH ### [a:srcpart] + Path -> Partition: + hdfs://### HDFS PATH ### + Partition + base file name: hr=11 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-08 + hr 11 + properties: + COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} + bucket_count -1 + column.name.delimiter , + columns key,value + columns.comments 'default','default' + columns.types string:string +#### A masked pattern was here #### + location hdfs://### HDFS PATH ### + name default.srcpart + numFiles 1 + numRows 500 + partition_columns ds/hr + partition_columns.types string:string + rawDataSize 5312 + serialization.ddl struct srcpart { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count -1 + bucketing_version 2 + column.name.delimiter , + columns key,value + columns.comments 'default','default' + columns.types string:string +#### A masked pattern was here #### + location hdfs://### HDFS PATH ### + name default.srcpart + partition_columns ds/hr + partition_columns.types string:string + serialization.ddl struct srcpart { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.srcpart + name: default.srcpart + hdfs://### HDFS PATH ### + Partition + base file name: hr=12 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-08 + hr 12 + properties: + COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} + bucket_count -1 + column.name.delimiter , + columns key,value + columns.comments 'default','default' + columns.types string:string +#### A masked pattern was here #### + location hdfs://### HDFS PATH ### + name default.srcpart + numFiles 1 + numRows 500 + partition_columns ds/hr + partition_columns.types string:string + rawDataSize 5312 + serialization.ddl struct srcpart { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count -1 + bucketing_version 2 + column.name.delimiter , + columns key,value + columns.comments 'default','default' + columns.types string:string +#### A masked pattern was here #### + 
location hdfs://### HDFS PATH ### + name default.srcpart + partition_columns ds/hr + partition_columns.types string:string + serialization.ddl struct srcpart { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.srcpart + name: default.srcpart + Truncated Path -> Alias: + /srcpart/ds=2008-04-08/hr=11 [a:srcpart] + /srcpart/ds=2008-04-08/hr=12 [a:srcpart] Reducer 2 Execution mode: vectorized + Needs Tagging: false Reduce Operator Tree: Select Operator expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), CAST( VALUE._col2 AS STRING) (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col2 (type: string) null sort order: a + numBuckets: -1 sort order: + Map-reduce partition columns: _col2 (type: string) Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + tag: -1 value expressions: _col0 (type: int), _col1 (type: string) + auto parallelism: false Reducer 3 Execution mode: vectorized + Needs Tagging: false Reduce Operator Tree: Select Operator expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), KEY._col2 (type: string) outputColumnNames: _col0, _col1, _col2 File Output Operator compressed: false + GlobalTableId: 1 + directory: hdfs://### HDFS PATH ### Dp Sort State: PARTITION_SORTED + NumFilesPerFileSink: 1 + Static Partition Specification: ds=2008-04-08/ Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + Stats Publishing Key Prefix: hdfs://### HDFS PATH ### table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count -1 + bucketing_version 2 + 
column.name.delimiter , + columns key,value + columns.comments + columns.types int:string +#### A masked pattern was here #### + location hdfs://### HDFS PATH ### + name default.test_table_n0 + partition_columns ds/hr + partition_columns.types string:string + serialization.ddl struct test_table_n0 { i32 key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.test_table_n0 + TotalFiles: 1 + GatherStats: true + MultiFileSpray: false Stage: Stage-0 Move Operator @@ -96,15 +240,33 @@ ds 2008-04-08 hr replace: true + source: hdfs://### HDFS PATH ### table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count -1 + bucketing_version 2 + column.name.delimiter , + columns key,value + columns.comments + columns.types int:string +#### A masked pattern was here #### + location hdfs://### HDFS PATH ### + name default.test_table_n0 + partition_columns ds/hr + partition_columns.types string:string + serialization.ddl struct test_table_n0 { i32 key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.test_table_n0 Stage: Stage-2 Stats Work Basic Stats Work: + Stats Aggregation Key Prefix: hdfs://### HDFS PATH ### PREHOOK: query: INSERT OVERWRITE TABLE test_table_n0 PARTITION (ds = '2008-04-08', hr) SELECT key2, value, cast(hr as int) FROM diff --git ql/src/test/results/clientpositive/spark/join17.q.out ql/src/test/results/clientpositive/spark/join17.q.out index 2f2e32b..53be323 100644 --- ql/src/test/results/clientpositive/spark/join17.q.out +++ ql/src/test/results/clientpositive/spark/join17.q.out @@ -53,8 +53,10 @@ outputColumnNames: _col0, _col1 
Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE @@ -131,8 +133,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE diff --git ql/src/test/results/clientpositive/spark/join32_lessSize.q.out ql/src/test/results/clientpositive/spark/join32_lessSize.q.out index b11cba3..301e37a 100644 --- ql/src/test/results/clientpositive/spark/join32_lessSize.q.out +++ ql/src/test/results/clientpositive/spark/join32_lessSize.q.out @@ -167,8 +167,10 @@ Position of Big Table: 0 Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col1 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col1 (type: string) Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE @@ -248,8 +250,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE @@ -663,8 +667,10 @@ Position of Big Table: 0 Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE Reduce Output 
Operator + bucketingVersion: 2 key expressions: _col1 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col1 (type: string) Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE @@ -742,8 +748,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE @@ -820,8 +828,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE @@ -1234,8 +1244,10 @@ Position of Big Table: 0 Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col2 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col2 (type: string) Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE @@ -1314,8 +1326,10 @@ outputColumnNames: _col0 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE @@ -1636,8 +1650,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE Reduce 
Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE @@ -1714,8 +1730,10 @@ outputColumnNames: _col0 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE @@ -1791,8 +1809,10 @@ outputColumnNames: _col0 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE @@ -1865,8 +1885,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col1 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col1 (type: string) Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE diff --git ql/src/test/results/clientpositive/spark/join34.q.out ql/src/test/results/clientpositive/spark/join34.q.out index fc1a369..331d404 100644 --- ql/src/test/results/clientpositive/spark/join34.q.out +++ ql/src/test/results/clientpositive/spark/join34.q.out @@ -71,8 +71,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 148 Data size: 1572 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + 
Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 296 Data size: 3144 Basic stats: COMPLETE Column stats: NONE @@ -149,8 +151,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 148 Data size: 1572 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 296 Data size: 3144 Basic stats: COMPLETE Column stats: NONE @@ -227,8 +231,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 23 Data size: 175 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 23 Data size: 175 Basic stats: COMPLETE Column stats: NONE diff --git ql/src/test/results/clientpositive/spark/join35.q.out ql/src/test/results/clientpositive/spark/join35.q.out index 87ee934..a51502e 100644 --- ql/src/test/results/clientpositive/spark/join35.q.out +++ ql/src/test/results/clientpositive/spark/join35.q.out @@ -78,8 +78,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 148 Data size: 1572 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 148 Data size: 1572 Basic stats: COMPLETE Column stats: NONE @@ -159,8 +161,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 148 Data size: 1572 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 148 Data size: 1572 Basic stats: COMPLETE Column stats: 
NONE @@ -237,8 +241,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 23 Data size: 175 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 23 Data size: 175 Basic stats: COMPLETE Column stats: NONE @@ -310,8 +316,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 74 Data size: 786 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 148 Data size: 1572 Basic stats: COMPLETE Column stats: NONE @@ -377,8 +385,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 74 Data size: 786 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 148 Data size: 1572 Basic stats: COMPLETE Column stats: NONE diff --git ql/src/test/results/clientpositive/spark/join9.q.out ql/src/test/results/clientpositive/spark/join9.q.out index ee92802..080d67f 100644 --- ql/src/test/results/clientpositive/spark/join9.q.out +++ ql/src/test/results/clientpositive/spark/join9.q.out @@ -57,8 +57,10 @@ outputColumnNames: _col0 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE @@ -135,8 +137,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE 
Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE diff --git ql/src/test/results/clientpositive/spark/join_filters_overlap.q.out ql/src/test/results/clientpositive/spark/join_filters_overlap.q.out index 8abb78f..007e826 100644 --- ql/src/test/results/clientpositive/spark/join_filters_overlap.q.out +++ ql/src/test/results/clientpositive/spark/join_filters_overlap.q.out @@ -49,8 +49,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 3 Data size: 18 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 3 Data size: 18 Basic stats: COMPLETE Column stats: NONE @@ -127,8 +129,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 6 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 6 Basic stats: COMPLETE Column stats: NONE @@ -205,8 +209,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 6 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 6 Basic stats: COMPLETE Column stats: NONE @@ -385,8 +391,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 6 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + 
numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 6 Basic stats: COMPLETE Column stats: NONE @@ -458,8 +466,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 3 Data size: 18 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 3 Data size: 18 Basic stats: COMPLETE Column stats: NONE @@ -536,8 +546,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 6 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 6 Basic stats: COMPLETE Column stats: NONE @@ -716,8 +728,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 6 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 6 Basic stats: COMPLETE Column stats: NONE @@ -789,8 +803,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 3 Data size: 18 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 3 Data size: 18 Basic stats: COMPLETE Column stats: NONE @@ -867,8 +883,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 6 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + 
Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 6 Basic stats: COMPLETE Column stats: NONE @@ -1044,8 +1062,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 3 Data size: 18 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 3 Data size: 18 Basic stats: COMPLETE Column stats: NONE @@ -1117,8 +1137,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 3 Data size: 18 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 3 Data size: 18 Basic stats: COMPLETE Column stats: NONE @@ -1195,8 +1217,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 6 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 6 Basic stats: COMPLETE Column stats: NONE @@ -1273,8 +1297,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 6 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 6 Basic stats: COMPLETE Column stats: NONE @@ -1446,8 +1472,10 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 3 Data size: 18 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + 
Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 3 Data size: 18 Basic stats: COMPLETE Column stats: NONE @@ -1524,8 +1552,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 6 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 6 Basic stats: COMPLETE Column stats: NONE @@ -1602,8 +1632,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 6 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 6 Basic stats: COMPLETE Column stats: NONE @@ -1680,8 +1712,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 6 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 6 Basic stats: COMPLETE Column stats: NONE diff --git ql/src/test/results/clientpositive/spark/load_dyn_part8.q.out ql/src/test/results/clientpositive/spark/load_dyn_part8.q.out index cb54858..b9c14cb 100644 --- ql/src/test/results/clientpositive/spark/load_dyn_part8.q.out +++ ql/src/test/results/clientpositive/spark/load_dyn_part8.q.out @@ -85,8 +85,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col2 (type: string), _col3 (type: string) null sort order: aa + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col2 (type: string), _col3 (type: string) Statistics: Num rows: 666 Data 
size: 7075 Basic stats: COMPLETE Column stats: NONE @@ -313,8 +315,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col2 (type: string) null sort order: a + numBuckets: -1 sort order: + Map-reduce partition columns: _col2 (type: string) Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE diff --git ql/src/test/results/clientpositive/spark/louter_join_ppr.q.out ql/src/test/results/clientpositive/spark/louter_join_ppr.q.out index 4821f85..8c85a93 100644 --- ql/src/test/results/clientpositive/spark/louter_join_ppr.q.out +++ ql/src/test/results/clientpositive/spark/louter_join_ppr.q.out @@ -60,8 +60,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE @@ -138,8 +140,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE @@ -391,8 +395,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE @@ -520,8 +526,10 @@ 
outputColumnNames: _col0, _col1 Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE @@ -722,8 +730,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE @@ -800,8 +810,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE @@ -1053,8 +1065,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE @@ -1182,8 +1196,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE diff --git 
ql/src/test/results/clientpositive/spark/optimize_nullscan.q.out ql/src/test/results/clientpositive/spark/optimize_nullscan.q.out index ae69a48..2436ee1 100644 --- ql/src/test/results/clientpositive/spark/optimize_nullscan.q.out +++ ql/src/test/results/clientpositive/spark/optimize_nullscan.q.out @@ -74,8 +74,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE @@ -180,8 +182,10 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE @@ -254,8 +258,10 @@ Number of rows: 0 Statistics: Num rows: 0 Data size: 0 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE @@ -572,7 +578,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -648,7 +656,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 
8 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -995,7 +1005,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE tag: 0 @@ -1068,7 +1080,9 @@ Number of rows: 0 Statistics: Num rows: 0 Data size: 0 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE tag: 1 @@ -1461,8 +1475,10 @@ predicate: false (type: boolean) Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: key (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: key (type: string) Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE @@ -1532,8 +1548,10 @@ predicate: false (type: boolean) Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: key (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: key (type: string) Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE @@ -1619,8 +1637,10 @@ predicate: false (type: boolean) Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: value (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: value (type: string) Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE @@ -1691,8 +1711,10 @@ predicate: false (type: boolean) Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + 
bucketingVersion: 2 key expressions: value (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: value (type: string) Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE @@ -1842,7 +1864,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 diff --git ql/src/test/results/clientpositive/spark/outer_join_ppr.q.out ql/src/test/results/clientpositive/spark/outer_join_ppr.q.out index 38007f7..7abc6bc 100644 --- ql/src/test/results/clientpositive/spark/outer_join_ppr.q.out +++ ql/src/test/results/clientpositive/spark/outer_join_ppr.q.out @@ -60,8 +60,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE @@ -138,8 +140,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE @@ -391,8 +395,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 55 Data size: 
584 Basic stats: COMPLETE Column stats: NONE @@ -469,8 +475,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE diff --git ql/src/test/results/clientpositive/spark/parquet_vectorization_0.q.out ql/src/test/results/clientpositive/spark/parquet_vectorization_0.q.out index ae93297..03d9aad 100644 --- ql/src/test/results/clientpositive/spark/parquet_vectorization_0.q.out +++ ql/src/test/results/clientpositive/spark/parquet_vectorization_0.q.out @@ -1229,7 +1229,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -30421,8 +30423,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 12288 Data size: 593563 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 12288 Data size: 593563 Basic stats: COMPLETE Column stats: NONE @@ -30498,8 +30502,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 6144 Data size: 296781 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col1 (type: string) null sort order: z + numBuckets: -1 sort order: + Statistics: Num rows: 6144 Data size: 296781 Basic stats: COMPLETE Column stats: NONE tag: -1 diff --git ql/src/test/results/clientpositive/spark/pcr.q.out ql/src/test/results/clientpositive/spark/pcr.q.out index 
52a0e0e..6dd27eb 100644 --- ql/src/test/results/clientpositive/spark/pcr.q.out +++ ql/src/test/results/clientpositive/spark/pcr.q.out @@ -91,8 +91,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 13 Data size: 104 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col2 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Statistics: Num rows: 13 Data size: 104 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -305,8 +307,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -603,8 +607,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 13 Data size: 104 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col2 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Statistics: Num rows: 13 Data size: 104 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -817,8 +823,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 10 Data size: 80 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col2 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Statistics: Num rows: 10 Data size: 80 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -1035,8 +1043,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 16 Data size: 128 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col2 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Statistics: Num rows: 16 Data size: 128 Basic stats: COMPLETE 
Column stats: NONE tag: -1 @@ -1312,8 +1322,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 33 Data size: 264 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col2 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Statistics: Num rows: 33 Data size: 264 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -1596,8 +1608,10 @@ outputColumnNames: _col0 Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -1793,8 +1807,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -2032,8 +2048,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 60 Data size: 480 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Statistics: Num rows: 60 Data size: 480 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -2344,8 +2362,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string) null sort order: zzz + numBuckets: -1 sort order: +++ Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -2550,8 +2570,10 @@ outputColumnNames: _col0, _col1 Statistics: 
Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE @@ -2628,8 +2650,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE @@ -2702,8 +2726,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -2843,8 +2869,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE @@ -2921,8 +2949,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE @@ -2995,8 +3025,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 22 Data size: 176 Basic 
stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -3146,8 +3178,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 48 Data size: 384 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string) null sort order: zzz + numBuckets: -1 sort order: +++ Statistics: Num rows: 48 Data size: 384 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -3493,8 +3527,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 30 Data size: 240 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string) null sort order: zzz + numBuckets: -1 sort order: +++ Statistics: Num rows: 30 Data size: 240 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -4311,8 +4347,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -4467,8 +4505,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col2 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -4674,8 +4714,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + 
bucketingVersion: 2 key expressions: _col0 (type: string), _col2 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE tag: -1 diff --git ql/src/test/results/clientpositive/spark/ppd_join_filter.q.out ql/src/test/results/clientpositive/spark/ppd_join_filter.q.out index a78553d..6af3333 100644 --- ql/src/test/results/clientpositive/spark/ppd_join_filter.q.out +++ ql/src/test/results/clientpositive/spark/ppd_join_filter.q.out @@ -65,8 +65,10 @@ outputColumnNames: _col0 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE @@ -145,8 +147,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE @@ -263,8 +267,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE @@ -379,8 +385,10 @@ outputColumnNames: _col0 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce 
partition columns: _col0 (type: string) Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE @@ -459,8 +467,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE @@ -577,8 +587,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE @@ -692,8 +704,10 @@ outputColumnNames: _col0 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE @@ -771,8 +785,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE @@ -889,8 +905,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort 
order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE @@ -1005,8 +1023,10 @@ outputColumnNames: _col0 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE @@ -1085,8 +1105,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE @@ -1203,8 +1225,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE diff --git ql/src/test/results/clientpositive/spark/reduce_deduplicate.q.out ql/src/test/results/clientpositive/spark/reduce_deduplicate.q.out index ad3cbc2..607c3bc 100644 --- ql/src/test/results/clientpositive/spark/reduce_deduplicate.q.out +++ ql/src/test/results/clientpositive/spark/reduce_deduplicate.q.out @@ -41,8 +41,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: a + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num 
rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE @@ -308,8 +310,10 @@ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col1 (type: string) null sort order: a + numBuckets: -1 sort order: + Map-reduce partition columns: _col1 (type: string) Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE diff --git ql/src/test/results/clientpositive/spark/router_join_ppr.q.out ql/src/test/results/clientpositive/spark/router_join_ppr.q.out index b2d9bba..1c638a4 100644 --- ql/src/test/results/clientpositive/spark/router_join_ppr.q.out +++ ql/src/test/results/clientpositive/spark/router_join_ppr.q.out @@ -60,8 +60,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE @@ -138,8 +140,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE @@ -391,8 +395,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE @@ -520,8 
+526,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE @@ -722,8 +730,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE @@ -800,8 +810,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE @@ -1053,8 +1065,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE @@ -1182,8 +1196,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE 
diff --git ql/src/test/results/clientpositive/spark/sample10.q.out ql/src/test/results/clientpositive/spark/sample10.q.out index a8e5d97..b3073d7 100644 --- ql/src/test/results/clientpositive/spark/sample10.q.out +++ ql/src/test/results/clientpositive/spark/sample10.q.out @@ -87,8 +87,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 20 Data size: 120 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 20 Data size: 120 Basic stats: COMPLETE Column stats: NONE @@ -319,8 +321,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 10 Data size: 60 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Statistics: Num rows: 10 Data size: 60 Basic stats: COMPLETE Column stats: NONE tag: -1 diff --git ql/src/test/results/clientpositive/spark/sample6.q.out ql/src/test/results/clientpositive/spark/sample6.q.out index 40e8aaf..261cec3 100644 --- ql/src/test/results/clientpositive/spark/sample6.q.out +++ ql/src/test/results/clientpositive/spark/sample6.q.out @@ -495,8 +495,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -913,8 +915,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Statistics: Num rows: 500 Data 
size: 5301 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -1554,8 +1558,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -2077,8 +2083,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -2540,8 +2548,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -2929,8 +2939,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -3209,8 +3221,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE tag: -1 diff --git 
ql/src/test/results/clientpositive/spark/sample8.q.out ql/src/test/results/clientpositive/spark/sample8.q.out index 18bbfe3..6343192 100644 --- ql/src/test/results/clientpositive/spark/sample8.q.out +++ ql/src/test/results/clientpositive/spark/sample8.q.out @@ -45,8 +45,10 @@ predicate: ((((hash(key) & 2147483647) % 10) = 0) and value is not null and (((hash(key) & 2147483647) % 1) = 0)) (type: boolean) Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: key (type: string), value (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: key (type: string), value (type: string) Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE @@ -118,8 +120,10 @@ predicate: ((((hash(key) & 2147483647) % 1) = 0) and value is not null and (((hash(key) & 2147483647) % 10) = 0)) (type: boolean) Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: key (type: string), value (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: key (type: string), value (type: string) Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE diff --git ql/src/test/results/clientpositive/spark/smb_mapjoin_11.q.out ql/src/test/results/clientpositive/spark/smb_mapjoin_11.q.out index ef10c41..edbb912 100644 --- ql/src/test/results/clientpositive/spark/smb_mapjoin_11.q.out +++ ql/src/test/results/clientpositive/spark/smb_mapjoin_11.q.out @@ -94,8 +94,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: a + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 550 Data size: 5843 Basic 
stats: COMPLETE Column stats: NONE diff --git ql/src/test/results/clientpositive/spark/smb_mapjoin_12.q.out ql/src/test/results/clientpositive/spark/smb_mapjoin_12.q.out index 1d2040f..11e1bd0 100644 --- ql/src/test/results/clientpositive/spark/smb_mapjoin_12.q.out +++ ql/src/test/results/clientpositive/spark/smb_mapjoin_12.q.out @@ -110,8 +110,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1650 Data size: 17529 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: a + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1650 Data size: 17529 Basic stats: COMPLETE Column stats: NONE @@ -342,8 +344,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 3392 Data size: 36194 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: a + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 3392 Data size: 36194 Basic stats: COMPLETE Column stats: NONE diff --git ql/src/test/results/clientpositive/spark/smb_mapjoin_13.q.out ql/src/test/results/clientpositive/spark/smb_mapjoin_13.q.out index 2c427b7..26e77a4d 100644 --- ql/src/test/results/clientpositive/spark/smb_mapjoin_13.q.out +++ ql/src/test/results/clientpositive/spark/smb_mapjoin_13.q.out @@ -109,8 +109,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -367,8 +369,10 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 
key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE tag: -1 diff --git ql/src/test/results/clientpositive/spark/smb_mapjoin_15.q.out ql/src/test/results/clientpositive/spark/smb_mapjoin_15.q.out index 79b6929..6891e69 100644 --- ql/src/test/results/clientpositive/spark/smb_mapjoin_15.q.out +++ ql/src/test/results/clientpositive/spark/smb_mapjoin_15.q.out @@ -90,8 +90,10 @@ Statistics: Num rows: ###Masked### Data size: ###Masked### Basic stats: COMPLETE Column stats: NONE BucketMapJoin: true Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Statistics: Num rows: ###Masked### Data size: ###Masked### Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -326,8 +328,10 @@ Statistics: Num rows: ###Masked### Data size: ###Masked### Basic stats: COMPLETE Column stats: NONE BucketMapJoin: true Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Statistics: Num rows: ###Masked### Data size: ###Masked### Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -510,8 +514,10 @@ Statistics: Num rows: ###Masked### Data size: ###Masked### Basic stats: COMPLETE Column stats: NONE BucketMapJoin: true Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Statistics: Num rows: ###Masked### Data size: ###Masked### Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -685,8 +691,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: ###Masked### Data size: ###Masked### Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col2 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: int), _col2 (type: string) 
Statistics: Num rows: ###Masked### Data size: ###Masked### Basic stats: COMPLETE Column stats: NONE @@ -767,8 +775,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: ###Masked### Data size: ###Masked### Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col2 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: int), _col2 (type: string) Statistics: Num rows: ###Masked### Data size: ###Masked### Basic stats: COMPLETE Column stats: NONE @@ -845,8 +855,10 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: ###Masked### Data size: ###Masked### Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Statistics: Num rows: ###Masked### Data size: ###Masked### Basic stats: COMPLETE Column stats: NONE tag: -1 diff --git ql/src/test/results/clientpositive/spark/transform_ppr1.q.out ql/src/test/results/clientpositive/spark/transform_ppr1.q.out index e7459bf..f9982c8 100644 --- ql/src/test/results/clientpositive/spark/transform_ppr1.q.out +++ ql/src/test/results/clientpositive/spark/transform_ppr1.q.out @@ -69,8 +69,10 @@ predicate: ((_col1 < 100) and (_col0 = '2008-04-08')) (type: boolean) Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col1 (type: string) null sort order: a + numBuckets: -1 sort order: + Map-reduce partition columns: _col1 (type: string) Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE diff --git ql/src/test/results/clientpositive/spark/transform_ppr2.q.out ql/src/test/results/clientpositive/spark/transform_ppr2.q.out index 8693b60..fd86b47 100644 --- ql/src/test/results/clientpositive/spark/transform_ppr2.q.out +++ 
ql/src/test/results/clientpositive/spark/transform_ppr2.q.out @@ -68,8 +68,10 @@ predicate: (_col1 < 100) (type: boolean) Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col1 (type: string) null sort order: a + numBuckets: -1 sort order: + Map-reduce partition columns: _col1 (type: string) Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE diff --git ql/src/test/results/clientpositive/spark/union24.q.out ql/src/test/results/clientpositive/spark/union24.q.out index ac14fe2..2150181 100644 --- ql/src/test/results/clientpositive/spark/union24.q.out +++ ql/src/test/results/clientpositive/spark/union24.q.out @@ -397,8 +397,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 103 Data size: 494 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 103 Data size: 494 Basic stats: COMPLETE Column stats: NONE @@ -804,8 +806,10 @@ outputColumnNames: _col0 Statistics: Num rows: 103 Data size: 494 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 103 Data size: 494 Basic stats: COMPLETE Column stats: NONE @@ -881,8 +885,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 103 Data size: 494 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 103 Data size: 494 Basic stats: COMPLETE Column stats: NONE @@ -1282,8 +1288,10 @@ outputColumnNames: _col0 Statistics: 
Num rows: 103 Data size: 494 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 103 Data size: 494 Basic stats: COMPLETE Column stats: NONE @@ -1359,8 +1367,10 @@ outputColumnNames: _col0 Statistics: Num rows: 103 Data size: 494 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 103 Data size: 494 Basic stats: COMPLETE Column stats: NONE @@ -1439,8 +1449,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 113 Data size: 543 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 113 Data size: 543 Basic stats: COMPLETE Column stats: NONE diff --git ql/src/test/results/clientpositive/spark/union_ppr.q.out ql/src/test/results/clientpositive/spark/union_ppr.q.out index e3f926a..0b6b563 100644 --- ql/src/test/results/clientpositive/spark/union_ppr.q.out +++ ql/src/test/results/clientpositive/spark/union_ppr.q.out @@ -55,9 +55,11 @@ outputColumnNames: _col0, _col1, _col3 Statistics: Num rows: 666 Data size: 7074 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string) null sort order: zzz + numBuckets: -1 sort order: +++ Statistics: Num rows: 666 Data size: 7074 Basic stats: COMPLETE
Column stats: NONE tag: -1 auto parallelism: false @@ -186,9 +188,11 @@ outputColumnNames: _col0, _col1, _col3 Statistics: Num rows: 666 Data size: 7074 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string) null sort order: zzz + numBuckets: -1 sort order: +++ Statistics: Num rows: 666 Data size: 7074 Basic stats: COMPLETE Column stats: NONE tag: -1 auto parallelism: false diff --git ql/src/test/results/clientpositive/spark/vectorization_0.q.out ql/src/test/results/clientpositive/spark/vectorization_0.q.out index 068def4..4e779ea 100644 --- ql/src/test/results/clientpositive/spark/vectorization_0.q.out +++ ql/src/test/results/clientpositive/spark/vectorization_0.q.out @@ -1289,7 +1289,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -30481,8 +30483,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 12288 Data size: 2907994 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 12288 Data size: 2907994 Basic stats: COMPLETE Column stats: NONE @@ -30558,8 +30562,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 6144 Data size: 1453997 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 key expressions: _col1 (type: string) null sort order: z + numBuckets: -1 sort order: +
Statistics: Num rows: 6144 Data size: 1453997 Basic stats: COMPLETE Column stats: NONE tag: -1 diff --git ql/src/test/results/clientpositive/stats0.q.out ql/src/test/results/clientpositive/stats0.q.out index 6ce92aa..112247b 100644 --- ql/src/test/results/clientpositive/stats0.q.out +++ ql/src/test/results/clientpositive/stats0.q.out @@ -38,6 +38,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -81,7 +82,9 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -148,6 +151,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -158,6 +162,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types struct:struct escape.delim \ @@ -1460,6 +1465,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -1503,7 +1509,9 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -1570,6 
+1578,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1580,6 +1589,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types struct:struct escape.delim \ @@ -1646,6 +1656,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1736,6 +1747,7 @@ TableScan GatherStats: false File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### diff --git ql/src/test/results/clientpositive/temp_table_alter_partition_coltype.q.out ql/src/test/results/clientpositive/temp_table_alter_partition_coltype.q.out index 740a270..2b729ae 100644 --- ql/src/test/results/clientpositive/temp_table_alter_partition_coltype.q.out +++ ql/src/test/results/clientpositive/temp_table_alter_partition_coltype.q.out @@ -165,7 +165,9 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -289,6 +291,7 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -299,6 +302,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0 columns.types bigint escape.delim \ @@ -371,7 +375,9 @@ 
outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -495,6 +501,7 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -505,6 +512,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0 columns.types bigint escape.delim \ diff --git ql/src/test/results/clientpositive/temp_table_display_colstats_tbllvl.q.out ql/src/test/results/clientpositive/temp_table_display_colstats_tbllvl.q.out index 505e83c..436af6b 100644 --- ql/src/test/results/clientpositive/temp_table_display_colstats_tbllvl.q.out +++ ql/src/test/results/clientpositive/temp_table_display_colstats_tbllvl.q.out @@ -248,7 +248,9 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1480 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 1 Data size: 1480 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -315,6 +317,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 1512 Basic stats: COMPLETE Column stats: NONE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -325,6 +328,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types struct:struct:struct escape.delim \ diff --git 
ql/src/test/results/clientpositive/timestamp.q.out ql/src/test/results/clientpositive/timestamp.q.out index 16749a1..90a46f5 100644 --- ql/src/test/results/clientpositive/timestamp.q.out +++ ql/src/test/results/clientpositive/timestamp.q.out @@ -120,8 +120,10 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: boolean) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: boolean) Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE @@ -144,8 +146,10 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: boolean) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: boolean) Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE @@ -216,6 +220,7 @@ outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -226,6 +231,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0 columns.types timestamp escape.delim \ diff --git ql/src/test/results/clientpositive/topnkey_grouping_sets.q.out ql/src/test/results/clientpositive/topnkey_grouping_sets.q.out new file mode 100644 index 0000000..0cc87b1 --- /dev/null +++ ql/src/test/results/clientpositive/topnkey_grouping_sets.q.out @@ -0,0 +1,1235 @@ +PREHOOK: query: CREATE TABLE t_test_grouping_sets( + a int, + b int, + c int +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@t_test_grouping_sets 
+POSTHOOK: query: CREATE TABLE t_test_grouping_sets( + a int, + b int, + c int +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@t_test_grouping_sets +PREHOOK: query: INSERT INTO t_test_grouping_sets VALUES +(NULL, NULL, NULL), +(5, 2, 3), +(10, 11, 12), +(NULL, NULL, NULL), +(NULL, NULL, NULL), +(6, 2, 1), +(7, 8, 4), (7, 8, 4), (7, 8, 4), +(5, 1, 2), (5, 1, 2), (5, 1, 2), +(NULL, NULL, NULL) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@t_test_grouping_sets +POSTHOOK: query: INSERT INTO t_test_grouping_sets VALUES +(NULL, NULL, NULL), +(5, 2, 3), +(10, 11, 12), +(NULL, NULL, NULL), +(NULL, NULL, NULL), +(6, 2, 1), +(7, 8, 4), (7, 8, 4), (7, 8, 4), +(5, 1, 2), (5, 1, 2), (5, 1, 2), +(NULL, NULL, NULL) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@t_test_grouping_sets +POSTHOOK: Lineage: t_test_grouping_sets.a SCRIPT [] +POSTHOOK: Lineage: t_test_grouping_sets.b SCRIPT [] +POSTHOOK: Lineage: t_test_grouping_sets.c SCRIPT [] +PREHOOK: query: EXPLAIN +SELECT a, b, grouping(a), grouping(b), grouping(a, b) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY a,b LIMIT 3 +PREHOOK: type: QUERY +PREHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### +POSTHOOK: query: EXPLAIN +SELECT a, b, grouping(a), grouping(b), grouping(a, b) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY a,b LIMIT 3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: t_test_grouping_sets + Statistics: Num rows: 13 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: a 
(type: int), b (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 13 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + keys: _col0 (type: int), _col1 (type: int), 0L (type: bigint) + minReductionHashAggr: 0.99 + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 26 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint) + null sort order: zzz + sort order: +++ + Map-reduce partition columns: _col0 (type: int), _col1 (type: int), _col2 (type: bigint) + Statistics: Num rows: 26 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE + TopN Hash Memory Usage: 0.1 + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: int), KEY._col1 (type: int), KEY._col2 (type: bigint) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 26 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: _col0 (type: int), _col1 (type: int), grouping(_col2, 1L) (type: bigint), grouping(_col2, 0L) (type: bigint), grouping(_col2, 1L, 0L) (type: bigint) + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 26 Data size: 776 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + Reduce Output Operator + key expressions: _col0 (type: int), _col1 (type: int) + null sort order: zz + sort order: ++ + Statistics: Num rows: 26 Data size: 776 Basic stats: COMPLETE Column stats: COMPLETE + TopN Hash Memory Usage: 0.1 + value expressions: _col2 (type: bigint), _col3 (type: bigint), _col4 (type: bigint) + 
Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: int), VALUE._col0 (type: bigint), VALUE._col1 (type: bigint), VALUE._col2 (type: bigint) + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 26 Data size: 776 Basic stats: COMPLETE Column stats: COMPLETE + Limit + Number of rows: 3 + Statistics: Num rows: 3 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 3 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: 3 + Processor Tree: + ListSink + +PREHOOK: query: SELECT a, b, grouping(a), grouping(b), grouping(a, b) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY a,b LIMIT 3 +PREHOOK: type: QUERY +PREHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### +POSTHOOK: query: SELECT a, b, grouping(a), grouping(b), grouping(a, b) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY a,b LIMIT 3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### +5 1 0 0 0 +5 2 0 0 0 +5 NULL 0 1 1 +PREHOOK: query: SELECT a, b, grouping(a), grouping(b), grouping(a, b) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY a,b LIMIT 3 +PREHOOK: type: QUERY +PREHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### +POSTHOOK: query: SELECT a, b, grouping(a), grouping(b), grouping(a, b) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY a,b LIMIT 3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here 
#### +5 1 0 0 0 +5 2 0 0 0 +5 NULL 0 1 1 +PREHOOK: query: EXPLAIN +SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY a,b LIMIT 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### +POSTHOOK: query: EXPLAIN +SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY a,b LIMIT 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: t_test_grouping_sets + Statistics: Num rows: 13 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: a (type: int), b (type: int) + outputColumnNames: a, b + Statistics: Num rows: 13 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + keys: a (type: int), b (type: int), 0L (type: bigint) + minReductionHashAggr: 0.99 + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 26 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint) + null sort order: zzz + sort order: +++ + Map-reduce partition columns: _col0 (type: int), _col1 (type: int), _col2 (type: bigint) + Statistics: Num rows: 26 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE + TopN Hash Memory Usage: 0.1 + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: int), KEY._col1 (type: int), KEY._col2 (type: bigint) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 26 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE + pruneGroupingSetId: true + File Output Operator + compressed: false + table: + input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + Reduce Output Operator + key expressions: _col0 (type: int), _col1 (type: int) + null sort order: zz + sort order: ++ + Statistics: Num rows: 26 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE + TopN Hash Memory Usage: 0.1 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 26 Data size: 152 Basic stats: COMPLETE Column stats: COMPLETE + Limit + Number of rows: 10 + Statistics: Num rows: 10 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 10 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: 10 + Processor Tree: + ListSink + +PREHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY a,b LIMIT 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### +POSTHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY a,b LIMIT 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### +5 1 +5 2 +5 NULL +6 2 +6 NULL +7 8 +7 NULL +10 11 +10 NULL +NULL 1 +PREHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY a,b LIMIT 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@t_test_grouping_sets +#### A 
masked pattern was here #### +POSTHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY a,b LIMIT 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### +5 1 +5 2 +5 NULL +6 2 +6 NULL +7 8 +7 NULL +10 11 +10 NULL +NULL 1 +PREHOOK: query: EXPLAIN +SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY a,b LIMIT 3 +PREHOOK: type: QUERY +PREHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### +POSTHOOK: query: EXPLAIN +SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY a,b LIMIT 3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: t_test_grouping_sets + Statistics: Num rows: 13 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: a (type: int), b (type: int) + outputColumnNames: a, b + Statistics: Num rows: 13 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + keys: a (type: int), b (type: int), 0L (type: bigint) + minReductionHashAggr: 0.99 + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 26 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint) + null sort order: zzz + sort order: +++ + Map-reduce partition columns: _col0 (type: int), _col1 (type: int), _col2 (type: bigint) + Statistics: Num rows: 26 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE + TopN Hash Memory Usage: 0.1 + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: int), KEY._col1 (type: int), 
KEY._col2 (type: bigint) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 26 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE + pruneGroupingSetId: true + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + Reduce Output Operator + key expressions: _col0 (type: int), _col1 (type: int) + null sort order: zz + sort order: ++ + Statistics: Num rows: 26 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE + TopN Hash Memory Usage: 0.1 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 26 Data size: 152 Basic stats: COMPLETE Column stats: COMPLETE + Limit + Number of rows: 3 + Statistics: Num rows: 3 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 3 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: 3 + Processor Tree: + ListSink + +PREHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY a,b LIMIT 3 +PREHOOK: type: QUERY +PREHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### +POSTHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY a,b LIMIT 3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### +5 1 +5 2 +5 
NULL +PREHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY a,b LIMIT 3 +PREHOOK: type: QUERY +PREHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### +POSTHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY a,b LIMIT 3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### +5 1 +5 2 +5 NULL +PREHOOK: query: EXPLAIN +SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY a,b LIMIT 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### +POSTHOOK: query: EXPLAIN +SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY a,b LIMIT 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: t_test_grouping_sets + Statistics: Num rows: 13 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: a (type: int), b (type: int) + outputColumnNames: a, b + Statistics: Num rows: 13 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + keys: a (type: int), b (type: int), 0L (type: bigint) + minReductionHashAggr: 0.99 + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 26 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint) + null sort order: zzz + sort order: +++ + Map-reduce partition columns: _col0 (type: int), _col1 (type: int), _col2 (type: bigint) + Statistics: Num rows: 26 Data size: 360 Basic stats: 
COMPLETE Column stats: COMPLETE + TopN Hash Memory Usage: 0.1 + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: int), KEY._col1 (type: int), KEY._col2 (type: bigint) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 26 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE + pruneGroupingSetId: true + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + Reduce Output Operator + key expressions: _col0 (type: int), _col1 (type: int) + null sort order: zz + sort order: ++ + Statistics: Num rows: 26 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE + TopN Hash Memory Usage: 0.1 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 26 Data size: 152 Basic stats: COMPLETE Column stats: COMPLETE + Limit + Number of rows: 1 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY a,b LIMIT 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### +POSTHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS 
((a,b), (a), (b), ()) ORDER BY a,b LIMIT 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### +5 1 +PREHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY a,b LIMIT 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### +POSTHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY a,b LIMIT 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### +5 1 +PREHOOK: query: EXPLAIN +SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a), (b)) ORDER BY a,b LIMIT 7 +PREHOOK: type: QUERY +PREHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### +POSTHOOK: query: EXPLAIN +SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a), (b)) ORDER BY a,b LIMIT 7 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: t_test_grouping_sets + Statistics: Num rows: 13 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: a (type: int), b (type: int) + outputColumnNames: a, b + Statistics: Num rows: 13 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + keys: a (type: int), b (type: int), 0L (type: bigint) + minReductionHashAggr: 0.99 + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 13 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint) + null sort order: zzz + sort order: +++ + Map-reduce partition columns: _col0 
(type: int), _col1 (type: int), _col2 (type: bigint) + Statistics: Num rows: 13 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE + TopN Hash Memory Usage: 0.1 + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: int), KEY._col1 (type: int), KEY._col2 (type: bigint) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 13 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE + pruneGroupingSetId: true + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + Reduce Output Operator + key expressions: _col0 (type: int), _col1 (type: int) + null sort order: zz + sort order: ++ + Statistics: Num rows: 13 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE + TopN Hash Memory Usage: 0.1 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 13 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE + Limit + Number of rows: 7 + Statistics: Num rows: 7 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 7 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: 7 + Processor Tree: + ListSink + +PREHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a), (b)) ORDER BY a,b LIMIT 7 +PREHOOK: type: QUERY +PREHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was 
here #### +POSTHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a), (b)) ORDER BY a,b LIMIT 7 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### +5 NULL +6 NULL +7 NULL +10 NULL +NULL 1 +NULL 2 +NULL 8 +PREHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a), (b)) ORDER BY a,b LIMIT 7 +PREHOOK: type: QUERY +PREHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### +POSTHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a), (b)) ORDER BY a,b LIMIT 7 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### +5 NULL +6 NULL +7 NULL +10 NULL +NULL 1 +NULL 2 +NULL 8 +PREHOOK: query: EXPLAIN +SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a)) ORDER BY a DESC, b ASC LIMIT 7 +PREHOOK: type: QUERY +PREHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### +POSTHOOK: query: EXPLAIN +SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a)) ORDER BY a DESC, b ASC LIMIT 7 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: t_test_grouping_sets + Statistics: Num rows: 13 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: a (type: int), b (type: int) + outputColumnNames: a, b + Statistics: Num rows: 13 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + keys: a (type: int), b (type: int), 0L (type: bigint) + minReductionHashAggr: 0.99 + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 13 Data size: 184 Basic stats: COMPLETE Column stats: 
COMPLETE + Reduce Output Operator + key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint) + null sort order: zzz + sort order: -++ + Map-reduce partition columns: _col0 (type: int), _col1 (type: int), _col2 (type: bigint) + Statistics: Num rows: 13 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE + TopN Hash Memory Usage: 0.1 + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: int), KEY._col1 (type: int), KEY._col2 (type: bigint) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 13 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE + pruneGroupingSetId: true + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + Reduce Output Operator + key expressions: _col0 (type: int), _col1 (type: int) + null sort order: zz + sort order: -+ + Statistics: Num rows: 13 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE + TopN Hash Memory Usage: 0.1 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 13 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE + Limit + Number of rows: 7 + Statistics: Num rows: 7 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 7 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: 7 + Processor Tree: + ListSink + +PREHOOK: query: 
SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a)) ORDER BY a DESC, b ASC LIMIT 7 +PREHOOK: type: QUERY +PREHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### +POSTHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a)) ORDER BY a DESC, b ASC LIMIT 7 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### +10 11 +10 NULL +7 8 +7 NULL +6 2 +6 NULL +5 1 +PREHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a)) ORDER BY a DESC, b ASC LIMIT 7 +PREHOOK: type: QUERY +PREHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### +POSTHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a)) ORDER BY a DESC, b ASC LIMIT 7 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### +10 11 +10 NULL +7 8 +7 NULL +6 2 +6 NULL +5 1 +PREHOOK: query: EXPLAIN +SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (b)) ORDER BY a DESC, b ASC LIMIT 7 +PREHOOK: type: QUERY +PREHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### +POSTHOOK: query: EXPLAIN +SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (b)) ORDER BY a DESC, b ASC LIMIT 7 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: t_test_grouping_sets + Statistics: Num rows: 13 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: a (type: int), b (type: int) + outputColumnNames: a, b + Statistics: Num rows: 13 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator 
+ keys: a (type: int), b (type: int), 0L (type: bigint) + minReductionHashAggr: 0.99 + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 13 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint) + null sort order: zzz + sort order: -++ + Map-reduce partition columns: _col0 (type: int), _col1 (type: int), _col2 (type: bigint) + Statistics: Num rows: 13 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE + TopN Hash Memory Usage: 0.1 + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: int), KEY._col1 (type: int), KEY._col2 (type: bigint) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 13 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE + pruneGroupingSetId: true + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + Reduce Output Operator + key expressions: _col0 (type: int), _col1 (type: int) + null sort order: zz + sort order: -+ + Statistics: Num rows: 13 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE + TopN Hash Memory Usage: 0.1 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 13 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE + Limit + Number of rows: 7 + Statistics: Num rows: 7 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 7 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output 
format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: 7 + Processor Tree: + ListSink + +PREHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (b)) ORDER BY a DESC, b ASC LIMIT 7 +PREHOOK: type: QUERY +PREHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### +POSTHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (b)) ORDER BY a DESC, b ASC LIMIT 7 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### +10 11 +7 8 +6 2 +5 1 +5 2 +NULL 1 +NULL 2 +PREHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (b)) ORDER BY a DESC, b ASC LIMIT 7 +PREHOOK: type: QUERY +PREHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### +POSTHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (b)) ORDER BY a DESC, b ASC LIMIT 7 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### +10 11 +7 8 +6 2 +5 1 +5 2 +NULL 1 +NULL 2 +PREHOOK: query: EXPLAIN +SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), ()) ORDER BY a DESC, b ASC LIMIT 7 +PREHOOK: type: QUERY +PREHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### +POSTHOOK: query: EXPLAIN +SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), ()) ORDER BY a DESC, b ASC LIMIT 7 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: t_test_grouping_sets + Statistics: Num rows: 13 Data size: 80 Basic stats: COMPLETE 
Column stats: COMPLETE + Select Operator + expressions: a (type: int), b (type: int) + outputColumnNames: a, b + Statistics: Num rows: 13 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + keys: a (type: int), b (type: int), 0L (type: bigint) + minReductionHashAggr: 0.99 + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 13 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint) + null sort order: zzz + sort order: -++ + Map-reduce partition columns: _col0 (type: int), _col1 (type: int), _col2 (type: bigint) + Statistics: Num rows: 13 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE + TopN Hash Memory Usage: 0.1 + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: int), KEY._col1 (type: int), KEY._col2 (type: bigint) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 13 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE + pruneGroupingSetId: true + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + Reduce Output Operator + key expressions: _col0 (type: int), _col1 (type: int) + null sort order: zz + sort order: -+ + Statistics: Num rows: 13 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE + TopN Hash Memory Usage: 0.1 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 13 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE + Limit + Number of rows: 7 + Statistics: Num rows: 7 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE 
+ File Output Operator + compressed: false + Statistics: Num rows: 7 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: 7 + Processor Tree: + ListSink + +PREHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), ()) ORDER BY a DESC, b ASC LIMIT 7 +PREHOOK: type: QUERY +PREHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### +POSTHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), ()) ORDER BY a DESC, b ASC LIMIT 7 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### +10 11 +7 8 +6 2 +5 1 +5 2 +NULL NULL +NULL NULL +PREHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), ()) ORDER BY a DESC, b ASC LIMIT 7 +PREHOOK: type: QUERY +PREHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### +POSTHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), ()) ORDER BY a DESC, b ASC LIMIT 7 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### +10 11 +7 8 +6 2 +5 1 +5 2 +NULL NULL +NULL NULL +PREHOOK: query: EXPLAIN +SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((), (a,b)) ORDER BY a DESC, b ASC LIMIT 7 +PREHOOK: type: QUERY +PREHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### +POSTHOOK: query: EXPLAIN +SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((), (a,b)) ORDER BY a DESC, b ASC LIMIT 7 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: 
Stage-1 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: t_test_grouping_sets + Statistics: Num rows: 13 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: a (type: int), b (type: int) + outputColumnNames: a, b + Statistics: Num rows: 13 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + keys: a (type: int), b (type: int), 0L (type: bigint) + minReductionHashAggr: 0.99 + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 13 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint) + null sort order: zzz + sort order: -++ + Map-reduce partition columns: _col0 (type: int), _col1 (type: int), _col2 (type: bigint) + Statistics: Num rows: 13 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE + TopN Hash Memory Usage: 0.1 + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: int), KEY._col1 (type: int), KEY._col2 (type: bigint) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 13 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE + pruneGroupingSetId: true + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + Reduce Output Operator + key expressions: _col0 (type: int), _col1 (type: int) + null sort order: zz + sort order: -+ + Statistics: Num rows: 13 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE + TopN Hash Memory Usage: 0.1 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: int) + 
outputColumnNames: _col0, _col1 + Statistics: Num rows: 13 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE + Limit + Number of rows: 7 + Statistics: Num rows: 7 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 7 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: 7 + Processor Tree: + ListSink + +PREHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((), (a,b)) ORDER BY a DESC, b ASC LIMIT 7 +PREHOOK: type: QUERY +PREHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### +POSTHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((), (a,b)) ORDER BY a DESC, b ASC LIMIT 7 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### +10 11 +7 8 +6 2 +5 1 +5 2 +NULL NULL +NULL NULL +PREHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((), (a,b)) ORDER BY a DESC, b ASC LIMIT 7 +PREHOOK: type: QUERY +PREHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### +POSTHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((), (a,b)) ORDER BY a DESC, b ASC LIMIT 7 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### +10 11 +7 8 +6 2 +5 1 +5 2 +NULL NULL +NULL NULL +PREHOOK: query: EXPLAIN +SELECT tmp.a, tmp.b, max(tmp.c) FROM + (SELECT a, b, c FROM t_test_grouping_sets GROUP BY a, b, c) tmp +GROUP BY tmp.a, tmp.b GROUPING SETS ((), (tmp.a,tmp.b)) ORDER BY tmp.a DESC, tmp.b ASC LIMIT 7 +PREHOOK: type: QUERY +PREHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### 
+POSTHOOK: query: EXPLAIN +SELECT tmp.a, tmp.b, max(tmp.c) FROM + (SELECT a, b, c FROM t_test_grouping_sets GROUP BY a, b, c) tmp +GROUP BY tmp.a, tmp.b GROUPING SETS ((), (tmp.a,tmp.b)) ORDER BY tmp.a DESC, tmp.b ASC LIMIT 7 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-3 depends on stages: Stage-2 + Stage-0 depends on stages: Stage-3 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: t_test_grouping_sets + Statistics: Num rows: 13 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: a (type: int), b (type: int), c (type: int) + outputColumnNames: a, b, c + Statistics: Num rows: 13 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + keys: a (type: int), b (type: int), c (type: int) + minReductionHashAggr: 0.99 + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 6 Data size: 60 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int) + null sort order: zzz + sort order: +++ + Map-reduce partition columns: _col0 (type: int), _col1 (type: int), _col2 (type: int) + Statistics: Num rows: 6 Data size: 60 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: int), KEY._col1 (type: int), KEY._col2 (type: int) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 6 Data size: 60 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + aggregations: max(_col2) + keys: _col0 (type: int), _col1 (type: int), 0L (type: bigint) + minReductionHashAggr: 0.99 + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 6 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + File 
Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + Reduce Output Operator + key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint) + null sort order: zzz + sort order: -++ + Map-reduce partition columns: _col0 (type: int), _col1 (type: int), _col2 (type: bigint) + Statistics: Num rows: 6 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + TopN Hash Memory Usage: 0.1 + value expressions: _col3 (type: int) + Reduce Operator Tree: + Group By Operator + aggregations: max(VALUE._col0) + keys: KEY._col0 (type: int), KEY._col1 (type: int), KEY._col2 (type: bigint) + mode: mergepartial + outputColumnNames: _col0, _col1, _col3 + Statistics: Num rows: 6 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + pruneGroupingSetId: true + Select Operator + expressions: _col0 (type: int), _col1 (type: int), _col3 (type: int) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 6 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-3 + Map Reduce + Map Operator Tree: + TableScan + Reduce Output Operator + key expressions: _col0 (type: int), _col1 (type: int) + null sort order: zz + sort order: -+ + Statistics: Num rows: 6 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE + TopN Hash Memory Usage: 0.1 + value expressions: _col2 (type: int) + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: int), VALUE._col0 (type: int) + 
outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 6 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE + Limit + Number of rows: 7 + Statistics: Num rows: 6 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 6 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: 7 + Processor Tree: + ListSink + +PREHOOK: query: SELECT tmp.a, tmp.b, max(tmp.c) FROM + (SELECT a, b, c FROM t_test_grouping_sets GROUP BY a, b, c) tmp +GROUP BY tmp.a, tmp.b GROUPING SETS ((), (tmp.a,tmp.b)) ORDER BY tmp.a DESC, tmp.b ASC LIMIT 7 +PREHOOK: type: QUERY +PREHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### +POSTHOOK: query: SELECT tmp.a, tmp.b, max(tmp.c) FROM + (SELECT a, b, c FROM t_test_grouping_sets GROUP BY a, b, c) tmp +GROUP BY tmp.a, tmp.b GROUPING SETS ((), (tmp.a,tmp.b)) ORDER BY tmp.a DESC, tmp.b ASC LIMIT 7 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### +10 11 12 +7 8 4 +6 2 1 +5 1 2 +5 2 3 +NULL NULL 12 +NULL NULL NULL +PREHOOK: query: SELECT tmp.a, tmp.b, max(tmp.c) FROM + (SELECT a, b, c FROM t_test_grouping_sets GROUP BY a, b, c) tmp +GROUP BY tmp.a, tmp.b GROUPING SETS ((), (tmp.a,tmp.b)) ORDER BY tmp.a DESC, tmp.b ASC LIMIT 7 +PREHOOK: type: QUERY +PREHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was here #### +POSTHOOK: query: SELECT tmp.a, tmp.b, max(tmp.c) FROM + (SELECT a, b, c FROM t_test_grouping_sets GROUP BY a, b, c) tmp +GROUP BY tmp.a, tmp.b GROUPING SETS ((), (tmp.a,tmp.b)) ORDER BY tmp.a DESC, tmp.b ASC LIMIT 7 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t_test_grouping_sets +#### A masked pattern was 
here #### +10 11 12 +7 8 4 +6 2 1 +5 1 2 +5 2 3 +NULL NULL 12 +NULL NULL NULL +PREHOOK: query: DROP TABLE IF EXISTS t_test_grouping_sets +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@t_test_grouping_sets +PREHOOK: Output: default@t_test_grouping_sets +POSTHOOK: query: DROP TABLE IF EXISTS t_test_grouping_sets +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@t_test_grouping_sets +POSTHOOK: Output: default@t_test_grouping_sets diff --git ql/src/test/results/clientpositive/topnkey_windowing.q.out ql/src/test/results/clientpositive/topnkey_windowing.q.out index 9f64dca..58704db 100644 --- ql/src/test/results/clientpositive/topnkey_windowing.q.out +++ ql/src/test/results/clientpositive/topnkey_windowing.q.out @@ -360,8 +360,10 @@ Statistics: Num rows: 26 Data size: 1969 Basic stats: COMPLETE Column stats: COMPLETE GatherStats: false Reduce Output Operator + bucketingVersion: 2 key expressions: 0 (type: int), tw_value (type: double) null sort order: az + numBuckets: -1 sort order: ++ Map-reduce partition columns: 0 (type: int) Statistics: Num rows: 26 Data size: 1969 Basic stats: COMPLETE Column stats: COMPLETE @@ -459,6 +461,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 8 Data size: 457 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -469,6 +472,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types string:int escape.delim \ @@ -545,8 +549,10 @@ Statistics: Num rows: 26 Data size: 1969 Basic stats: COMPLETE Column stats: COMPLETE GatherStats: false Reduce Output Operator + bucketingVersion: 2 key expressions: 0 (type: int), tw_value (type: double) null sort order: az + numBuckets: -1 sort order: ++ Map-reduce partition columns: 0 (type: int) Statistics: Num rows: 26 Data size: 1969 
Basic stats: COMPLETE Column stats: COMPLETE @@ -645,6 +651,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 8 Data size: 457 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -655,6 +662,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types string:int escape.delim \ diff --git ql/src/test/results/clientpositive/transform_ppr1.q.out ql/src/test/results/clientpositive/transform_ppr1.q.out index a074610..25468bc 100644 --- ql/src/test/results/clientpositive/transform_ppr1.q.out +++ ql/src/test/results/clientpositive/transform_ppr1.q.out @@ -52,6 +52,7 @@ input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types string,string,string field.delim 9 @@ -64,8 +65,10 @@ predicate: ((_col1 < 100) and (_col0 = '2008-04-08')) (type: boolean) Statistics: Num rows: 333 Data size: 120546 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col1 (type: string) null sort order: a + numBuckets: -1 sort order: + Map-reduce partition columns: _col1 (type: string) Statistics: Num rows: 333 Data size: 120546 Basic stats: COMPLETE Column stats: COMPLETE @@ -283,6 +286,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 333 Data size: 59274 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -293,6 +297,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 
columns.types string:string escape.delim \ diff --git ql/src/test/results/clientpositive/transform_ppr2.q.out ql/src/test/results/clientpositive/transform_ppr2.q.out index d2c0484..8aeb688 100644 --- ql/src/test/results/clientpositive/transform_ppr2.q.out +++ ql/src/test/results/clientpositive/transform_ppr2.q.out @@ -51,6 +51,7 @@ input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types string,string,string field.delim 9 @@ -63,8 +64,10 @@ predicate: (_col1 < 100) (type: boolean) Statistics: Num rows: 333 Data size: 90576 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col1 (type: string) null sort order: a + numBuckets: -1 sort order: + Map-reduce partition columns: _col1 (type: string) Statistics: Num rows: 333 Data size: 90576 Basic stats: COMPLETE Column stats: COMPLETE @@ -182,6 +185,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 333 Data size: 59274 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -192,6 +196,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types string:string escape.delim \ diff --git ql/src/test/results/clientpositive/truncate_column_buckets.q.out ql/src/test/results/clientpositive/truncate_column_buckets.q.out index 4642c19..f44946b 100644 --- ql/src/test/results/clientpositive/truncate_column_buckets.q.out +++ ql/src/test/results/clientpositive/truncate_column_buckets.q.out @@ -30,8 +30,8 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@test_tab #### A masked pattern was here #### -258 -242 +248 +252 PREHOOK: query: TRUNCATE TABLE test_tab COLUMNS (value) 
PREHOOK: type: TRUNCATETABLE PREHOOK: Input: default@test_tab @@ -54,5 +54,5 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@test_tab #### A masked pattern was here #### -258 -242 +248 +252 diff --git ql/src/test/results/clientpositive/udf_explode.q.out ql/src/test/results/clientpositive/udf_explode.q.out index 815bef5..0143f31 100644 --- ql/src/test/results/clientpositive/udf_explode.q.out +++ ql/src/test/results/clientpositive/udf_explode.q.out @@ -76,8 +76,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE @@ -146,6 +148,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -156,6 +159,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types int:bigint escape.delim \ @@ -273,8 +277,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: string) Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE @@ -343,6 +349,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false 
GlobalTableId: 0 #### A masked pattern was here #### @@ -353,6 +360,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types int:string:bigint escape.delim \ diff --git ql/src/test/results/clientpositive/udtf_explode.q.out ql/src/test/results/clientpositive/udtf_explode.q.out index 66c1394..1b941b8 100644 --- ql/src/test/results/clientpositive/udtf_explode.q.out +++ ql/src/test/results/clientpositive/udtf_explode.q.out @@ -82,7 +82,9 @@ Number of rows: 3 Statistics: Num rows: 3 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 3 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -160,6 +162,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -184,8 +187,10 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE @@ -229,6 +234,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -239,6 +245,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types int:bigint escape.delim \ @@ -361,7 +368,9 @@ Number of rows: 3 Statistics: Num rows: 
3 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 null sort order: + numBuckets: -1 sort order: Statistics: Num rows: 3 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -439,6 +448,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -463,8 +473,10 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: int), _col1 (type: string) null sort order: zz + numBuckets: -1 sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: string) Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE @@ -508,6 +520,7 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -518,6 +531,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2 columns.types int:string:bigint escape.delim \ diff --git ql/src/test/results/clientpositive/union22.q.out ql/src/test/results/clientpositive/union22.q.out index fab4a58..de36e44 100644 --- ql/src/test/results/clientpositive/union22.q.out +++ ql/src/test/results/clientpositive/union22.q.out @@ -219,6 +219,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 221 Data size: 49306 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -360,6 +361,7 @@ Union Statistics: Num rows: 387 Data size: 108402 Basic stats: COMPLETE Column stats: 
COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -402,8 +404,10 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 1 Data size: 1845 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 1 Data size: 1845 Basic stats: COMPLETE Column stats: COMPLETE @@ -415,6 +419,7 @@ Union Statistics: Num rows: 387 Data size: 108402 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 1 #### A masked pattern was here #### @@ -457,8 +462,10 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 1 Data size: 1845 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 1 Data size: 1845 Basic stats: COMPLETE Column stats: COMPLETE @@ -554,6 +561,7 @@ outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 1 Data size: 1845 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -564,6 +572,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3,_col4 columns.types struct:struct:struct:struct:string escape.delim \ @@ -631,8 +640,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 166 Data size: 30212 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: 
string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 166 Data size: 30212 Basic stats: COMPLETE Column stats: COMPLETE @@ -653,8 +664,10 @@ outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 55 Data size: 14575 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 55 Data size: 14575 Basic stats: COMPLETE Column stats: COMPLETE @@ -783,6 +796,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 221 Data size: 49306 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### diff --git ql/src/test/results/clientpositive/union24.q.out ql/src/test/results/clientpositive/union24.q.out index 03a2c2c..32a86e7 100644 --- ql/src/test/results/clientpositive/union24.q.out +++ ql/src/test/results/clientpositive/union24.q.out @@ -120,8 +120,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 51 Data size: 4845 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 51 Data size: 4845 Basic stats: COMPLETE Column stats: COMPLETE @@ -191,6 +193,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 51 Data size: 4845 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -228,6 +231,7 @@ Union Statistics: Num rows: 360 Data size: 34200 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked 
pattern was here #### @@ -238,6 +242,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types string:bigint escape.delim \ @@ -265,6 +270,7 @@ Union Statistics: Num rows: 360 Data size: 34200 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -275,6 +281,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types string:bigint escape.delim \ @@ -302,6 +309,7 @@ Union Statistics: Num rows: 360 Data size: 34200 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -312,6 +320,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types string:bigint escape.delim \ @@ -328,6 +337,7 @@ Union Statistics: Num rows: 360 Data size: 34200 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -338,6 +348,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types string:bigint escape.delim \ @@ -650,8 +661,10 @@ outputColumnNames: _col0 Statistics: Num rows: 103 Data size: 8961 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce 
partition columns: _col0 (type: string) Statistics: Num rows: 103 Data size: 8961 Basic stats: COMPLETE Column stats: COMPLETE @@ -671,8 +684,10 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 103 Data size: 9785 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 103 Data size: 9785 Basic stats: COMPLETE Column stats: COMPLETE @@ -796,6 +811,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 103 Data size: 9785 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -833,6 +849,7 @@ Union Statistics: Num rows: 309 Data size: 29355 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -843,6 +860,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types string:bigint escape.delim \ @@ -870,6 +888,7 @@ Union Statistics: Num rows: 309 Data size: 29355 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -880,6 +899,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types string:bigint escape.delim \ @@ -896,6 +916,7 @@ Union Statistics: Num rows: 309 Data size: 29355 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -906,6 +927,7 
@@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types string:bigint escape.delim \ @@ -1161,8 +1183,10 @@ outputColumnNames: _col0 Statistics: Num rows: 103 Data size: 8961 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 103 Data size: 8961 Basic stats: COMPLETE Column stats: COMPLETE @@ -1182,8 +1206,10 @@ outputColumnNames: _col0 Statistics: Num rows: 103 Data size: 8961 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 103 Data size: 8961 Basic stats: COMPLETE Column stats: COMPLETE @@ -1309,6 +1335,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 51 Data size: 4845 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1333,8 +1360,10 @@ TableScan GatherStats: false Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string) null sort order: z + numBuckets: -1 sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 51 Data size: 4845 Basic stats: COMPLETE Column stats: COMPLETE @@ -1378,6 +1407,7 @@ outputColumnNames: _col0, _col1 Statistics: Num rows: 51 Data size: 4845 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 1 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1415,6 +1445,7 @@ Union Statistics: Num rows: 257 Data size: 24415 Basic stats: COMPLETE Column stats: COMPLETE File 
Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1425,6 +1456,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types string:bigint escape.delim \ @@ -1452,6 +1484,7 @@ Union Statistics: Num rows: 257 Data size: 24415 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1462,6 +1495,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types string:bigint escape.delim \ @@ -1478,6 +1512,7 @@ Union Statistics: Num rows: 257 Data size: 24415 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -1488,6 +1523,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1 columns.types string:bigint escape.delim \ diff --git ql/src/test/results/clientpositive/union_ppr.q.out ql/src/test/results/clientpositive/union_ppr.q.out index 29250d2..b841994 100644 --- ql/src/test/results/clientpositive/union_ppr.q.out +++ ql/src/test/results/clientpositive/union_ppr.q.out @@ -52,8 +52,10 @@ outputColumnNames: _col0, _col1, _col3 Statistics: Num rows: 666 Data size: 303696 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string) null sort order: zzz + numBuckets: -1 sort order: +++ Statistics: Num rows: 666 Data size: 303696 Basic stats: COMPLETE Column stats: 
COMPLETE tag: -1 @@ -78,8 +80,10 @@ outputColumnNames: _col0, _col1, _col3 Statistics: Num rows: 666 Data size: 303696 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator + bucketingVersion: 2 key expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string) null sort order: zzz + numBuckets: -1 sort order: +++ Statistics: Num rows: 666 Data size: 303696 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 @@ -195,6 +199,7 @@ outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 666 Data size: 303696 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator + bucketingVersion: 2 compressed: false GlobalTableId: 0 #### A masked pattern was here #### @@ -205,6 +210,7 @@ input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: + bucketing_version -1 columns _col0,_col1,_col2,_col3 columns.types string:string:string:string escape.delim \