diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java
index 0af91bda95..b828f4cffd 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java
@@ -663,8 +663,8 @@ public void testAcidInsertWithRemoveUnion() throws Exception {
     }
     String[][] expected2 = {
-      {"{\"writeid\":1,\"bucketid\":536870913,\"rowid\":1}\t1\t2", "warehouse/t/delta_0000001_0000001_0001/bucket_00000"},
-      {"{\"writeid\":1,\"bucketid\":536870913,\"rowid\":0}\t3\t4", "warehouse/t/delta_0000001_0000001_0001/bucket_00000"},
+      {"{\"writeid\":1,\"bucketid\":536870913,\"rowid\":0}\t1\t2", "warehouse/t/delta_0000001_0000001_0001/bucket_00000"},
+      {"{\"writeid\":1,\"bucketid\":536870913,\"rowid\":1}\t3\t4", "warehouse/t/delta_0000001_0000001_0001/bucket_00000"},
       {"{\"writeid\":1,\"bucketid\":536870914,\"rowid\":0}\t5\t6", "warehouse/t/delta_0000001_0000001_0002/bucket_00000"},
       {"{\"writeid\":1,\"bucketid\":536870914,\"rowid\":1}\t7\t8", "warehouse/t/delta_0000001_0000001_0002/bucket_00000"},
       {"{\"writeid\":1,\"bucketid\":536870915,\"rowid\":0}\t9\t10", "warehouse/t/delta_0000001_0000001_0003/bucket_00000"}
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
index 4033b379de..2947c167fc 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
@@ -74,8 +74,6 @@
 import org.apache.hadoop.hive.llap.io.metadata.MetadataCache;
 import org.apache.hadoop.hive.llap.io.metadata.MetadataCache.LlapBufferOrBuffers;
 import org.apache.hadoop.hive.llap.io.metadata.OrcStripeMetadata;
-import org.apache.hadoop.hive.ql.exec.DDLTask;
-import org.apache.hadoop.hive.ql.io.AcidUtils;
 import org.apache.hadoop.hive.ql.io.HdfsUtils;
 import org.apache.orc.CompressionKind;
 import org.apache.orc.DataReader;
@@ -766,7 +764,8 @@ private boolean determineRgsToRead(int rowIndexStride,
     if (sarg != null && rowIndexStride != 0) {
       sargApp = new RecordReaderImpl.SargApplier(sarg, rowIndexStride, evolution,
-          OrcFile.WriterVersion.from(fileMetadata.getWriterVersionNum()));
+          OrcFile.WriterVersion.from(OrcFile.WriterImplementation.ORC_JAVA, fileMetadata.getWriterVersionNum()),
+          false);
     }
     boolean hasAnyData = false;
     // stripeRgs should have been initialized by this time with an empty array.
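Note on the OrcEncodedDataReader hunk above: ORC 1.5 keys writer versions by writer implementation, so the one-argument WriterVersion.from(int) used against 1.4.3 becomes a two-argument overload (plus a new trailing boolean on the SargApplier constructor, passed as false here). A minimal sketch of the new lookup under the 1.5 API, with a placeholder file path; OrcFile.createReader, Reader#getWriterVersion and WriterVersion#getId are existing ORC APIs:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.orc.OrcFile;
    import org.apache.orc.Reader;

    public class WriterVersionProbe {
      public static void main(String[] args) throws Exception {
        // Placeholder path; any file produced by the Java ORC writer works.
        Reader reader = OrcFile.createReader(new Path("/tmp/example.orc"),
            OrcFile.readerOptions(new Configuration()));
        // Resolve the version the way the LLAP reader now does, assuming the
        // file came from the Java writer (the LLAP metadata cache makes the
        // same ORC_JAVA assumption, see OrcFileMetadata below).
        OrcFile.WriterVersion version = OrcFile.WriterVersion.from(
            OrcFile.WriterImplementation.ORC_JAVA,
            reader.getWriterVersion().getId());
        System.out.println("writer version: " + version);
      }
    }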
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/SerDeEncodedDataReader.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/SerDeEncodedDataReader.java
index 1cfe92978a..bed5887022 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/SerDeEncodedDataReader.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/SerDeEncodedDataReader.java
@@ -563,6 +563,23 @@ public void setCurrentStripeOffsets(long currentKnownTornStart,
     public CompressionCodec getCompressionCodec() {
       return null;
     }
+
+    @Override
+    public long getFileBytes(int column) {
+      long size = 0L;
+      List<CacheOutputReceiver> l = this.colStreams.get(column);
+      if (l == null) {
+        return size;
+      }
+      for (CacheOutputReceiver c : l) {
+        if (c.getData() != null && !c.suppressed && c.getName().getArea() != StreamName.Area.INDEX) {
+          for (MemoryBuffer buffer : c.getData()) {
+            size += buffer.getByteBufferRaw().limit();
+          }
+        }
+      }
+      return size;
+    }
   }
 
   private interface CacheOutput {
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/metadata/ConsumerFileMetadata.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/metadata/ConsumerFileMetadata.java
index bf139c071c..89ad4aa8cd 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/metadata/ConsumerFileMetadata.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/metadata/ConsumerFileMetadata.java
@@ -20,6 +20,7 @@
 import java.util.List;
 
 import org.apache.orc.CompressionKind;
+import org.apache.orc.FileFormatException;
 import org.apache.orc.OrcProto.Type;
 import org.apache.orc.TypeDescription;
@@ -27,5 +28,5 @@
   int getStripeCount();
   CompressionKind getCompressionKind();
   List<Type> getTypes();
-  TypeDescription getSchema();
+  TypeDescription getSchema() throws FileFormatException;
 }
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/metadata/OrcFileMetadata.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/metadata/OrcFileMetadata.java
index 0012afb3ef..5cd6f9fa2c 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/metadata/OrcFileMetadata.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/metadata/OrcFileMetadata.java
@@ -22,7 +22,9 @@
 import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat;
 import org.apache.orc.CompressionKind;
+import org.apache.orc.FileFormatException;
 import org.apache.orc.FileMetadata;
+import org.apache.orc.OrcFile;
 import org.apache.orc.OrcProto;
 import org.apache.orc.OrcProto.StripeStatistics;
 import org.apache.orc.OrcUtils;
@@ -123,6 +125,11 @@ public int getMetadataSize() {
     return metadataSize;
   }
 
+  @Override
+  public int getWriterImplementation() {
+    return OrcFile.WriterImplementation.ORC_JAVA.getId();
+  }
+
   @Override
   public int getWriterVersionNum() {
     return writerVersionNum;
@@ -153,7 +160,7 @@ public int getStripeCount() {
     return stripes.size();
   }
 
-  public TypeDescription getSchema() {
+  public TypeDescription getSchema() throws FileFormatException {
     return OrcUtils.convertTypeFromProtobuf(this.types, 0);
   }
 }
diff --git a/pom.xml b/pom.xml
index 1f43c416db..cb4b746643 100644
--- a/pom.xml
+++ b/pom.xml
@@ -184,7 +184,7 @@
     0.9.3
     2.10.0
     2.3
-    <orc.version>1.4.3</orc.version>
+    <orc.version>1.5.0</orc.version>
     1.10.19
     2.0.0-M5
     4.1.17.Final
diff --git a/ql/pom.xml b/ql/pom.xml
index 06124f7387..d52c307cff 100644
--- a/ql/pom.xml
+++ b/ql/pom.xml
@@ -943,6 +943,7 @@
                   <include>org.apache.hive:hive-spark-client</include>
                   <include>org.apache.hive:hive-storage-api</include>
                   <include>org.apache.orc:orc-core</include>
+                  <include>org.apache.orc:orc-shims</include>
                   <include>org.apache.orc:orc-tools</include>
                   <include>joda-time:joda-time</include>
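Note on the two metadata files above: getSchema() now declares org.apache.orc.FileFormatException because ORC 1.5's OrcUtils.convertTypeFromProtobuf can reject a malformed footer type tree up front instead of failing later. FileFormatException is an IOException subtype, so callers that already propagate IOException need no new handling; a minimal caller sketch, where fileMetadata stands in for any ConsumerFileMetadata implementation:

    import java.io.IOException;
    import org.apache.orc.FileFormatException;
    import org.apache.orc.TypeDescription;

    TypeDescription schema;
    try {
      schema = fileMetadata.getSchema();
    } catch (FileFormatException e) {
      // The footer's type tree is structurally invalid; report the file as
      // corrupt instead of hitting a confusing error further down.
      throw new IOException("Invalid ORC type tree in cached footer", e);
    }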
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
index 049dbd38e7..de4fa8b674 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
@@ -29,7 +29,6 @@
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.NavigableMap;
@@ -116,6 +115,7 @@
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hive.common.util.Ref;
 import org.apache.orc.ColumnStatistics;
+import org.apache.orc.FileFormatException;
 import org.apache.orc.OrcProto;
 import org.apache.orc.OrcProto.Footer;
 import org.apache.orc.OrcProto.Type;
@@ -2167,7 +2167,7 @@ static Reader createOrcReaderForSplit(Configuration conf, OrcSplit orcSplit) thr
   public static boolean[] pickStripesViaTranslatedSarg(SearchArgument sarg,
       OrcFile.WriterVersion writerVersion, List<OrcProto.Type> types,
-      List<StripeStatistics> stripeStats, int stripeCount) {
+      List<StripeStatistics> stripeStats, int stripeCount) throws FileFormatException {
     LOG.info("Translated ORC pushdown predicate: " + sarg);
     assert sarg != null;
     if (stripeStats == null || writerVersion == OrcFile.WriterVersion.ORIGINAL) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedTreeReaderFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedTreeReaderFactory.java
index 646b214249..c9078be208 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedTreeReaderFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedTreeReaderFactory.java
@@ -23,14 +23,12 @@
 import java.util.Arrays;
 import java.util.List;
 
-import org.apache.curator.shaded.com.google.common.base.Preconditions;
 import org.apache.hadoop.hive.common.io.encoded.EncodedColumnBatch;
 import org.apache.hadoop.hive.common.io.encoded.EncodedColumnBatch.ColumnStreamData;
 import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
 import org.apache.hadoop.hive.ql.io.orc.encoded.Reader.OrcEncodedColumnBatch;
 import org.apache.orc.CompressionCodec;
 import org.apache.orc.TypeDescription;
-import org.apache.orc.TypeDescription.Category;
 import org.apache.orc.impl.InStream;
 import org.apache.orc.impl.PositionProvider;
 import org.apache.orc.impl.SettableUncompressedStream;
@@ -1053,7 +1051,8 @@ private DecimalStreamReader(int columnId, int precision, int scale,
         boolean isFileCompressed, OrcProto.ColumnEncoding encoding,
         TreeReaderFactory.Context context, List<ColumnVector> vectors) throws IOException {
-      super(columnId, presentStream, valueStream, scaleStream, encoding, context);
+      super(columnId, presentStream, valueStream, scaleStream, encoding,
+          precision, scale, context);
       this._isFileCompressed = isFileCompressed;
       this._presentStream = presentStream;
       this._valueStream = valueStream;
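Note on the EncodedTreeReaderFactory hunk above: ORC 1.5's decimal tree reader takes the column's precision and scale at construction, which is why the super(...) call now forwards them. Both values travel with the ORC type itself; a minimal TypeDescription sketch (the 18/6 values are arbitrary):

    import org.apache.orc.TypeDescription;

    public class DecimalTypeExample {
      public static void main(String[] args) {
        // precision/scale are properties of the decimal type; they are what
        // DecimalStreamReader now hands to its superclass constructor.
        TypeDescription dec = TypeDescription.createDecimal()
            .withPrecision(18).withScale(6);
        // prints: decimal(18,6) precision=18 scale=6
        System.out.println(dec + " precision=" + dec.getPrecision()
            + " scale=" + dec.getScale());
      }
    }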
"warehouse/myctas2/delta_0000001_0000001_0000/bucket_00000"}, - {"{\"writeid\":1,\"bucketid\":536936448,\"rowid\":0}\t1\t2", "warehouse/myctas2/delta_0000001_0000001_0000/bucket_00001"} + {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "warehouse/myctas2/delta_0000001_0000001_0000/bucket_00000"}, + {"{\"writeid\":1,\"bucketid\":536936448,\"rowid\":0}\t3\t4", "warehouse/myctas2/delta_0000001_0000001_0000/bucket_00001"} }; checkExpected(rs, expected2, "Unexpected row count after ctas from acid table"); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java index b28c126dbc..f589dc1d4e 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java @@ -2477,14 +2477,14 @@ public void testCombinationInputFormatWithAcid() throws Exception { assertEquals("mock:/combinationAcid/p=0/base_0000010/bucket_00000", split.getPath().toString()); assertEquals(0, split.getStart()); - assertEquals(677, split.getLength()); + assertEquals(700, split.getLength()); split = (HiveInputFormat.HiveInputSplit) splits[1]; assertEquals("org.apache.hadoop.hive.ql.io.orc.OrcInputFormat", split.inputFormatClassName()); assertEquals("mock:/combinationAcid/p=0/base_0000010/bucket_00001", split.getPath().toString()); assertEquals(0, split.getStart()); - assertEquals(703, split.getLength()); + assertEquals(724, split.getLength()); CombineHiveInputFormat.CombineHiveInputSplit combineSplit = (CombineHiveInputFormat.CombineHiveInputSplit) splits[2]; assertEquals(BUCKETS, combineSplit.getNumPaths()); @@ -2492,7 +2492,7 @@ public void testCombinationInputFormatWithAcid() throws Exception { assertEquals("mock:/combinationAcid/p=1/00000" + bucket + "_0", combineSplit.getPath(bucket).toString()); assertEquals(0, combineSplit.getOffset(bucket)); - assertEquals(241, combineSplit.getLength(bucket)); + assertEquals(251, combineSplit.getLength(bucket)); } String[] hosts = combineSplit.getLocations(); assertEquals(2, hosts.length); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcFile.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcFile.java index ef678a8eb3..a2036f2f86 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcFile.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcFile.java @@ -94,6 +94,7 @@ import org.apache.orc.TypeDescription; import org.junit.Assert; import org.junit.Before; +import org.junit.Ignore; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TestName; @@ -698,13 +699,13 @@ public void testStringAndBinaryStatistics() throws Exception { assertEquals(3, stats[1].getNumberOfValues()); assertEquals(15, ((BinaryColumnStatistics) stats[1]).getSum()); - assertEquals("count: 3 hasNull: true sum: 15", stats[1].toString()); + assertEquals("count: 3 hasNull: true bytesOnDisk: 28 sum: 15", stats[1].toString()); assertEquals(3, stats[2].getNumberOfValues()); assertEquals("bar", ((StringColumnStatistics) stats[2]).getMinimum()); assertEquals("hi", ((StringColumnStatistics) stats[2]).getMaximum()); assertEquals(8, ((StringColumnStatistics) stats[2]).getSum()); - assertEquals("count: 3 hasNull: true min: bar max: hi sum: 8", + assertEquals("count: 3 hasNull: true bytesOnDisk: 22 min: bar max: hi sum: 8", stats[2].toString()); // check the inspectors @@ -917,13 +918,13 @@ public void test1() throws Exception { assertEquals(2, 
     assertEquals(1, ((BooleanColumnStatistics) stats[1]).getFalseCount());
     assertEquals(1, ((BooleanColumnStatistics) stats[1]).getTrueCount());
-    assertEquals("count: 2 hasNull: false true: 1", stats[1].toString());
+    assertEquals("count: 2 hasNull: false bytesOnDisk: 5 true: 1", stats[1].toString());
 
     assertEquals(2048, ((IntegerColumnStatistics) stats[3]).getMaximum());
     assertEquals(1024, ((IntegerColumnStatistics) stats[3]).getMinimum());
     assertEquals(true, ((IntegerColumnStatistics) stats[3]).isSumDefined());
     assertEquals(3072, ((IntegerColumnStatistics) stats[3]).getSum());
-    assertEquals("count: 2 hasNull: false min: 1024 max: 2048 sum: 3072",
+    assertEquals("count: 2 hasNull: false bytesOnDisk: 9 min: 1024 max: 2048 sum: 3072",
         stats[3].toString());
 
     StripeStatistics ss = reader.getStripeStatistics().get(0);
@@ -935,10 +936,10 @@
     assertEquals(-15.0, ((DoubleColumnStatistics) stats[7]).getMinimum());
     assertEquals(-5.0, ((DoubleColumnStatistics) stats[7]).getMaximum());
     assertEquals(-20.0, ((DoubleColumnStatistics) stats[7]).getSum(), 0.00001);
-    assertEquals("count: 2 hasNull: false min: -15.0 max: -5.0 sum: -20.0",
+    assertEquals("count: 2 hasNull: false bytesOnDisk: 15 min: -15.0 max: -5.0 sum: -20.0",
         stats[7].toString());
 
-    assertEquals("count: 2 hasNull: false min: bye max: hi sum: 5", stats[9].toString());
+    assertEquals("count: 2 hasNull: false bytesOnDisk: 14 min: bye max: hi sum: 5", stats[9].toString());
 
     // check the inspectors
     StructObjectInspector readerInspector =
@@ -1656,6 +1657,7 @@ public void testWithoutIndex() throws Exception {
     rows.close();
   }
 
+  @Ignore("ORC-367. Will be re-enabled in HIVE-19669")
   @Test
   public void testSeek() throws Exception {
     ObjectInspector inspector;
@@ -1711,6 +1713,7 @@ public void testSeek() throws Exception {
       row = (OrcStruct) rows.next(row);
       BigRow expected = createRandomRow(intValues, doubleValues, stringValues,
           byteValues, words, i);
+      //assertEquals(expected, row);
       assertEquals(expected.boolean1.booleanValue(),
           ((BooleanWritable) row.getFieldValue(0)).get());
       assertEquals(expected.byte1.byteValue(),
@@ -1770,6 +1773,7 @@
     rows.close();
   }
 
+  @Ignore("ORC-367. Will be re-enabled in HIVE-19669")
   @Test
   public void testZeroCopySeek() throws Exception {
     ObjectInspector inspector;
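Note on the statistics golden strings above and in TestOrcSerDeStats below: ORC 1.5 tracks the on-disk byte count of each column's data streams (the getFileBytes(...) addition earlier feeds the same accounting for LLAP-written stripes) and includes it in ColumnStatistics#toString() as "bytesOnDisk: N". A sketch of where those strings originate, with a placeholder path:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.orc.ColumnStatistics;
    import org.apache.orc.OrcFile;
    import org.apache.orc.Reader;

    public class StatsDump {
      public static void main(String[] args) throws Exception {
        Reader reader = OrcFile.createReader(new Path("/tmp/example.orc"),
            OrcFile.readerOptions(new Configuration()));
        ColumnStatistics[] stats = reader.getStatistics();
        for (int i = 0; i < stats.length; i++) {
          // e.g. "count: 3 hasNull: true bytesOnDisk: 28 sum: 15"
          System.out.println(i + ": " + stats[i]);
        }
      }
    }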
1", stats[1].toString()); + assertEquals("count: 2 hasNull: false bytesOnDisk: 5 true: 1", stats[1].toString()); assertEquals(2048, ((IntegerColumnStatistics) stats[3]).getMaximum()); assertEquals(1024, ((IntegerColumnStatistics) stats[3]).getMinimum()); assertEquals(true, ((IntegerColumnStatistics) stats[3]).isSumDefined()); assertEquals(3072, ((IntegerColumnStatistics) stats[3]).getSum()); - assertEquals("count: 2 hasNull: false min: 1024 max: 2048 sum: 3072", + assertEquals("count: 2 hasNull: false bytesOnDisk: 8 min: 1024 max: 2048 sum: 3072", stats[3].toString()); assertEquals(Long.MAX_VALUE, @@ -562,22 +562,22 @@ public void testOrcSerDeStatsComplexOldFormat() throws Exception { assertEquals(Long.MAX_VALUE, ((IntegerColumnStatistics) stats[5]).getMinimum()); assertEquals(false, ((IntegerColumnStatistics) stats[5]).isSumDefined()); - assertEquals("count: 2 hasNull: false min: 9223372036854775807 max: 9223372036854775807", + assertEquals("count: 2 hasNull: false bytesOnDisk: 12 min: 9223372036854775807 max: 9223372036854775807", stats[5].toString()); assertEquals(-15.0, ((DoubleColumnStatistics) stats[7]).getMinimum()); assertEquals(-5.0, ((DoubleColumnStatistics) stats[7]).getMaximum()); assertEquals(-20.0, ((DoubleColumnStatistics) stats[7]).getSum(), 0.00001); - assertEquals("count: 2 hasNull: false min: -15.0 max: -5.0 sum: -20.0", + assertEquals("count: 2 hasNull: false bytesOnDisk: 15 min: -15.0 max: -5.0 sum: -20.0", stats[7].toString()); assertEquals(5, ((BinaryColumnStatistics) stats[8]).getSum()); - assertEquals("count: 2 hasNull: false sum: 5", stats[8].toString()); + assertEquals("count: 2 hasNull: false bytesOnDisk: 14 sum: 5", stats[8].toString()); assertEquals("bye", ((StringColumnStatistics) stats[9]).getMinimum()); assertEquals("hi", ((StringColumnStatistics) stats[9]).getMaximum()); assertEquals(5, ((StringColumnStatistics) stats[9]).getSum()); - assertEquals("count: 2 hasNull: false min: bye max: hi sum: 5", stats[9].toString()); + assertEquals("count: 2 hasNull: false bytesOnDisk: 20 min: bye max: hi sum: 5", stats[9].toString()); } @Test(expected = ClassCastException.class) diff --git a/ql/src/test/results/clientpositive/acid_mapjoin.q.out b/ql/src/test/results/clientpositive/acid_mapjoin.q.out index 76a781e94e..5569a035e2 100644 --- a/ql/src/test/results/clientpositive/acid_mapjoin.q.out +++ b/ql/src/test/results/clientpositive/acid_mapjoin.q.out @@ -73,21 +73,21 @@ STAGE PLANS: Stage: Stage-5 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_0:acid1 + $hdt$_1:acid2 Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_0:acid1 + $hdt$_1:acid2 TableScan - alias: acid1 - Statistics: Num rows: 316 Data size: 1265 Basic stats: COMPLETE Column stats: NONE + alias: acid2 + Statistics: Num rows: 210 Data size: 840 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: key is not null (type: boolean) - Statistics: Num rows: 316 Data size: 1265 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 210 Data size: 840 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int) outputColumnNames: _col0 - Statistics: Num rows: 316 Data size: 1265 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 210 Data size: 840 Basic stats: COMPLETE Column stats: NONE HashTable Sink Operator keys: 0 _col0 (type: int) @@ -97,15 +97,15 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: acid2 - Statistics: Num rows: 210 Data size: 840 Basic stats: COMPLETE Column stats: NONE + alias: acid1 + 
+            Statistics: Num rows: 316 Data size: 1265 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
              predicate: key is not null (type: boolean)
-              Statistics: Num rows: 210 Data size: 840 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 316 Data size: 1265 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: key (type: int)
                outputColumnNames: _col0
-                Statistics: Num rows: 210 Data size: 840 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 316 Data size: 1265 Basic stats: COMPLETE Column stats: NONE
                Map Join Operator
                  condition map:
                       Inner Join 0 to 1
diff --git a/ql/src/test/results/clientpositive/acid_nullscan.q.out b/ql/src/test/results/clientpositive/acid_nullscan.q.out
index 6dad4974ae..ccd33ae785 100644
--- a/ql/src/test/results/clientpositive/acid_nullscan.q.out
+++ b/ql/src/test/results/clientpositive/acid_nullscan.q.out
@@ -42,12 +42,12 @@ STAGE PLANS:
       Map Operator Tree:
          TableScan
            alias: acid_vectorized_n1
-            Statistics: Num rows: 88 Data size: 25400 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 90 Data size: 26090 Basic stats: COMPLETE Column stats: NONE
            GatherStats: false
            Filter Operator
              isSamplingPred: false
              predicate: false (type: boolean)
-              Statistics: Num rows: 1 Data size: 288 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 1 Data size: 289 Basic stats: COMPLETE Column stats: NONE
              Group By Operator
                aggregations: sum(a)
                mode: hash
@@ -82,7 +82,7 @@ STAGE PLANS:
             serialization.ddl struct acid_vectorized_n1 { i32 a, string b}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.serde2.NullStructSerDe
-            totalSize 2540
+            totalSize 2609
             transactional true
             transactional_properties default
 #### A masked pattern was here ####
@@ -104,7 +104,7 @@ STAGE PLANS:
             serialization.ddl struct acid_vectorized_n1 { i32 a, string b}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 2540
+            totalSize 2609
             transactional true
             transactional_properties default
 #### A masked pattern was here ####
diff --git a/ql/src/test/results/clientpositive/acid_table_stats.q.out b/ql/src/test/results/clientpositive/acid_table_stats.q.out
index 2596922fff..b266794b0d 100644
--- a/ql/src/test/results/clientpositive/acid_table_stats.q.out
+++ b/ql/src/test/results/clientpositive/acid_table_stats.q.out
@@ -94,7 +94,7 @@ Table: acid
 #### A masked pattern was here ####
 Partition Parameters:
 	numFiles 2
-	totalSize 4009
+	totalSize 4064
 #### A masked pattern was here ####
 
 # Storage Information
@@ -132,9 +132,9 @@ STAGE PLANS:
       Map Operator Tree:
          TableScan
            alias: acid
-            Statistics: Num rows: 82 Data size: 40090 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 83 Data size: 40640 Basic stats: COMPLETE Column stats: NONE
            Select Operator
-              Statistics: Num rows: 82 Data size: 40090 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 83 Data size: 40640 Basic stats: COMPLETE Column stats: NONE
              Group By Operator
                aggregations: count()
                mode: hash
@@ -210,7 +210,7 @@ Partition Parameters:
 	numFiles 2
 	numRows 1000
 	rawDataSize 208000
-	totalSize 4009
+	totalSize 4064
 #### A masked pattern was here ####
 
 # Storage Information
@@ -261,7 +261,7 @@ Partition Parameters:
 	numFiles 2
 	numRows 1000
 	rawDataSize 208000
-	totalSize 4009
+	totalSize 4064
 #### A masked pattern was here ####
 
 # Storage Information
@@ -385,7 +385,7 @@ Table: acid
 #### A masked pattern was here ####
 Partition Parameters:
 	numFiles 4
-	totalSize 8011
+	totalSize 8119
 #### A masked pattern was here ####
 
 # Storage Information
@@ -432,7 +432,7 @@ Partition Parameters:
 	numFiles 4
 	numRows 2000
 	rawDataSize 416000
-	totalSize 8011
+	totalSize 8119
 #### A masked pattern was here ####
 
 # Storage Information
@@ -667,7 +667,7 @@ Partition Parameters:
 	numFiles 2
 	numRows 1000
 	rawDataSize 176000
-	totalSize 2979
+	totalSize 3008
 #### A masked pattern was here ####
 
 # Storage Information
diff --git a/ql/src/test/results/clientpositive/annotate_stats_part.q.out b/ql/src/test/results/clientpositive/annotate_stats_part.q.out
index 9e45101fe6..18e8161bb8 100644
--- a/ql/src/test/results/clientpositive/annotate_stats_part.q.out
+++ b/ql/src/test/results/clientpositive/annotate_stats_part.q.out
@@ -90,11 +90,11 @@ STAGE PLANS:
       Processor Tree:
        TableScan
          alias: loc_orc_n4
-          Statistics: Num rows: 18 Data size: 14640 Basic stats: COMPLETE Column stats: PARTIAL
+          Statistics: Num rows: 20 Data size: 15680 Basic stats: COMPLETE Column stats: PARTIAL
          Select Operator
            expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: string)
            outputColumnNames: _col0, _col1, _col2, _col3
-            Statistics: Num rows: 18 Data size: 6840 Basic stats: COMPLETE Column stats: PARTIAL
+            Statistics: Num rows: 20 Data size: 7600 Basic stats: COMPLETE Column stats: PARTIAL
            ListSink
 
 PREHOOK: query: analyze table loc_orc_n4 partition(year='2001') compute statistics
@@ -121,11 +121,11 @@
       Processor Tree:
        TableScan
          alias: loc_orc_n4
-          Statistics: Num rows: 8 Data size: 5048 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 9 Data size: 5364 Basic stats: COMPLETE Column stats: NONE
          Select Operator
            expressions: state (type: string), locid (type: int), zip (type: bigint), '__HIVE_DEFAULT_PARTITION__' (type: string)
            outputColumnNames: _col0, _col1, _col2, _col3
-            Statistics: Num rows: 8 Data size: 5048 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 9 Data size: 5364 Basic stats: COMPLETE Column stats: NONE
            ListSink
 
 PREHOOK: query: explain select * from loc_orc_n4
diff --git a/ql/src/test/results/clientpositive/annotate_stats_table.q.out b/ql/src/test/results/clientpositive/annotate_stats_table.q.out
index b502957e96..96042084df 100644
--- a/ql/src/test/results/clientpositive/annotate_stats_table.q.out
+++ b/ql/src/test/results/clientpositive/annotate_stats_table.q.out
@@ -81,11 +81,11 @@ STAGE PLANS:
       Processor Tree:
        TableScan
          alias: emp_orc
-          Statistics: Num rows: 13 Data size: 2444 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 14 Data size: 2632 Basic stats: COMPLETE Column stats: NONE
          Select Operator
            expressions: lastname (type: string), deptid (type: int)
            outputColumnNames: _col0, _col1
-            Statistics: Num rows: 13 Data size: 2444 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 14 Data size: 2632 Basic stats: COMPLETE Column stats: NONE
          ListSink
 
 PREHOOK: query: analyze table emp_orc compute statistics
diff --git a/ql/src/test/results/clientpositive/autoColumnStats_4.q.out b/ql/src/test/results/clientpositive/autoColumnStats_4.q.out
index 5be906eee4..d651887de4 100644
--- a/ql/src/test/results/clientpositive/autoColumnStats_4.q.out
+++ b/ql/src/test/results/clientpositive/autoColumnStats_4.q.out
@@ -200,7 +200,7 @@ Table Type: MANAGED_TABLE
 Table Parameters:
 	bucketing_version 2
 	numFiles 2
-	totalSize 1856
+	totalSize 1907
 	transactional true
 	transactional_properties default
 #### A masked pattern was here ####
@@ -243,7 +243,7 @@ Table Parameters:
 	COLUMN_STATS_ACCURATE {}
 	bucketing_version 2
 	numFiles 4
-	totalSize 3000
+	totalSize 3091
 	transactional true
 	transactional_properties default
 #### A masked pattern was here ####
diff --git a/ql/src/test/results/clientpositive/beeline/materialized_view_create_rewrite.q.out b/ql/src/test/results/clientpositive/beeline/materialized_view_create_rewrite.q.out
index a717b80d04..a8548c2578 100644
--- a/ql/src/test/results/clientpositive/beeline/materialized_view_create_rewrite.q.out
+++ b/ql/src/test/results/clientpositive/beeline/materialized_view_create_rewrite.q.out
@@ -69,7 +69,7 @@
 bucketing_version	2
 numFiles	1
 numRows	2
 rawDataSize	408
-totalSize	453
+totalSize	457
 #### A masked pattern was here ####
 PREHOOK: query: create materialized view if not exists cmv_mat_view2_n4 enable rewrite as select a, c from cmv_basetable_n10 where a = 3
@@ -102,7 +102,7 @@
 bucketing_version	2
 numFiles	1
 numRows	2
 rawDataSize	232
-totalSize	322
+totalSize	326
 #### A masked pattern was here ####
 PREHOOK: query: explain select a, c from cmv_basetable_n10 where a = 3
diff --git a/ql/src/test/results/clientpositive/columnStatsUpdateForStatsOptimizer_2.q.out b/ql/src/test/results/clientpositive/columnStatsUpdateForStatsOptimizer_2.q.out
index fa58add84d..aafa3867e1 100644
--- a/ql/src/test/results/clientpositive/columnStatsUpdateForStatsOptimizer_2.q.out
+++ b/ql/src/test/results/clientpositive/columnStatsUpdateForStatsOptimizer_2.q.out
@@ -42,7 +42,7 @@ Table Parameters:
 	numFiles 2
 	numRows 3
 	rawDataSize 24
-	totalSize 547
+	totalSize 567
 #### A masked pattern was here ####
 
 # Storage Information
@@ -87,7 +87,7 @@ Table Parameters:
 	numFiles 2
 	numRows 3
 	rawDataSize 24
-	totalSize 547
+	totalSize 567
 #### A masked pattern was here ####
 
 # Storage Information
@@ -177,7 +177,7 @@ Table Parameters:
 	numFiles 2
 	numRows 3
 	rawDataSize 24
-	totalSize 547
+	totalSize 567
 #### A masked pattern was here ####
 
 # Storage Information
diff --git a/ql/src/test/results/clientpositive/deleteAnalyze.q.out b/ql/src/test/results/clientpositive/deleteAnalyze.q.out
index d98114bff1..0661885180 100644
--- a/ql/src/test/results/clientpositive/deleteAnalyze.q.out
+++ b/ql/src/test/results/clientpositive/deleteAnalyze.q.out
@@ -54,7 +54,7 @@ Table Parameters:
 	numFiles 1
 	numRows 2
 	rawDataSize 634
-	totalSize 578
+	totalSize 579
 #### A masked pattern was here ####
 
 # Storage Information
diff --git a/ql/src/test/results/clientpositive/druid/druidmini_mv.q.out b/ql/src/test/results/clientpositive/druid/druidmini_mv.q.out
index 35f4c1b50b..1f36b344d6 100644
--- a/ql/src/test/results/clientpositive/druid/druidmini_mv.q.out
+++ b/ql/src/test/results/clientpositive/druid/druidmini_mv.q.out
@@ -337,7 +337,7 @@ STAGE PLANS:
       Map Operator Tree:
          TableScan
            alias: cmv_basetable_n2
-            Statistics: Num rows: 30 Data size: 360 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 31 Data size: 372 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
              predicate: (a = 3) (type: boolean)
              Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE
@@ -353,7 +353,7 @@ STAGE PLANS:
      Map Operator Tree:
          TableScan
            alias: cmv_basetable_n2
-            Statistics: Num rows: 30 Data size: 480 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 31 Data size: 496 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
              predicate: ((a = 3) and (d = 3)) (type: boolean)
              Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
@@ -454,7 +454,7 @@ STAGE PLANS:
      Map Operator Tree:
          TableScan
            alias: cmv_basetable_n2
-            Statistics: Num rows: 30 Data size: 21960 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 31 Data size: 22692 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
              predicate: (a = 3) (type: boolean)
              Statistics: Num rows: 5 Data size: 3660 Basic stats: COMPLETE Column stats: NONE
@@ -545,7 +545,7 @@ STAGE PLANS:
      Map Operator Tree:
          TableScan
            alias: cmv_basetable_n2
-            Statistics: Num rows: 30 Data size: 480 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 31 Data size: 496 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
              predicate: ((a = 3) and (d = 3)) (type: boolean)
              Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
diff --git a/ql/src/test/results/clientpositive/extrapolate_part_stats_date.q.out b/ql/src/test/results/clientpositive/extrapolate_part_stats_date.q.out
index f1cd05cb9b..48a095191e 100644
--- a/ql/src/test/results/clientpositive/extrapolate_part_stats_date.q.out
+++ b/ql/src/test/results/clientpositive/extrapolate_part_stats_date.q.out
@@ -148,7 +148,7 @@ STAGE PLANS:
             serialization.ddl struct date_dim_n1 { date d_date}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 193
+            totalSize 199
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -194,7 +194,7 @@ STAGE PLANS:
             serialization.ddl struct date_dim_n1 { date d_date}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 193
+            totalSize 199
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -240,7 +240,7 @@ STAGE PLANS:
             serialization.ddl struct date_dim_n1 { date d_date}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 193
+            totalSize 199
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -286,7 +286,7 @@ STAGE PLANS:
             serialization.ddl struct date_dim_n1 { date d_date}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 193
+            totalSize 199
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
diff --git a/ql/src/test/results/clientpositive/extrapolate_part_stats_full.q.out b/ql/src/test/results/clientpositive/extrapolate_part_stats_full.q.out
index bec6dd4a0e..f80599db84 100644
--- a/ql/src/test/results/clientpositive/extrapolate_part_stats_full.q.out
+++ b/ql/src/test/results/clientpositive/extrapolate_part_stats_full.q.out
@@ -135,7 +135,7 @@ STAGE PLANS:
             serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 369
+            totalSize 383
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -181,7 +181,7 @@ STAGE PLANS:
             serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 389
+            totalSize 404
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -250,7 +250,7 @@ STAGE PLANS:
             serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 369
+            totalSize 383
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -296,7 +296,7 @@ STAGE PLANS:
             serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 389
+            totalSize 404
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -455,7 +455,7 @@ STAGE PLANS:
             serialization.ddl struct loc_orc_2d { string state, i32 locid}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 281
+            totalSize 291
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -502,7 +502,7 @@ STAGE PLANS:
             serialization.ddl struct loc_orc_2d { string state, i32 locid}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 283
+            totalSize 293
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -549,7 +549,7 @@ STAGE PLANS:
             serialization.ddl struct loc_orc_2d { string state, i32 locid}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 265
+            totalSize 275
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -596,7 +596,7 @@ STAGE PLANS:
             serialization.ddl struct loc_orc_2d { string state, i32 locid}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 265
+            totalSize 275
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -666,7 +666,7 @@ STAGE PLANS:
             serialization.ddl struct loc_orc_2d { string state, i32 locid}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 281
+            totalSize 291
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -713,7 +713,7 @@ STAGE PLANS:
             serialization.ddl struct loc_orc_2d { string state, i32 locid}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 283
+            totalSize 293
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -760,7 +760,7 @@ STAGE PLANS:
             serialization.ddl struct loc_orc_2d { string state, i32 locid}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 265
+            totalSize 275
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -807,7 +807,7 @@ STAGE PLANS:
             serialization.ddl struct loc_orc_2d { string state, i32 locid}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 265
+            totalSize 275
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
diff --git a/ql/src/test/results/clientpositive/extrapolate_part_stats_partial.q.out b/ql/src/test/results/clientpositive/extrapolate_part_stats_partial.q.out
index 1a13b21e4e..e1024bebdb 100644
--- a/ql/src/test/results/clientpositive/extrapolate_part_stats_partial.q.out
+++ b/ql/src/test/results/clientpositive/extrapolate_part_stats_partial.q.out
@@ -161,7 +161,7 @@ STAGE PLANS:
             serialization.ddl struct loc_orc_1d_n1 { string state, i32 locid, i32 zip}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 369
+            totalSize 383
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -207,7 +207,7 @@ STAGE PLANS:
             serialization.ddl struct loc_orc_1d_n1 { string state, i32 locid, i32 zip}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 390
+            totalSize 405
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -253,7 +253,7 @@ STAGE PLANS:
             serialization.ddl struct loc_orc_1d_n1 { string state, i32 locid, i32 zip}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 410
+            totalSize 426
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -299,7 +299,7 @@ STAGE PLANS:
             serialization.ddl struct loc_orc_1d_n1 { string state, i32 locid, i32 zip}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 419
+            totalSize 433
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -368,7 +368,7 @@ STAGE PLANS:
             serialization.ddl struct loc_orc_1d_n1 { string state, i32 locid, i32 zip}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 369
+            totalSize 383
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -414,7 +414,7 @@ STAGE PLANS:
             serialization.ddl struct loc_orc_1d_n1 { string state, i32 locid, i32 zip}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 390
+            totalSize 405
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -460,7 +460,7 @@ STAGE PLANS:
             serialization.ddl struct loc_orc_1d_n1 { string state, i32 locid, i32 zip}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 410
+            totalSize 426
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -506,7 +506,7 @@ STAGE PLANS:
             serialization.ddl struct loc_orc_1d_n1 { string state, i32 locid, i32 zip}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 419
+            totalSize 433
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -603,7 +603,7 @@ STAGE PLANS:
             serialization.ddl struct loc_orc_1d_n1 { string state, i32 locid, i32 zip}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 369
+            totalSize 383
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -649,7 +649,7 @@ STAGE PLANS:
             serialization.ddl struct loc_orc_1d_n1 { string state, i32 locid, i32 zip}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 390
+            totalSize 405
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -695,7 +695,7 @@ STAGE PLANS:
             serialization.ddl struct loc_orc_1d_n1 { string state, i32 locid, i32 zip}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 410
+            totalSize 426
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -741,7 +741,7 @@ STAGE PLANS:
             serialization.ddl struct loc_orc_1d_n1 { string state, i32 locid, i32 zip}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 419
+            totalSize 433
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -810,7 +810,7 @@ STAGE PLANS:
             serialization.ddl struct loc_orc_1d_n1 { string state, i32 locid, i32 zip}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 369
+            totalSize 383
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -856,7 +856,7 @@ STAGE PLANS:
             serialization.ddl struct loc_orc_1d_n1 { string state, i32 locid, i32 zip}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 390
+            totalSize 405
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -902,7 +902,7 @@ STAGE PLANS:
             serialization.ddl struct loc_orc_1d_n1 { string state, i32 locid, i32 zip}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 410
+            totalSize 426
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -948,7 +948,7 @@ STAGE PLANS:
             serialization.ddl struct loc_orc_1d_n1 { string state, i32 locid, i32 zip}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 419
+            totalSize 433
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -1100,7 +1100,7 @@ STAGE PLANS:
             serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 286
+            totalSize 296
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -1147,7 +1147,7 @@ STAGE PLANS:
             serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 295
+            totalSize 305
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -1194,7 +1194,7 @@ STAGE PLANS:
             serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 302
+            totalSize 312
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -1241,7 +1241,7 @@ STAGE PLANS:
             serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 281
+            totalSize 291
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -1288,7 +1288,7 @@ STAGE PLANS:
             serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 276
+            totalSize 286
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -1335,7 +1335,7 @@ STAGE PLANS:
             serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 288
+            totalSize 298
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -1382,7 +1382,7 @@ STAGE PLANS:
             serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 297
+            totalSize 307
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -1429,7 +1429,7 @@ STAGE PLANS:
             serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 265
+            totalSize 275
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -1476,7 +1476,7 @@ STAGE PLANS:
             serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 265
+            totalSize 275
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -1523,7 +1523,7 @@ STAGE PLANS:
             serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 298
+            totalSize 308
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -1570,7 +1570,7 @@ STAGE PLANS:
             serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 295
+            totalSize 305
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -1640,7 +1640,7 @@ STAGE PLANS:
             serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 286
+            totalSize 296
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -1687,7 +1687,7 @@ STAGE PLANS:
             serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 295
+            totalSize 305
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -1734,7 +1734,7 @@ STAGE PLANS:
             serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 302
+            totalSize 312
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -1781,7 +1781,7 @@ STAGE PLANS:
             serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 281
+            totalSize 291
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -1828,7 +1828,7 @@ STAGE PLANS:
             serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 276
+            totalSize 286
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -1875,7 +1875,7 @@ STAGE PLANS:
             serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 288
+            totalSize 298
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -1922,7 +1922,7 @@ STAGE PLANS:
             serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 297
+            totalSize 307
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -1969,7 +1969,7 @@ STAGE PLANS:
             serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 265
+            totalSize 275
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -2016,7 +2016,7 @@ STAGE PLANS:
             serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 265
+            totalSize 275
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -2063,7 +2063,7 @@ STAGE PLANS:
             serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 298
+            totalSize 308
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -2110,7 +2110,7 @@ STAGE PLANS:
             serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 295
+            totalSize 305
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
diff --git a/ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out b/ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out
index c89c22ceba..e9fef82e13 100644
--- a/ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out
+++ b/ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out
@@ -45,7 +45,7 @@ STAGE PLANS:
            alias: acidtbldefault
            filterExpr: (a = 1) (type: boolean)
            buckets included: [13,] of 16
-            Statistics: Num rows: 1837 Data size: 6988 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 1854 Data size: 7052 Basic stats: COMPLETE Column stats: NONE
            GatherStats: false
            Filter Operator
              isSamplingPred: false
@@ -102,7 +102,7 @@ STAGE PLANS:
             serialization.ddl struct acidtbldefault { i32 a}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 34540
+            totalSize 34863
             transactional true
             transactional_properties default
 #### A masked pattern was here ####
@@ -125,7 +125,7 @@ STAGE PLANS:
             serialization.ddl struct acidtbldefault { i32 a}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-            totalSize 34540
+            totalSize 34863
             transactional true
             transactional_properties default
 #### A masked pattern was here ####
diff --git a/ql/src/test/results/clientpositive/llap/acid_vectorization_original.q.out b/ql/src/test/results/clientpositive/llap/acid_vectorization_original.q.out
index 38a97700e6..dafd5d9756 100644
--- a/ql/src/test/results/clientpositive/llap/acid_vectorization_original.q.out
+++ b/ql/src/test/results/clientpositive/llap/acid_vectorization_original.q.out
@@ -665,22 +665,22 @@ STAGE PLANS:
      Map Operator Tree:
          TableScan
            alias: over10k_orc_bucketed
-            Statistics: Num rows: 1229 Data size: 703430 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1241 Data size: 710230 Basic stats: COMPLETE Column stats: COMPLETE
            Select Operator
              expressions: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
              outputColumnNames: ROW__ID
-              Statistics: Num rows: 1229 Data size: 703430 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 1241 Data size: 710230 Basic stats: COMPLETE Column stats: COMPLETE
              Group By Operator
                aggregations: count()
                keys: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                mode: hash
                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 614 Data size: 51576 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 620 Data size: 52080 Basic stats: COMPLETE Column stats: COMPLETE
                Reduce Output Operator
                  key expressions: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                  sort order: +
                  Map-reduce partition columns: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
-                  Statistics: Num rows: 614 Data size: 51576 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 620 Data size: 52080 Basic stats: COMPLETE Column stats: COMPLETE
                  value expressions: _col1 (type: bigint)
      Execution mode: llap
      LLAP IO: may be used (ACID table)
@@ -692,13 +692,13 @@ STAGE PLANS:
        keys: KEY._col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
        mode: mergepartial
        outputColumnNames: _col0, _col1
-        Statistics: Num rows: 614 Data size: 51576 Basic stats: COMPLETE Column stats: COMPLETE
+        Statistics: Num rows: 620 Data size: 52080 Basic stats: COMPLETE Column stats: COMPLETE
        Filter Operator
          predicate: (_col1 > 1L) (type: boolean)
-          Statistics: Num rows: 204 Data size: 17136 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 206 Data size: 17304 Basic stats: COMPLETE Column stats: COMPLETE
          File Output Operator
            compressed: false
-            Statistics: Num rows: 204 Data size: 17136 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 206 Data size: 17304 Basic stats: COMPLETE Column stats: COMPLETE
            table:
                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
diff --git a/ql/src/test/results/clientpositive/llap/alter_merge_orc.q.out b/ql/src/test/results/clientpositive/llap/alter_merge_orc.q.out
index 7b33e8e93d..cf9094061e 100644
--- a/ql/src/test/results/clientpositive/llap/alter_merge_orc.q.out
+++ b/ql/src/test/results/clientpositive/llap/alter_merge_orc.q.out
@@ -48,9 +48,9 @@ columns:struct columns { i32 key, string value}
 partitioned:false
 partitionColumns:
 totalNumberFiles:3
-totalFileSize:7545
-maxFileSize:2515
-minFileSize:2515
+totalFileSize:7590
+maxFileSize:2530
+minFileSize:2530
 #### A masked pattern was here ####
 
 PREHOOK: query: select count(1) from src_orc_merge_test
@@ -91,9 +91,9 @@ columns:struct columns { i32 key, string value}
 partitioned:false
 partitionColumns:
 totalNumberFiles:1
-totalFileSize:7198
-maxFileSize:7198
-minFileSize:7198
+totalFileSize:7214
+maxFileSize:7214
+minFileSize:7214
 #### A masked pattern was here ####
 
 PREHOOK: query: select count(1) from src_orc_merge_test
@@ -171,9 +171,9 @@ columns:struct columns { i32 key, string value}
 partitioned:true
 partitionColumns:struct partition_columns { string ds}
 totalNumberFiles:3
-totalFileSize:7545
-maxFileSize:2515
-minFileSize:2515
+totalFileSize:7590
+maxFileSize:2530
+minFileSize:2530
 #### A masked pattern was here ####
 
 PREHOOK: query: select count(1) from src_orc_merge_test_part_n2
@@ -218,9 +218,9 @@ columns:struct columns { i32 key, string value}
 partitioned:true
 partitionColumns:struct partition_columns { string ds}
 totalNumberFiles:1
-totalFileSize:7198
-maxFileSize:7198
-minFileSize:7198
+totalFileSize:7214
+maxFileSize:7214
+minFileSize:7214
 #### A masked pattern was here ####
 
 PREHOOK: query: select count(1) from src_orc_merge_test_part_n2
diff --git a/ql/src/test/results/clientpositive/llap/alter_merge_stats_orc.q.out b/ql/src/test/results/clientpositive/llap/alter_merge_stats_orc.q.out
index c8b831df79..5ed7d70582 100644
--- a/ql/src/test/results/clientpositive/llap/alter_merge_stats_orc.q.out
+++ b/ql/src/test/results/clientpositive/llap/alter_merge_stats_orc.q.out
@@ -48,9 +48,9 @@ columns:struct columns { i32 key, string value}
 partitioned:false
 partitionColumns:
 totalNumberFiles:3
-totalFileSize:7545
-maxFileSize:2515
-minFileSize:2515
+totalFileSize:7590
+maxFileSize:2530
+minFileSize:2530
 #### A masked pattern was here ####
 
 PREHOOK: query: desc extended src_orc_merge_test_stat
@@ -93,7 +93,7 @@ Table Parameters:
 	numFiles 3
 	numRows 1500
 	rawDataSize 141000
-	totalSize 7545
+	totalSize 7590
 #### A masked pattern was here ####
 
 # Storage Information
@@ -144,7 +144,7 @@ Table Parameters:
 	numFiles 1
 	numRows 1500
 	rawDataSize 141000
-	totalSize 7198
+	totalSize 7214
 #### A masked pattern was here ####
 
 # Storage Information
@@ -214,9 +214,9 @@ columns:struct columns { i32 key, string value}
 partitioned:true
 partitionColumns:struct partition_columns { string ds}
 totalNumberFiles:3
-totalFileSize:7545
-maxFileSize:2515
-minFileSize:2515
+totalFileSize:7590
+maxFileSize:2530
+minFileSize:2530
 #### A masked pattern was here ####
 
 PREHOOK: query: desc formatted src_orc_merge_test_part_stat partition (ds='2011')
@@ -243,7 +243,7 @@ Partition Parameters:
 	numFiles 3
 	numRows 1500
 	rawDataSize 141000
-	totalSize 7545
totalSize 7590 #### A masked pattern was here #### # Storage Information @@ -290,7 +290,7 @@ Partition Parameters: numFiles 3 numRows 1500 rawDataSize 141000 - totalSize 7545 + totalSize 7590 #### A masked pattern was here #### # Storage Information @@ -345,7 +345,7 @@ Partition Parameters: numFiles 1 numRows 1500 rawDataSize 141000 - totalSize 7198 + totalSize 7214 #### A masked pattern was here #### # Storage Information diff --git a/ql/src/test/results/clientpositive/llap/column_table_stats_orc.q.out b/ql/src/test/results/clientpositive/llap/column_table_stats_orc.q.out index 11a9c0ecbe..658b05702f 100644 --- a/ql/src/test/results/clientpositive/llap/column_table_stats_orc.q.out +++ b/ql/src/test/results/clientpositive/llap/column_table_stats_orc.q.out @@ -42,7 +42,7 @@ Table Parameters: numFiles 1 numRows 1 rawDataSize 170 - totalSize 273 + totalSize 283 #### A masked pattern was here #### # Storage Information @@ -120,7 +120,7 @@ STAGE PLANS: serialization.ddl struct s_n0 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 273 + totalSize 283 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -142,7 +142,7 @@ STAGE PLANS: serialization.ddl struct s_n0 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 273 + totalSize 283 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.s_n0 @@ -222,7 +222,7 @@ Table Parameters: numFiles 1 numRows 1 rawDataSize 170 - totalSize 273 + totalSize 283 #### A masked pattern was here #### # Storage Information @@ -299,7 +299,7 @@ Table Parameters: numPartitions 2 numRows 2 rawDataSize 340 - totalSize 546 + totalSize 566 #### A masked pattern was here #### # Storage Information @@ -380,7 +380,7 @@ STAGE PLANS: serialization.ddl struct spart_n0 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 273 + totalSize 283 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -425,7 +425,7 @@ STAGE PLANS: serialization.ddl struct spart_n0 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 273 + totalSize 283 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -544,7 +544,7 @@ Table Parameters: numPartitions 2 numRows 2 rawDataSize 340 - totalSize 546 + totalSize 566 #### A masked pattern was here #### # Storage Information @@ -582,7 +582,7 @@ Partition Parameters: numFiles 1 numRows 1 rawDataSize 170 - totalSize 273 + totalSize 283 #### A masked pattern was here #### # Storage Information @@ -620,7 +620,7 @@ Partition Parameters: numFiles 1 numRows 1 rawDataSize 170 - totalSize 273 + totalSize 283 #### A masked pattern was here #### # Storage Information @@ -701,7 +701,7 @@ Table Parameters: numPartitions 2 numRows 2 rawDataSize 340 - totalSize 546 + totalSize 566 #### A masked pattern was here #### # Storage Information @@ -782,7 +782,7 @@ STAGE PLANS: serialization.ddl struct spart_n0 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 273 + totalSize 283 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -896,7 +896,7 @@ Table Parameters: numPartitions 2 numRows 2 rawDataSize 340 - totalSize 546 + totalSize 566 #### A masked 
pattern was here #### # Storage Information @@ -934,7 +934,7 @@ Partition Parameters: numFiles 1 numRows 1 rawDataSize 170 - totalSize 273 + totalSize 283 #### A masked pattern was here #### # Storage Information @@ -972,7 +972,7 @@ Partition Parameters: numFiles 1 numRows 1 rawDataSize 170 - totalSize 273 + totalSize 283 #### A masked pattern was here #### # Storage Information diff --git a/ql/src/test/results/clientpositive/llap/default_constraint.q.out b/ql/src/test/results/clientpositive/llap/default_constraint.q.out index cb69af7579..3365569d70 100644 --- a/ql/src/test/results/clientpositive/llap/default_constraint.q.out +++ b/ql/src/test/results/clientpositive/llap/default_constraint.q.out @@ -1498,7 +1498,7 @@ Table Type: MANAGED_TABLE Table Parameters: bucketing_version 2 numFiles 1 - totalSize 1070 + totalSize 1102 transactional true transactional_properties default #### A masked pattern was here #### @@ -1668,7 +1668,7 @@ Table Parameters: bucketing_version 2 #### A masked pattern was here #### numFiles 2 - totalSize 2140 + totalSize 2204 transactional true transactional_properties default #### A masked pattern was here #### @@ -1748,7 +1748,7 @@ Table Parameters: bucketing_version 2 #### A masked pattern was here #### numFiles 2 - totalSize 2140 + totalSize 2204 transactional true transactional_properties default #### A masked pattern was here #### @@ -1925,7 +1925,7 @@ Table Parameters: bucketing_version 2 #### A masked pattern was here #### numFiles 3 - totalSize 3199 + totalSize 3296 transactional true transactional_properties default #### A masked pattern was here #### @@ -2004,7 +2004,7 @@ Table Parameters: bucketing_version 2 #### A masked pattern was here #### numFiles 3 - totalSize 3199 + totalSize 3296 transactional true transactional_properties default #### A masked pattern was here #### @@ -2084,7 +2084,7 @@ Table Parameters: bucketing_version 2 #### A masked pattern was here #### numFiles 3 - totalSize 3199 + totalSize 3296 transactional true transactional_properties default #### A masked pattern was here #### @@ -2660,7 +2660,7 @@ Table Type: MANAGED_TABLE Table Parameters: bucketing_version 2 numFiles 1 - totalSize 1070 + totalSize 1102 transactional true transactional_properties default #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/llap/deleteAnalyze.q.out b/ql/src/test/results/clientpositive/llap/deleteAnalyze.q.out index a125fd6229..bf82b32046 100644 --- a/ql/src/test/results/clientpositive/llap/deleteAnalyze.q.out +++ b/ql/src/test/results/clientpositive/llap/deleteAnalyze.q.out @@ -54,7 +54,7 @@ Table Parameters: numFiles 1 numRows 2 rawDataSize 634 - totalSize 578 + totalSize 579 #### A masked pattern was here #### # Storage Information diff --git a/ql/src/test/results/clientpositive/llap/dynamic_semijoin_reduction.q.out b/ql/src/test/results/clientpositive/llap/dynamic_semijoin_reduction.q.out index 22f2860cfa..2c448df873 100644 --- a/ql/src/test/results/clientpositive/llap/dynamic_semijoin_reduction.q.out +++ b/ql/src/test/results/clientpositive/llap/dynamic_semijoin_reduction.q.out @@ -1600,7 +1600,7 @@ STAGE PLANS: serialization.ddl struct srcpart_date_n7 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 3038 + totalSize 3052 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -1644,7 +1644,7 @@ STAGE PLANS: serialization.ddl struct srcpart_date_n7 { string key, string value} serialization.format 1 
serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 3038 + totalSize 3052 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -1780,7 +1780,7 @@ STAGE PLANS: serialization.ddl struct srcpart_small_n3 { string key1, string value1} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 459 + totalSize 469 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde diff --git a/ql/src/test/results/clientpositive/llap/dynamic_semijoin_user_level.q.out b/ql/src/test/results/clientpositive/llap/dynamic_semijoin_user_level.q.out index 048712eed1..eefa592c1e 100644 --- a/ql/src/test/results/clientpositive/llap/dynamic_semijoin_user_level.q.out +++ b/ql/src/test/results/clientpositive/llap/dynamic_semijoin_user_level.q.out @@ -866,7 +866,7 @@ STAGE PLANS: serialization.ddl struct srcpart_date_n9 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 3038 + totalSize 3052 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -912,7 +912,7 @@ STAGE PLANS: serialization.ddl struct srcpart_date_n9 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 3038 + totalSize 3052 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -1053,7 +1053,7 @@ STAGE PLANS: serialization.ddl struct srcpart_small_n4 { string key1, string value1} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 459 + totalSize 469 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde diff --git a/ql/src/test/results/clientpositive/llap/dynpart_sort_opt_vectorization.q.out b/ql/src/test/results/clientpositive/llap/dynpart_sort_opt_vectorization.q.out index 709100f118..29be9a4cd7 100644 --- a/ql/src/test/results/clientpositive/llap/dynpart_sort_opt_vectorization.q.out +++ b/ql/src/test/results/clientpositive/llap/dynpart_sort_opt_vectorization.q.out @@ -980,7 +980,7 @@ Partition Parameters: numFiles 2 numRows 32 rawDataSize 640 - totalSize 1392 + totalSize 1428 #### A masked pattern was here #### # Storage Information @@ -1020,7 +1020,7 @@ Partition Parameters: numFiles 2 numRows 6 rawDataSize 120 - totalSize 1096 + totalSize 1132 #### A masked pattern was here #### # Storage Information @@ -1060,7 +1060,7 @@ Partition Parameters: numFiles 2 numRows 14 rawDataSize 280 - totalSize 1210 + totalSize 1246 #### A masked pattern was here #### # Storage Information @@ -1100,7 +1100,7 @@ Partition Parameters: numFiles 2 numRows 6 rawDataSize 120 - totalSize 1096 + totalSize 1132 #### A masked pattern was here #### # Storage Information @@ -1139,7 +1139,7 @@ Partition Parameters: numFiles 2 numRows 32 rawDataSize 640 - totalSize 1424 + totalSize 1460 #### A masked pattern was here #### # Storage Information @@ -1178,7 +1178,7 @@ Partition Parameters: numFiles 2 numRows 4 rawDataSize 80 - totalSize 936 + totalSize 968 #### A masked pattern was here #### # Storage Information @@ -1217,7 +1217,7 @@ Partition Parameters: numFiles 2 numRows 32 rawDataSize 640 - totalSize 1416 + totalSize 1444 #### A masked pattern was here #### # Storage Information @@ -1256,7 +1256,7 @@ Partition Parameters: numFiles 2 numRows 4 rawDataSize 80 - totalSize 944 + totalSize 978 #### A masked pattern was here #### # Storage Information diff --git 
a/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization2.q.out b/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization2.q.out index 72f3b8bf63..0b57fbc1fe 100644 --- a/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization2.q.out +++ b/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization2.q.out @@ -1287,7 +1287,7 @@ Partition Parameters: numFiles 1 numRows 11 rawDataSize 88 - totalSize 454 + totalSize 464 #### A masked pattern was here #### # Storage Information @@ -1345,7 +1345,7 @@ Partition Parameters: numFiles 1 numRows 13 rawDataSize 104 - totalSize 477 + totalSize 487 #### A masked pattern was here #### # Storage Information @@ -1540,7 +1540,7 @@ Partition Parameters: numFiles 1 numRows 11 rawDataSize 88 - totalSize 454 + totalSize 464 #### A masked pattern was here #### # Storage Information @@ -1598,7 +1598,7 @@ Partition Parameters: numFiles 1 numRows 13 rawDataSize 104 - totalSize 477 + totalSize 487 #### A masked pattern was here #### # Storage Information diff --git a/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization_acid.q.out b/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization_acid.q.out index fa51dd3e27..8dff1076eb 100644 --- a/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization_acid.q.out +++ b/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization_acid.q.out @@ -94,19 +94,19 @@ STAGE PLANS: Map Operator Tree: TableScan alias: acid_part - Statistics: Num rows: 158 Data size: 60174 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 160 Data size: 61011 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (key = 'foo') (type: boolean) - Statistics: Num rows: 5 Data size: 1904 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 1906 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ROW__ID (type: struct) outputColumnNames: _col0 - Statistics: Num rows: 5 Data size: 1904 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 1906 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: struct) sort order: + Map-reduce partition columns: UDFToInteger(_col0) (type: int) - Statistics: Num rows: 5 Data size: 1904 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 1906 Basic stats: COMPLETE Column stats: NONE Execution mode: llap LLAP IO: may be used (ACID table) Reducer 2 @@ -115,10 +115,10 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: struct), 'foo' (type: string), 'bar' (type: string), '2008-04-08' (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 5 Data size: 1904 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 1906 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 5 Data size: 1904 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 1906 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -189,7 +189,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: acid_part - Statistics: Num rows: 156 Data size: 102424 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 159 Data size: 104357 Basic stats: COMPLETE Column stats: PARTIAL Filter Operator predicate: (key = 'foo') (type: boolean) Statistics: Num rows: 5 Data 
size: 1355 Basic stats: COMPLETE Column stats: PARTIAL @@ -380,19 +380,19 @@ STAGE PLANS: Map Operator Tree: TableScan alias: acid_part_sdpo - Statistics: Num rows: 173 Data size: 66062 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 176 Data size: 67083 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (key = 'foo') (type: boolean) - Statistics: Num rows: 5 Data size: 1909 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 1905 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ROW__ID (type: struct) outputColumnNames: _col0 - Statistics: Num rows: 5 Data size: 1909 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 1905 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: struct) sort order: + Map-reduce partition columns: UDFToInteger(_col0) (type: int) - Statistics: Num rows: 5 Data size: 1909 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 1905 Basic stats: COMPLETE Column stats: NONE Execution mode: llap LLAP IO: may be used (ACID table) Reducer 2 @@ -401,10 +401,10 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: struct), 'foo' (type: string), 'bar' (type: string), '2008-04-08' (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 5 Data size: 1909 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 1905 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 5 Data size: 1909 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 1905 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -475,7 +475,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: acid_part_sdpo - Statistics: Num rows: 168 Data size: 110259 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 171 Data size: 112202 Basic stats: COMPLETE Column stats: PARTIAL Filter Operator predicate: (key = 'foo') (type: boolean) Statistics: Num rows: 5 Data size: 1355 Basic stats: COMPLETE Column stats: PARTIAL @@ -675,19 +675,19 @@ STAGE PLANS: Map Operator Tree: TableScan alias: acid_2l_part - Statistics: Num rows: 155 Data size: 59623 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 157 Data size: 60537 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (key = 'foo') (type: boolean) - Statistics: Num rows: 5 Data size: 1923 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 1927 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ROW__ID (type: struct) outputColumnNames: _col0 - Statistics: Num rows: 5 Data size: 1923 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 1927 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: struct) sort order: + Map-reduce partition columns: UDFToInteger(_col0) (type: int) - Statistics: Num rows: 5 Data size: 1923 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 1927 Basic stats: COMPLETE Column stats: NONE Execution mode: llap LLAP IO: may be used (ACID table) Reducer 2 @@ -696,10 +696,10 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: struct), 'foo' (type: string), 'bar' (type: string), '2008-04-08' (type: string), 11 (type: int) 
outputColumnNames: _col0, _col1, _col2, _col3, _col4 - Statistics: Num rows: 5 Data size: 1923 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 1927 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 5 Data size: 1923 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 1927 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -1095,19 +1095,19 @@ STAGE PLANS: Map Operator Tree: TableScan alias: acid_2l_part_sdpo - Statistics: Num rows: 155 Data size: 59623 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 157 Data size: 60537 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (key = 'foo') (type: boolean) - Statistics: Num rows: 5 Data size: 1923 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 1927 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ROW__ID (type: struct) outputColumnNames: _col0 - Statistics: Num rows: 5 Data size: 1923 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 1927 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: struct) sort order: + Map-reduce partition columns: UDFToInteger(_col0) (type: int) - Statistics: Num rows: 5 Data size: 1923 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 1927 Basic stats: COMPLETE Column stats: NONE Execution mode: llap LLAP IO: may be used (ACID table) Reducer 2 @@ -1116,10 +1116,10 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: struct), 'foo' (type: string), 'bar' (type: string), '2008-04-08' (type: string), 11 (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4 - Statistics: Num rows: 5 Data size: 1923 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 1927 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 5 Data size: 1923 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 1927 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -1515,7 +1515,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: acid_2l_part_sdpo_no_cp - Statistics: Num rows: 95 Data size: 81448 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 97 Data size: 82932 Basic stats: COMPLETE Column stats: PARTIAL Filter Operator predicate: (key = 'foo') (type: boolean) Statistics: Num rows: 5 Data size: 1860 Basic stats: COMPLETE Column stats: PARTIAL diff --git a/ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out b/ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out index 5a9d263267..84477c337f 100644 --- a/ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out +++ b/ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out @@ -3233,19 +3233,19 @@ STAGE PLANS: Map Operator Tree: TableScan alias: acid_uami_n1 - Statistics: Num rows: 262 Data size: 82000 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 267 Data size: 83640 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (((de = 109.23) or (de = 119.23)) and enforce_constraint(vc is not null)) (type: boolean) - 
Statistics: Num rows: 5 Data size: 1564 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 1566 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ROW__ID (type: struct), i (type: int), vc (type: varchar(128)) outputColumnNames: _col0, _col1, _col3 - Statistics: Num rows: 5 Data size: 1564 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 1566 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: struct) sort order: + Map-reduce partition columns: UDFToInteger(_col0) (type: int) - Statistics: Num rows: 5 Data size: 1564 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 1566 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: int), _col3 (type: varchar(128)) Execution mode: vectorized, llap LLAP IO: may be used (ACID table) @@ -3255,10 +3255,10 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: struct), VALUE._col0 (type: int), 3.14 (type: decimal(5,2)), VALUE._col1 (type: varchar(128)) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 5 Data size: 1564 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 1566 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 5 Data size: 1564 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 1566 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -3326,7 +3326,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: acid_uami_n1 - Statistics: Num rows: 300 Data size: 93808 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 305 Data size: 95448 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: ((de = 3.14) and enforce_constraint((i is not null and vc is not null))) (type: boolean) Statistics: Num rows: 2 Data size: 625 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/llap/extrapolate_part_stats_partial_ndv.q.out b/ql/src/test/results/clientpositive/llap/extrapolate_part_stats_partial_ndv.q.out index 8a0da7d73d..67eabcbaa8 100644 --- a/ql/src/test/results/clientpositive/llap/extrapolate_part_stats_partial_ndv.q.out +++ b/ql/src/test/results/clientpositive/llap/extrapolate_part_stats_partial_ndv.q.out @@ -285,7 +285,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_1d_n0 { string state, double locid, decimal(10,0) cnt, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 531 + totalSize 545 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -331,7 +331,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_1d_n0 { string state, double locid, decimal(10,0) cnt, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 562 + totalSize 576 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -377,7 +377,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_1d_n0 { string state, double locid, decimal(10,0) cnt, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 580 + totalSize 595 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -423,7 +423,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_1d_n0 { string state, 
double locid, decimal(10,0) cnt, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 602 + totalSize 612 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -662,7 +662,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_1d_n0 { string state, double locid, decimal(10,0) cnt, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 531 + totalSize 545 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -708,7 +708,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_1d_n0 { string state, double locid, decimal(10,0) cnt, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 562 + totalSize 576 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -754,7 +754,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_1d_n0 { string state, double locid, decimal(10,0) cnt, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 580 + totalSize 595 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -800,7 +800,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_1d_n0 { string state, double locid, decimal(10,0) cnt, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 602 + totalSize 612 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -1075,7 +1075,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_2d_n0 { string state, i32 locid, decimal(10,0) cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 386 + totalSize 402 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -1122,7 +1122,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_2d_n0 { string state, i32 locid, decimal(10,0) cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 409 + totalSize 424 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -1169,7 +1169,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_2d_n0 { string state, i32 locid, decimal(10,0) cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 423 + totalSize 432 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -1216,7 +1216,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_2d_n0 { string state, i32 locid, decimal(10,0) cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 383 + totalSize 401 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -1263,7 +1263,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_2d_n0 { string state, i32 locid, decimal(10,0) cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 394 + totalSize 405 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -1310,7 +1310,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_2d_n0 { string state, i32 locid, decimal(10,0) cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 387 + totalSize 405 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -1357,7 +1357,7 @@ STAGE PLANS: serialization.ddl struct 
loc_orc_2d_n0 { string state, i32 locid, decimal(10,0) cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 409 + totalSize 424 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -1404,7 +1404,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_2d_n0 { string state, i32 locid, decimal(10,0) cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 366 + totalSize 380 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -1451,7 +1451,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_2d_n0 { string state, i32 locid, decimal(10,0) cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 361 + totalSize 375 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -1498,7 +1498,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_2d_n0 { string state, i32 locid, decimal(10,0) cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 412 + totalSize 424 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -1545,7 +1545,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_2d_n0 { string state, i32 locid, decimal(10,0) cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 412 + totalSize 424 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde diff --git a/ql/src/test/results/clientpositive/llap/insert_values_orig_table_use_metadata.q.out b/ql/src/test/results/clientpositive/llap/insert_values_orig_table_use_metadata.q.out index b0089efc7a..d61917d761 100644 --- a/ql/src/test/results/clientpositive/llap/insert_values_orig_table_use_metadata.q.out +++ b/ql/src/test/results/clientpositive/llap/insert_values_orig_table_use_metadata.q.out @@ -170,7 +170,7 @@ Table Type: MANAGED_TABLE Table Parameters: bucketing_version 2 numFiles 1 - totalSize 295436 + totalSize 295544 transactional true transactional_properties default #### A masked pattern was here #### @@ -205,9 +205,9 @@ STAGE PLANS: Map Operator Tree: TableScan alias: acid_ivot - Statistics: Num rows: 5861 Data size: 2954360 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 5863 Data size: 2955440 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - Statistics: Num rows: 5861 Data size: 2954360 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 5863 Data size: 2955440 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count() mode: hash @@ -376,7 +376,7 @@ Table Type: MANAGED_TABLE Table Parameters: bucketing_version 2 numFiles 1 - totalSize 1572 + totalSize 1625 transactional true transactional_properties default #### A masked pattern was here #### @@ -411,9 +411,9 @@ STAGE PLANS: Map Operator Tree: TableScan alias: acid_ivot - Statistics: Num rows: 31 Data size: 15720 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 32 Data size: 16250 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - Statistics: Num rows: 31 Data size: 15720 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 32 Data size: 16250 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count() mode: hash @@ -509,7 +509,7 @@ Table Type: MANAGED_TABLE Table Parameters: bucketing_version 2 numFiles 2 - totalSize 3146 + totalSize 3250 transactional true 
transactional_properties default #### A masked pattern was here #### @@ -544,9 +544,9 @@ STAGE PLANS: Map Operator Tree: TableScan alias: acid_ivot - Statistics: Num rows: 62 Data size: 31460 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 64 Data size: 32500 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - Statistics: Num rows: 62 Data size: 31460 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 64 Data size: 32500 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count() mode: hash @@ -638,7 +638,7 @@ Table Type: MANAGED_TABLE Table Parameters: bucketing_version 2 numFiles 3 - totalSize 298582 + totalSize 298795 transactional true transactional_properties default #### A masked pattern was here #### @@ -673,9 +673,9 @@ STAGE PLANS: Map Operator Tree: TableScan alias: acid_ivot - Statistics: Num rows: 5924 Data size: 2985820 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 5928 Data size: 2987950 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - Statistics: Num rows: 5924 Data size: 2985820 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 5928 Data size: 2987950 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count() mode: hash diff --git a/ql/src/test/results/clientpositive/llap/llap_nullscan.q.out b/ql/src/test/results/clientpositive/llap/llap_nullscan.q.out index 2393450142..7bf003eb18 100644 --- a/ql/src/test/results/clientpositive/llap/llap_nullscan.q.out +++ b/ql/src/test/results/clientpositive/llap/llap_nullscan.q.out @@ -101,7 +101,7 @@ STAGE PLANS: serialization.ddl struct src_orc_n1 { string key, string value, string ds, string hr} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.NullStructSerDe - totalSize 626 + totalSize 644 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.NullStructSerDe @@ -124,7 +124,7 @@ STAGE PLANS: serialization.ddl struct src_orc_n1 { string key, string value, string ds, string hr} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 626 + totalSize 644 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.src_orc_n1 diff --git a/ql/src/test/results/clientpositive/llap/llap_partitioned.q.out b/ql/src/test/results/clientpositive/llap/llap_partitioned.q.out index d827addbbe..faab23c3a3 100644 --- a/ql/src/test/results/clientpositive/llap/llap_partitioned.q.out +++ b/ql/src/test/results/clientpositive/llap/llap_partitioned.q.out @@ -1696,7 +1696,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: od - Statistics: Num rows: 10 Data size: 2640 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 10 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE TableScan Vectorization: native: true vectorizationSchemaColumns: [0:csmallint:smallint, 1:cint:int, 2:cbigint:bigint, 3:cfloat:float, 4:cdouble:double, 5:cstring1:string, 6:cchar1:char(255), 7:cvchar1:varchar(255), 8:cboolean1:boolean, 9:cboolean2:boolean, 10:ctinyint:tinyint, 11:ROW__ID:struct] @@ -1710,7 +1710,7 @@ STAGE PLANS: native: true nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true valueColumnNums: [] - Statistics: Num rows: 10 Data size: 2640 Basic stats: COMPLETE Column stats: COMPLETE 
+ Statistics: Num rows: 10 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: ctinyint (type: tinyint) outputColumnNames: _col0 diff --git a/ql/src/test/results/clientpositive/llap/materialized_view_create.q.out b/ql/src/test/results/clientpositive/llap/materialized_view_create.q.out index bcd41446fd..69751a9004 100644 --- a/ql/src/test/results/clientpositive/llap/materialized_view_create.q.out +++ b/ql/src/test/results/clientpositive/llap/materialized_view_create.q.out @@ -50,7 +50,7 @@ Table Parameters: numFiles 1 numRows 5 rawDataSize 1025 - totalSize 497 + totalSize 503 #### A masked pattern was here #### # Storage Information @@ -111,7 +111,7 @@ Table Parameters: numFiles 1 numRows 5 rawDataSize 580 - totalSize 346 + totalSize 348 #### A masked pattern was here #### # Storage Information @@ -247,7 +247,7 @@ key value numFiles 1 numRows 5 rawDataSize 1605 -totalSize 701 +totalSize 702 #### A masked pattern was here #### PREHOOK: query: drop materialized view cmv_mat_view_n4 PREHOOK: type: DROP_MATERIALIZED_VIEW diff --git a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite.q.out b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite.q.out index 60e7f327ac..70649bd8f1 100644 --- a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite.q.out +++ b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite.q.out @@ -69,7 +69,7 @@ bucketing_version 2 numFiles 1 numRows 2 rawDataSize 408 -totalSize 453 +totalSize 457 #### A masked pattern was here #### PREHOOK: query: create materialized view if not exists cmv_mat_view2_n4 enable rewrite as select a, c from cmv_basetable_n10 where a = 3 @@ -102,7 +102,7 @@ bucketing_version 2 numFiles 1 numRows 2 rawDataSize 232 -totalSize 322 +totalSize 326 #### A masked pattern was here #### PREHOOK: query: explain select a, c from cmv_basetable_n10 where a = 3 diff --git a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_3.q.out b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_3.q.out index cca7d9f2d6..0d5da8e75c 100644 --- a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_3.q.out +++ b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_3.q.out @@ -749,7 +749,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: cmv_basetable_2 - Statistics: Num rows: 42 Data size: 4872 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 43 Data size: 4988 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: ((c > 10.1) and a is not null) (type: boolean) Statistics: Num rows: 14 Data size: 1624 Basic stats: COMPLETE Column stats: COMPLETE @@ -1027,7 +1027,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: cmv_basetable_2 - Statistics: Num rows: 42 Data size: 4872 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 43 Data size: 4988 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: ((c > 10.1) and a is not null) (type: boolean) Statistics: Num rows: 14 Data size: 1624 Basic stats: COMPLETE Column stats: COMPLETE diff --git a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_4.q.out b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_4.q.out index 7aef9be03a..f01591eeb0 100644 --- a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_4.q.out +++ 
b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_4.q.out @@ -268,7 +268,7 @@ Table Parameters: numFiles 2 numRows 2 rawDataSize 248 - totalSize 706 + totalSize 736 transactional true transactional_properties default #### A masked pattern was here #### @@ -495,7 +495,7 @@ Table Parameters: numFiles 2 numRows 2 rawDataSize 248 - totalSize 706 + totalSize 736 transactional true transactional_properties default #### A masked pattern was here #### @@ -937,7 +937,7 @@ Table Type: MATERIALIZED_VIEW Table Parameters: bucketing_version 2 numFiles 3 - totalSize 1451 + totalSize 1512 transactional true transactional_properties default #### A masked pattern was here #### @@ -1064,7 +1064,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: cmv_basetable_2_n2 - Statistics: Num rows: 60 Data size: 7200 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 61 Data size: 7320 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: ((c > 10) and a is not null) (type: boolean) Statistics: Num rows: 20 Data size: 2400 Basic stats: COMPLETE Column stats: COMPLETE @@ -1283,19 +1283,19 @@ STAGE PLANS: Map Operator Tree: TableScan alias: cmv_basetable_2_n2 - Statistics: Num rows: 70 Data size: 8400 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 72 Data size: 8640 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: ((c > 10) and a is not null) (type: boolean) - Statistics: Num rows: 23 Data size: 2760 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 24 Data size: 2880 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: a (type: int), c (type: decimal(10,2)), d (type: int) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 23 Data size: 2760 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 24 Data size: 2880 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int) sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 23 Data size: 2760 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 24 Data size: 2880 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: decimal(10,2)), _col2 (type: int) Execution mode: llap LLAP IO: may be used (ACID table) @@ -1309,7 +1309,7 @@ STAGE PLANS: 0 _col0 (type: int) 1 _col0 (type: int) outputColumnNames: _col0, _col2, _col3 - Statistics: Num rows: 38 Data size: 4560 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 40 Data size: 4800 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: sum(_col3) keys: _col0 (type: int), _col2 (type: decimal(10,2)) @@ -1532,7 +1532,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: cmv_basetable_2_n2 - Statistics: Num rows: 85 Data size: 10200 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 88 Data size: 10560 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: ((ROW__ID.writeid > 4) and (c > 10) and a is not null) (type: boolean) Statistics: Num rows: 9 Data size: 1080 Basic stats: COMPLETE Column stats: COMPLETE diff --git a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_5.q.out b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_5.q.out index 2f0bf3da0f..26791e4179 100644 --- a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_5.q.out +++ 
b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_5.q.out @@ -400,7 +400,7 @@ Table Type: MATERIALIZED_VIEW Table Parameters: bucketing_version 2 numFiles 2 - totalSize 1053 + totalSize 1078 transactional true transactional_properties default #### A masked pattern was here #### @@ -523,7 +523,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: cmv_basetable_2_n3 - Statistics: Num rows: 60 Data size: 6960 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 61 Data size: 7076 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: ((c > 10) and a is not null) (type: boolean) Statistics: Num rows: 20 Data size: 2320 Basic stats: COMPLETE Column stats: COMPLETE @@ -731,19 +731,19 @@ STAGE PLANS: Map Operator Tree: TableScan alias: cmv_basetable_2_n3 - Statistics: Num rows: 70 Data size: 8120 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 72 Data size: 8352 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: ((c > 10) and a is not null) (type: boolean) - Statistics: Num rows: 23 Data size: 2668 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 24 Data size: 2784 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: a (type: int), c (type: decimal(10,2)) outputColumnNames: _col0, _col1 - Statistics: Num rows: 23 Data size: 2668 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 24 Data size: 2784 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int) sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 23 Data size: 2668 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 24 Data size: 2784 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: decimal(10,2)) Execution mode: llap LLAP IO: may be used (ACID table) @@ -757,14 +757,14 @@ STAGE PLANS: 0 _col0 (type: int) 1 _col0 (type: int) outputColumnNames: _col0, _col2 - Statistics: Num rows: 38 Data size: 4408 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 40 Data size: 4640 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int), _col2 (type: decimal(10,2)) outputColumnNames: _col0, _col1 - Statistics: Num rows: 38 Data size: 4408 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 40 Data size: 4640 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 38 Data size: 4408 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 40 Data size: 4640 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -774,7 +774,7 @@ STAGE PLANS: Select Operator expressions: _col0 (type: int), _col1 (type: decimal(10,2)) outputColumnNames: a, c - Statistics: Num rows: 38 Data size: 4408 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 40 Data size: 4640 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: compute_stats(a, 'hll'), compute_stats(c, 'hll') mode: hash @@ -943,7 +943,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: cmv_basetable_2_n3 - Statistics: Num rows: 85 Data size: 9860 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 88 Data size: 10208 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: ((ROW__ID.writeid > 4) and (c > 
10) and a is not null) (type: boolean) Statistics: Num rows: 9 Data size: 1044 Basic stats: COMPLETE Column stats: COMPLETE diff --git a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_dummy.q.out b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_dummy.q.out index 9116089d70..e570efe0fc 100644 --- a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_dummy.q.out +++ b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_dummy.q.out @@ -69,7 +69,7 @@ bucketing_version 2 numFiles 1 numRows 2 rawDataSize 408 -totalSize 453 +totalSize 457 #### A masked pattern was here #### PREHOOK: query: create materialized view if not exists cmv_mat_view2 enable rewrite as select a, c from cmv_basetable_n0 where a = 3 @@ -102,7 +102,7 @@ bucketing_version 2 numFiles 1 numRows 2 rawDataSize 232 -totalSize 322 +totalSize 326 #### A masked pattern was here #### PREHOOK: query: explain select a, c from cmv_basetable_n0 where a = 3 diff --git a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_multi_db.q.out b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_multi_db.q.out index 4cf7bce1b7..284d0a9979 100644 --- a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_multi_db.q.out +++ b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_multi_db.q.out @@ -93,7 +93,7 @@ bucketing_version 2 numFiles 1 numRows 2 rawDataSize 408 -totalSize 453 +totalSize 457 #### A masked pattern was here #### PREHOOK: query: create materialized view if not exists cmv_mat_view2_n2 enable rewrite as select a, c from db1.cmv_basetable_n7 where a = 3 @@ -126,7 +126,7 @@ bucketing_version 2 numFiles 1 numRows 2 rawDataSize 232 -totalSize 322 +totalSize 326 #### A masked pattern was here #### PREHOOK: query: create database db3 PREHOOK: type: CREATEDATABASE diff --git a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_rebuild_dummy.q.out b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_rebuild_dummy.q.out index 338a848d0c..0a310a3048 100644 --- a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_rebuild_dummy.q.out +++ b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_rebuild_dummy.q.out @@ -749,7 +749,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: cmv_basetable_2_n0 - Statistics: Num rows: 42 Data size: 4872 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 43 Data size: 4988 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: ((c > 10.1) and a is not null) (type: boolean) Statistics: Num rows: 14 Data size: 1624 Basic stats: COMPLETE Column stats: COMPLETE @@ -1027,7 +1027,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: cmv_basetable_2_n0 - Statistics: Num rows: 42 Data size: 4872 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 43 Data size: 4988 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: ((c > 10.1) and a is not null) (type: boolean) Statistics: Num rows: 14 Data size: 1624 Basic stats: COMPLETE Column stats: COMPLETE diff --git a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_time_window.q.out b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_time_window.q.out index 164568c32e..f82aa100f3 100644 --- a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_time_window.q.out +++ 
b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_time_window.q.out @@ -255,7 +255,7 @@ Table Parameters: numRows 2 rawDataSize 232 rewriting.time.window 300s - totalSize 586 + totalSize 608 #### A masked pattern was here #### # Storage Information @@ -477,7 +477,7 @@ Table Parameters: numRows 2 rawDataSize 232 rewriting.time.window 300s - totalSize 586 + totalSize 608 #### A masked pattern was here #### # Storage Information @@ -780,7 +780,7 @@ Table Parameters: numRows 3 rawDataSize 348 rewriting.time.window 300s - totalSize 616 + totalSize 628 #### A masked pattern was here #### # Storage Information diff --git a/ql/src/test/results/clientpositive/llap/materialized_view_describe.q.out b/ql/src/test/results/clientpositive/llap/materialized_view_describe.q.out index 272cbeca7e..70addc4279 100644 --- a/ql/src/test/results/clientpositive/llap/materialized_view_describe.q.out +++ b/ql/src/test/results/clientpositive/llap/materialized_view_describe.q.out @@ -73,7 +73,7 @@ Table Parameters: numFiles 1 numRows 5 rawDataSize 580 - totalSize 346 + totalSize 348 #### A masked pattern was here #### # Storage Information @@ -100,7 +100,7 @@ key foo numFiles 1 numRows 5 rawDataSize 580 -totalSize 346 +totalSize 348 #### A masked pattern was here #### PREHOOK: query: select a, c from cmv_mat_view_n8 PREHOOK: type: QUERY @@ -242,7 +242,7 @@ Table Parameters: numFiles 1 numRows 5 rawDataSize 1025 - totalSize 497 + totalSize 503 #### A masked pattern was here #### # Storage Information diff --git a/ql/src/test/results/clientpositive/llap/materialized_view_drop.q.out b/ql/src/test/results/clientpositive/llap/materialized_view_drop.q.out index ceb34f7b13..441f105a60 100644 --- a/ql/src/test/results/clientpositive/llap/materialized_view_drop.q.out +++ b/ql/src/test/results/clientpositive/llap/materialized_view_drop.q.out @@ -20,9 +20,9 @@ columns:struct columns { i32 cint, string cstring1} partitioned:false partitionColumns: totalNumberFiles:1 -totalFileSize:47120 -maxFileSize:47120 -minFileSize:47120 +totalFileSize:47137 +maxFileSize:47137 +minFileSize:47137 #### A masked pattern was here #### PREHOOK: query: drop materialized view dmv_mat_view diff --git a/ql/src/test/results/clientpositive/llap/orc_analyze.q.out b/ql/src/test/results/clientpositive/llap/orc_analyze.q.out index abbbbf94a9..14bf186aab 100644 --- a/ql/src/test/results/clientpositive/llap/orc_analyze.q.out +++ b/ql/src/test/results/clientpositive/llap/orc_analyze.q.out @@ -102,7 +102,7 @@ Table Parameters: numFiles 1 numRows 100 rawDataSize 52600 - totalSize 3200 + totalSize 3236 #### A masked pattern was here #### # Storage Information @@ -150,7 +150,7 @@ Table Parameters: numFiles 1 numRows 100 rawDataSize 52600 - totalSize 3200 + totalSize 3236 #### A masked pattern was here #### # Storage Information @@ -237,7 +237,7 @@ Table Parameters: numFiles 1 numRows 100 rawDataSize 52600 - totalSize 3200 + totalSize 3236 #### A masked pattern was here #### # Storage Information @@ -345,7 +345,7 @@ Partition Parameters: numFiles 1 numRows 50 rawDataSize 21950 - totalSize 2099 + totalSize 2134 #### A masked pattern was here #### # Storage Information @@ -386,7 +386,7 @@ Partition Parameters: numFiles 1 numRows 50 rawDataSize 22050 - totalSize 2114 + totalSize 2147 #### A masked pattern was here #### # Storage Information @@ -439,7 +439,7 @@ Partition Parameters: numFiles 1 numRows 50 rawDataSize 21950 - totalSize 2099 + totalSize 2134 #### A masked pattern was here #### # Storage Information @@ -480,7 +480,7 @@ Partition 
Parameters: numFiles 1 numRows 50 rawDataSize 22050 - totalSize 2114 + totalSize 2147 #### A masked pattern was here #### # Storage Information @@ -576,7 +576,7 @@ Partition Parameters: numFiles 1 numRows 50 rawDataSize 21950 - totalSize 2099 + totalSize 2134 #### A masked pattern was here #### # Storage Information @@ -617,7 +617,7 @@ Partition Parameters: numFiles 1 numRows 50 rawDataSize 22050 - totalSize 2114 + totalSize 2147 #### A masked pattern was here #### # Storage Information @@ -731,7 +731,7 @@ Partition Parameters: numFiles 4 numRows 50 rawDataSize 21955 - totalSize 5322 + totalSize 5394 #### A masked pattern was here #### # Storage Information @@ -772,7 +772,7 @@ Partition Parameters: numFiles 4 numRows 50 rawDataSize 22043 - totalSize 5314 + totalSize 5388 #### A masked pattern was here #### # Storage Information @@ -825,7 +825,7 @@ Partition Parameters: numFiles 4 numRows 50 rawDataSize 21955 - totalSize 5322 + totalSize 5394 #### A masked pattern was here #### # Storage Information @@ -866,7 +866,7 @@ Partition Parameters: numFiles 4 numRows 50 rawDataSize 22043 - totalSize 5314 + totalSize 5388 #### A masked pattern was here #### # Storage Information @@ -968,7 +968,7 @@ Partition Parameters: numFiles 4 numRows 50 rawDataSize 21955 - totalSize 5322 + totalSize 5394 #### A masked pattern was here #### # Storage Information @@ -1009,7 +1009,7 @@ Partition Parameters: numFiles 4 numRows 50 rawDataSize 22043 - totalSize 5314 + totalSize 5388 #### A masked pattern was here #### # Storage Information @@ -1117,7 +1117,7 @@ Partition Parameters: numFiles 1 numRows 50 rawDataSize 21950 - totalSize 2099 + totalSize 2134 #### A masked pattern was here #### # Storage Information @@ -1170,7 +1170,7 @@ Partition Parameters: numFiles 1 numRows 50 rawDataSize 21950 - totalSize 2099 + totalSize 2134 #### A masked pattern was here #### # Storage Information diff --git a/ql/src/test/results/clientpositive/llap/orc_llap_counters.q.out b/ql/src/test/results/clientpositive/llap/orc_llap_counters.q.out index 435e3fc37c..c4fe46ee82 100644 --- a/ql/src/test/results/clientpositive/llap/orc_llap_counters.q.out +++ b/ql/src/test/results/clientpositive/llap/orc_llap_counters.q.out @@ -233,7 +233,7 @@ Table Parameters: orc.bloom.filter.columns * orc.row.index.stride 1000 rawDataSize 1139514 - totalSize 55376 + totalSize 55453 #### A masked pattern was here #### # Storage Information @@ -251,7 +251,7 @@ PREHOOK: type: QUERY PREHOOK: Input: default@orc_ppd_n1 PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: - HDFS_BYTES_READ: 16673 + HDFS_BYTES_READ: 16676 HDFS_BYTES_WRITTEN: 104 HDFS_READ_OPS: 7 HDFS_LARGE_READ_OPS: 0 diff --git a/ql/src/test/results/clientpositive/llap/orc_llap_counters1.q.out b/ql/src/test/results/clientpositive/llap/orc_llap_counters1.q.out index c07832633d..6dec42f3af 100644 --- a/ql/src/test/results/clientpositive/llap/orc_llap_counters1.q.out +++ b/ql/src/test/results/clientpositive/llap/orc_llap_counters1.q.out @@ -233,7 +233,7 @@ Table Parameters: orc.bloom.filter.columns * orc.row.index.stride 1000 rawDataSize 1139514 - totalSize 55376 + totalSize 55453 #### A masked pattern was here #### # Storage Information @@ -251,7 +251,7 @@ PREHOOK: type: QUERY PREHOOK: Input: default@orc_ppd PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: - HDFS_BYTES_READ: 17728 + HDFS_BYTES_READ: 17731 HDFS_BYTES_WRITTEN: 104 HDFS_READ_OPS: 8 HDFS_LARGE_READ_OPS: 0 diff --git a/ql/src/test/results/clientpositive/llap/orc_merge1.q.out 
b/ql/src/test/results/clientpositive/llap/orc_merge1.q.out index f5d94f96a0..71899642c7 100644 --- a/ql/src/test/results/clientpositive/llap/orc_merge1.q.out +++ b/ql/src/test/results/clientpositive/llap/orc_merge1.q.out @@ -162,12 +162,12 @@ POSTHOOK: Lineage: orcfile_merge1_n1 PARTITION(ds=1,part=0).value SIMPLE [(src)s POSTHOOK: Lineage: orcfile_merge1_n1 PARTITION(ds=1,part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: orcfile_merge1_n1 PARTITION(ds=1,part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] Found 6 items --rw-r--r-- 3 ### USER ### ### GROUP ### 543 ### HDFS DATE ### hdfs://### HDFS PATH ### --rw-r--r-- 3 ### USER ### ### GROUP ### 550 ### HDFS DATE ### hdfs://### HDFS PATH ### --rw-r--r-- 3 ### USER ### ### GROUP ### 549 ### HDFS DATE ### hdfs://### HDFS PATH ### --rw-r--r-- 3 ### USER ### ### GROUP ### 485 ### HDFS DATE ### hdfs://### HDFS PATH ### --rw-r--r-- 3 ### USER ### ### GROUP ### 542 ### HDFS DATE ### hdfs://### HDFS PATH ### --rw-r--r-- 3 ### USER ### ### GROUP ### 467 ### HDFS DATE ### hdfs://### HDFS PATH ### +-rw-r--r-- 3 ### USER ### ### GROUP ### 555 ### HDFS DATE ### hdfs://### HDFS PATH ### +-rw-r--r-- 3 ### USER ### ### GROUP ### 562 ### HDFS DATE ### hdfs://### HDFS PATH ### +-rw-r--r-- 3 ### USER ### ### GROUP ### 561 ### HDFS DATE ### hdfs://### HDFS PATH ### +-rw-r--r-- 3 ### USER ### ### GROUP ### 496 ### HDFS DATE ### hdfs://### HDFS PATH ### +-rw-r--r-- 3 ### USER ### ### GROUP ### 554 ### HDFS DATE ### hdfs://### HDFS PATH ### +-rw-r--r-- 3 ### USER ### ### GROUP ### 478 ### HDFS DATE ### hdfs://### HDFS PATH ### PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge1b_n1 PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part @@ -340,7 +340,7 @@ POSTHOOK: Lineage: orcfile_merge1b_n1 PARTITION(ds=1,part=0).value SIMPLE [(src) POSTHOOK: Lineage: orcfile_merge1b_n1 PARTITION(ds=1,part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: orcfile_merge1b_n1 PARTITION(ds=1,part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] Found 1 items --rw-rw-rw- 3 ### USER ### ### GROUP ### 1344 ### HDFS DATE ### hdfs://### HDFS PATH ### +-rw-rw-rw- 3 ### USER ### ### GROUP ### 1360 ### HDFS DATE ### hdfs://### HDFS PATH ### PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge1c_n1 PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part @@ -505,7 +505,7 @@ POSTHOOK: Lineage: orcfile_merge1c_n1 PARTITION(ds=1,part=0).value SIMPLE [(src) POSTHOOK: Lineage: orcfile_merge1c_n1 PARTITION(ds=1,part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: orcfile_merge1c_n1 PARTITION(ds=1,part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] Found 1 items --rw-rw-rw- 3 ### USER ### ### GROUP ### 2421 ### HDFS DATE ### hdfs://### HDFS PATH ### +-rw-rw-rw- 3 ### USER ### ### GROUP ### 2461 ### HDFS DATE ### hdfs://### HDFS PATH ### PREHOOK: query: SELECT SUM(HASH(c)) FROM ( SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM orcfile_merge1_n1 WHERE ds='1' diff --git a/ql/src/test/results/clientpositive/llap/orc_merge10.q.out b/ql/src/test/results/clientpositive/llap/orc_merge10.q.out index 32a4306208..0f9bb2d0bd 100644 --- a/ql/src/test/results/clientpositive/llap/orc_merge10.q.out +++ b/ql/src/test/results/clientpositive/llap/orc_merge10.q.out @@ -162,9 +162,9 @@ POSTHOOK: Lineage: 
orcfile_merge1 PARTITION(ds=1,part=0).value SIMPLE [(src)src. POSTHOOK: Lineage: orcfile_merge1 PARTITION(ds=1,part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: orcfile_merge1 PARTITION(ds=1,part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] Found 3 items --rw-r--r-- 3 ### USER ### ### GROUP ### 933 ### HDFS DATE ### hdfs://### HDFS PATH ### --rw-r--r-- 3 ### USER ### ### GROUP ### 861 ### HDFS DATE ### hdfs://### HDFS PATH ### --rw-r--r-- 3 ### USER ### ### GROUP ### 842 ### HDFS DATE ### hdfs://### HDFS PATH ### +-rw-r--r-- 3 ### USER ### ### GROUP ### 947 ### HDFS DATE ### hdfs://### HDFS PATH ### +-rw-r--r-- 3 ### USER ### ### GROUP ### 875 ### HDFS DATE ### hdfs://### HDFS PATH ### +-rw-r--r-- 3 ### USER ### ### GROUP ### 856 ### HDFS DATE ### hdfs://### HDFS PATH ### PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge1b PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part @@ -337,7 +337,7 @@ POSTHOOK: Lineage: orcfile_merge1b PARTITION(ds=1,part=0).value SIMPLE [(src)src POSTHOOK: Lineage: orcfile_merge1b PARTITION(ds=1,part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: orcfile_merge1b PARTITION(ds=1,part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] Found 1 items --rw-rw-rw- 3 ### USER ### ### GROUP ### 1740 ### HDFS DATE ### hdfs://### HDFS PATH ### +-rw-rw-rw- 3 ### USER ### ### GROUP ### 1754 ### HDFS DATE ### hdfs://### HDFS PATH ### PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge1c PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part @@ -502,7 +502,7 @@ POSTHOOK: Lineage: orcfile_merge1c PARTITION(ds=1,part=0).value SIMPLE [(src)src POSTHOOK: Lineage: orcfile_merge1c PARTITION(ds=1,part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: orcfile_merge1c PARTITION(ds=1,part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] Found 1 items --rw-rw-rw- 3 ### USER ### ### GROUP ### 2384 ### HDFS DATE ### hdfs://### HDFS PATH ### +-rw-rw-rw- 3 ### USER ### ### GROUP ### 2409 ### HDFS DATE ### hdfs://### HDFS PATH ### PREHOOK: query: SELECT SUM(HASH(c)) FROM ( SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM orcfile_merge1 WHERE ds='1' @@ -605,7 +605,7 @@ POSTHOOK: type: ALTER_PARTITION_MERGE POSTHOOK: Input: default@orcfile_merge1 POSTHOOK: Output: default@orcfile_merge1@ds=1/part=0 Found 1 items --rw-rw-rw- 3 ### USER ### ### GROUP ### 2384 ### HDFS DATE ### hdfs://### HDFS PATH ### +-rw-rw-rw- 3 ### USER ### ### GROUP ### 2409 ### HDFS DATE ### hdfs://### HDFS PATH ### PREHOOK: query: SELECT SUM(HASH(c)) FROM ( SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM orcfile_merge1c WHERE ds='1' @@ -686,21 +686,21 @@ Type: struct Stripe Statistics: Stripe 1: Column 0: count: 90 hasNull: false - Column 1: count: 90 hasNull: false min: 0 max: 495 sum: 22736 - Column 2: count: 90 hasNull: false min: val_0 max: val_86 sum: 612 + Column 1: count: 90 hasNull: false bytesOnDisk: 185 min: 0 max: 495 sum: 22736 + Column 2: count: 90 hasNull: false bytesOnDisk: 428 min: val_0 max: val_86 sum: 612 Stripe 2: Column 0: count: 78 hasNull: false - Column 1: count: 78 hasNull: false min: 0 max: 497 sum: 18371 - Column 2: count: 78 hasNull: false min: val_0 max: val_95 sum: 529 + Column 1: count: 78 hasNull: false bytesOnDisk: 161 min: 0 max: 497 sum: 18371 + Column 2: count: 78 hasNull: 
false bytesOnDisk: 380 min: val_0 max: val_95 sum: 529 Stripe 3: Column 0: count: 74 hasNull: false - Column 1: count: 74 hasNull: false min: 2 max: 493 sum: 19663 - Column 2: count: 74 hasNull: false min: val_105 max: val_97 sum: 505 + Column 1: count: 74 hasNull: false bytesOnDisk: 153 min: 2 max: 493 sum: 19663 + Column 2: count: 74 hasNull: false bytesOnDisk: 363 min: val_105 max: val_97 sum: 505 File Statistics: Column 0: count: 242 hasNull: false - Column 1: count: 242 hasNull: false min: 0 max: 497 sum: 60770 - Column 2: count: 242 hasNull: false min: val_0 max: val_97 sum: 1646 + Column 1: count: 242 hasNull: false bytesOnDisk: 499 min: 0 max: 497 sum: 60770 + Column 2: count: 242 hasNull: false bytesOnDisk: 1171 min: val_0 max: val_97 sum: 1646 Stripes: Stripe: offset: 3 data: 613 rows: 90 tail: 61 index: 76 @@ -752,7 +752,7 @@ Stripes: Row group indices for column 2: Entry 0: count: 74 hasNull: false min: val_105 max: val_97 sum: 505 positions: 0,0,0,0,0 -File length: 2384 bytes +File length: 2409 bytes Padding length: 0 bytes Padding ratio: 0% ________________________________________________________________________________________________________________________ @@ -775,21 +775,21 @@ Type: struct Stripe Statistics: Stripe 1: Column 0: count: 90 hasNull: false - Column 1: count: 90 hasNull: false min: 0 max: 495 sum: 22736 - Column 2: count: 90 hasNull: false min: val_0 max: val_86 sum: 612 + Column 1: count: 90 hasNull: false bytesOnDisk: 185 min: 0 max: 495 sum: 22736 + Column 2: count: 90 hasNull: false bytesOnDisk: 428 min: val_0 max: val_86 sum: 612 Stripe 2: Column 0: count: 78 hasNull: false - Column 1: count: 78 hasNull: false min: 0 max: 497 sum: 18371 - Column 2: count: 78 hasNull: false min: val_0 max: val_95 sum: 529 + Column 1: count: 78 hasNull: false bytesOnDisk: 161 min: 0 max: 497 sum: 18371 + Column 2: count: 78 hasNull: false bytesOnDisk: 380 min: val_0 max: val_95 sum: 529 Stripe 3: Column 0: count: 74 hasNull: false - Column 1: count: 74 hasNull: false min: 2 max: 493 sum: 19663 - Column 2: count: 74 hasNull: false min: val_105 max: val_97 sum: 505 + Column 1: count: 74 hasNull: false bytesOnDisk: 153 min: 2 max: 493 sum: 19663 + Column 2: count: 74 hasNull: false bytesOnDisk: 363 min: val_105 max: val_97 sum: 505 File Statistics: Column 0: count: 242 hasNull: false - Column 1: count: 242 hasNull: false min: 0 max: 497 sum: 60770 - Column 2: count: 242 hasNull: false min: val_0 max: val_97 sum: 1646 + Column 1: count: 242 hasNull: false bytesOnDisk: 499 min: 0 max: 497 sum: 60770 + Column 2: count: 242 hasNull: false bytesOnDisk: 1171 min: val_0 max: val_97 sum: 1646 Stripes: Stripe: offset: 3 data: 613 rows: 90 tail: 61 index: 76 @@ -841,7 +841,7 @@ Stripes: Row group indices for column 2: Entry 0: count: 74 hasNull: false min: val_105 max: val_97 sum: 505 positions: 0,0,0,0,0 -File length: 2384 bytes +File length: 2409 bytes Padding length: 0 bytes Padding ratio: 0% ________________________________________________________________________________________________________________________ diff --git a/ql/src/test/results/clientpositive/llap/orc_merge11.q.out b/ql/src/test/results/clientpositive/llap/orc_merge11.q.out index a4ec749f60..1b2ddd3cdc 100644 --- a/ql/src/test/results/clientpositive/llap/orc_merge11.q.out +++ b/ql/src/test/results/clientpositive/llap/orc_merge11.q.out @@ -81,19 +81,19 @@ Type: struct= 0.0D) (type: boolean) - Statistics: Num rows: 36 Data size: 6383 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 37 Data size: 6562 
Basic stats: COMPLETE Column stats: NONE Select Operator - Statistics: Num rows: 36 Data size: 6383 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 37 Data size: 6562 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() mode: hash @@ -490,19 +490,19 @@ STAGE PLANS: Map Operator Tree: TableScan alias: tab1_n6 - Statistics: Num rows: 110 Data size: 19504 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 111 Data size: 19688 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: key is not null (type: boolean) - Statistics: Num rows: 105 Data size: 18617 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 106 Data size: 18801 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) outputColumnNames: _col0 - Statistics: Num rows: 105 Data size: 18617 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 106 Data size: 18801 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 105 Data size: 18617 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 106 Data size: 18801 Basic stats: COMPLETE Column stats: NONE Execution mode: vectorized, llap LLAP IO: may be used (ACID table) Map 4 @@ -533,7 +533,7 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - Statistics: Num rows: 115 Data size: 20478 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 116 Data size: 20681 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() mode: hash @@ -634,11 +634,11 @@ STAGE PLANS: Map Operator Tree: TableScan alias: tab2_n5 - Statistics: Num rows: 110 Data size: 19504 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 111 Data size: 19688 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) outputColumnNames: key - Statistics: Num rows: 110 Data size: 19504 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 111 Data size: 19688 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: max(key) mode: hash @@ -705,38 +705,38 @@ STAGE PLANS: Map Operator Tree: TableScan alias: tab1_n6 - Statistics: Num rows: 110 Data size: 19504 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 111 Data size: 19688 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: key is not null (type: boolean) - Statistics: Num rows: 105 Data size: 18617 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 106 Data size: 18801 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) outputColumnNames: _col0 - Statistics: Num rows: 105 Data size: 18617 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 106 Data size: 18801 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 105 Data size: 18617 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 106 Data size: 18801 Basic stats: COMPLETE Column stats: NONE Execution mode: vectorized, llap LLAP IO: may be used (ACID table) Map 4 Map Operator Tree: TableScan alias: tab2_n5 - Statistics: Num rows: 110 Data size: 19504 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 111 Data size: 19688 Basic stats: COMPLETE Column stats: NONE Filter 
Operator predicate: key is not null (type: boolean) - Statistics: Num rows: 105 Data size: 18617 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 106 Data size: 18801 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) outputColumnNames: _col0 - Statistics: Num rows: 105 Data size: 18617 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 106 Data size: 18801 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 105 Data size: 18617 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 106 Data size: 18801 Basic stats: COMPLETE Column stats: NONE Execution mode: vectorized, llap LLAP IO: may be used (ACID table) Reducer 2 @@ -748,7 +748,7 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - Statistics: Num rows: 115 Data size: 20478 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 116 Data size: 20681 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() mode: hash diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_stats.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_stats.q.out index eccb983131..5c45776607 100644 --- a/ql/src/test/results/clientpositive/llap/schema_evol_stats.q.out +++ b/ql/src/test/results/clientpositive/llap/schema_evol_stats.q.out @@ -290,7 +290,7 @@ Table Parameters: numPartitions 2 numRows 8 rawDataSize 1116 - totalSize 819 + totalSize 848 #### A masked pattern was here #### # Storage Information @@ -327,7 +327,7 @@ Partition Parameters: numFiles 1 numRows 4 rawDataSize 384 - totalSize 313 + totalSize 323 #### A masked pattern was here #### # Storage Information @@ -366,7 +366,7 @@ Partition Parameters: numFiles 1 numRows 4 rawDataSize 732 - totalSize 506 + totalSize 525 #### A masked pattern was here #### # Storage Information diff --git a/ql/src/test/results/clientpositive/llap/vectorization_short_regress.q.out b/ql/src/test/results/clientpositive/llap/vectorization_short_regress.q.out index a6a037150d..cb44e85547 100644 --- a/ql/src/test/results/clientpositive/llap/vectorization_short_regress.q.out +++ b/ql/src/test/results/clientpositive/llap/vectorization_short_regress.q.out @@ -3726,7 +3726,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: alltypesnullorc - Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 12288 Data size: 9580 Basic stats: COMPLETE Column stats: COMPLETE TableScan Vectorization: native: true Select Operator @@ -3734,7 +3734,7 @@ STAGE PLANS: className: VectorSelectOperator native: true projectedOutputColumnNums: [] - Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 12288 Data size: 9580 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count() Group By Vectorization: diff --git a/ql/src/test/results/clientpositive/orc_file_dump.q.out b/ql/src/test/results/clientpositive/orc_file_dump.q.out index 2067145ce5..8ec71a9ccc 100644 --- a/ql/src/test/results/clientpositive/orc_file_dump.q.out +++ b/ql/src/test/results/clientpositive/orc_file_dump.q.out @@ -102,34 +102,34 @@ Type: struct Stripe Statistics: Stripe 1: Column 0: count: 152 hasNull: false - Column 1: count: 152 hasNull: false min: 0 max: 497 sum: 38034 - Column 2: count: 152 hasNull: false min: val_0 max: val_97 sum: 1034 + Column 1: count: 
152 hasNull: false bytesOnDisk: 309 min: 0 max: 497 sum: 38034 + Column 2: count: 152 hasNull: false bytesOnDisk: 679 min: val_0 max: val_97 sum: 1034 Stripe 2: Column 0: count: 90 hasNull: false - Column 1: count: 90 hasNull: false min: 0 max: 495 sum: 22736 - Column 2: count: 90 hasNull: false min: val_0 max: val_86 sum: 612 + Column 1: count: 90 hasNull: false bytesOnDisk: 185 min: 0 max: 495 sum: 22736 + Column 2: count: 90 hasNull: false bytesOnDisk: 428 min: val_0 max: val_86 sum: 612 File Statistics: Column 0: count: 242 hasNull: false - Column 1: count: 242 hasNull: false min: 0 max: 497 sum: 60770 - Column 2: count: 242 hasNull: false min: val_0 max: val_97 sum: 1646 + Column 1: count: 242 hasNull: false bytesOnDisk: 494 min: 0 max: 497 sum: 60770 + Column 2: count: 242 hasNull: false bytesOnDisk: 1107 min: val_0 max: val_97 sum: 1646 Stripes: Stripe: offset: 3 data: 988 rows: 152 tail: 72 index: 77 @@ -675,7 +675,7 @@ Stripes: Row group indices for column 2: Entry 0: count: 90 hasNull: false min: val_0 max: val_86 sum: 612 positions: 0,0,0,0,0 -File length: 2134 bytes +File length: 2155 bytes Padding length: 0 bytes Padding ratio: 0% ________________________________________________________________________________________________________________________ @@ -698,17 +698,17 @@ Type: struct Stripe Statistics: Stripe 1: Column 0: count: 152 hasNull: false - Column 1: count: 152 hasNull: false min: 0 max: 497 sum: 38034 - Column 2: count: 152 hasNull: false min: val_0 max: val_97 sum: 1034 + Column 1: count: 152 hasNull: false bytesOnDisk: 309 min: 0 max: 497 sum: 38034 + Column 2: count: 152 hasNull: false bytesOnDisk: 679 min: val_0 max: val_97 sum: 1034 Stripe 2: Column 0: count: 90 hasNull: false - Column 1: count: 90 hasNull: false min: 0 max: 495 sum: 22736 - Column 2: count: 90 hasNull: false min: val_0 max: val_86 sum: 612 + Column 1: count: 90 hasNull: false bytesOnDisk: 185 min: 0 max: 495 sum: 22736 + Column 2: count: 90 hasNull: false bytesOnDisk: 428 min: val_0 max: val_86 sum: 612 File Statistics: Column 0: count: 242 hasNull: false - Column 1: count: 242 hasNull: false min: 0 max: 497 sum: 60770 - Column 2: count: 242 hasNull: false min: val_0 max: val_97 sum: 1646 + Column 1: count: 242 hasNull: false bytesOnDisk: 494 min: 0 max: 497 sum: 60770 + Column 2: count: 242 hasNull: false bytesOnDisk: 1107 min: val_0 max: val_97 sum: 1646 Stripes: Stripe: offset: 3 data: 988 rows: 152 tail: 72 index: 77 @@ -745,7 +745,7 @@ Stripes: Row group indices for column 2: Entry 0: count: 90 hasNull: false min: val_0 max: val_86 sum: 612 positions: 0,0,0,0,0 -File length: 2134 bytes +File length: 2155 bytes Padding length: 0 bytes Padding ratio: 0% ________________________________________________________________________________________________________________________ diff --git a/ql/src/test/results/clientpositive/orc_merge11.q.out b/ql/src/test/results/clientpositive/orc_merge11.q.out index a4ec749f60..1b2ddd3cdc 100644 --- a/ql/src/test/results/clientpositive/orc_merge11.q.out +++ b/ql/src/test/results/clientpositive/orc_merge11.q.out @@ -81,19 +81,19 @@ Type: struct 12:int - Statistics: Num rows: 1050 Data size: 311254 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1050 Data size: 311170 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: sum(_col0) Group By Vectorization: diff --git a/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out b/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out index 
0336982c34..8dbd679764 100644 --- a/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out +++ b/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out @@ -3697,7 +3697,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: alltypesnullorc - Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 12288 Data size: 9580 Basic stats: COMPLETE Column stats: COMPLETE TableScan Vectorization: native: true Select Operator @@ -3705,7 +3705,7 @@ STAGE PLANS: className: VectorSelectOperator native: true projectedOutputColumnNums: [] - Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 12288 Data size: 9580 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count() Group By Vectorization: @@ -3808,7 +3808,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: alltypesnullorc - Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 12288 Data size: 9580 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true Select Operator @@ -3818,7 +3818,7 @@ STAGE PLANS: className: VectorSelectOperator native: true projectedOutputColumnNums: [0] - Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 12288 Data size: 9580 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count(ctinyint) Group By Vectorization: @@ -3921,7 +3921,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: alltypesnullorc - Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 12288 Data size: 9580 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true Select Operator @@ -3931,7 +3931,7 @@ STAGE PLANS: className: VectorSelectOperator native: true projectedOutputColumnNums: [2] - Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 12288 Data size: 9580 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count(cint) Group By Vectorization: @@ -4034,7 +4034,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: alltypesnullorc - Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 12288 Data size: 9580 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true Select Operator @@ -4044,7 +4044,7 @@ STAGE PLANS: className: VectorSelectOperator native: true projectedOutputColumnNums: [4] - Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 12288 Data size: 9580 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count(cfloat) Group By Vectorization: @@ -4147,7 +4147,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: alltypesnullorc - Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 12288 Data size: 9580 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true Select Operator @@ -4157,7 +4157,7 @@ STAGE PLANS: className: VectorSelectOperator native: true projectedOutputColumnNums: [6] - Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 12288 Data size: 9580 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count(cstring1) Group By Vectorization: @@ -4260,7 
+4260,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: alltypesnullorc - Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 12288 Data size: 9580 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true Select Operator @@ -4270,7 +4270,7 @@ STAGE PLANS: className: VectorSelectOperator native: true projectedOutputColumnNums: [10] - Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 12288 Data size: 9580 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count(cboolean1) Group By Vectorization: diff --git a/ql/src/test/results/clientpositive/spark/vectorized_timestamp_funcs.q.out b/ql/src/test/results/clientpositive/spark/vectorized_timestamp_funcs.q.out index fe5fd23b9d..18926cb873 100644 --- a/ql/src/test/results/clientpositive/spark/vectorized_timestamp_funcs.q.out +++ b/ql/src/test/results/clientpositive/spark/vectorized_timestamp_funcs.q.out @@ -258,7 +258,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: alltypesorc_string - Statistics: Num rows: 52 Data size: 3555 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3219 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true Select Operator @@ -269,7 +269,7 @@ STAGE PLANS: native: true projectedOutputColumnNums: [5, 6, 7, 8, 9, 10, 11, 12, 13, 0, 1, 3, 14, 15, 16, 17, 18] selectExpressions: VectorUDFUnixTimeStampTimestamp(col 1:timestamp) -> 5:bigint, VectorUDFYearTimestamp(col 1:timestamp, field YEAR) -> 6:int, VectorUDFMonthTimestamp(col 1:timestamp, field MONTH) -> 7:int, VectorUDFDayOfMonthTimestamp(col 1:timestamp, field DAY_OF_MONTH) -> 8:int, VectorUDFDayOfMonthTimestamp(col 1:timestamp, field DAY_OF_MONTH) -> 9:int, VectorUDFWeekOfYearTimestamp(col 1:timestamp, field WEEK_OF_YEAR) -> 10:int, VectorUDFHourTimestamp(col 1:timestamp, field HOUR_OF_DAY) -> 11:int, VectorUDFMinuteTimestamp(col 1:timestamp, field MINUTE) -> 12:int, VectorUDFSecondTimestamp(col 1:timestamp, field SECOND) -> 13:int, IfExprTimestampColumnScalar(col 0:boolean, col 1:timestamp, val 1319-02-02 16:31:57.778) -> 14:timestamp, IfExprTimestampScalarColumn(col 0:boolean, val 2000-12-18 08:42:30.0005, col 1:timestamp) -> 15:timestamp, IfExprTimestampColumnColumn(col 0:boolean, col 1:timestampcol 3:timestamp) -> 16:timestamp, IfExprColumnNull(col 0:boolean, col 1:timestamp, null)(children: col 0:boolean, col 1:timestamp) -> 17:timestamp, IfExprNullColumn(col 0:boolean, null, col 3)(children: col 0:boolean, col 3:timestamp) -> 18:timestamp - Statistics: Num rows: 52 Data size: 3555 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3219 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: bigint) sort order: + @@ -277,7 +277,7 @@ STAGE PLANS: className: VectorReduceSinkObjectHashOperator native: true nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true - Statistics: Num rows: 52 Data size: 3555 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3219 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: int), _col2 (type: int), _col3 (type: int), _col4 (type: int), _col5 (type: int), _col6 (type: int), _col7 (type: int), _col8 (type: int), 
_col9 (type: boolean), _col10 (type: timestamp), _col11 (type: timestamp), _col12 (type: timestamp), _col13 (type: timestamp), _col14 (type: timestamp), _col15 (type: timestamp), _col16 (type: timestamp) Execution mode: vectorized Map Vectorization: @@ -305,13 +305,13 @@ STAGE PLANS: className: VectorSelectOperator native: true projectedOutputColumnNums: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] - Statistics: Num rows: 52 Data size: 3555 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3219 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false File Sink Vectorization: className: VectorFileSinkOperator native: false - Statistics: Num rows: 52 Data size: 3555 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3219 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -466,7 +466,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: alltypesorc_string - Statistics: Num rows: 52 Data size: 3555 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3219 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true Select Operator @@ -477,7 +477,7 @@ STAGE PLANS: native: true projectedOutputColumnNums: [5, 6, 7, 8, 9, 10, 11, 12, 13] selectExpressions: VectorUDFUnixTimeStampString(col 2:string) -> 5:bigint, VectorUDFYearString(col 2:string, fieldStart 0, fieldLength 4) -> 6:int, VectorUDFMonthString(col 2:string, fieldStart 5, fieldLength 2) -> 7:int, VectorUDFDayOfMonthString(col 2:string, fieldStart 8, fieldLength 2) -> 8:int, VectorUDFDayOfMonthString(col 2:string, fieldStart 8, fieldLength 2) -> 9:int, VectorUDFWeekOfYearString(col 2:string) -> 10:int, VectorUDFHourString(col 2:string, fieldStart 11, fieldLength 2) -> 11:int, VectorUDFMinuteString(col 2:string, fieldStart 14, fieldLength 2) -> 12:int, VectorUDFSecondString(col 2:string, fieldStart 17, fieldLength 2) -> 13:int - Statistics: Num rows: 52 Data size: 3555 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3219 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: bigint) sort order: + @@ -485,7 +485,7 @@ STAGE PLANS: className: VectorReduceSinkObjectHashOperator native: true nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true - Statistics: Num rows: 52 Data size: 3555 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3219 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: int), _col2 (type: int), _col3 (type: int), _col4 (type: int), _col5 (type: int), _col6 (type: int), _col7 (type: int), _col8 (type: int) Execution mode: vectorized Map Vectorization: @@ -513,13 +513,13 @@ STAGE PLANS: className: VectorSelectOperator native: true projectedOutputColumnNums: [0, 1, 2, 3, 4, 5, 6, 7, 8] - Statistics: Num rows: 52 Data size: 3555 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3219 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false File Sink Vectorization: className: VectorFileSinkOperator native: false - Statistics: Num rows: 52 Data size: 3555 Basic stats: COMPLETE Column stats: NONE 
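The recurring churn in these plan snippets — Statistics: Num rows: 52 Data size: 3555 becoming Data size: 3219 here, and Num rows: 110 becoming 111 in the earlier join plans — is a downstream effect of the writer upgrade rather than a planner change: with Column stats: NONE, Hive derives both numbers from the file's on-disk size, which shifted slightly under the ORC 1.5 writer. A minimal sketch of that proportional estimate, with hypothetical helper names and an illustrative 177-byte average row width (Hive's real logic in StatsUtils is considerably more involved):

public final class BasicStatsSketch {
  /** With basic stats only, estimated data size tracks on-disk bytes. */
  static long estimatedDataSize(long totalFileBytes, double deserFactor) {
    return (long) (totalFileBytes * deserFactor);
  }

  /** When the row count is unknown, it is derived from size per average row. */
  static long estimatedRows(long dataSize, long avgRowWidth) {
    return Math.max(1L, dataSize / avgRowWidth);
  }

  public static void main(String[] args) {
    // The ORC 1.5 writer emits slightly different file sizes, so the
    // size-derived estimates move with them (values from the plans above):
    System.out.println(estimatedRows(19504, 177)); // 110 with the old files
    System.out.println(estimatedRows(19688, 177)); // 111 with the new files
  }
}

Where numRows is known from table metadata (the 52-row cases above), only the Data size estimate moves.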
+ Statistics: Num rows: 52 Data size: 3219 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -658,7 +658,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: alltypesorc_string - Statistics: Num rows: 52 Data size: 3555 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3219 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true Select Operator @@ -669,7 +669,7 @@ STAGE PLANS: native: true projectedOutputColumnNums: [7, 8, 9, 10, 11, 12, 13, 14, 15] selectExpressions: LongColEqualLongColumn(col 5:bigint, col 6:bigint)(children: VectorUDFUnixTimeStampTimestamp(col 1:timestamp) -> 5:bigint, VectorUDFUnixTimeStampString(col 2:string) -> 6:bigint) -> 7:boolean, LongColEqualLongColumn(col 5:int, col 6:int)(children: VectorUDFYearTimestamp(col 1:timestamp, field YEAR) -> 5:int, VectorUDFYearString(col 2:string, fieldStart 0, fieldLength 4) -> 6:int) -> 8:boolean, LongColEqualLongColumn(col 5:int, col 6:int)(children: VectorUDFMonthTimestamp(col 1:timestamp, field MONTH) -> 5:int, VectorUDFMonthString(col 2:string, fieldStart 5, fieldLength 2) -> 6:int) -> 9:boolean, LongColEqualLongColumn(col 5:int, col 6:int)(children: VectorUDFDayOfMonthTimestamp(col 1:timestamp, field DAY_OF_MONTH) -> 5:int, VectorUDFDayOfMonthString(col 2:string, fieldStart 8, fieldLength 2) -> 6:int) -> 10:boolean, LongColEqualLongColumn(col 5:int, col 6:int)(children: VectorUDFDayOfMonthTimestamp(col 1:timestamp, field DAY_OF_MONTH) -> 5:int, VectorUDFDayOfMonthString(col 2:string, fieldStart 8, fieldLength 2) -> 6:int) -> 11:boolean, LongColEqualLongColumn(col 5:int, col 6:int)(children: VectorUDFWeekOfYearTimestamp(col 1:timestamp, field WEEK_OF_YEAR) -> 5:int, VectorUDFWeekOfYearString(col 2:string) -> 6:int) -> 12:boolean, LongColEqualLongColumn(col 5:int, col 6:int)(children: VectorUDFHourTimestamp(col 1:timestamp, field HOUR_OF_DAY) -> 5:int, VectorUDFHourString(col 2:string, fieldStart 11, fieldLength 2) -> 6:int) -> 13:boolean, LongColEqualLongColumn(col 5:int, col 6:int)(children: VectorUDFMinuteTimestamp(col 1:timestamp, field MINUTE) -> 5:int, VectorUDFMinuteString(col 2:string, fieldStart 14, fieldLength 2) -> 6:int) -> 14:boolean, LongColEqualLongColumn(col 5:int, col 6:int)(children: VectorUDFSecondTimestamp(col 1:timestamp, field SECOND) -> 5:int, VectorUDFSecondString(col 2:string, fieldStart 17, fieldLength 2) -> 6:int) -> 15:boolean - Statistics: Num rows: 52 Data size: 3555 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3219 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: boolean) sort order: + @@ -677,7 +677,7 @@ STAGE PLANS: className: VectorReduceSinkObjectHashOperator native: true nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true - Statistics: Num rows: 52 Data size: 3555 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3219 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: boolean), _col2 (type: boolean), _col3 (type: boolean), _col4 (type: boolean), _col5 (type: boolean), _col6 (type: boolean), _col7 (type: boolean), _col8 (type: boolean) Execution mode: vectorized Map 
Vectorization: @@ -705,13 +705,13 @@ STAGE PLANS: className: VectorSelectOperator native: true projectedOutputColumnNums: [0, 1, 2, 3, 4, 5, 6, 7, 8] - Statistics: Num rows: 52 Data size: 3555 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3219 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false File Sink Vectorization: className: VectorFileSinkOperator native: false - Statistics: Num rows: 52 Data size: 3555 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3219 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -981,7 +981,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: alltypesorc_string - Statistics: Num rows: 52 Data size: 3555 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3219 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true Select Operator @@ -991,7 +991,7 @@ STAGE PLANS: className: VectorSelectOperator native: true projectedOutputColumnNums: [1] - Statistics: Num rows: 52 Data size: 3555 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3219 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: min(ctimestamp1), max(ctimestamp1), count(ctimestamp1), count() Group By Vectorization: @@ -1106,7 +1106,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: alltypesorc_string - Statistics: Num rows: 52 Data size: 3555 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3219 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true Select Operator @@ -1116,7 +1116,7 @@ STAGE PLANS: className: VectorSelectOperator native: true projectedOutputColumnNums: [1] - Statistics: Num rows: 52 Data size: 3555 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3219 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: sum(ctimestamp1) Group By Vectorization: @@ -1248,7 +1248,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: alltypesorc_string - Statistics: Num rows: 52 Data size: 3555 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3219 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true Select Operator @@ -1259,7 +1259,7 @@ STAGE PLANS: native: true projectedOutputColumnNums: [1, 5, 8] selectExpressions: CastTimestampToDouble(col 1:timestamp) -> 5:double, DoubleColMultiplyDoubleColumn(col 6:double, col 7:double)(children: CastTimestampToDouble(col 1:timestamp) -> 6:double, CastTimestampToDouble(col 1:timestamp) -> 7:double) -> 8:double - Statistics: Num rows: 52 Data size: 3555 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3219 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: sum(_col0), count(_col0), sum(_col2), sum(_col1) Group By Vectorization: diff --git a/ql/src/test/results/clientpositive/tez/acid_vectorization_original_tez.q.out b/ql/src/test/results/clientpositive/tez/acid_vectorization_original_tez.q.out index a6f12af4c1..01ec1329ee 100644 --- a/ql/src/test/results/clientpositive/tez/acid_vectorization_original_tez.q.out +++ b/ql/src/test/results/clientpositive/tez/acid_vectorization_original_tez.q.out @@ -370,10 +370,10 @@ POSTHOOK: Lineage: over10k_orc_bucketed_n0.si SIMPLE [(over10k_n9)over10k_n9.Fie POSTHOOK: Lineage: 
over10k_orc_bucketed_n0.t SIMPLE [(over10k_n9)over10k_n9.FieldSchema(name:t, type:tinyint, comment:null), ] POSTHOOK: Lineage: over10k_orc_bucketed_n0.ts SIMPLE [(over10k_n9)over10k_n9.FieldSchema(name:ts, type:timestamp, comment:null), ] Found 4 items --rw-rw-rw- 3 ### USER ### ### GROUP ### 8942 ### HDFS DATE ### hdfs://### HDFS PATH ### --rw-rw-rw- 3 ### USER ### ### GROUP ### 7710 ### HDFS DATE ### hdfs://### HDFS PATH ### --rw-rw-rw- 3 ### USER ### ### GROUP ### 7297 ### HDFS DATE ### hdfs://### HDFS PATH ### --rw-rw-rw- 3 ### USER ### ### GROUP ### 7204 ### HDFS DATE ### hdfs://### HDFS PATH ### +-rw-rw-rw- 3 ### USER ### ### GROUP ### 8997 ### HDFS DATE ### hdfs://### HDFS PATH ### +-rw-rw-rw- 3 ### USER ### ### GROUP ### 7773 ### HDFS DATE ### hdfs://### HDFS PATH ### +-rw-rw-rw- 3 ### USER ### ### GROUP ### 7358 ### HDFS DATE ### hdfs://### HDFS PATH ### +-rw-rw-rw- 3 ### USER ### ### GROUP ### 7261 ### HDFS DATE ### hdfs://### HDFS PATH ### PREHOOK: query: insert into over10k_orc_bucketed_n0 select * from over10k_n9 PREHOOK: type: QUERY PREHOOK: Input: default@over10k_n9 @@ -394,14 +394,14 @@ POSTHOOK: Lineage: over10k_orc_bucketed_n0.si SIMPLE [(over10k_n9)over10k_n9.Fie POSTHOOK: Lineage: over10k_orc_bucketed_n0.t SIMPLE [(over10k_n9)over10k_n9.FieldSchema(name:t, type:tinyint, comment:null), ] POSTHOOK: Lineage: over10k_orc_bucketed_n0.ts SIMPLE [(over10k_n9)over10k_n9.FieldSchema(name:ts, type:timestamp, comment:null), ] Found 8 items --rw-rw-rw- 3 ### USER ### ### GROUP ### 8942 ### HDFS DATE ### hdfs://### HDFS PATH ### --rw-rw-rw- 3 ### USER ### ### GROUP ### 8942 ### HDFS DATE ### hdfs://### HDFS PATH ### --rw-rw-rw- 3 ### USER ### ### GROUP ### 7710 ### HDFS DATE ### hdfs://### HDFS PATH ### --rw-rw-rw- 3 ### USER ### ### GROUP ### 7710 ### HDFS DATE ### hdfs://### HDFS PATH ### --rw-rw-rw- 3 ### USER ### ### GROUP ### 7297 ### HDFS DATE ### hdfs://### HDFS PATH ### --rw-rw-rw- 3 ### USER ### ### GROUP ### 7297 ### HDFS DATE ### hdfs://### HDFS PATH ### --rw-rw-rw- 3 ### USER ### ### GROUP ### 7204 ### HDFS DATE ### hdfs://### HDFS PATH ### --rw-rw-rw- 3 ### USER ### ### GROUP ### 7204 ### HDFS DATE ### hdfs://### HDFS PATH ### +-rw-rw-rw- 3 ### USER ### ### GROUP ### 8997 ### HDFS DATE ### hdfs://### HDFS PATH ### +-rw-rw-rw- 3 ### USER ### ### GROUP ### 8997 ### HDFS DATE ### hdfs://### HDFS PATH ### +-rw-rw-rw- 3 ### USER ### ### GROUP ### 7773 ### HDFS DATE ### hdfs://### HDFS PATH ### +-rw-rw-rw- 3 ### USER ### ### GROUP ### 7773 ### HDFS DATE ### hdfs://### HDFS PATH ### +-rw-rw-rw- 3 ### USER ### ### GROUP ### 7358 ### HDFS DATE ### hdfs://### HDFS PATH ### +-rw-rw-rw- 3 ### USER ### ### GROUP ### 7358 ### HDFS DATE ### hdfs://### HDFS PATH ### +-rw-rw-rw- 3 ### USER ### ### GROUP ### 7261 ### HDFS DATE ### hdfs://### HDFS PATH ### +-rw-rw-rw- 3 ### USER ### ### GROUP ### 7261 ### HDFS DATE ### hdfs://### HDFS PATH ### PREHOOK: query: select distinct 7 as seven, INPUT__FILE__NAME from over10k_orc_bucketed_n0 PREHOOK: type: QUERY PREHOOK: Input: default@over10k_orc_bucketed_n0 @@ -680,22 +680,22 @@ STAGE PLANS: Map Operator Tree: TableScan alias: over10k_orc_bucketed_n0 - Statistics: Num rows: 1229 Data size: 703430 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1241 Data size: 710230 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: ROW__ID (type: struct) outputColumnNames: ROW__ID - Statistics: Num rows: 1229 Data size: 703430 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1241 Data size: 710230 
Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count() keys: ROW__ID (type: struct) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 614 Data size: 51576 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 620 Data size: 52080 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: struct) sort order: + Map-reduce partition columns: _col0 (type: struct) - Statistics: Num rows: 614 Data size: 51576 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 620 Data size: 52080 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: bigint) Reducer 2 Reduce Operator Tree: @@ -704,13 +704,13 @@ STAGE PLANS: keys: KEY._col0 (type: struct) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 614 Data size: 51576 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 620 Data size: 52080 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: (_col1 > 1L) (type: boolean) - Statistics: Num rows: 204 Data size: 17136 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 206 Data size: 17304 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 204 Data size: 17136 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 206 Data size: 17304 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat diff --git a/ql/src/test/results/clientpositive/tez/explainanalyze_5.q.out b/ql/src/test/results/clientpositive/tez/explainanalyze_5.q.out index ce8ab92c20..fd71c0c4d9 100644 --- a/ql/src/test/results/clientpositive/tez/explainanalyze_5.q.out +++ b/ql/src/test/results/clientpositive/tez/explainanalyze_5.q.out @@ -304,7 +304,7 @@ Stage-3 Output:["_col0","_col1","_col3"] Filter Operator [FIL_9] (rows=10/2 width=316) predicate:((de = 109.23) or (de = 119.23)) - TableScan [TS_0] (rows=83/4 width=316) + TableScan [TS_0] (rows=86/4 width=316) default@acid_uami_n2,acid_uami_n2, ACID table,Tbl:COMPLETE,Col:NONE,Output:["i","de","vc"] PREHOOK: query: select * from acid_uami_n2 order by de diff --git a/ql/src/test/results/clientpositive/tez/orc_merge12.q.out b/ql/src/test/results/clientpositive/tez/orc_merge12.q.out index 673bad2230..d29d704aa8 100644 --- a/ql/src/test/results/clientpositive/tez/orc_merge12.q.out +++ b/ql/src/test/results/clientpositive/tez/orc_merge12.q.out @@ -161,8 +161,8 @@ Stripe Statistics: Column 6: count: 9174 hasNull: true min: -16379.0 max: 9763215.5639 sum: 5.62236530305E7 Column 7: count: 12288 hasNull: false min: 00020767-dd8f-4f4d-bd68-4b7be64b8e44 max: fffa3516-e219-4027-b0d3-72bb2e676c52 sum: 442368 Column 8: count: 12288 hasNull: false min: 000976f7-7075-4f3f-a564-5a375fafcc101416a2b7-7f64-41b7-851f-97d15405037e max: fffd0642-5f01-48cd-8d97-3428faee49e9b39f2b4c-efdc-4e5f-9ab5-4aa5394cb156 sum: 884736 - Column 9: count: 9173 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 - Column 10: count: 9174 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 + Column 9: count: 9173 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 min UTC: 1969-12-31 05:59:30.929 max UTC: 1969-12-31 06:00:30.808 + Column 10: count: 9174 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 min UTC: 1969-12-31 05:59:30.929 max UTC: 
1969-12-31 06:00:30.808 Column 11: count: 9174 hasNull: true true: 6138 Column 12: count: 9173 hasNull: true true: 3983 Column 13: count: 9173 hasNull: true min: -64 max: 62 sum: -39856 @@ -173,8 +173,8 @@ Stripe Statistics: Column 18: count: 9174 hasNull: true min: -16379.0 max: 9763215.5639 sum: 5.62236530305E7 Column 19: count: 9174 hasNull: true min: 0042l0d5rPD6sMlJ7Ue0q max: yy2GiGM sum: 127881 Column 20: count: 9173 hasNull: true min: 0034fkcXMQI3 max: yyt0S8WorA sum: 149134 - Column 21: count: 9173 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 - Column 22: count: 9174 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 + Column 21: count: 9173 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 min UTC: 1969-12-31 05:59:30.929 max UTC: 1969-12-31 06:00:30.808 + Column 22: count: 9174 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 min UTC: 1969-12-31 05:59:30.929 max UTC: 1969-12-31 06:00:30.808 Column 23: count: 9174 hasNull: true true: 6138 Column 24: count: 9173 hasNull: true true: 3983 Column 25: count: 9173 hasNull: true min: -64 max: 62 sum: -39856 @@ -185,8 +185,8 @@ Stripe Statistics: Column 30: count: 9174 hasNull: true min: -16379.0 max: 9763215.5639 sum: 5.62236530305E7 Column 31: count: 9174 hasNull: true min: 0042l0d5rPD6sMlJ7Ue0q max: yy2GiGM sum: 127881 Column 32: count: 9173 hasNull: true min: 0034fkcXMQI3 max: yyt0S8WorA sum: 149134 - Column 33: count: 9173 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 - Column 34: count: 9174 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 + Column 33: count: 9173 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 min UTC: 1969-12-31 05:59:30.929 max UTC: 1969-12-31 06:00:30.808 + Column 34: count: 9174 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 min UTC: 1969-12-31 05:59:30.929 max UTC: 1969-12-31 06:00:30.808 Column 35: count: 9174 hasNull: true true: 6138 Column 36: count: 9173 hasNull: true true: 3983 Stripe 2: @@ -199,8 +199,8 @@ Stripe Statistics: Column 6: count: 9174 hasNull: true min: -16379.0 max: 9763215.5639 sum: 5.62236530305E7 Column 7: count: 12288 hasNull: false min: 00020767-dd8f-4f4d-bd68-4b7be64b8e44 max: fffa3516-e219-4027-b0d3-72bb2e676c52 sum: 442368 Column 8: count: 12288 hasNull: false min: 000976f7-7075-4f3f-a564-5a375fafcc101416a2b7-7f64-41b7-851f-97d15405037e max: fffd0642-5f01-48cd-8d97-3428faee49e9b39f2b4c-efdc-4e5f-9ab5-4aa5394cb156 sum: 884736 - Column 9: count: 9173 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 - Column 10: count: 9174 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 + Column 9: count: 9173 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 min UTC: 1969-12-31 05:59:30.929 max UTC: 1969-12-31 06:00:30.808 + Column 10: count: 9174 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 min UTC: 1969-12-31 05:59:30.929 max UTC: 1969-12-31 06:00:30.808 Column 11: count: 9174 hasNull: true true: 6138 Column 12: count: 9173 hasNull: true true: 3983 Column 13: count: 9173 hasNull: true min: -64 max: 62 sum: -39856 @@ -211,8 +211,8 @@ Stripe Statistics: Column 18: count: 9174 hasNull: true min: -16379.0 max: 9763215.5639 sum: 5.62236530305E7 Column 19: count: 9174 hasNull: true min: 0042l0d5rPD6sMlJ7Ue0q max: yy2GiGM sum: 127881 Column 20: count: 9173 hasNull: true min: 0034fkcXMQI3 max: yyt0S8WorA sum: 149134 - Column 
21: count: 9173 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 - Column 22: count: 9174 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 + Column 21: count: 9173 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 min UTC: 1969-12-31 05:59:30.929 max UTC: 1969-12-31 06:00:30.808 + Column 22: count: 9174 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 min UTC: 1969-12-31 05:59:30.929 max UTC: 1969-12-31 06:00:30.808 Column 23: count: 9174 hasNull: true true: 6138 Column 24: count: 9173 hasNull: true true: 3983 Column 25: count: 9173 hasNull: true min: -64 max: 62 sum: -39856 @@ -223,8 +223,8 @@ Stripe Statistics: Column 30: count: 9174 hasNull: true min: -16379.0 max: 9763215.5639 sum: 5.62236530305E7 Column 31: count: 9174 hasNull: true min: 0042l0d5rPD6sMlJ7Ue0q max: yy2GiGM sum: 127881 Column 32: count: 9173 hasNull: true min: 0034fkcXMQI3 max: yyt0S8WorA sum: 149134 - Column 33: count: 9173 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 - Column 34: count: 9174 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 + Column 33: count: 9173 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 min UTC: 1969-12-31 05:59:30.929 max UTC: 1969-12-31 06:00:30.808 + Column 34: count: 9174 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 min UTC: 1969-12-31 05:59:30.929 max UTC: 1969-12-31 06:00:30.808 Column 35: count: 9174 hasNull: true true: 6138 Column 36: count: 9173 hasNull: true true: 3983 @@ -238,8 +238,8 @@ File Statistics: Column 6: count: 18348 hasNull: true min: -16379.0 max: 9763215.5639 sum: 1.12447306061E8 Column 7: count: 24576 hasNull: false min: 00020767-dd8f-4f4d-bd68-4b7be64b8e44 max: fffa3516-e219-4027-b0d3-72bb2e676c52 sum: 884736 Column 8: count: 24576 hasNull: false min: 000976f7-7075-4f3f-a564-5a375fafcc101416a2b7-7f64-41b7-851f-97d15405037e max: fffd0642-5f01-48cd-8d97-3428faee49e9b39f2b4c-efdc-4e5f-9ab5-4aa5394cb156 sum: 1769472 - Column 9: count: 18346 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 - Column 10: count: 18348 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 + Column 9: count: 18346 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 min UTC: 1969-12-31 05:59:30.929 max UTC: 1969-12-31 06:00:30.808 + Column 10: count: 18348 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 min UTC: 1969-12-31 05:59:30.929 max UTC: 1969-12-31 06:00:30.808 Column 11: count: 18348 hasNull: true true: 12276 Column 12: count: 18346 hasNull: true true: 7966 Column 13: count: 18346 hasNull: true min: -64 max: 62 sum: -79712 @@ -250,8 +250,8 @@ File Statistics: Column 18: count: 18348 hasNull: true min: -16379.0 max: 9763215.5639 sum: 1.12447306061E8 Column 19: count: 18348 hasNull: true min: 0042l0d5rPD6sMlJ7Ue0q max: yy2GiGM sum: 255762 Column 20: count: 18346 hasNull: true min: 0034fkcXMQI3 max: yyt0S8WorA sum: 298268 - Column 21: count: 18346 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 - Column 22: count: 18348 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 + Column 21: count: 18346 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 min UTC: 1969-12-31 05:59:30.929 max UTC: 1969-12-31 06:00:30.808 + Column 22: count: 18348 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 min UTC: 1969-12-31 05:59:30.929 max UTC: 
1969-12-31 06:00:30.808 Column 23: count: 18348 hasNull: true true: 12276 Column 24: count: 18346 hasNull: true true: 7966 Column 25: count: 18346 hasNull: true min: -64 max: 62 sum: -79712 @@ -262,8 +262,8 @@ File Statistics: Column 30: count: 18348 hasNull: true min: -16379.0 max: 9763215.5639 sum: 1.12447306061E8 Column 31: count: 18348 hasNull: true min: 0042l0d5rPD6sMlJ7Ue0q max: yy2GiGM sum: 255762 Column 32: count: 18346 hasNull: true min: 0034fkcXMQI3 max: yyt0S8WorA sum: 298268 - Column 33: count: 18346 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 - Column 34: count: 18348 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 + Column 33: count: 18346 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 min UTC: 1969-12-31 05:59:30.929 max UTC: 1969-12-31 06:00:30.808 + Column 34: count: 18348 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 min UTC: 1969-12-31 05:59:30.929 max UTC: 1969-12-31 06:00:30.808 Column 35: count: 18348 hasNull: true true: 12276 Column 36: count: 18346 hasNull: true true: 7966 @@ -457,11 +457,11 @@ Stripes: Entry 0: count: 10000 hasNull: false min: 000976f7-7075-4f3f-a564-5a375fafcc101416a2b7-7f64-41b7-851f-97d15405037e max: fffd0642-5f01-48cd-8d97-3428faee49e9b39f2b4c-efdc-4e5f-9ab5-4aa5394cb156 sum: 720000 positions: 0,0,0,0,0 Entry 1: count: 2288 hasNull: false min: 00124556-8383-44c4-a28b-7a413de74ccc4137606f-2cf7-43fb-beff-b6d374fd15ec max: ffde3bce-bb56-4fa9-81d7-146ca2eab946225c18e0-0002-4d07-9853-12c92c0f5637 sum: 164736 positions: 306445,195712,0,9766,272 Row group indices for column 9: - Entry 0: count: 7909 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 positions: 0,0,0,0,0,0,0,0,0,0 - Entry 1: count: 1264 hasNull: true min: 1969-12-31 13:59:43.64 max: 1969-12-31 14:00:30.808 positions: 0,182,100,0,0,30619,258,0,15332,258 + Entry 0: count: 7909 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 min UTC: 1969-12-31 05:59:30.929 max UTC: 1969-12-31 06:00:30.808 positions: 0,0,0,0,0,0,0,0,0,0 + Entry 1: count: 1264 hasNull: true min: 1969-12-31 13:59:43.64 max: 1969-12-31 14:00:30.808 min UTC: 1969-12-31 05:59:43.64 max UTC: 1969-12-31 06:00:30.808 positions: 0,182,100,0,0,30619,258,0,15332,258 Row group indices for column 10: - Entry 0: count: 7924 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 positions: 0,0,0,0,0,0,0,0,0,0 - Entry 1: count: 1250 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 positions: 0,126,97,0,0,30619,273,0,15334,272 + Entry 0: count: 7924 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 min UTC: 1969-12-31 05:59:30.929 max UTC: 1969-12-31 06:00:30.808 positions: 0,0,0,0,0,0,0,0,0,0 + Entry 1: count: 1250 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 min UTC: 1969-12-31 05:59:30.929 max UTC: 1969-12-31 06:00:30.808 positions: 0,126,97,0,0,30619,273,0,15334,272 Row group indices for column 11: Entry 0: count: 7140 hasNull: true true: 5115 positions: 0,0,0,0,0,0,0,0 Entry 1: count: 2034 hasNull: true true: 1023 positions: 0,126,98,0,0,520,126,4 @@ -493,11 +493,11 @@ Stripes: Entry 0: count: 6889 hasNull: true min: 0034fkcXMQI3 max: yyt0S8WorA sum: 109415 positions: 0,0,0,0,0,0,0 Entry 1: count: 2284 hasNull: true min: 004J8y max: yjDBo sum: 39719 positions: 0,168,8,0,0,13280,262 Row group indices for column 21: - Entry 0: count: 7909 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 
14:00:30.808 positions: 0,0,0,0,0,0,0,0,0,0
- Entry 1: count: 1264 hasNull: true min: 1969-12-31 13:59:43.64 max: 1969-12-31 14:00:30.808 positions: 0,182,100,0,0,30619,258,0,15332,258
+ Entry 0: count: 7909 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 min UTC: 1969-12-31 05:59:30.929 max UTC: 1969-12-31 06:00:30.808 positions: 0,0,0,0,0,0,0,0,0,0
+ Entry 1: count: 1264 hasNull: true min: 1969-12-31 13:59:43.64 max: 1969-12-31 14:00:30.808 min UTC: 1969-12-31 05:59:43.64 max UTC: 1969-12-31 06:00:30.808 positions: 0,182,100,0,0,30619,258,0,15332,258
Row group indices for column 22:
- Entry 0: count: 7924 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 positions: 0,0,0,0,0,0,0,0,0,0
- Entry 1: count: 1250 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 positions: 0,126,97,0,0,30619,273,0,15334,272
+ Entry 0: count: 7924 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 min UTC: 1969-12-31 05:59:30.929 max UTC: 1969-12-31 06:00:30.808 positions: 0,0,0,0,0,0,0,0,0,0
+ Entry 1: count: 1250 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 min UTC: 1969-12-31 05:59:30.929 max UTC: 1969-12-31 06:00:30.808 positions: 0,126,97,0,0,30619,273,0,15334,272
Row group indices for column 23:
Entry 0: count: 7140 hasNull: true true: 5115 positions: 0,0,0,0,0,0,0,0
Entry 1: count: 2034 hasNull: true true: 1023 positions: 0,126,98,0,0,520,126,4
@@ -529,11 +529,11 @@ Stripes:
Entry 0: count: 6889 hasNull: true min: 0034fkcXMQI3 max: yyt0S8WorA sum: 109415 positions: 0,0,0,0,0,0,0
Entry 1: count: 2284 hasNull: true min: 004J8y max: yjDBo sum: 39719 positions: 0,168,8,0,0,13280,262
Row group indices for column 33:
- Entry 0: count: 7909 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 positions: 0,0,0,0,0,0,0,0,0,0
- Entry 1: count: 1264 hasNull: true min: 1969-12-31 13:59:43.64 max: 1969-12-31 14:00:30.808 positions: 0,182,100,0,0,30619,258,0,15332,258
+ Entry 0: count: 7909 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 min UTC: 1969-12-31 05:59:30.929 max UTC: 1969-12-31 06:00:30.808 positions: 0,0,0,0,0,0,0,0,0,0
+ Entry 1: count: 1264 hasNull: true min: 1969-12-31 13:59:43.64 max: 1969-12-31 14:00:30.808 min UTC: 1969-12-31 05:59:43.64 max UTC: 1969-12-31 06:00:30.808 positions: 0,182,100,0,0,30619,258,0,15332,258
Row group indices for column 34:
- Entry 0: count: 7924 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 positions: 0,0,0,0,0,0,0,0,0,0
- Entry 1: count: 1250 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 positions: 0,126,97,0,0,30619,273,0,15334,272
+ Entry 0: count: 7924 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 min UTC: 1969-12-31 05:59:30.929 max UTC: 1969-12-31 06:00:30.808 positions: 0,0,0,0,0,0,0,0,0,0
+ Entry 1: count: 1250 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 min UTC: 1969-12-31 05:59:30.929 max UTC: 1969-12-31 06:00:30.808 positions: 0,126,97,0,0,30619,273,0,15334,272
Row group indices for column 35:
Entry 0: count: 7140 hasNull: true true: 5115 positions: 0,0,0,0,0,0,0,0
Entry 1: count: 2034 hasNull: true true: 1023 positions: 0,126,98,0,0,520,126,4
@@ -729,11 +729,11 @@ Stripes:
Entry 0: count: 10000 hasNull: false min: 000976f7-7075-4f3f-a564-5a375fafcc101416a2b7-7f64-41b7-851f-97d15405037e max: fffd0642-5f01-48cd-8d97-3428faee49e9b39f2b4c-efdc-4e5f-9ab5-4aa5394cb156 sum: 720000 positions: 0,0,0,0,0
Entry 1: count: 2288 hasNull: false min: 00124556-8383-44c4-a28b-7a413de74ccc4137606f-2cf7-43fb-beff-b6d374fd15ec max: ffde3bce-bb56-4fa9-81d7-146ca2eab946225c18e0-0002-4d07-9853-12c92c0f5637 sum: 164736 positions: 306445,195712,0,9766,272
Row group indices for column 9:
- Entry 0: count: 7909 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 positions: 0,0,0,0,0,0,0,0,0,0
- Entry 1: count: 1264 hasNull: true min: 1969-12-31 13:59:43.64 max: 1969-12-31 14:00:30.808 positions: 0,182,100,0,0,30619,258,0,15332,258
+ Entry 0: count: 7909 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 min UTC: 1969-12-31 05:59:30.929 max UTC: 1969-12-31 06:00:30.808 positions: 0,0,0,0,0,0,0,0,0,0
+ Entry 1: count: 1264 hasNull: true min: 1969-12-31 13:59:43.64 max: 1969-12-31 14:00:30.808 min UTC: 1969-12-31 05:59:43.64 max UTC: 1969-12-31 06:00:30.808 positions: 0,182,100,0,0,30619,258,0,15332,258
Row group indices for column 10:
- Entry 0: count: 7924 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 positions: 0,0,0,0,0,0,0,0,0,0
- Entry 1: count: 1250 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 positions: 0,126,97,0,0,30619,273,0,15334,272
+ Entry 0: count: 7924 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 min UTC: 1969-12-31 05:59:30.929 max UTC: 1969-12-31 06:00:30.808 positions: 0,0,0,0,0,0,0,0,0,0
+ Entry 1: count: 1250 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 min UTC: 1969-12-31 05:59:30.929 max UTC: 1969-12-31 06:00:30.808 positions: 0,126,97,0,0,30619,273,0,15334,272
Row group indices for column 11:
Entry 0: count: 7140 hasNull: true true: 5115 positions: 0,0,0,0,0,0,0,0
Entry 1: count: 2034 hasNull: true true: 1023 positions: 0,126,98,0,0,520,126,4
@@ -765,11 +765,11 @@ Stripes:
Entry 0: count: 6889 hasNull: true min: 0034fkcXMQI3 max: yyt0S8WorA sum: 109415 positions: 0,0,0,0,0,0,0
Entry 1: count: 2284 hasNull: true min: 004J8y max: yjDBo sum: 39719 positions: 0,168,8,0,0,13280,262
Row group indices for column 21:
- Entry 0: count: 7909 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 positions: 0,0,0,0,0,0,0,0,0,0
- Entry 1: count: 1264 hasNull: true min: 1969-12-31 13:59:43.64 max: 1969-12-31 14:00:30.808 positions: 0,182,100,0,0,30619,258,0,15332,258
+ Entry 0: count: 7909 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 min UTC: 1969-12-31 05:59:30.929 max UTC: 1969-12-31 06:00:30.808 positions: 0,0,0,0,0,0,0,0,0,0
+ Entry 1: count: 1264 hasNull: true min: 1969-12-31 13:59:43.64 max: 1969-12-31 14:00:30.808 min UTC: 1969-12-31 05:59:43.64 max UTC: 1969-12-31 06:00:30.808 positions: 0,182,100,0,0,30619,258,0,15332,258
Row group indices for column 22:
- Entry 0: count: 7924 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 positions: 0,0,0,0,0,0,0,0,0,0
- Entry 1: count: 1250 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 positions: 0,126,97,0,0,30619,273,0,15334,272
+ Entry 0: count: 7924 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 min UTC: 1969-12-31 05:59:30.929 max UTC: 1969-12-31 06:00:30.808 positions: 0,0,0,0,0,0,0,0,0,0
+ Entry 1: count: 1250 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 min UTC: 1969-12-31 05:59:30.929 max UTC: 1969-12-31 06:00:30.808 positions: 0,126,97,0,0,30619,273,0,15334,272
Row group indices for column 23:
Entry 0: count: 7140 hasNull: true true: 5115 positions: 0,0,0,0,0,0,0,0
Entry 1: count: 2034 hasNull: true true: 1023 positions: 0,126,98,0,0,520,126,4
@@ -801,11 +801,11 @@ Stripes:
Entry 0: count: 6889 hasNull: true min: 0034fkcXMQI3 max: yyt0S8WorA sum: 109415 positions: 0,0,0,0,0,0,0
Entry 1: count: 2284 hasNull: true min: 004J8y max: yjDBo sum: 39719 positions: 0,168,8,0,0,13280,262
Row group indices for column 33:
- Entry 0: count: 7909 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 positions: 0,0,0,0,0,0,0,0,0,0
- Entry 1: count: 1264 hasNull: true min: 1969-12-31 13:59:43.64 max: 1969-12-31 14:00:30.808 positions: 0,182,100,0,0,30619,258,0,15332,258
+ Entry 0: count: 7909 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 min UTC: 1969-12-31 05:59:30.929 max UTC: 1969-12-31 06:00:30.808 positions: 0,0,0,0,0,0,0,0,0,0
+ Entry 1: count: 1264 hasNull: true min: 1969-12-31 13:59:43.64 max: 1969-12-31 14:00:30.808 min UTC: 1969-12-31 05:59:43.64 max UTC: 1969-12-31 06:00:30.808 positions: 0,182,100,0,0,30619,258,0,15332,258
Row group indices for column 34:
- Entry 0: count: 7924 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 positions: 0,0,0,0,0,0,0,0,0,0
- Entry 1: count: 1250 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 positions: 0,126,97,0,0,30619,273,0,15334,272
+ Entry 0: count: 7924 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 min UTC: 1969-12-31 05:59:30.929 max UTC: 1969-12-31 06:00:30.808 positions: 0,0,0,0,0,0,0,0,0,0
+ Entry 1: count: 1250 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 min UTC: 1969-12-31 05:59:30.929 max UTC: 1969-12-31 06:00:30.808 positions: 0,126,97,0,0,30619,273,0,15334,272
Row group indices for column 35:
Entry 0: count: 7140 hasNull: true true: 5115 positions: 0,0,0,0,0,0,0,0
Entry 1: count: 2034 hasNull: true true: 1023 positions: 0,126,98,0,0,520,126,4
@@ -813,7 +813,7 @@ Stripes:
Entry 0: count: 6889 hasNull: true true: 3402 positions: 0,0,0,0,0,0,0,0
Entry 1: count: 2284 hasNull: true true: 581 positions: 0,168,8,0,0,520,97,1
-File length: 3007982 bytes
+File length: 3007984 bytes
Padding length: 0 bytes
Padding ratio: 0%
________________________________________________________________________________________________________________________
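The new min UTC/max UTC fields in the row-group entries above reflect ORC 1.5 recording timestamp statistics in both the writer's local time zone and UTC. A minimal sketch of reading those statistics back, assuming orc-core 1.5's TimestampColumnStatistics exposes getMinimumUTC()/getMaximumUTC() as the new dump fields suggest; the file path argument is hypothetical:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.orc.ColumnStatistics;
    import org.apache.orc.OrcFile;
    import org.apache.orc.Reader;
    import org.apache.orc.TimestampColumnStatistics;

    public class DumpTimestampStats {
      public static void main(String[] args) throws Exception {
        // Any ORC file written with orc-core 1.5+; args[0] is a hypothetical path.
        Reader reader = OrcFile.createReader(new Path(args[0]),
            OrcFile.readerOptions(new Configuration()));
        // File-level statistics; the dump's per-row-group entries carry the same fields.
        ColumnStatistics[] stats = reader.getStatistics();
        for (int col = 0; col < stats.length; col++) {
          if (stats[col] instanceof TimestampColumnStatistics) {
            TimestampColumnStatistics ts = (TimestampColumnStatistics) stats[col];
            // min/max are writer-local; the UTC pair is what the new dump lines show.
            System.out.println("column " + col
                + " min: " + ts.getMinimum() + " max: " + ts.getMaximum()
                + " min UTC: " + ts.getMinimumUTC() + " max UTC: " + ts.getMaximumUTC());
          }
        }
      }
    }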
diff --git a/ql/src/test/results/clientpositive/typechangetest.q.out b/ql/src/test/results/clientpositive/typechangetest.q.out
index bbb53a09cd..5ca96a5d4f 100644
--- a/ql/src/test/results/clientpositive/typechangetest.q.out
+++ b/ql/src/test/results/clientpositive/typechangetest.q.out
@@ -1183,9 +1183,9 @@ POSTHOOK: type: QUERY
POSTHOOK: Input: default@testaltcolorc_n0
#### A masked pattern was here ####
1 2017-11-07 09:02:49.999999999
-2 1400-01-01 01:01:00.000000001
-3 1400-01-01 01:01:00.000000001
-4 1400-01-01 01:01:00.000000001
+2 1400-01-01 01:01:01.000000001
+3 1400-01-01 01:01:01.000000001
+4 1400-01-01 01:01:01.000000001
PREHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColORC_n0 order by cId
PREHOOK: type: QUERY
PREHOOK: Input: default@testaltcolorc_n0
@@ -1259,9 +1259,9 @@ POSTHOOK: type: QUERY
POSTHOOK: Input: default@testaltcolorc_n0
#### A masked pattern was here ####
1 2017-11-07 09:02:49.999999999
-2 1400-01-01 01:01:00.000000001
-3 1400-01-01 01:01:00.000000001
-4 1400-01-01 01:01:00.000000001
+2 1400-01-01 01:01:01.000000001
+3 1400-01-01 01:01:01.000000001
+4 1400-01-01 01:01:01.000000001
PREHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColORC_n0 order by cId
PREHOOK: type: QUERY
PREHOOK: Input: default@testaltcolorc_n0
@@ -1335,9 +1335,9 @@ POSTHOOK: type: QUERY
POSTHOOK: Input: default@testaltcolorc_n0
#### A masked pattern was here ####
1 2017-11-07 09:02:49.999999999
-2 1400-01-01 01:01:00.000000001
-3 1400-01-01 01:01:00.000000001
-4 1400-01-01 01:01:00.000000001
+2 1400-01-01 01:01:01.000000001
+3 1400-01-01 01:01:01.000000001
+4 1400-01-01 01:01:01.000000001
PREHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColORC_n0 order by cId
PREHOOK: type: QUERY
PREHOOK: Input: default@testaltcolorc_n0
@@ -1411,9 +1411,9 @@ POSTHOOK: type: QUERY
POSTHOOK: Input: default@testaltcolorc_n0
#### A masked pattern was here ####
1 2017-11-07 09:02:49.999999999
-2 1400-01-01 01:01:00.000000001
-3 1400-01-01 01:01:00.000000001
-4 1400-01-01 01:01:00.000000001
+2 1400-01-01 01:01:01.000000001
+3 1400-01-01 01:01:01.000000001
+4 1400-01-01 01:01:01.000000001
PREHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColORC_n0 order by cId
PREHOOK: type: QUERY
PREHOOK: Input: default@testaltcolorc_n0
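The expected values above move from 1400-01-01 01:01:00.000000001 to 01:01:01.000000001, exactly one second, which is the kind of error that appears when a pre-epoch instant is split into whole seconds with truncating rather than flooring division. A small self-contained probe of that failure mode in plain Java; it illustrates the class of bug, not necessarily the precise change inside ORC 1.5:

    import java.sql.Timestamp;

    public class NegativeEpochSeconds {
      public static void main(String[] args) {
        // 1400-01-01 is far before the epoch, so getTime() is negative.
        long millis = Timestamp.valueOf("1400-01-01 01:01:00.001").getTime();
        // Splitting a negative instant into whole seconds needs floor semantics;
        // truncating division lands one second later, the same one-second step
        // visible in the corrected expected rows above.
        System.out.println("floorDiv:  " + Math.floorDiv(millis, 1000L));
        System.out.println("truncated: " + (millis / 1000L));
      }
    }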
diff --git a/ql/src/test/results/clientpositive/vector_case_when_1.q.out b/ql/src/test/results/clientpositive/vector_case_when_1.q.out
index b2ac291877..66807acb16 100644
--- a/ql/src/test/results/clientpositive/vector_case_when_1.q.out
+++ b/ql/src/test/results/clientpositive/vector_case_when_1.q.out
@@ -199,15 +199,15 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: lineitem_test
- Statistics: Num rows: 101 Data size: 78920 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 101 Data size: 78500 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: l_quantity (type: int), CASE WHEN ((l_quantity = 1)) THEN ('Single') WHEN ((l_quantity = 2)) THEN ('Two') WHEN ((l_quantity < 10)) THEN ('Some') WHEN ((l_quantity < 100)) THEN ('Many') ELSE ('Huge number') END (type: string), CASE WHEN ((l_quantity = 1)) THEN ('Single') WHEN ((l_quantity = 2)) THEN ('Two') WHEN ((l_quantity < 10)) THEN ('Some') WHEN ((l_quantity < 100)) THEN ('Many') ELSE (null) END (type: string), CASE WHEN ((l_quantity = 1)) THEN ('Single') WHEN ((l_quantity = 2)) THEN ('Two') WHEN ((l_quantity < 10)) THEN ('Some') WHEN ((l_quantity < 100)) THEN (null) ELSE (null) END (type: string), if((l_shipmode = 'SHIP '), date_add(l_shipdate, 10), date_add(l_shipdate, 5)) (type: date), CASE WHEN ((l_returnflag = 'N')) THEN ((l_extendedprice * (1.0D - l_discount))) ELSE (0) END (type: double), CASE WHEN ((l_returnflag = 'N')) THEN ((l_extendedprice * (1.0D - l_discount))) ELSE (0.0D) END (type: double), if((UDFToString(l_shipinstruct) = 'DELIVER IN PERSON'), null, l_tax) (type: decimal(10,2)), if((UDFToString(l_shipinstruct) = 'TAKE BACK RETURN'), l_tax, null) (type: decimal(10,2)), if((UDFToString(l_shipinstruct) = 'DELIVER IN PERSON'), 0, l_tax) (type: decimal(12,2)), if((UDFToString(l_shipinstruct) = 'TAKE BACK RETURN'), l_tax, 0) (type: decimal(12,2)), if((UDFToString(l_shipinstruct) = 'DELIVER IN PERSON'), 0, l_tax) (type: decimal(10,2)), if((UDFToString(l_shipinstruct) = 'TAKE BACK RETURN'), l_tax, 0) (type: decimal(10,2)), if((l_partkey > 30), CAST( l_receiptdate AS TIMESTAMP), CAST( l_commitdate AS TIMESTAMP)) (type: timestamp), if((l_suppkey > 10000), datediff(l_receiptdate, l_commitdate), null) (type: int), if((l_suppkey > 10000), null, datediff(l_receiptdate, l_commitdate)) (type: int), if(((l_suppkey % 500) > 100), DATE'2009-01-01', DATE'2009-12-31') (type: date)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16
- Statistics: Num rows: 101 Data size: 78920 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 101 Data size: 78500 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: int)
sort order: +
- Statistics: Num rows: 101 Data size: 78920 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 101 Data size: 78500 Basic stats: COMPLETE Column stats: NONE
value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: date), _col5 (type: double), _col6 (type: double), _col7 (type: decimal(10,2)), _col8 (type: decimal(10,2)), _col9 (type: decimal(12,2)), _col10 (type: decimal(12,2)), _col11 (type: decimal(10,2)), _col12 (type: decimal(10,2)), _col13 (type: timestamp), _col14 (type: int), _col15 (type: int), _col16 (type: date)
Map Vectorization:
enabled: true
@@ -223,10 +223,10 @@ STAGE PLANS:
Select Operator
expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string), VALUE._col3 (type: date), VALUE._col4 (type: double), VALUE._col5 (type: double), VALUE._col6 (type: decimal(10,2)), VALUE._col7 (type: decimal(10,2)), VALUE._col8 (type: decimal(12,2)), VALUE._col9 (type: decimal(12,2)), VALUE._col10 (type: decimal(10,2)), VALUE._col11 (type: decimal(10,2)), VALUE._col12 (type: timestamp), VALUE._col13 (type: int), VALUE._col14 (type: int), VALUE._col15 (type: date)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16
- Statistics: Num rows: 101 Data size: 78920 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 101 Data size: 78500 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 101 Data size: 78920 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 101 Data size: 78500 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -525,7 +525,7 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: lineitem_test
- Statistics: Num rows: 101 Data size: 78920 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 101 Data size: 78500 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
vectorizationSchemaColumns: [0:l_orderkey:int, 1:l_partkey:int, 2:l_suppkey:int, 3:l_linenumber:int, 4:l_quantity:int, 5:l_extendedprice:double, 6:l_discount:double, 7:l_tax:decimal(10,2), 8:l_returnflag:char(1), 9:l_linestatus:char(1), 10:l_shipdate:date, 11:l_commitdate:date, 12:l_receiptdate:date, 13:l_shipinstruct:varchar(20), 14:l_shipmode:char(10), 15:l_comment:string, 16:ROW__ID:struct]
@@ -537,7 +537,7 @@ STAGE PLANS:
native: true
projectedOutputColumnNums: [4, 22, 24, 25, 26, 27, 28, 30, 31, 32, 33, 34, 35, 38, 40, 43, 44]
selectExpressions: IfExprStringScalarStringGroupColumn(col 17:boolean, val Singlecol 21:string)(children: LongColEqualLongScalar(col 4:int, val 1) -> 17:boolean, IfExprStringScalarStringGroupColumn(col 18:boolean, val Twocol 22:string)(children: LongColEqualLongScalar(col 4:int, val 2) -> 18:boolean, IfExprStringScalarStringGroupColumn(col 19:boolean, val Somecol 21:string)(children: LongColLessLongScalar(col 4:int, val 10) -> 19:boolean, IfExprStringScalarStringScalar(col 20:boolean, val Many, val Huge number)(children: LongColLessLongScalar(col 4:int, val 100) -> 20:boolean) -> 21:string) -> 22:string) -> 21:string) -> 22:string, IfExprStringScalarStringGroupColumn(col 17:boolean, val Singlecol 23:string)(children: LongColEqualLongScalar(col 4:int, val 1) -> 17:boolean, IfExprStringScalarStringGroupColumn(col 18:boolean, val Twocol 24:string)(children: LongColEqualLongScalar(col 4:int, val 2) -> 18:boolean, IfExprStringScalarStringGroupColumn(col 19:boolean, val Somecol 23:string)(children: LongColLessLongScalar(col 4:int, val 10) -> 19:boolean, IfExprColumnNull(col 20:boolean, col 21:string, null)(children: LongColLessLongScalar(col 4:int, val 100) -> 20:boolean, ConstantVectorExpression(val Many) -> 21:string) -> 23:string) -> 24:string) -> 23:string) -> 24:string, IfExprStringScalarStringGroupColumn(col 17:boolean, val Singlecol 23:string)(children: LongColEqualLongScalar(col 4:int, val 1) -> 17:boolean, IfExprStringScalarStringGroupColumn(col 18:boolean, val Twocol 25:string)(children: LongColEqualLongScalar(col 4:int, val 2) -> 18:boolean, IfExprStringScalarStringGroupColumn(col 19:boolean, val Somecol 23:string)(children: LongColLessLongScalar(col 4:int, val 10) -> 19:boolean, IfExprNullNull(null, null) -> 23:string) -> 25:string) -> 23:string) -> 25:string, IfExprLongColumnLongColumn(col 17:boolean, col 18:date, col 19:date)(children: StringGroupColEqualCharScalar(col 14:char(10), val SHIP) -> 17:boolean, VectorUDFDateAddColScalar(col 10:date, val 10) -> 18:date, VectorUDFDateAddColScalar(col 10:date, val 5) -> 19:date) -> 26:date, IfExprDoubleColumnLongScalar(col 17:boolean, col 28:double, val 0)(children: StringGroupColEqualCharScalar(col 8:char(1), val N) -> 17:boolean, DoubleColMultiplyDoubleColumn(col 5:double, col 27:double)(children: DoubleScalarSubtractDoubleColumn(val 1.0, col 6:double) -> 27:double) -> 28:double) -> 27:double, IfExprDoubleColumnDoubleScalar(col 17:boolean, col 29:double, val 0.0)(children: StringGroupColEqualCharScalar(col 8:char(1), val N) -> 17:boolean, DoubleColMultiplyDoubleColumn(col 5:double, col 28:double)(children: DoubleScalarSubtractDoubleColumn(val 1.0, col 6:double) -> 28:double) -> 29:double) -> 28:double, IfExprNullColumn(col 17:boolean, null, col 7)(children: StringGroupColEqualStringScalar(col 23:string, val DELIVER IN PERSON)(children: CastStringGroupToString(col 13:varchar(20)) -> 23:string) -> 17:boolean, col 7:decimal(10,2)) -> 30:decimal(10,2), IfExprColumnNull(col 18:boolean, col 7:decimal(10,2), null)(children: StringGroupColEqualStringScalar(col 23:string, val TAKE BACK RETURN)(children: CastStringGroupToString(col 13:varchar(20)) -> 23:string) -> 18:boolean, col 7:decimal(10,2)) -> 31:decimal(10,2), VectorUDFAdaptor(if((UDFToString(l_shipinstruct) = 'DELIVER IN PERSON'), 0, l_tax))(children: StringGroupColEqualStringScalar(col 23:string, val DELIVER IN PERSON)(children: CastStringGroupToString(col 13:varchar(20)) -> 23:string) -> 19:boolean) -> 32:decimal(12,2), VectorUDFAdaptor(if((UDFToString(l_shipinstruct) = 'TAKE BACK RETURN'), l_tax, 0))(children: StringGroupColEqualStringScalar(col 23:string, val TAKE BACK RETURN)(children: CastStringGroupToString(col 13:varchar(20)) -> 23:string) -> 19:boolean) -> 33:decimal(12,2), VectorUDFAdaptor(if((UDFToString(l_shipinstruct) = 'DELIVER IN PERSON'), 0, l_tax))(children: StringGroupColEqualStringScalar(col 23:string, val DELIVER IN PERSON)(children: CastStringGroupToString(col 13:varchar(20)) -> 23:string) -> 19:boolean) -> 34:decimal(10,2), VectorUDFAdaptor(if((UDFToString(l_shipinstruct) = 'TAKE BACK RETURN'), l_tax, 0))(children: StringGroupColEqualStringScalar(col 23:string, val TAKE BACK RETURN)(children: CastStringGroupToString(col 13:varchar(20)) -> 23:string) -> 19:boolean) -> 35:decimal(10,2), IfExprTimestampColumnColumn(col 19:boolean, col 36:timestampcol 37:timestamp)(children: LongColGreaterLongScalar(col 1:int, val 30) -> 19:boolean, CastDateToTimestamp(col 12:date) -> 36:timestamp, CastDateToTimestamp(col 11:date) -> 37:timestamp) -> 38:timestamp, IfExprColumnNull(col 19:boolean, col 39:int, null)(children: LongColGreaterLongScalar(col 2:int, val 10000) -> 19:boolean, VectorUDFDateDiffColCol(col 12:date, col 11:date) -> 39:int) -> 40:int, IfExprNullColumn(col 41:boolean, null, col 42)(children: LongColGreaterLongScalar(col 2:int, val 10000) -> 41:boolean, VectorUDFDateDiffColCol(col 12:date, col 11:date) -> 42:int) -> 43:int, IfExprLongScalarLongScalar(col 45:boolean, val 14245, val 14609)(children: LongColGreaterLongScalar(col 44:int, val 100)(children: LongColModuloLongScalar(col 2:int, val 500) -> 44:int) -> 45:boolean) -> 44:date
- Statistics: Num rows: 101 Data size: 78920 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 101 Data size: 78500 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: int)
sort order: +
@@ -546,7 +546,7 @@ STAGE PLANS:
native: false
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
- Statistics: Num rows: 101 Data size: 78920 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 101 Data size: 78500 Basic stats: COMPLETE Column stats: NONE
value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: date), _col5 (type: double), _col6 (type: double), _col7 (type: decimal(10,2)), _col8 (type: decimal(10,2)), _col9 (type: decimal(12,2)), _col10 (type: decimal(12,2)), _col11 (type: decimal(10,2)), _col12 (type: decimal(10,2)), _col13 (type: timestamp), _col14 (type: int), _col15 (type: int), _col16 (type: date)
Execution mode: vectorized
Map Vectorization:
@@ -572,10 +572,10 @@ STAGE PLANS:
Select Operator
expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string), VALUE._col3 (type: date), VALUE._col4 (type: double), VALUE._col5 (type: double), VALUE._col6 (type: decimal(10,2)), VALUE._col7 (type: decimal(10,2)), VALUE._col8 (type: decimal(12,2)), VALUE._col9 (type: decimal(12,2)), VALUE._col10 (type: decimal(10,2)), VALUE._col11 (type: decimal(10,2)), VALUE._col12 (type: timestamp), VALUE._col13 (type: int), VALUE._col14 (type: int), VALUE._col15 (type: date)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16
- Statistics: Num rows: 101 Data size: 78920 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 101 Data size: 78500 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 101 Data size: 78920 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 101 Data size: 78500 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -874,7 +874,7 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: lineitem_test
- Statistics: Num rows: 101 Data size: 78920 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 101 Data size: 78500 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
vectorizationSchemaColumns: [0:l_orderkey:int, 1:l_partkey:int, 2:l_suppkey:int, 3:l_linenumber:int, 4:l_quantity:int, 5:l_extendedprice:double, 6:l_discount:double, 7:l_tax:decimal(10,2), 8:l_returnflag:char(1), 9:l_linestatus:char(1), 10:l_shipdate:date, 11:l_commitdate:date, 12:l_receiptdate:date, 13:l_shipinstruct:varchar(20), 14:l_shipmode:char(10), 15:l_comment:string, 16:ROW__ID:struct]
@@ -886,7 +886,7 @@ STAGE PLANS:
native: true
projectedOutputColumnNums: [4, 27, 38, 48, 52, 54, 60, 63, 65, 67, 68, 69, 70, 73, 76, 79, 80]
selectExpressions: IfExprColumnCondExpr(col 17:boolean, col 18:stringcol 26:string)(children: LongColEqualLongScalar(col 4:int, val 1) -> 17:boolean, ConstantVectorExpression(val Single) -> 18:string, IfExprColumnCondExpr(col 19:boolean, col 20:stringcol 25:string)(children: LongColEqualLongScalar(col 4:int, val 2) -> 19:boolean, ConstantVectorExpression(val Two) -> 20:string, IfExprColumnCondExpr(col 21:boolean, col 22:stringcol 24:string)(children: LongColLessLongScalar(col 4:int, val 10) -> 21:boolean, ConstantVectorExpression(val Some) -> 22:string, IfExprStringScalarStringScalar(col 23:boolean, val Many, val Huge number)(children: LongColLessLongScalar(col 4:int, val 100) -> 23:boolean) -> 24:string) -> 25:string) -> 26:string) -> 27:string, IfExprColumnCondExpr(col 23:boolean, col 28:stringcol 37:string)(children: LongColEqualLongScalar(col 4:int, val 1) -> 23:boolean, ConstantVectorExpression(val Single) -> 28:string, IfExprColumnCondExpr(col 29:boolean, col 30:stringcol 36:string)(children: LongColEqualLongScalar(col 4:int, val 2) -> 29:boolean, ConstantVectorExpression(val Two) -> 30:string, IfExprColumnCondExpr(col 31:boolean, col 32:stringcol 35:string)(children: LongColLessLongScalar(col 4:int, val 10) -> 31:boolean, ConstantVectorExpression(val Some) -> 32:string, IfExprColumnNull(col 33:boolean, col 34:string, null)(children: LongColLessLongScalar(col 4:int, val 100) -> 33:boolean, ConstantVectorExpression(val Many) -> 34:string) -> 35:string) -> 36:string) -> 37:string) -> 38:string, IfExprColumnCondExpr(col 39:boolean, col 40:stringcol 47:string)(children: LongColEqualLongScalar(col 4:int, val 1) -> 39:boolean, ConstantVectorExpression(val Single) -> 40:string, IfExprColumnCondExpr(col 41:boolean, col 42:stringcol 46:string)(children: LongColEqualLongScalar(col 4:int, val 2) -> 41:boolean, ConstantVectorExpression(val Two) -> 42:string, IfExprColumnCondExpr(col 43:boolean, col 44:stringcol 45:string)(children: LongColLessLongScalar(col 4:int, val 10) -> 43:boolean, ConstantVectorExpression(val Some) -> 44:string, IfExprNullNull(null, null) -> 45:string) -> 46:string) -> 47:string) -> 48:string, IfExprCondExprCondExpr(col 49:boolean, col 50:datecol 51:date)(children: StringGroupColEqualCharScalar(col 14:char(10), val SHIP) -> 49:boolean, VectorUDFDateAddColScalar(col 10:date, val 10) -> 50:date, VectorUDFDateAddColScalar(col 10:date, val 5) -> 51:date) -> 52:date, IfExprDoubleColumnLongScalar(col 57:boolean, col 58:double, val 0)(children: StringGroupColEqualCharScalar(col 8:char(1), val N) -> 57:boolean, DoubleColMultiplyDoubleColumn(col 5:double, col 54:double)(children: DoubleScalarSubtractDoubleColumn(val 1.0, col 6:double) -> 54:double) -> 58:double) -> 54:double, IfExprCondExprColumn(col 57:boolean, col 59:double, col 58:double)(children: StringGroupColEqualCharScalar(col 8:char(1), val N) -> 57:boolean, DoubleColMultiplyDoubleColumn(col 5:double, col 58:double)(children: DoubleScalarSubtractDoubleColumn(val 1.0, col 6:double) -> 58:double) -> 59:double, ConstantVectorExpression(val 0.0) -> 58:double) -> 60:double, IfExprNullColumn(col 62:boolean, null, col 7)(children: StringGroupColEqualStringScalar(col 61:string, val DELIVER IN PERSON)(children: CastStringGroupToString(col 13:varchar(20)) -> 61:string) -> 62:boolean, col 7:decimal(10,2)) -> 63:decimal(10,2), IfExprColumnNull(col 64:boolean, col 7:decimal(10,2), null)(children: StringGroupColEqualStringScalar(col 61:string, val TAKE BACK RETURN)(children: CastStringGroupToString(col 13:varchar(20)) -> 61:string) -> 64:boolean, col 7:decimal(10,2)) -> 65:decimal(10,2), VectorUDFAdaptor(if((UDFToString(l_shipinstruct) = 'DELIVER IN PERSON'), 0, l_tax))(children: StringGroupColEqualStringScalar(col 61:string, val DELIVER IN PERSON)(children: CastStringGroupToString(col 13:varchar(20)) -> 61:string) -> 66:boolean) -> 67:decimal(12,2), VectorUDFAdaptor(if((UDFToString(l_shipinstruct) = 'TAKE BACK RETURN'), l_tax, 0))(children: StringGroupColEqualStringScalar(col 61:string, val TAKE BACK RETURN)(children: CastStringGroupToString(col 13:varchar(20)) -> 61:string) -> 66:boolean) -> 68:decimal(12,2), VectorUDFAdaptor(if((UDFToString(l_shipinstruct) = 'DELIVER IN PERSON'), 0, l_tax))(children: StringGroupColEqualStringScalar(col 61:string, val DELIVER IN PERSON)(children: CastStringGroupToString(col 13:varchar(20)) -> 61:string) -> 66:boolean) -> 69:decimal(10,2), VectorUDFAdaptor(if((UDFToString(l_shipinstruct) = 'TAKE BACK RETURN'), l_tax, 0))(children: StringGroupColEqualStringScalar(col 61:string, val TAKE BACK RETURN)(children: CastStringGroupToString(col 13:varchar(20)) -> 61:string) -> 66:boolean) -> 70:decimal(10,2), IfExprCondExprCondExpr(col 66:boolean, col 71:timestampcol 72:timestamp)(children: LongColGreaterLongScalar(col 1:int, val 30) -> 66:boolean, CastDateToTimestamp(col 12:date) -> 71:timestamp, CastDateToTimestamp(col 11:date) -> 72:timestamp) -> 73:timestamp, IfExprCondExprNull(col 74:boolean, col 75:int, null)(children: LongColGreaterLongScalar(col 2:int, val 10000) -> 74:boolean, VectorUDFDateDiffColCol(col 12:date, col 11:date) -> 75:int) -> 76:int, IfExprNullCondExpr(col 77:boolean, null, col 78:int)(children: LongColGreaterLongScalar(col 2:int, val 10000) -> 77:boolean, VectorUDFDateDiffColCol(col 12:date, col 11:date) -> 78:int) -> 79:int, IfExprLongScalarLongScalar(col 81:boolean, val 14245, val 14609)(children: LongColGreaterLongScalar(col 80:int, val 100)(children: LongColModuloLongScalar(col 2:int, val 500) -> 80:int) -> 81:boolean) -> 80:date
- Statistics: Num rows: 101 Data size: 78920 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 101 Data size: 78500 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: int)
sort order: +
@@ -895,7 +895,7 @@ STAGE PLANS:
native: false
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
- Statistics: Num rows: 101 Data size: 78920 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 101 Data size: 78500 Basic stats: COMPLETE Column stats: NONE
value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: date), _col5 (type: double), _col6 (type: double), _col7 (type: decimal(10,2)), _col8 (type: decimal(10,2)), _col9 (type: decimal(12,2)), _col10 (type: decimal(12,2)), _col11 (type: decimal(10,2)), _col12 (type: decimal(10,2)), _col13 (type: timestamp), _col14 (type: int), _col15 (type: int), _col16 (type: date)
Execution mode: vectorized
Map Vectorization:
@@ -921,10 +921,10 @@ STAGE PLANS:
Select Operator
expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string), VALUE._col3 (type: date), VALUE._col4 (type: double), VALUE._col5 (type: double), VALUE._col6 (type: decimal(10,2)), VALUE._col7 (type: decimal(10,2)), VALUE._col8 (type: decimal(12,2)), VALUE._col9 (type: decimal(12,2)), VALUE._col10 (type: decimal(10,2)), VALUE._col11 (type: decimal(10,2)), VALUE._col12 (type: timestamp), VALUE._col13 (type: int), VALUE._col14 (type: int), VALUE._col15 (type: date)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16
- Statistics: Num rows: 101 Data size: 78920 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 101 Data size: 78500 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 101 Data size: 78920 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 101 Data size: 78500 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
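From this point on, every remaining hunk in the patch is the same mechanical update: the Data size in the Statistics rows shrinks slightly (78920 to 78500 for lineitem_test above) because the upgraded ORC writer reports a slightly different raw data size for the regenerated test tables, which is presumably what Hive's basic stats pick up for these plans. A sketch of reading that figure straight off a file, using org.apache.orc.Reader's getNumberOfRows()/getRawDataSize(); the path argument is hypothetical:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.orc.OrcFile;
    import org.apache.orc.Reader;

    public class RawSize {
      public static void main(String[] args) throws Exception {
        Reader reader = OrcFile.createReader(new Path(args[0]),
            OrcFile.readerOptions(new Configuration()));
        // The per-file estimate that feeds the "Data size" shown in basic stats.
        System.out.println("rows: " + reader.getNumberOfRows()
            + " raw data size: " + reader.getRawDataSize());
      }
    }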
diff --git a/ql/src/test/results/clientpositive/vector_case_when_2.q.out b/ql/src/test/results/clientpositive/vector_case_when_2.q.out
index 159c9831a5..b8a5214a17 100644
--- a/ql/src/test/results/clientpositive/vector_case_when_2.q.out
+++ b/ql/src/test/results/clientpositive/vector_case_when_2.q.out
@@ -129,15 +129,15 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: timestamps
- Statistics: Num rows: 51 Data size: 12384 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 51 Data size: 12300 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), CASE WHEN ((ctimestamp2 <= TIMESTAMP'1800-12-31 00:00:00.0')) THEN ('1800s or Earlier') WHEN ((ctimestamp2 < TIMESTAMP'1900-01-01 00:00:00.0')) THEN ('1900s') WHEN (ctimestamp2 BETWEEN TIMESTAMP'2006-01-01 00:00:00.0' AND TIMESTAMP'2010-12-31 23:59:59.999999999') THEN ('Late 2000s') WHEN ((ctimestamp2 <= TIMESTAMP'2015-12-31 23:59:59.999999999')) THEN ('Early 2010s') ELSE ('Unknown') END (type: string), CASE WHEN ((ctimestamp2 <= TIMESTAMP'2000-12-31 23:59:59.999999999')) THEN ('Old') WHEN ((ctimestamp2 < TIMESTAMP'2006-01-01 00:00:00.0')) THEN ('Early 2000s') WHEN (ctimestamp2 BETWEEN TIMESTAMP'2006-01-01 00:00:00.0' AND TIMESTAMP'2010-12-31 23:59:59.999999999') THEN ('Late 2000s') WHEN ((ctimestamp2 <= TIMESTAMP'2015-12-31 23:59:59.999999999')) THEN ('Early 2010s') ELSE (null) END (type: string), CASE WHEN ((ctimestamp2 <= TIMESTAMP'2000-12-31 23:59:59.999999999')) THEN ('Old') WHEN ((ctimestamp2 < TIMESTAMP'2006-01-01 00:00:00.0')) THEN ('Early 2000s') WHEN (ctimestamp2 BETWEEN TIMESTAMP'2006-01-01 00:00:00.0' AND TIMESTAMP'2010-12-31 23:59:59.999999999') THEN ('Late 2000s') WHEN ((ctimestamp2 <= TIMESTAMP'2015-12-31 23:59:59.999999999')) THEN (null) ELSE (null) END (type: string), if((ctimestamp1 < TIMESTAMP'1974-10-04 17:21:03.989'), year(ctimestamp1), year(ctimestamp2)) (type: int), CASE WHEN ((stimestamp1 like '%19%')) THEN (stimestamp1) ELSE (TIMESTAMP'2018-03-08 23:04:59.0') END (type: string), if((ctimestamp1 = TIMESTAMP'2021-09-24 03:18:32.413655165'), null, minute(ctimestamp1)) (type: int), if(((ctimestamp2 >= TIMESTAMP'5344-10-04 18:40:08.165') and (ctimestamp2 < TIMESTAMP'6631-11-13 16:31:29.702202248')), minute(ctimestamp1), null) (type: int), if(((UDFToDouble(ctimestamp1) % 500.0D) > 100.0D), date_add(cdate, 1), date_add(cdate, 365)) (type: date), stimestamp1 (type: string)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
- Statistics: Num rows: 51 Data size: 12384 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 51 Data size: 12300 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: timestamp), _col10 (type: string), _col1 (type: timestamp)
sort order: +++
- Statistics: Num rows: 51 Data size: 12384 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 51 Data size: 12300 Basic stats: COMPLETE Column stats: NONE
value expressions: _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: int), _col8 (type: int), _col9 (type: date)
Map Vectorization:
enabled: true
@@ -153,10 +153,10 @@ STAGE PLANS:
Select Operator
expressions: KEY.reducesinkkey0 (type: timestamp), KEY.reducesinkkey2 (type: timestamp), VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string), VALUE._col3 (type: int), VALUE._col4 (type: string), VALUE._col5 (type: int), VALUE._col6 (type: int), VALUE._col7 (type: date)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
- Statistics: Num rows: 51 Data size: 12384 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 51 Data size: 12300 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 51 Data size: 12384 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 51 Data size: 12300 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -365,7 +365,7 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: timestamps
- Statistics: Num rows: 51 Data size: 12384 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 51 Data size: 12300 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
vectorizationSchemaColumns: [0:cdate:date, 1:ctimestamp1:timestamp, 2:stimestamp1:string, 3:ctimestamp2:timestamp, 4:ROW__ID:struct]
@@ -377,7 +377,7 @@ STAGE PLANS:
native: true
projectedOutputColumnNums: [1, 3, 10, 12, 13, 14, 11, 7, 16, 23, 2]
selectExpressions: IfExprStringScalarStringGroupColumn(col 5:boolean, val 1800s or Earliercol 9:string)(children: TimestampColLessEqualTimestampScalar(col 3:timestamp, val 1800-12-31 00:00:00.0) -> 5:boolean, IfExprStringScalarStringGroupColumn(col 6:boolean, val 1900scol 10:string)(children: TimestampColLessTimestampScalar(col 3:timestamp, val 1900-01-01 00:00:00.0) -> 6:boolean, IfExprStringScalarStringGroupColumn(col 7:boolean, val Late 2000scol 9:string)(children: VectorUDFAdaptor(ctimestamp2 BETWEEN TIMESTAMP'2006-01-01 00:00:00.0' AND TIMESTAMP'2010-12-31 23:59:59.999999999') -> 7:boolean, IfExprStringScalarStringScalar(col 8:boolean, val Early 2010s, val Unknown)(children: TimestampColLessEqualTimestampScalar(col 3:timestamp, val 2015-12-31 23:59:59.999999999) -> 8:boolean) -> 9:string) -> 10:string) -> 9:string) -> 10:string, IfExprStringScalarStringGroupColumn(col 5:boolean, val Oldcol 11:string)(children: TimestampColLessEqualTimestampScalar(col 3:timestamp, val 2000-12-31 23:59:59.999999999) -> 5:boolean, IfExprStringScalarStringGroupColumn(col 6:boolean, val Early 2000scol 12:string)(children: TimestampColLessTimestampScalar(col 3:timestamp, val 2006-01-01 00:00:00.0) -> 6:boolean, IfExprStringScalarStringGroupColumn(col 7:boolean, val Late 2000scol 11:string)(children: VectorUDFAdaptor(ctimestamp2 BETWEEN TIMESTAMP'2006-01-01 00:00:00.0' AND TIMESTAMP'2010-12-31 23:59:59.999999999') -> 7:boolean, IfExprColumnNull(col 8:boolean, col 9:string, null)(children: TimestampColLessEqualTimestampScalar(col 3:timestamp, val 2015-12-31 23:59:59.999999999) -> 8:boolean, ConstantVectorExpression(val Early 2010s) -> 9:string) -> 11:string) -> 12:string) -> 11:string) -> 12:string, IfExprStringScalarStringGroupColumn(col 5:boolean, val Oldcol 11:string)(children: TimestampColLessEqualTimestampScalar(col 3:timestamp, val 2000-12-31 23:59:59.999999999) -> 5:boolean, IfExprStringScalarStringGroupColumn(col 6:boolean, val Early 2000scol 13:string)(children: TimestampColLessTimestampScalar(col 3:timestamp, val 2006-01-01 00:00:00.0) -> 6:boolean, IfExprStringScalarStringGroupColumn(col 7:boolean, val Late 2000scol 11:string)(children: VectorUDFAdaptor(ctimestamp2 BETWEEN TIMESTAMP'2006-01-01 00:00:00.0' AND TIMESTAMP'2010-12-31 23:59:59.999999999') -> 7:boolean, IfExprNullNull(null, null) -> 11:string) -> 13:string) -> 11:string) -> 13:string, IfExprLongColumnLongColumn(col 5:boolean, col 6:int, col 7:int)(children: TimestampColLessTimestampScalar(col 1:timestamp, val 1974-10-04 17:21:03.989) -> 5:boolean, VectorUDFYearTimestamp(col 1:timestamp, field YEAR) -> 6:int, VectorUDFYearTimestamp(col 3:timestamp, field YEAR) -> 7:int) -> 14:int, VectorUDFAdaptor(CASE WHEN ((stimestamp1 like '%19%')) THEN (stimestamp1) ELSE (TIMESTAMP'2018-03-08 23:04:59.0') END)(children: SelectStringColLikeStringScalar(col 2:string) -> 5:boolean) -> 11:string, IfExprNullColumn(col 5:boolean, null, col 6)(children: TimestampColEqualTimestampScalar(col 1:timestamp, val 2021-09-24 03:18:32.413655165) -> 5:boolean, VectorUDFMinuteTimestamp(col 1:timestamp, field MINUTE) -> 6:int) -> 7:int, IfExprColumnNull(col 17:boolean, col 15:int, null)(children: ColAndCol(col 15:boolean, col 16:boolean)(children: TimestampColGreaterEqualTimestampScalar(col 3:timestamp, val 5344-10-04 18:40:08.165) -> 15:boolean, TimestampColLessTimestampScalar(col 3:timestamp, val 6631-11-13 16:31:29.702202248) -> 16:boolean) -> 17:boolean, VectorUDFMinuteTimestamp(col 1:timestamp, field MINUTE) -> 15:int) -> 16:int, IfExprLongColumnLongColumn(col 20:boolean, col 21:date, col 22:date)(children: DoubleColGreaterDoubleScalar(col 19:double, val 100.0)(children: DoubleColModuloDoubleScalar(col 18:double, val 500.0)(children: CastTimestampToDouble(col 1:timestamp) -> 18:double) -> 19:double) -> 20:boolean, VectorUDFDateAddColScalar(col 0:date, val 1) -> 21:date, VectorUDFDateAddColScalar(col 0:date, val 365) -> 22:date) -> 23:date
- Statistics: Num rows: 51 Data size: 12384 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 51 Data size: 12300 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: timestamp), _col10 (type: string), _col1 (type: timestamp)
sort order: +++
@@ -386,7 +386,7 @@ STAGE PLANS:
native: false
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
- Statistics: Num rows: 51 Data size: 12384 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 51 Data size: 12300 Basic stats: COMPLETE Column stats: NONE
value expressions: _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: int), _col8 (type: int), _col9 (type: date)
Execution mode: vectorized
Map Vectorization:
@@ -412,10 +412,10 @@ STAGE PLANS:
Select Operator
expressions: KEY.reducesinkkey0 (type: timestamp), KEY.reducesinkkey2 (type: timestamp), VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string), VALUE._col3 (type: int), VALUE._col4 (type: string), VALUE._col5 (type: int), VALUE._col6 (type: int), VALUE._col7 (type: date)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
- Statistics: Num rows: 51 Data size: 12384 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 51 Data size: 12300 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 51 Data size: 12384 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 51 Data size: 12300 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -624,7 +624,7 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: timestamps
- Statistics: Num rows: 51 Data size: 12384 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 51 Data size: 12300 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
vectorizationSchemaColumns: [0:cdate:date, 1:ctimestamp1:timestamp, 2:stimestamp1:string, 3:ctimestamp2:timestamp, 4:ROW__ID:struct]
@@ -636,7 +636,7 @@ STAGE PLANS:
native: true
projectedOutputColumnNums: [1, 3, 15, 26, 36, 40, 42, 44, 46, 53, 2]
selectExpressions: IfExprColumnCondExpr(col 5:boolean, col 6:stringcol 14:string)(children: TimestampColLessEqualTimestampScalar(col 3:timestamp, val 1800-12-31 00:00:00.0) -> 5:boolean, ConstantVectorExpression(val 1800s or Earlier) -> 6:string, IfExprColumnCondExpr(col 7:boolean, col 8:stringcol 13:string)(children: TimestampColLessTimestampScalar(col 3:timestamp, val 1900-01-01 00:00:00.0) -> 7:boolean, ConstantVectorExpression(val 1900s) -> 8:string, IfExprColumnCondExpr(col 9:boolean, col 10:stringcol 12:string)(children: VectorUDFAdaptor(ctimestamp2 BETWEEN TIMESTAMP'2006-01-01 00:00:00.0' AND TIMESTAMP'2010-12-31 23:59:59.999999999') -> 9:boolean, ConstantVectorExpression(val Late 2000s) -> 10:string, IfExprStringScalarStringScalar(col 11:boolean, val Early 2010s, val Unknown)(children: TimestampColLessEqualTimestampScalar(col 3:timestamp, val 2015-12-31 23:59:59.999999999) -> 11:boolean) -> 12:string) -> 13:string) -> 14:string) -> 15:string, IfExprColumnCondExpr(col 11:boolean, col 16:stringcol 25:string)(children: TimestampColLessEqualTimestampScalar(col 3:timestamp, val 2000-12-31 23:59:59.999999999) -> 11:boolean, ConstantVectorExpression(val Old) -> 16:string, IfExprColumnCondExpr(col 17:boolean, col 18:stringcol 24:string)(children: TimestampColLessTimestampScalar(col 3:timestamp, val 2006-01-01 00:00:00.0) -> 17:boolean, ConstantVectorExpression(val Early 2000s) -> 18:string, IfExprColumnCondExpr(col 19:boolean, col 20:stringcol 23:string)(children: VectorUDFAdaptor(ctimestamp2 BETWEEN TIMESTAMP'2006-01-01 00:00:00.0' AND TIMESTAMP'2010-12-31 23:59:59.999999999') -> 19:boolean, ConstantVectorExpression(val Late 2000s) -> 20:string, IfExprColumnNull(col 21:boolean, col 22:string, null)(children: TimestampColLessEqualTimestampScalar(col 3:timestamp, val 2015-12-31 23:59:59.999999999) -> 21:boolean, ConstantVectorExpression(val Early 2010s) -> 22:string) -> 23:string) -> 24:string) -> 25:string) -> 26:string, IfExprColumnCondExpr(col 27:boolean, col 28:stringcol 35:string)(children: TimestampColLessEqualTimestampScalar(col 3:timestamp, val 2000-12-31 23:59:59.999999999) -> 27:boolean, ConstantVectorExpression(val Old) -> 28:string, IfExprColumnCondExpr(col 29:boolean, col 30:stringcol 34:string)(children: TimestampColLessTimestampScalar(col 3:timestamp, val 2006-01-01 00:00:00.0) -> 29:boolean, ConstantVectorExpression(val Early 2000s) -> 30:string, IfExprColumnCondExpr(col 31:boolean, col 32:stringcol 33:string)(children: VectorUDFAdaptor(ctimestamp2 BETWEEN TIMESTAMP'2006-01-01 00:00:00.0' AND TIMESTAMP'2010-12-31 23:59:59.999999999') -> 31:boolean, ConstantVectorExpression(val Late 2000s) -> 32:string, IfExprNullNull(null, null) -> 33:string) -> 34:string) -> 35:string) -> 36:string, IfExprCondExprCondExpr(col 37:boolean, col 38:intcol 39:int)(children: TimestampColLessTimestampScalar(col 1:timestamp, val 1974-10-04 17:21:03.989) -> 37:boolean, VectorUDFYearTimestamp(col 1:timestamp, field YEAR) -> 38:int, VectorUDFYearTimestamp(col 3:timestamp, field YEAR) -> 39:int) -> 40:int, VectorUDFAdaptor(CASE WHEN ((stimestamp1 like '%19%')) THEN (stimestamp1) ELSE (TIMESTAMP'2018-03-08 23:04:59.0') END)(children: SelectStringColLikeStringScalar(col 2:string) -> 41:boolean) -> 42:string, IfExprNullCondExpr(col 41:boolean, null, col 43:int)(children: TimestampColEqualTimestampScalar(col 1:timestamp, val 2021-09-24 03:18:32.413655165) -> 41:boolean, VectorUDFMinuteTimestamp(col 1:timestamp, field MINUTE) -> 43:int) -> 44:int, IfExprCondExprNull(col 47:boolean, col 45:int, null)(children: ColAndCol(col 45:boolean, col 46:boolean)(children: TimestampColGreaterEqualTimestampScalar(col 3:timestamp, val 5344-10-04 18:40:08.165) -> 45:boolean, TimestampColLessTimestampScalar(col 3:timestamp, val 6631-11-13 16:31:29.702202248) -> 46:boolean) -> 47:boolean, VectorUDFMinuteTimestamp(col 1:timestamp, field MINUTE) -> 45:int) -> 46:int, IfExprCondExprCondExpr(col 50:boolean, col 51:datecol 52:date)(children: DoubleColGreaterDoubleScalar(col 49:double, val 100.0)(children: DoubleColModuloDoubleScalar(col 48:double, val 500.0)(children: CastTimestampToDouble(col 1:timestamp) -> 48:double) -> 49:double) -> 50:boolean, VectorUDFDateAddColScalar(col 0:date, val 1) -> 51:date, VectorUDFDateAddColScalar(col 0:date, val 365) -> 52:date) -> 53:date
- Statistics: Num rows: 51 Data size: 12384 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 51 Data size: 12300 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: timestamp), _col10 (type: string), _col1 (type: timestamp)
sort order: +++
@@ -645,7 +645,7 @@ STAGE PLANS:
native: false
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
- Statistics: Num rows: 51 Data size: 12384 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 51 Data size: 12300 Basic stats: COMPLETE Column stats: NONE
value expressions: _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: int), _col8 (type: int), _col9 (type: date)
Execution mode: vectorized
Map Vectorization:
@@ -671,10 +671,10 @@ STAGE PLANS:
Select Operator
expressions: KEY.reducesinkkey0 (type: timestamp), KEY.reducesinkkey2 (type: timestamp), VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string), VALUE._col3 (type: int), VALUE._col4 (type: string), VALUE._col5 (type: int), VALUE._col6 (type: int), VALUE._col7 (type: date)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
- Statistics: Num rows: 51 Data size: 12384 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 51 Data size: 12300 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 51 Data size: 12384 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 51 Data size: 12300 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
diff --git a/ql/src/test/results/clientpositive/vector_char_2.q.out b/ql/src/test/results/clientpositive/vector_char_2.q.out
index ae9910dff1..97038ee0c4 100644
--- a/ql/src/test/results/clientpositive/vector_char_2.q.out
+++ b/ql/src/test/results/clientpositive/vector_char_2.q.out
@@ -84,7 +84,7 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: char_2_n0
- Statistics: Num rows: 501 Data size: 99168 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 501 Data size: 99000 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
Select Operator
@@ -95,7 +95,7 @@ STAGE PLANS:
native: true
projectedOutputColumnNums: [1, 3]
selectExpressions: CastStringToLong(col 0:char(10)) -> 3:int
- Statistics: Num rows: 501 Data size: 99168 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 501 Data size: 99000 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: sum(_col1), count()
Group By Vectorization:
@@ -109,7 +109,7 @@ STAGE PLANS:
keys: _col0 (type: char(20))
mode: hash
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 501 Data size: 99168 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 501 Data size: 99000 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: char(20))
sort order: +
@@ -119,7 +119,7 @@ STAGE PLANS:
native: false
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
- Statistics: Num rows: 501 Data size: 99168 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 501 Data size: 99000 Basic stats: COMPLETE Column stats: NONE
TopN Hash Memory Usage: 0.1
value expressions: _col1 (type: bigint), _col2 (type: bigint)
Execution mode: vectorized
@@ -142,7 +142,7 @@ STAGE PLANS:
keys: KEY._col0 (type: char(20))
mode: mergepartial
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 250 Data size: 49485 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 250 Data size: 49401 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
table:
@@ -164,7 +164,7 @@ STAGE PLANS:
native: false
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
- Statistics: Num rows: 250 Data size: 49485 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 250 Data size: 49401 Basic stats: COMPLETE Column stats: NONE
TopN Hash Memory Usage: 0.1
value expressions: _col1 (type: bigint), _col2 (type: bigint)
Execution mode: vectorized
@@ -185,7 +185,7 @@ STAGE PLANS:
Select Operator
expressions: KEY.reducesinkkey0 (type: char(20)), VALUE._col0 (type: bigint), VALUE._col1 (type: bigint)
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 250 Data size: 49485 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 250 Data size: 49401 Basic stats: COMPLETE Column stats: NONE
Limit
Number of rows: 5
Statistics: Num rows: 5 Data size: 985 Basic stats: COMPLETE Column stats: NONE
@@ -272,7 +272,7 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: char_2_n0
- Statistics: Num rows: 501 Data size: 99168 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 501 Data size: 99000 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
Select Operator
@@ -283,7 +283,7 @@ STAGE PLANS:
native: true
projectedOutputColumnNums: [1, 3]
selectExpressions: CastStringToLong(col 0:char(10)) -> 3:int
- Statistics: Num rows: 501 Data size: 99168 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 501 Data size: 99000 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: sum(_col1), count()
Group By Vectorization:
@@ -297,7 +297,7 @@ STAGE PLANS:
keys: _col0 (type: char(20))
mode: hash
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 501 Data size: 99168 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 501 Data size: 99000 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: char(20))
sort order: -
@@ -307,7 +307,7 @@ STAGE PLANS:
native: false
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
- Statistics: Num rows: 501 Data size: 99168 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 501 Data size: 99000 Basic stats: COMPLETE Column stats: NONE
TopN Hash Memory Usage: 0.1
value expressions: _col1 (type: bigint), _col2 (type: bigint)
Execution mode: vectorized
@@ -330,7 +330,7 @@ STAGE PLANS:
keys: KEY._col0 (type: char(20))
mode: mergepartial
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 250 Data size: 49485 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 250 Data size: 49401 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
table:
@@ -352,7 +352,7 @@ STAGE PLANS:
native: false
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
- Statistics: Num rows: 250 Data size: 49485 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 250 Data size: 49401 Basic stats: COMPLETE Column stats: NONE
TopN Hash Memory Usage: 0.1
value expressions: _col1 (type: bigint), _col2 (type: bigint)
Execution mode: vectorized
@@ -373,7 +373,7 @@ STAGE PLANS:
Select Operator
expressions: KEY.reducesinkkey0 (type: char(20)), VALUE._col0 (type: bigint), VALUE._col1 (type: bigint)
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 250 Data size: 49485 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 250 Data size: 49401 Basic stats: COMPLETE Column stats: NONE
Limit
Number of rows: 5
Statistics: Num rows: 5 Data size: 985 Basic stats: COMPLETE Column stats: NONE
diff --git a/ql/src/test/results/clientpositive/vector_coalesce_2.q.out b/ql/src/test/results/clientpositive/vector_coalesce_2.q.out
index 48d38c316e..c42d295920 100644
--- a/ql/src/test/results/clientpositive/vector_coalesce_2.q.out
+++ b/ql/src/test/results/clientpositive/vector_coalesce_2.q.out
@@ -52,7 +52,7 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: str_str_orc
- Statistics: Num rows: 5 Data size: 678 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 5 Data size: 510 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
Select Operator
@@ -63,7 +63,7 @@ STAGE PLANS:
native: true
projectedOutputColumnNums: [1, 5]
selectExpressions: CastStringToLong(col 4:string)(children: VectorCoalesce(columns [0, 3])(children: col 0:string, ConstantVectorExpression(val 0) -> 3:string) -> 4:string) -> 5:int
- Statistics: Num rows: 5 Data size: 678 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 5 Data size: 510 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: sum(_col1)
Group By Vectorization:
@@ -77,7 +77,7 @@ STAGE PLANS:
keys: _col0 (type: string)
mode: hash
outputColumnNames: _col0, _col1
- Statistics: Num rows: 5 Data size: 678 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 5 Data size: 510 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string)
sort order: +
@@ -87,7 +87,7 @@ STAGE PLANS:
native: false
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
- Statistics: Num rows: 5 Data size: 678 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 5 Data size: 510 Basic stats: COMPLETE Column stats: NONE
value expressions: _col1 (type: bigint)
Execution mode: vectorized
Map Vectorization:
@@ -109,14 +109,14 @@ STAGE PLANS:
keys: KEY._col0 (type: string)
mode: mergepartial
outputColumnNames: _col0, _col1
- Statistics: Num rows: 2 Data size: 271 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 2 Data size: 204 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: _col0 (type: string), round((UDFToDouble(_col1) / 60.0D), 2) (type: double)
outputColumnNames: _col0, _col1
- Statistics: Num rows: 2 Data size: 271 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 2 Data size: 204 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 2 Data size: 271 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 2 Data size: 204 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -167,7 +167,7 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: str_str_orc
- Statistics: Num rows: 5 Data size: 678 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 5 Data size: 510 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
Select Operator
@@ -178,13 +178,13 @@ STAGE PLANS:
native: true
projectedOutputColumnNums: [4]
selectExpressions: VectorCoalesce(columns [0, 3])(children: col 0:string, ConstantVectorExpression(val 0) -> 3:string) -> 4:string
- Statistics: Num rows: 5 Data size: 678 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 5 Data size: 510 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
File Sink Vectorization:
className: VectorFileSinkOperator
native: false
- Statistics: Num rows: 5 Data size: 678 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 5 Data size: 510 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -247,7 +247,7 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: str_str_orc
- Statistics: Num rows: 5 Data size: 678 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 5 Data size: 510 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
Select Operator
@@ -258,7 +258,7 @@ STAGE PLANS:
native: true
projectedOutputColumnNums: [1, 5]
selectExpressions: CastStringToLong(col 4:string)(children: VectorCoalesce(columns [0, 3])(children: col 0:string, ConstantVectorExpression(val 0) -> 3:string) -> 4:string) -> 5:int
- Statistics: Num rows: 5 Data size: 678 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 5 Data size: 510 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: sum(_col1)
Group By Vectorization:
@@ -272,7 +272,7 @@ STAGE PLANS:
keys: _col0 (type: string)
mode: hash
outputColumnNames: _col0, _col1
- Statistics: Num rows: 5 Data size: 678 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 5 Data size: 510 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string)
sort order: +
@@ -282,7 +282,7 @@ STAGE PLANS:
native: false
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
- Statistics: Num rows: 5 Data size: 678 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 5 Data size: 510 Basic stats: COMPLETE Column stats: NONE
value expressions: _col1 (type: bigint)
Execution mode: vectorized
Map Vectorization:
@@ -304,14 +304,14 @@ STAGE PLANS:
keys: KEY._col0 (type: string)
mode: mergepartial
outputColumnNames: _col0, _col1
- Statistics: Num rows: 2 Data size: 271 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 2 Data size: 204 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: _col0 (type: string), round((UDFToDouble(_col1) / 60.0D), 2) (type: double)
outputColumnNames: _col0, _col1
- Statistics: Num rows: 2 Data size: 271 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 2 Data size: 204 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 2 Data size: 271 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 2 Data size: 204 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -362,7 +362,7 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: str_str_orc
- Statistics: Num rows: 5 Data size: 678 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 5 Data size: 510 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
Select Operator
@@ -373,13 +373,13 @@ STAGE PLANS:
native: true
projectedOutputColumnNums: [4]
selectExpressions: VectorCoalesce(columns [0, 3])(children: col 0:string, ConstantVectorExpression(val 0) -> 3:string) -> 4:string
- Statistics: Num rows: 5 Data size: 678 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 5 Data size: 510 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
File Sink Vectorization:
className: VectorFileSinkOperator
native: false
- Statistics: Num rows: 5 Data size: 678 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 5 Data size: 510 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
diff --git a/ql/src/test/results/clientpositive/vector_data_types.q.out b/ql/src/test/results/clientpositive/vector_data_types.q.out
index c2a2fce075..b72340da5f 100644
--- a/ql/src/test/results/clientpositive/vector_data_types.q.out
+++ b/ql/src/test/results/clientpositive/vector_data_types.q.out
@@ -128,22 +128,22 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: over1korc_n1
- Statistics: Num rows: 1050 Data size: 311254 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1050 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: t (type: tinyint), si (type: smallint), i (type: int), b (type: bigint), f (type: float), d (type: double), bo (type: boolean), s (type: string), ts (type: timestamp), dec (type: decimal(4,2)), bin (type: binary)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
- Statistics: Num rows: 1050 Data size: 311254 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1050 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: int)
sort order: +++
- Statistics: Num rows: 1050 Data size: 311254 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1050 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
TopN Hash Memory Usage: 0.1
value expressions: _col3 (type: bigint), _col4 (type: float), _col5 (type: double), _col6 (type: boolean), _col7 (type: string), _col8 (type: timestamp), _col9 (type: decimal(4,2)), _col10 (type: binary)
Reduce Operator Tree:
Select Operator
expressions: KEY.reducesinkkey0 (type: tinyint), KEY.reducesinkkey1 (type: smallint), KEY.reducesinkkey2 (type: int), VALUE._col0 (type: bigint), VALUE._col1 (type: float), VALUE._col2 (type: double), VALUE._col3 (type: boolean), VALUE._col4 (type: string), VALUE._col5 (type: timestamp), VALUE._col6 (type: decimal(4,2)), VALUE._col7 (type: binary)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
rows: 1050 Data size: 311254 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1050 Data size: 311170 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 20 Statistics: Num rows: 20 Data size: 5920 Basic stats: COMPLETE Column stats: NONE @@ -218,7 +218,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: over1korc_n1 - Statistics: Num rows: 1050 Data size: 311254 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1050 Data size: 311170 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true Select Operator @@ -228,7 +228,7 @@ STAGE PLANS: className: VectorSelectOperator native: true projectedOutputColumnNums: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] - Statistics: Num rows: 1050 Data size: 311254 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1050 Data size: 311170 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: int) sort order: +++ @@ -237,7 +237,7 @@ STAGE PLANS: native: false nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false - Statistics: Num rows: 1050 Data size: 311254 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1050 Data size: 311170 Basic stats: COMPLETE Column stats: NONE TopN Hash Memory Usage: 0.1 value expressions: _col3 (type: bigint), _col4 (type: float), _col5 (type: double), _col6 (type: boolean), _col7 (type: string), _col8 (type: timestamp), _col9 (type: decimal(4,2)), _col10 (type: binary) Execution mode: vectorized @@ -258,7 +258,7 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: tinyint), KEY.reducesinkkey1 (type: smallint), KEY.reducesinkkey2 (type: int), VALUE._col0 (type: bigint), VALUE._col1 (type: float), VALUE._col2 (type: double), VALUE._col3 (type: boolean), VALUE._col4 (type: string), VALUE._col5 (type: timestamp), VALUE._col6 (type: decimal(4,2)), VALUE._col7 (type: binary) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10 - Statistics: Num rows: 1050 Data size: 311254 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1050 Data size: 311170 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 20 Statistics: Num rows: 20 Data size: 5920 Basic stats: COMPLETE Column stats: NONE @@ -326,7 +326,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: over1korc_n1 - Statistics: Num rows: 1050 Data size: 311254 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1050 Data size: 311170 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true Select Operator @@ -337,7 +337,7 @@ STAGE PLANS: native: true projectedOutputColumnNums: [12] selectExpressions: VectorUDFAdaptor(hash(t,si,i,b,f,d,bo,s,ts,dec,bin)) -> 12:int - Statistics: Num rows: 1050 Data size: 311254 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1050 Data size: 311170 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: sum(_col0) Group By Vectorization: diff --git a/ql/src/test/results/clientpositive/vector_interval_1.q.out b/ql/src/test/results/clientpositive/vector_interval_1.q.out index 8c0086e300..70b7c6679e 100644 --- a/ql/src/test/results/clientpositive/vector_interval_1.q.out +++ 
b/ql/src/test/results/clientpositive/vector_interval_1.q.out @@ -80,7 +80,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: vector_interval_1 - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true Select Operator @@ -91,7 +91,7 @@ STAGE PLANS: native: true projectedOutputColumnNums: [2, 5, 6] selectExpressions: CastStringToIntervalYearMonth(col 2:string) -> 5:interval_year_month, CastStringToIntervalDayTime(col 3:string) -> 6:interval_day_time - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + @@ -100,7 +100,7 @@ STAGE PLANS: native: false nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: interval_year_month), _col2 (type: interval_day_time) Execution mode: vectorized Map Vectorization: @@ -120,10 +120,10 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: string), INTERVAL'1-2' (type: interval_year_month), VALUE._col0 (type: interval_year_month), INTERVAL'1 02:03:04.000000000' (type: interval_day_time), VALUE._col1 (type: interval_day_time) outputColumnNames: _col0, _col1, _col2, _col3, _col4 - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -191,7 +191,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: vector_interval_1 - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true Select Operator @@ -202,7 +202,7 @@ STAGE PLANS: native: true projectedOutputColumnNums: [1, 7, 6, 9, 8] selectExpressions: IntervalYearMonthColAddIntervalYearMonthColumn(col 5:interval_year_month, col 6:interval_year_month)(children: CastStringToIntervalYearMonth(col 2:string) -> 5:interval_year_month, CastStringToIntervalYearMonth(col 2:string) -> 6:interval_year_month) -> 7:interval_year_month, IntervalYearMonthScalarAddIntervalYearMonthColumn(val 14, col 5:interval_year_month)(children: CastStringToIntervalYearMonth(col 2:string) -> 5:interval_year_month) -> 6:interval_year_month, IntervalYearMonthColSubtractIntervalYearMonthColumn(col 5:interval_year_month, col 8:interval_year_month)(children: CastStringToIntervalYearMonth(col 2:string) -> 5:interval_year_month, CastStringToIntervalYearMonth(col 2:string) -> 8:interval_year_month) -> 9:interval_year_month, IntervalYearMonthScalarSubtractIntervalYearMonthColumn(val 14, col 
5:interval_year_month)(children: CastStringToIntervalYearMonth(col 2:string) -> 5:interval_year_month) -> 8:interval_year_month - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: date) sort order: + @@ -211,7 +211,7 @@ STAGE PLANS: native: false nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: interval_year_month), _col2 (type: interval_year_month), _col3 (type: interval_year_month), _col4 (type: interval_year_month) Execution mode: vectorized Map Vectorization: @@ -231,10 +231,10 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: date), INTERVAL'2-4' (type: interval_year_month), VALUE._col0 (type: interval_year_month), VALUE._col1 (type: interval_year_month), INTERVAL'0-0' (type: interval_year_month), VALUE._col2 (type: interval_year_month), VALUE._col3 (type: interval_year_month) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -310,7 +310,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: vector_interval_1 - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true Select Operator @@ -321,7 +321,7 @@ STAGE PLANS: native: true projectedOutputColumnNums: [1, 7, 6, 9, 8] selectExpressions: IntervalDayTimeColAddIntervalDayTimeColumn(col 5:interval_day_time, col 6:interval_day_time)(children: CastStringToIntervalDayTime(col 3:string) -> 5:interval_day_time, CastStringToIntervalDayTime(col 3:string) -> 6:interval_day_time) -> 7:interval_day_time, IntervalDayTimeScalarAddIntervalDayTimeColumn(val 1 02:03:04.000000000, col 5:interval_day_time)(children: CastStringToIntervalDayTime(col 3:string) -> 5:interval_day_time) -> 6:interval_day_time, IntervalDayTimeColSubtractIntervalDayTimeColumn(col 5:interval_day_time, col 8:interval_day_time)(children: CastStringToIntervalDayTime(col 3:string) -> 5:interval_day_time, CastStringToIntervalDayTime(col 3:string) -> 8:interval_day_time) -> 9:interval_day_time, IntervalDayTimeScalarSubtractIntervalDayTimeColumn(val 1 02:03:04.000000000, col 5:interval_day_time)(children: CastStringToIntervalDayTime(col 3:string) -> 5:interval_day_time) -> 8:interval_day_time - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: date) sort order: + @@ 
-330,7 +330,7 @@ STAGE PLANS: native: false nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: interval_day_time), _col2 (type: interval_day_time), _col3 (type: interval_day_time), _col4 (type: interval_day_time) Execution mode: vectorized Map Vectorization: @@ -350,10 +350,10 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: date), INTERVAL'2 04:06:08.000000000' (type: interval_day_time), VALUE._col0 (type: interval_day_time), VALUE._col1 (type: interval_day_time), INTERVAL'0 00:00:00.000000000' (type: interval_day_time), VALUE._col2 (type: interval_day_time), VALUE._col3 (type: interval_day_time) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -441,7 +441,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: vector_interval_1 - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true Select Operator @@ -452,7 +452,7 @@ STAGE PLANS: native: true projectedOutputColumnNums: [1, 5, 7, 6, 9, 8, 11, 12, 14, 15, 16, 17, 18] selectExpressions: DateColAddIntervalYearMonthScalar(col 1:date, val 1-2) -> 5:date, DateColAddIntervalYearMonthColumn(col 1:date, col 6:interval_year_month)(children: CastStringToIntervalYearMonth(col 2:string) -> 6:interval_year_month) -> 7:date, IntervalYearMonthScalarAddDateColumn(val 1-2, col 1:interval_year_month) -> 6:date, IntervalYearMonthColAddDateColumn(col 8:interval_year_month, col 1:date)(children: CastStringToIntervalYearMonth(col 2:string) -> 8:interval_year_month) -> 9:date, DateColSubtractIntervalYearMonthScalar(col 1:date, val 1-2) -> 8:date, DateColSubtractIntervalYearMonthColumn(col 1:date, col 10:interval_year_month)(children: CastStringToIntervalYearMonth(col 2:string) -> 10:interval_year_month) -> 11:date, DateColAddIntervalDayTimeScalar(col 1:date, val 1 02:03:04.000000000) -> 12:timestamp, DateColAddIntervalDayTimeColumn(col 1:date, col 13:interval_day_time)(children: CastStringToIntervalDayTime(col 3:string) -> 13:interval_day_time) -> 14:timestamp, IntervalDayTimeScalarAddDateColumn(val 1 02:03:04.000000000, col 1:date) -> 15:timestamp, IntervalDayTimeColAddDateColumn(col 13:interval_day_time, col 1:date)(children: CastStringToIntervalDayTime(col 3:string) -> 13:interval_day_time) -> 16:timestamp, DateColSubtractIntervalDayTimeScalar(col 1:date, val 1 02:03:04.000000000) -> 17:timestamp, DateColSubtractIntervalDayTimeColumn(col 1:date, col 13:interval_day_time)(children: CastStringToIntervalDayTime(col 3:string) -> 13:interval_day_time) -> 18:timestamp 
- Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: date) sort order: + @@ -461,7 +461,7 @@ STAGE PLANS: native: false nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: date), _col2 (type: date), _col3 (type: date), _col4 (type: date), _col5 (type: date), _col6 (type: date), _col7 (type: timestamp), _col8 (type: timestamp), _col9 (type: timestamp), _col10 (type: timestamp), _col11 (type: timestamp), _col12 (type: timestamp) Execution mode: vectorized Map Vectorization: @@ -481,10 +481,10 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: date), VALUE._col0 (type: date), VALUE._col1 (type: date), VALUE._col2 (type: date), VALUE._col3 (type: date), VALUE._col4 (type: date), VALUE._col5 (type: date), VALUE._col6 (type: timestamp), VALUE._col7 (type: timestamp), VALUE._col8 (type: timestamp), VALUE._col9 (type: timestamp), VALUE._col10 (type: timestamp), VALUE._col11 (type: timestamp) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12 - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -584,7 +584,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: vector_interval_1 - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true Select Operator @@ -595,7 +595,7 @@ STAGE PLANS: native: true projectedOutputColumnNums: [0, 5, 7, 8, 9, 10, 11, 12, 14, 15, 16, 17, 18] selectExpressions: TimestampColAddIntervalYearMonthScalar(col 0:timestamp, val 1-2) -> 5:timestamp, TimestampColAddIntervalYearMonthColumn(col 0:timestamp, col 6:interval_year_month)(children: CastStringToIntervalYearMonth(col 2:string) -> 6:interval_year_month) -> 7:timestamp, IntervalYearMonthScalarAddTimestampColumn(val 1-2, col 0:interval_year_month) -> 8:timestamp, IntervalYearMonthColAddTimestampColumn(col 6:interval_year_month, col 0:timestamp)(children: CastStringToIntervalYearMonth(col 2:string) -> 6:interval_year_month) -> 9:timestamp, TimestampColSubtractIntervalYearMonthScalar(col 0:timestamp, val 1-2) -> 10:timestamp, TimestampColSubtractIntervalYearMonthColumn(col 0:timestamp, col 6:interval_year_month)(children: CastStringToIntervalYearMonth(col 2:string) -> 6:interval_year_month) -> 11:timestamp, TimestampColAddIntervalDayTimeScalar(col 0:timestamp, val 1 02:03:04.000000000) -> 12:timestamp, TimestampColAddIntervalDayTimeColumn(col 0:timestamp, col 
13:interval_day_time)(children: CastStringToIntervalDayTime(col 3:string) -> 13:interval_day_time) -> 14:timestamp, IntervalDayTimeScalarAddTimestampColumn(val 1 02:03:04.000000000, col 0:timestamp) -> 15:timestamp, IntervalDayTimeColAddTimestampColumn(col 13:interval_day_time, col 0:timestamp)(children: CastStringToIntervalDayTime(col 3:string) -> 13:interval_day_time) -> 16:timestamp, TimestampColSubtractIntervalDayTimeScalar(col 0:timestamp, val 1 02:03:04.000000000) -> 17:timestamp, TimestampColSubtractIntervalDayTimeColumn(col 0:timestamp, col 13:interval_day_time)(children: CastStringToIntervalDayTime(col 3:string) -> 13:interval_day_time) -> 18:timestamp - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: timestamp) sort order: + @@ -604,7 +604,7 @@ STAGE PLANS: native: false nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: timestamp), _col2 (type: timestamp), _col3 (type: timestamp), _col4 (type: timestamp), _col5 (type: timestamp), _col6 (type: timestamp), _col7 (type: timestamp), _col8 (type: timestamp), _col9 (type: timestamp), _col10 (type: timestamp), _col11 (type: timestamp), _col12 (type: timestamp) Execution mode: vectorized Map Vectorization: @@ -624,10 +624,10 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: timestamp), VALUE._col0 (type: timestamp), VALUE._col1 (type: timestamp), VALUE._col2 (type: timestamp), VALUE._col3 (type: timestamp), VALUE._col4 (type: timestamp), VALUE._col5 (type: timestamp), VALUE._col6 (type: timestamp), VALUE._col7 (type: timestamp), VALUE._col8 (type: timestamp), VALUE._col9 (type: timestamp), VALUE._col10 (type: timestamp), VALUE._col11 (type: timestamp) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12 - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -709,7 +709,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: vector_interval_1 - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true Select Operator @@ -720,7 +720,7 @@ STAGE PLANS: native: true projectedOutputColumnNums: [0, 5, 6, 7] selectExpressions: TimestampColSubtractTimestampColumn(col 0:timestamp, col 0:timestamp) -> 5:interval_day_time, TimestampScalarSubtractTimestampColumn(val 2001-01-01 01:02:03.0, col 0:timestamp) -> 6:interval_day_time, TimestampColSubtractTimestampScalar(col 0:timestamp, val 2001-01-01 01:02:03.0) 
-> 7:interval_day_time - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: timestamp) sort order: + @@ -729,7 +729,7 @@ STAGE PLANS: native: false nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: interval_day_time), _col2 (type: interval_day_time), _col3 (type: interval_day_time) Execution mode: vectorized Map Vectorization: @@ -749,10 +749,10 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: timestamp), VALUE._col0 (type: interval_day_time), VALUE._col1 (type: interval_day_time), VALUE._col2 (type: interval_day_time) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -816,7 +816,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: vector_interval_1 - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true Select Operator @@ -827,7 +827,7 @@ STAGE PLANS: native: true projectedOutputColumnNums: [1, 5, 6, 7] selectExpressions: DateColSubtractDateColumn(col 1:date, col 1:date) -> 5:interval_day_time, DateScalarSubtractDateColumn(val 2001-01-01 00:00:00.0, col 1:date) -> 6:interval_day_time, DateColSubtractDateScalar(col 1:date, val 2001-01-01 00:00:00.0) -> 7:interval_day_time - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: date) sort order: + @@ -836,7 +836,7 @@ STAGE PLANS: native: false nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: interval_day_time), _col2 (type: interval_day_time), _col3 (type: interval_day_time) Execution mode: vectorized Map Vectorization: @@ -856,10 +856,10 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: date), VALUE._col0 (type: interval_day_time), VALUE._col1 (type: interval_day_time), VALUE._col2 (type: interval_day_time) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 2 Data size: 442 Basic stats: 
COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -929,7 +929,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: vector_interval_1 - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true Select Operator @@ -940,7 +940,7 @@ STAGE PLANS: native: true projectedOutputColumnNums: [1, 5, 6, 7, 8, 9, 10] selectExpressions: TimestampColSubtractDateColumn(col 0:timestamp, col 1:date) -> 5:interval_day_time, TimestampScalarSubtractDateColumn(val 2001-01-01 01:02:03.0, col 1:date) -> 6:interval_day_time, TimestampColSubtractDateScalar(col 0:timestamp, val 2001-01-01 00:00:00.0) -> 7:interval_day_time, DateColSubtractTimestampColumn(col 1:date, col 0:timestamp) -> 8:interval_day_time, DateColSubtractTimestampScalar(col 1:date, val 2001-01-01 01:02:03.0) -> 9:interval_day_time, DateScalarSubtractTimestampColumn(val 2001-01-01 00:00:00.0, col 0:timestamp) -> 10:interval_day_time - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: date) sort order: + @@ -949,7 +949,7 @@ STAGE PLANS: native: false nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: interval_day_time), _col2 (type: interval_day_time), _col3 (type: interval_day_time), _col4 (type: interval_day_time), _col5 (type: interval_day_time), _col6 (type: interval_day_time) Execution mode: vectorized Map Vectorization: @@ -969,10 +969,10 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: date), VALUE._col0 (type: interval_day_time), VALUE._col1 (type: interval_day_time), VALUE._col2 (type: interval_day_time), VALUE._col3 (type: interval_day_time), VALUE._col4 (type: interval_day_time), VALUE._col5 (type: interval_day_time) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat diff --git a/ql/src/test/results/clientpositive/vectorized_timestamp_funcs.q.out b/ql/src/test/results/clientpositive/vectorized_timestamp_funcs.q.out index 01e915b8f1..9f90e8262e 100644 --- 
a/ql/src/test/results/clientpositive/vectorized_timestamp_funcs.q.out +++ b/ql/src/test/results/clientpositive/vectorized_timestamp_funcs.q.out @@ -253,7 +253,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: alltypesorc_string - Statistics: Num rows: 52 Data size: 3515 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3179 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true Select Operator @@ -264,7 +264,7 @@ STAGE PLANS: native: true projectedOutputColumnNums: [5, 6, 7, 8, 9, 10, 11, 12, 13, 0, 1, 3, 14, 15, 16, 17, 18] selectExpressions: VectorUDFUnixTimeStampTimestamp(col 1:timestamp) -> 5:bigint, VectorUDFYearTimestamp(col 1:timestamp, field YEAR) -> 6:int, VectorUDFMonthTimestamp(col 1:timestamp, field MONTH) -> 7:int, VectorUDFDayOfMonthTimestamp(col 1:timestamp, field DAY_OF_MONTH) -> 8:int, VectorUDFDayOfMonthTimestamp(col 1:timestamp, field DAY_OF_MONTH) -> 9:int, VectorUDFWeekOfYearTimestamp(col 1:timestamp, field WEEK_OF_YEAR) -> 10:int, VectorUDFHourTimestamp(col 1:timestamp, field HOUR_OF_DAY) -> 11:int, VectorUDFMinuteTimestamp(col 1:timestamp, field MINUTE) -> 12:int, VectorUDFSecondTimestamp(col 1:timestamp, field SECOND) -> 13:int, IfExprTimestampColumnScalar(col 0:boolean, col 1:timestamp, val 1319-02-02 16:31:57.778) -> 14:timestamp, IfExprTimestampScalarColumn(col 0:boolean, val 2000-12-18 08:42:30.0005, col 1:timestamp) -> 15:timestamp, IfExprTimestampColumnColumn(col 0:boolean, col 1:timestampcol 3:timestamp) -> 16:timestamp, IfExprColumnNull(col 0:boolean, col 1:timestamp, null)(children: col 0:boolean, col 1:timestamp) -> 17:timestamp, IfExprNullColumn(col 0:boolean, null, col 3)(children: col 0:boolean, col 3:timestamp) -> 18:timestamp - Statistics: Num rows: 52 Data size: 3515 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3179 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: bigint) sort order: + @@ -273,7 +273,7 @@ STAGE PLANS: native: false nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false - Statistics: Num rows: 52 Data size: 3515 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3179 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: int), _col2 (type: int), _col3 (type: int), _col4 (type: int), _col5 (type: int), _col6 (type: int), _col7 (type: int), _col8 (type: int), _col9 (type: boolean), _col10 (type: timestamp), _col11 (type: timestamp), _col12 (type: timestamp), _col13 (type: timestamp), _col14 (type: timestamp), _col15 (type: timestamp), _col16 (type: timestamp) Execution mode: vectorized Map Vectorization: @@ -293,10 +293,10 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: bigint), VALUE._col0 (type: int), VALUE._col1 (type: int), VALUE._col2 (type: int), VALUE._col3 (type: int), VALUE._col4 (type: int), VALUE._col5 (type: int), VALUE._col6 (type: int), VALUE._col7 (type: int), VALUE._col8 (type: boolean), VALUE._col9 (type: timestamp), VALUE._col10 (type: timestamp), VALUE._col11 (type: timestamp), VALUE._col12 (type: timestamp), VALUE._col13 (type: timestamp), VALUE._col14 (type: timestamp), VALUE._col15 (type: timestamp) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, 
_col11, _col12, _col13, _col14, _col15, _col16 - Statistics: Num rows: 52 Data size: 3515 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3179 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 52 Data size: 3515 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3179 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -446,7 +446,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: alltypesorc_string - Statistics: Num rows: 52 Data size: 3515 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3179 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true Select Operator @@ -457,7 +457,7 @@ STAGE PLANS: native: true projectedOutputColumnNums: [5, 6, 7, 8, 9, 10, 11, 12, 13] selectExpressions: VectorUDFUnixTimeStampString(col 2:string) -> 5:bigint, VectorUDFYearString(col 2:string, fieldStart 0, fieldLength 4) -> 6:int, VectorUDFMonthString(col 2:string, fieldStart 5, fieldLength 2) -> 7:int, VectorUDFDayOfMonthString(col 2:string, fieldStart 8, fieldLength 2) -> 8:int, VectorUDFDayOfMonthString(col 2:string, fieldStart 8, fieldLength 2) -> 9:int, VectorUDFWeekOfYearString(col 2:string) -> 10:int, VectorUDFHourString(col 2:string, fieldStart 11, fieldLength 2) -> 11:int, VectorUDFMinuteString(col 2:string, fieldStart 14, fieldLength 2) -> 12:int, VectorUDFSecondString(col 2:string, fieldStart 17, fieldLength 2) -> 13:int - Statistics: Num rows: 52 Data size: 3515 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3179 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: bigint) sort order: + @@ -466,7 +466,7 @@ STAGE PLANS: native: false nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false - Statistics: Num rows: 52 Data size: 3515 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3179 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: int), _col2 (type: int), _col3 (type: int), _col4 (type: int), _col5 (type: int), _col6 (type: int), _col7 (type: int), _col8 (type: int) Execution mode: vectorized Map Vectorization: @@ -486,10 +486,10 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: bigint), VALUE._col0 (type: int), VALUE._col1 (type: int), VALUE._col2 (type: int), VALUE._col3 (type: int), VALUE._col4 (type: int), VALUE._col5 (type: int), VALUE._col6 (type: int), VALUE._col7 (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 - Statistics: Num rows: 52 Data size: 3515 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3179 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 52 Data size: 3515 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3179 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -623,7 +623,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: 
alltypesorc_string - Statistics: Num rows: 52 Data size: 3515 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3179 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true Select Operator @@ -634,7 +634,7 @@ STAGE PLANS: native: true projectedOutputColumnNums: [7, 8, 9, 10, 11, 12, 13, 14, 15] selectExpressions: LongColEqualLongColumn(col 5:bigint, col 6:bigint)(children: VectorUDFUnixTimeStampTimestamp(col 1:timestamp) -> 5:bigint, VectorUDFUnixTimeStampString(col 2:string) -> 6:bigint) -> 7:boolean, LongColEqualLongColumn(col 5:int, col 6:int)(children: VectorUDFYearTimestamp(col 1:timestamp, field YEAR) -> 5:int, VectorUDFYearString(col 2:string, fieldStart 0, fieldLength 4) -> 6:int) -> 8:boolean, LongColEqualLongColumn(col 5:int, col 6:int)(children: VectorUDFMonthTimestamp(col 1:timestamp, field MONTH) -> 5:int, VectorUDFMonthString(col 2:string, fieldStart 5, fieldLength 2) -> 6:int) -> 9:boolean, LongColEqualLongColumn(col 5:int, col 6:int)(children: VectorUDFDayOfMonthTimestamp(col 1:timestamp, field DAY_OF_MONTH) -> 5:int, VectorUDFDayOfMonthString(col 2:string, fieldStart 8, fieldLength 2) -> 6:int) -> 10:boolean, LongColEqualLongColumn(col 5:int, col 6:int)(children: VectorUDFDayOfMonthTimestamp(col 1:timestamp, field DAY_OF_MONTH) -> 5:int, VectorUDFDayOfMonthString(col 2:string, fieldStart 8, fieldLength 2) -> 6:int) -> 11:boolean, LongColEqualLongColumn(col 5:int, col 6:int)(children: VectorUDFWeekOfYearTimestamp(col 1:timestamp, field WEEK_OF_YEAR) -> 5:int, VectorUDFWeekOfYearString(col 2:string) -> 6:int) -> 12:boolean, LongColEqualLongColumn(col 5:int, col 6:int)(children: VectorUDFHourTimestamp(col 1:timestamp, field HOUR_OF_DAY) -> 5:int, VectorUDFHourString(col 2:string, fieldStart 11, fieldLength 2) -> 6:int) -> 13:boolean, LongColEqualLongColumn(col 5:int, col 6:int)(children: VectorUDFMinuteTimestamp(col 1:timestamp, field MINUTE) -> 5:int, VectorUDFMinuteString(col 2:string, fieldStart 14, fieldLength 2) -> 6:int) -> 14:boolean, LongColEqualLongColumn(col 5:int, col 6:int)(children: VectorUDFSecondTimestamp(col 1:timestamp, field SECOND) -> 5:int, VectorUDFSecondString(col 2:string, fieldStart 17, fieldLength 2) -> 6:int) -> 15:boolean - Statistics: Num rows: 52 Data size: 3515 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3179 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: boolean) sort order: + @@ -643,7 +643,7 @@ STAGE PLANS: native: false nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false - Statistics: Num rows: 52 Data size: 3515 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3179 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: boolean), _col2 (type: boolean), _col3 (type: boolean), _col4 (type: boolean), _col5 (type: boolean), _col6 (type: boolean), _col7 (type: boolean), _col8 (type: boolean) Execution mode: vectorized Map Vectorization: @@ -663,10 +663,10 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: boolean), VALUE._col0 (type: boolean), VALUE._col1 (type: boolean), VALUE._col2 (type: boolean), VALUE._col3 (type: boolean), VALUE._col4 (type: boolean), VALUE._col5 (type: boolean), VALUE._col6 (type: boolean), VALUE._col7 
(type: boolean) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 - Statistics: Num rows: 52 Data size: 3515 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3179 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 52 Data size: 3515 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3179 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -916,7 +916,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: alltypesorc_string - Statistics: Num rows: 52 Data size: 3515 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3179 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true Select Operator @@ -926,7 +926,7 @@ STAGE PLANS: className: VectorSelectOperator native: true projectedOutputColumnNums: [1] - Statistics: Num rows: 52 Data size: 3515 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3179 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: min(ctimestamp1), max(ctimestamp1), count(ctimestamp1), count() Group By Vectorization: @@ -1023,7 +1023,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: alltypesorc_string - Statistics: Num rows: 52 Data size: 3515 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3179 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true Select Operator @@ -1033,7 +1033,7 @@ STAGE PLANS: className: VectorSelectOperator native: true projectedOutputColumnNums: [1] - Statistics: Num rows: 52 Data size: 3515 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3179 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: sum(ctimestamp1) Group By Vectorization: @@ -1142,7 +1142,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: alltypesorc_string - Statistics: Num rows: 52 Data size: 3515 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3179 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true Select Operator @@ -1153,7 +1153,7 @@ STAGE PLANS: native: true projectedOutputColumnNums: [1, 5, 8] selectExpressions: CastTimestampToDouble(col 1:timestamp) -> 5:double, DoubleColMultiplyDoubleColumn(col 6:double, col 7:double)(children: CastTimestampToDouble(col 1:timestamp) -> 6:double, CastTimestampToDouble(col 1:timestamp) -> 7:double) -> 8:double - Statistics: Num rows: 52 Data size: 3515 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3179 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: sum(_col0), count(_col0), sum(_col2), sum(_col1) Group By Vectorization: diff --git a/standalone-metastore/pom.xml b/standalone-metastore/pom.xml index 467e17ac18..7e4fefb31f 100644 --- a/standalone-metastore/pom.xml +++ b/standalone-metastore/pom.xml @@ -80,7 +80,7 @@ 0.9.3 2.8.2 1.10.19 - 1.4.3 + 1.5.0 2.5.0 1.3.0 2.7.0-SNAPSHOT diff --git a/streaming/src/test/org/apache/hive/streaming/TestStreaming.java b/streaming/src/test/org/apache/hive/streaming/TestStreaming.java index 042fdbe0e0..1f05d887ac 100644 --- a/streaming/src/test/org/apache/hive/streaming/TestStreaming.java +++ b/streaming/src/test/org/apache/hive/streaming/TestStreaming.java @@ -2077,15 +2077,15 @@ public void 
testFileDumpDeltaFilesWithStreamingOptimizations() throws Exception Assert.assertEquals(true, outDump.contains("Compression: NONE")); // no stats/indexes Assert.assertEquals(true, outDump.contains("Column 0: count: 0 hasNull: false")); - Assert.assertEquals(true, outDump.contains("Column 1: count: 0 hasNull: false sum: 0")); - Assert.assertEquals(true, outDump.contains("Column 2: count: 0 hasNull: false sum: 0")); - Assert.assertEquals(true, outDump.contains("Column 3: count: 0 hasNull: false sum: 0")); - Assert.assertEquals(true, outDump.contains("Column 4: count: 0 hasNull: false sum: 0")); - Assert.assertEquals(true, outDump.contains("Column 5: count: 0 hasNull: false sum: 0")); + Assert.assertEquals(true, outDump.contains("Column 1: count: 0 hasNull: false bytesOnDisk: 12 sum: 0")); + Assert.assertEquals(true, outDump.contains("Column 2: count: 0 hasNull: false bytesOnDisk: 12 sum: 0")); + Assert.assertEquals(true, outDump.contains("Column 3: count: 0 hasNull: false bytesOnDisk: 24 sum: 0")); + Assert.assertEquals(true, outDump.contains("Column 4: count: 0 hasNull: false bytesOnDisk: 14 sum: 0")); + Assert.assertEquals(true, outDump.contains("Column 5: count: 0 hasNull: false bytesOnDisk: 12 sum: 0")); Assert.assertEquals(true, outDump.contains("Column 6: count: 0 hasNull: false")); - Assert.assertEquals(true, outDump.contains("Column 7: count: 0 hasNull: false")); - Assert.assertEquals(true, outDump.contains("Column 8: count: 0 hasNull: false sum: 0")); - Assert.assertEquals(true, outDump.contains("Column 9: count: 0 hasNull: false")); + Assert.assertEquals(true, outDump.contains("Column 7: count: 0 hasNull: false bytesOnDisk: 11864")); + Assert.assertEquals(true, outDump.contains("Column 8: count: 0 hasNull: false bytesOnDisk: 2033 sum: 0")); + Assert.assertEquals(true, outDump.contains("Column 9: count: 0 hasNull: false bytesOnDisk: 13629")); // no dictionary Assert.assertEquals(true, outDump.contains("Encoding column 7: DIRECT_V2")); Assert.assertEquals(true, outDump.contains("Encoding column 9: DIRECT_V2"));