diff --git common/src/java/org/apache/hadoop/hive/common/HiveStatsUtils.java common/src/java/org/apache/hadoop/hive/common/HiveStatsUtils.java index 7111dd8..e671edc 100644 --- common/src/java/org/apache/hadoop/hive/common/HiveStatsUtils.java +++ common/src/java/org/apache/hadoop/hive/common/HiveStatsUtils.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hive.common; import java.io.IOException; +import java.util.ArrayList; +import java.util.List; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; @@ -46,6 +48,14 @@ public static FileStatus[] getFileStatusRecurse(Path path, int level, FileSystem fs) throws IOException { + // if level is < 0, then return all files/directories under the specified path + if (level < 0) { + FileStatus fileStatus = fs.getFileStatus(path); + List<FileStatus> result = new ArrayList<FileStatus>(); + FileUtils.listStatusRecursively(fs, fileStatus, result); + return result.toArray(new FileStatus[result.size()]); + } + // construct a path pattern (e.g., /*/*) to find all dynamically generated paths StringBuilder sb = new StringBuilder(path.toUri().getPath()); for (int i = 0; i < level; i++) { diff --git metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java index 2fad510..2b86820 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java @@ -189,13 +189,8 @@ public static boolean updateUnpartitionedTableStatsFast(Database db, Table tbl, // Let's try to populate those stats that don't require full scan. LOG.info("Updating table stats fast for " + tbl.getTableName()); FileStatus[] fileStatus = wh.getFileStatusesForUnpartitionedTable(db, tbl); - params.put(StatsSetupConst.NUM_FILES, Integer.toString(fileStatus.length)); - long tableSize = 0L; - for (FileStatus status : fileStatus) { - tableSize += status.getLen(); - } - params.put(StatsSetupConst.TOTAL_SIZE, Long.toString(tableSize)); - LOG.info("Updated size of table " + tbl.getTableName() +" to "+ Long.toString(tableSize)); + populateQuickStats(fileStatus, params); + LOG.info("Updated size of table " + tbl.getTableName() +" to "+ params.get(StatsSetupConst.TOTAL_SIZE)); if(!params.containsKey(StatsSetupConst.STATS_GENERATED_VIA_STATS_TASK)) { // invalidate stats requiring scan since this is a regular ddl alter case for (String stat : StatsSetupConst.statsRequireCompute) { @@ -213,6 +208,20 @@ public static boolean updateUnpartitionedTableStatsFast(Database db, Table tbl, return updated; } + public static void populateQuickStats(FileStatus[] fileStatus, Map<String, String> params) { + int numFiles = 0; + long tableSize = 0L; + for (FileStatus status : fileStatus) { + // don't take directories into account for quick stats + if (!status.isDir()) { + tableSize += status.getLen(); + numFiles += 1; + } + } + params.put(StatsSetupConst.NUM_FILES, Integer.toString(numFiles)); + params.put(StatsSetupConst.TOTAL_SIZE, Long.toString(tableSize)); + } + // check if stats need to be (re)calculated public static boolean requireCalStats(Configuration hiveConf, Partition oldPart, Partition newPart, Table tbl) { @@ -285,13 +294,8 @@ public static boolean updatePartitionStatsFast(Partition part, Warehouse wh, // populate those statistics that don't require a full scan of the data.
LOG.warn("Updating partition stats fast for: " + part.getTableName()); FileStatus[] fileStatus = wh.getFileStatusesForSD(part.getSd()); - params.put(StatsSetupConst.NUM_FILES, Integer.toString(fileStatus.length)); - long partSize = 0L; - for (int i = 0; i < fileStatus.length; i++) { - partSize += fileStatus[i].getLen(); - } - params.put(StatsSetupConst.TOTAL_SIZE, Long.toString(partSize)); - LOG.warn("Updated size to " + Long.toString(partSize)); + populateQuickStats(fileStatus, params); + LOG.warn("Updated size to " + params.get(StatsSetupConst.TOTAL_SIZE)); if(!params.containsKey(StatsSetupConst.STATS_GENERATED_VIA_STATS_TASK)) { // invalidate stats requiring scan since this is a regular ddl alter case for (String stat : StatsSetupConst.statsRequireCompute) { diff --git metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java index 31af90f..588020d 100755 --- metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java @@ -498,7 +498,8 @@ public static String makePartName(List partCols, FileSystem fileSys = path.getFileSystem(conf); /* consider sub-directory created from list bucketing. */ int listBucketingDepth = calculateListBucketingDMLDepth(desc); - return HiveStatsUtils.getFileStatusRecurse(path, (1 + listBucketingDepth), fileSys); + return HiveStatsUtils.getFileStatusRecurse(path, (1 + listBucketingDepth), + fileSys); } catch (IOException ioe) { MetaStoreUtils.logAndThrowMetaException(ioe); } @@ -516,12 +517,16 @@ private static int calculateListBucketingDMLDepth(StorageDescriptor desc) { // list bucketing will introduce more files int listBucketingDepth = 0; SkewedInfo skewedInfo = desc.getSkewedInfo(); + // we do not have to check the size of SkewedColValueLocationMap. + // if skewed column names and value are present and if location map + // is empty then the table/partition directory contains only the default + // list bucketing directory. Refer Hive.constructOneLBLocationMap(), it + // does not store the location of default list bucketing directory. 
if ((skewedInfo != null) && (skewedInfo.getSkewedColNames() != null) && (skewedInfo.getSkewedColNames().size() > 0) && (skewedInfo.getSkewedColValues() != null) && (skewedInfo.getSkewedColValues().size() > 0) - && (skewedInfo.getSkewedColValueLocationMaps() != null) - && (skewedInfo.getSkewedColValueLocationMaps().size() > 0)) { + && (skewedInfo.getSkewedColValueLocationMaps() != null)) { listBucketingDepth = skewedInfo.getSkewedColNames().size(); } return listBucketingDepth; @@ -537,7 +542,7 @@ private static int calculateListBucketingDMLDepth(StorageDescriptor desc) { Path tablePath = getTablePath(db, table.getTableName()); try { FileSystem fileSys = tablePath.getFileSystem(conf); - return HiveStatsUtils.getFileStatusRecurse(tablePath, 1, fileSys); + return HiveStatsUtils.getFileStatusRecurse(tablePath, -1, fileSys); } catch (IOException ioe) { MetaStoreUtils.logAndThrowMetaException(ioe); } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java index bb2a3df..6922f89 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java @@ -30,6 +30,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; @@ -326,12 +327,7 @@ private void updateQuickStats(Warehouse wh, Map<String, String> parameters, * calculate fast statistics */ FileStatus[] partfileStatus = wh.getFileStatusesForSD(desc); - parameters.put(StatsSetupConst.NUM_FILES, String.valueOf(partfileStatus.length)); - long partSize = 0L; - for (int i = 0; i < partfileStatus.length; i++) { - partSize += partfileStatus[i].getLen(); - } - parameters.put(StatsSetupConst.TOTAL_SIZE, String.valueOf(partSize)); + MetaStoreUtils.populateQuickStats(partfileStatus, parameters); } private void clearStats(Map<String, String> parameters) { diff --git ql/src/test/queries/clientnegative/stats_partialscan_autogether.q ql/src/test/queries/clientnegative/stats_partialscan_autogether.q index 47a8148..e12b450 100644 --- ql/src/test/queries/clientnegative/stats_partialscan_autogether.q +++ ql/src/test/queries/clientnegative/stats_partialscan_autogether.q @@ -13,7 +13,7 @@ set mapred.max.split.size=256; CREATE table analyze_srcpart_partial_scan (key STRING, value STRING) partitioned by (ds string, hr string) stored as rcfile; -insert overwrite table analyze_srcpart_partial_scan partition (ds, hr) select * from srcpart where ds is not null; +insert overwrite table analyze_srcpart_partial_scan partition (ds, hr) select * from srcpart where ds is not null order by key; describe formatted analyze_srcpart_partial_scan PARTITION(ds='2008-04-08',hr=11); diff --git ql/src/test/queries/clientpositive/stats_partscan_1.q ql/src/test/queries/clientpositive/stats_partscan_1.q index cdf92e4..2beeba1 100644 --- ql/src/test/queries/clientpositive/stats_partscan_1.q +++ ql/src/test/queries/clientpositive/stats_partscan_1.q @@ -18,7 +18,7 @@ set mapred.max.split.size=256; CREATE table analyze_srcpart_partial_scan (key STRING, value STRING) partitioned by (ds string, hr string) stored as rcfile; -insert overwrite table analyze_srcpart_partial_scan partition (ds, hr) select * from srcpart where ds is not null; +insert overwrite table analyze_srcpart_partial_scan
partition (ds, hr) select * from srcpart where ds is not null order by key; describe formatted analyze_srcpart_partial_scan PARTITION(ds='2008-04-08',hr=11); set hive.stats.autogather=true; diff --git ql/src/test/results/clientnegative/stats_partialscan_autogether.q.out ql/src/test/results/clientnegative/stats_partialscan_autogether.q.out index 1586dea..2fc0de0 100644 --- ql/src/test/results/clientnegative/stats_partialscan_autogether.q.out +++ ql/src/test/results/clientnegative/stats_partialscan_autogether.q.out @@ -15,7 +15,7 @@ stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@analyze_srcpart_partial_scan -PREHOOK: query: insert overwrite table analyze_srcpart_partial_scan partition (ds, hr) select * from srcpart where ds is not null +PREHOOK: query: insert overwrite table analyze_srcpart_partial_scan partition (ds, hr) select * from srcpart where ds is not null order by key PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 @@ -23,7 +23,7 @@ PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 PREHOOK: Output: default@analyze_srcpart_partial_scan -POSTHOOK: query: insert overwrite table analyze_srcpart_partial_scan partition (ds, hr) select * from srcpart where ds is not null +POSTHOOK: query: insert overwrite table analyze_srcpart_partial_scan partition (ds, hr) select * from srcpart where ds is not null order by key POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 @@ -79,7 +79,7 @@ Partition Parameters: numFiles 1 numRows -1 rawDataSize -1 - totalSize 5293 + totalSize 5077 #### A masked pattern was here #### # Storage Information diff --git ql/src/test/results/clientpositive/auto_join32.q.out ql/src/test/results/clientpositive/auto_join32.q.out index 54141fb..fb35ae4 100644 --- ql/src/test/results/clientpositive/auto_join32.q.out +++ ql/src/test/results/clientpositive/auto_join32.q.out @@ -269,7 +269,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: s - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 0 Data size: 16 Basic stats: PARTIAL Column stats: NONE Sorted Merge Bucket Map Join Operator condition map: Inner Join 0 to 1 diff --git ql/src/test/results/clientpositive/bucketizedhiveinputformat.q.out ql/src/test/results/clientpositive/bucketizedhiveinputformat.q.out index d14347e..6e84a37 100644 --- ql/src/test/results/clientpositive/bucketizedhiveinputformat.q.out +++ ql/src/test/results/clientpositive/bucketizedhiveinputformat.q.out @@ -176,9 +176,9 @@ STAGE PLANS: Map Operator Tree: TableScan alias: t2 - Statistics: Num rows: 0 Data size: 80294704 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 79536648 Basic stats: PARTIAL Column stats: COMPLETE Select Operator - Statistics: Num rows: 0 Data size: 80294704 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 79536648 Basic stats: PARTIAL Column stats: COMPLETE Group By Operator aggregations: count(1) mode: hash diff --git ql/src/test/results/clientpositive/groupby_sort_1.q.out ql/src/test/results/clientpositive/groupby_sort_1.q.out index 7aa3f92..6f929ad 100644 --- ql/src/test/results/clientpositive/groupby_sort_1.q.out +++ ql/src/test/results/clientpositive/groupby_sort_1.q.out @@ -5081,7 +5081,7 @@ STAGE PLANS: columns.types string:string #### A 
masked pattern was here #### name default.t2 - numFiles 1 + numFiles 2 numRows 6 rawDataSize 24 serialization.ddl struct t2 { string key, string val} @@ -5103,7 +5103,7 @@ STAGE PLANS: columns.types string:string #### A masked pattern was here #### name default.t2 - numFiles 1 + numFiles 2 numRows 6 rawDataSize 24 serialization.ddl struct t2 { string key, string val} @@ -5439,7 +5439,7 @@ STAGE PLANS: columns.types string:string #### A masked pattern was here #### name default.t2 - numFiles 1 + numFiles 2 numRows 6 rawDataSize 24 serialization.ddl struct t2 { string key, string val} @@ -5461,7 +5461,7 @@ STAGE PLANS: columns.types string:string #### A masked pattern was here #### name default.t2 - numFiles 1 + numFiles 2 numRows 6 rawDataSize 24 serialization.ddl struct t2 { string key, string val} @@ -5992,7 +5992,7 @@ STAGE PLANS: columns.types string:string #### A masked pattern was here #### name default.t2 - numFiles 1 + numFiles 2 numRows 6 rawDataSize 24 serialization.ddl struct t2 { string key, string val} @@ -6014,7 +6014,7 @@ STAGE PLANS: columns.types string:string #### A masked pattern was here #### name default.t2 - numFiles 1 + numFiles 2 numRows 6 rawDataSize 24 serialization.ddl struct t2 { string key, string val} @@ -6507,7 +6507,7 @@ STAGE PLANS: columns.types string:string #### A masked pattern was here #### name default.t2 - numFiles 1 + numFiles 2 numRows 6 rawDataSize 24 serialization.ddl struct t2 { string key, string val} @@ -6529,7 +6529,7 @@ STAGE PLANS: columns.types string:string #### A masked pattern was here #### name default.t2 - numFiles 1 + numFiles 2 numRows 6 rawDataSize 24 serialization.ddl struct t2 { string key, string val} @@ -7099,7 +7099,7 @@ STAGE PLANS: columns.types string:string #### A masked pattern was here #### name default.t2 - numFiles 1 + numFiles 2 numRows 6 rawDataSize 24 serialization.ddl struct t2 { string key, string val} @@ -7121,7 +7121,7 @@ STAGE PLANS: columns.types string:string #### A masked pattern was here #### name default.t2 - numFiles 1 + numFiles 2 numRows 6 rawDataSize 24 serialization.ddl struct t2 { string key, string val} diff --git ql/src/test/results/clientpositive/groupby_sort_skew_1.q.out ql/src/test/results/clientpositive/groupby_sort_skew_1.q.out index 1c6d233..b8bdc40 100644 --- ql/src/test/results/clientpositive/groupby_sort_skew_1.q.out +++ ql/src/test/results/clientpositive/groupby_sort_skew_1.q.out @@ -5466,7 +5466,7 @@ STAGE PLANS: columns.types string:string #### A masked pattern was here #### name default.t2 - numFiles 1 + numFiles 2 numRows 6 rawDataSize 24 serialization.ddl struct t2 { string key, string val} @@ -5488,7 +5488,7 @@ STAGE PLANS: columns.types string:string #### A masked pattern was here #### name default.t2 - numFiles 1 + numFiles 2 numRows 6 rawDataSize 24 serialization.ddl struct t2 { string key, string val} @@ -5887,7 +5887,7 @@ STAGE PLANS: columns.types string:string #### A masked pattern was here #### name default.t2 - numFiles 1 + numFiles 2 numRows 6 rawDataSize 24 serialization.ddl struct t2 { string key, string val} @@ -5909,7 +5909,7 @@ STAGE PLANS: columns.types string:string #### A masked pattern was here #### name default.t2 - numFiles 1 + numFiles 2 numRows 6 rawDataSize 24 serialization.ddl struct t2 { string key, string val} @@ -6440,7 +6440,7 @@ STAGE PLANS: columns.types string:string #### A masked pattern was here #### name default.t2 - numFiles 1 + numFiles 2 numRows 6 rawDataSize 24 serialization.ddl struct t2 { string key, string val} @@ -6462,7 +6462,7 @@ STAGE 
PLANS: columns.types string:string #### A masked pattern was here #### name default.t2 - numFiles 1 + numFiles 2 numRows 6 rawDataSize 24 serialization.ddl struct t2 { string key, string val} @@ -6955,7 +6955,7 @@ STAGE PLANS: columns.types string:string #### A masked pattern was here #### name default.t2 - numFiles 1 + numFiles 2 numRows 6 rawDataSize 24 serialization.ddl struct t2 { string key, string val} @@ -6977,7 +6977,7 @@ STAGE PLANS: columns.types string:string #### A masked pattern was here #### name default.t2 - numFiles 1 + numFiles 2 numRows 6 rawDataSize 24 serialization.ddl struct t2 { string key, string val} @@ -7547,7 +7547,7 @@ STAGE PLANS: columns.types string:string #### A masked pattern was here #### name default.t2 - numFiles 1 + numFiles 2 numRows 6 rawDataSize 24 serialization.ddl struct t2 { string key, string val} @@ -7569,7 +7569,7 @@ STAGE PLANS: columns.types string:string #### A masked pattern was here #### name default.t2 - numFiles 1 + numFiles 2 numRows 6 rawDataSize 24 serialization.ddl struct t2 { string key, string val} diff --git ql/src/test/results/clientpositive/infer_bucket_sort_list_bucket.q.out ql/src/test/results/clientpositive/infer_bucket_sort_list_bucket.q.out index 0e7adef..24bf7a5 100644 --- ql/src/test/results/clientpositive/infer_bucket_sort_list_bucket.q.out +++ ql/src/test/results/clientpositive/infer_bucket_sort_list_bucket.q.out @@ -150,7 +150,7 @@ Partition Parameters: numFiles 1 numRows 309 rawDataSize 1482 - totalSize 136 + totalSize 1791 #### A masked pattern was here #### # Storage Information diff --git ql/src/test/results/clientpositive/list_bucket_dml_6.q.out ql/src/test/results/clientpositive/list_bucket_dml_6.q.out index db5aea1..84f5c9a 100644 --- ql/src/test/results/clientpositive/list_bucket_dml_6.q.out +++ ql/src/test/results/clientpositive/list_bucket_dml_6.q.out @@ -387,10 +387,10 @@ Protect Mode: None #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE true - numFiles 1 + numFiles 2 numRows 16 rawDataSize 136 - totalSize 102 + totalSize 310 #### A masked pattern was here #### # Storage Information @@ -878,7 +878,7 @@ Partition Parameters: numFiles 1 numRows 16 rawDataSize 136 - totalSize 102 + totalSize 254 #### A masked pattern was here #### # Storage Information @@ -1099,7 +1099,7 @@ STAGE PLANS: serialization.ddl struct list_bucketing_dynamic_part { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - totalSize 102 + totalSize 254 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe diff --git ql/src/test/results/clientpositive/list_bucket_dml_7.q.out ql/src/test/results/clientpositive/list_bucket_dml_7.q.out index 44349d5..a408b66 100644 --- ql/src/test/results/clientpositive/list_bucket_dml_7.q.out +++ ql/src/test/results/clientpositive/list_bucket_dml_7.q.out @@ -335,10 +335,10 @@ Protect Mode: None #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE true - numFiles 1 + numFiles 2 numRows 16 rawDataSize 136 - totalSize 204 + totalSize 310 #### A masked pattern was here #### # Storage Information @@ -826,7 +826,7 @@ Partition Parameters: numFiles 1 numRows 16 rawDataSize 136 - totalSize 136 + totalSize 254 #### A masked pattern was here #### # Storage Information @@ -1047,7 +1047,7 @@ STAGE PLANS: serialization.ddl struct list_bucketing_dynamic_part { string key, string value} serialization.format 1 serialization.lib 
org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - totalSize 136 + totalSize 254 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe diff --git ql/src/test/results/clientpositive/list_bucket_dml_8.q.out ql/src/test/results/clientpositive/list_bucket_dml_8.q.out index 6d59782..e0791d5 100644 --- ql/src/test/results/clientpositive/list_bucket_dml_8.q.out +++ ql/src/test/results/clientpositive/list_bucket_dml_8.q.out @@ -391,10 +391,10 @@ Protect Mode: None #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE true - numFiles 1 + numFiles 2 numRows 16 rawDataSize 136 - totalSize 102 + totalSize 310 #### A masked pattern was here #### # Storage Information @@ -506,8 +506,8 @@ Protect Mode: None Partition Parameters: COLUMN_STATS_ACCURATE true numFiles 3 - numRows 984 - rawDataSize 9488 + numRows 0 + rawDataSize 0 totalSize 10586 #### A masked pattern was here #### @@ -656,7 +656,7 @@ STAGE PLANS: columns.types string:string #### A masked pattern was here #### name default.list_bucketing_dynamic_part - numFiles 1 + numFiles 2 numRows 16 partition_columns ds/hr partition_columns.types string:string @@ -664,7 +664,7 @@ STAGE PLANS: serialization.ddl struct list_bucketing_dynamic_part { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - totalSize 102 + totalSize 310 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe @@ -703,10 +703,10 @@ STAGE PLANS: #### A masked pattern was here #### name default.list_bucketing_dynamic_part numFiles 3 - numRows 984 + numRows 0 partition_columns ds/hr partition_columns.types string:string - rawDataSize 9488 + rawDataSize 0 serialization.ddl struct list_bucketing_dynamic_part { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe diff --git ql/src/test/results/clientpositive/mapjoin_test_outer.q.out ql/src/test/results/clientpositive/mapjoin_test_outer.q.out index b216b27..2ac219d 100644 --- ql/src/test/results/clientpositive/mapjoin_test_outer.q.out +++ ql/src/test/results/clientpositive/mapjoin_test_outer.q.out @@ -1135,7 +1135,7 @@ STAGE PLANS: src1 Fetch Operator limit: -1 - src2 + src3 Fetch Operator limit: -1 Alias -> Map Local Operator Tree: @@ -1152,10 +1152,10 @@ STAGE PLANS: 0 key (type: string) 1 key (type: string) 2 key (type: string) - src2 + src3 TableScan - alias: src2 - Statistics: Num rows: 1 Data size: 13 Basic stats: COMPLETE Column stats: NONE + alias: src3 + Statistics: Num rows: 9 Data size: 40 Basic stats: COMPLETE Column stats: NONE HashTable Sink Operator condition expressions: 0 {key} {value} @@ -1170,8 +1170,8 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: src3 - Statistics: Num rows: 9 Data size: 40 Basic stats: COMPLETE Column stats: NONE + alias: src2 + Statistics: Num rows: 1 Data size: 13 Basic stats: COMPLETE Column stats: NONE Map Join Operator condition map: Right Outer Join0 to 1 diff --git ql/src/test/results/clientpositive/nullgroup3.q.out ql/src/test/results/clientpositive/nullgroup3.q.out index 96b480c..4f6861a 100644 --- ql/src/test/results/clientpositive/nullgroup3.q.out +++ ql/src/test/results/clientpositive/nullgroup3.q.out @@ -127,9 +127,9 @@ STAGE PLANS: Map Operator Tree: TableScan alias: tstparttbl2 - Statistics: Num rows: 0 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 16 Basic stats: PARTIAL Column 
stats: COMPLETE Select Operator - Statistics: Num rows: 0 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 16 Basic stats: PARTIAL Column stats: COMPLETE Group By Operator aggregations: count(1) mode: hash @@ -319,9 +319,9 @@ STAGE PLANS: Map Operator Tree: TableScan alias: tstparttbl2 - Statistics: Num rows: 0 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 16 Basic stats: PARTIAL Column stats: COMPLETE Select Operator - Statistics: Num rows: 0 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 16 Basic stats: PARTIAL Column stats: COMPLETE Group By Operator aggregations: count(1) mode: hash diff --git ql/src/test/results/clientpositive/orc_createas1.q.out ql/src/test/results/clientpositive/orc_createas1.q.out index 6577bf0..4ad42e6 100644 --- ql/src/test/results/clientpositive/orc_createas1.q.out +++ ql/src/test/results/clientpositive/orc_createas1.q.out @@ -178,19 +178,19 @@ STAGE PLANS: Map Operator Tree: TableScan alias: orc_createas1b - Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 88318 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 88318 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + - Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 88318 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: string), _col1 (type: string) Reduce Operator Tree: Extract - Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 88318 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 5 Statistics: Num rows: 5 Data size: 880 Basic stats: COMPLETE Column stats: NONE diff --git ql/src/test/results/clientpositive/ppd_join4.q.out ql/src/test/results/clientpositive/ppd_join4.q.out index eaa131c..2bc09ba 100644 --- ql/src/test/results/clientpositive/ppd_join4.q.out +++ ql/src/test/results/clientpositive/ppd_join4.q.out @@ -55,7 +55,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: test_tbl - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 0 Data size: 8 Basic stats: PARTIAL Column stats: NONE Filter Operator predicate: ((name = 'c') and (id = 'a')) (type: boolean) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE @@ -83,7 +83,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: t3 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 0 Data size: 8 Basic stats: PARTIAL Column stats: NONE Filter Operator predicate: (id = 'a') (type: boolean) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE diff --git ql/src/test/results/clientpositive/select_dummy_source.q.out ql/src/test/results/clientpositive/select_dummy_source.q.out index 207bd21..cb4eb55 100644 --- ql/src/test/results/clientpositive/select_dummy_source.q.out +++ ql/src/test/results/clientpositive/select_dummy_source.q.out @@ -15,14 +15,14 @@ STAGE PLANS: TableScan alias: _dummy_table Row Limit Per Split: 1 - Statistics: Num rows: 0 Data size: 1 
Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 13 Basic stats: PARTIAL Column stats: COMPLETE Select Operator expressions: 'a' (type: string), 100 (type: int) outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 13 Basic stats: PARTIAL Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 13 Basic stats: PARTIAL Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -60,14 +60,14 @@ STAGE PLANS: TableScan alias: _dummy_table Row Limit Per Split: 1 - Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 13 Basic stats: PARTIAL Column stats: COMPLETE Select Operator expressions: (1 + 1) (type: int) outputColumnNames: _col0 - Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 13 Basic stats: PARTIAL Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 13 Basic stats: PARTIAL Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -105,17 +105,17 @@ STAGE PLANS: TableScan alias: _dummy_table Row Limit Per Split: 1 - Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 13 Basic stats: PARTIAL Column stats: COMPLETE Select Operator expressions: array('a','b') (type: array<string>) outputColumnNames: _col0 - Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 13 Basic stats: PARTIAL Column stats: COMPLETE UDTF Operator - Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 13 Basic stats: PARTIAL Column stats: COMPLETE function name: explode File Output Operator compressed: false - Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 13 Basic stats: PARTIAL Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -152,11 +152,11 @@ STAGE PLANS: TableScan alias: _dummy_table Row Limit Per Split: 1 - Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 13 Basic stats: PARTIAL Column stats: COMPLETE Select Operator expressions: 'a' (type: string), 100 (type: int) outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 13 Basic stats: PARTIAL Column stats: COMPLETE ListSink PREHOOK: query: select 'a', 100 @@ -185,11 +185,11 @@ STAGE PLANS: TableScan alias: _dummy_table Row Limit Per Split: 1 - Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 13 Basic stats: PARTIAL Column stats: COMPLETE Select Operator expressions: (1 + 1) (type: int) outputColumnNames: _col0 - Statistics: Num rows: 0 Data size: 1 Basic
stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 13 Basic stats: PARTIAL Column stats: COMPLETE ListSink PREHOOK: query: select 1 + 1 @@ -218,17 +218,17 @@ STAGE PLANS: TableScan alias: _dummy_table Row Limit Per Split: 1 - Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 13 Basic stats: PARTIAL Column stats: COMPLETE Select Operator expressions: array('a','b') (type: array<string>) outputColumnNames: _col0 - Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 13 Basic stats: PARTIAL Column stats: COMPLETE UDTF Operator - Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 13 Basic stats: PARTIAL Column stats: COMPLETE function name: explode File Output Operator compressed: false - Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 13 Basic stats: PARTIAL Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -267,14 +267,14 @@ STAGE PLANS: TableScan alias: _dummy_table Row Limit Per Split: 1 - Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 13 Basic stats: PARTIAL Column stats: COMPLETE Select Operator expressions: (2 + 3) (type: int), (1 + 2) (type: int) outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 13 Basic stats: PARTIAL Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 13 Basic stats: PARTIAL Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git ql/src/test/results/clientpositive/stats_list_bucket.q.out ql/src/test/results/clientpositive/stats_list_bucket.q.out index 6a16149..151e509 100644 --- ql/src/test/results/clientpositive/stats_list_bucket.q.out +++ ql/src/test/results/clientpositive/stats_list_bucket.q.out @@ -166,7 +166,7 @@ Table Parameters: numFiles 4 numRows 500 rawDataSize 4812 - totalSize 408 + totalSize 5522 #### A masked pattern was here #### # Storage Information diff --git ql/src/test/results/clientpositive/stats_partscan_1_23.q.out ql/src/test/results/clientpositive/stats_partscan_1_23.q.out index 06fbf47..126c601 100644 --- ql/src/test/results/clientpositive/stats_partscan_1_23.q.out +++ ql/src/test/results/clientpositive/stats_partscan_1_23.q.out @@ -86,10 +86,10 @@ Protect Mode: None #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE false - numFiles 22 + numFiles 1 numRows -1 rawDataSize -1 - totalSize 6954 + totalSize 5293 #### A masked pattern was here #### # Storage Information @@ -185,10 +185,10 @@ Protect Mode: None #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE true - numFiles 22 + numFiles 1 numRows 500 rawDataSize 4812 - totalSize 6954 + totalSize 5293 #### A masked pattern was here #### # Storage Information @@ -235,10 +235,10 @@ Protect Mode: None #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE false - numFiles 22 + numFiles 1 numRows -1 rawDataSize -1 - 
totalSize 6954 + totalSize 5299 #### A masked pattern was here #### # Storage Information diff --git ql/src/test/results/clientpositive/symlink_text_input_format.q.out ql/src/test/results/clientpositive/symlink_text_input_format.q.out index 7035a35..9091194 100644 --- ql/src/test/results/clientpositive/symlink_text_input_format.q.out +++ ql/src/test/results/clientpositive/symlink_text_input_format.q.out @@ -41,22 +41,22 @@ STAGE PLANS: Map Operator Tree: TableScan alias: symlink_text_input_format - Statistics: Num rows: 0 Data size: 72 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 96 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: key (type: string), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 72 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 96 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ - Statistics: Num rows: 0 Data size: 72 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 96 Basic stats: PARTIAL Column stats: NONE value expressions: _col0 (type: string), _col1 (type: string) Reduce Operator Tree: Extract - Statistics: Num rows: 0 Data size: 72 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 96 Basic stats: PARTIAL Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 0 Data size: 72 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 96 Basic stats: PARTIAL Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -104,22 +104,22 @@ STAGE PLANS: Map Operator Tree: TableScan alias: symlink_text_input_format - Statistics: Num rows: 0 Data size: 72 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 96 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: value (type: string) outputColumnNames: _col0 - Statistics: Num rows: 0 Data size: 72 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 96 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + - Statistics: Num rows: 0 Data size: 72 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 96 Basic stats: PARTIAL Column stats: NONE value expressions: _col0 (type: string) Reduce Operator Tree: Extract - Statistics: Num rows: 0 Data size: 72 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 96 Basic stats: PARTIAL Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 0 Data size: 72 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 96 Basic stats: PARTIAL Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -167,9 +167,9 @@ STAGE PLANS: Map Operator Tree: TableScan alias: symlink_text_input_format - Statistics: Num rows: 0 Data size: 72 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 96 Basic stats: PARTIAL Column stats: COMPLETE Select Operator - Statistics: Num rows: 0 Data size: 72 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 96 Basic stats: PARTIAL Column stats: COMPLETE Group By Operator aggregations: count(1) mode: 
hash diff --git ql/src/test/results/clientpositive/truncate_column_list_bucket.q.out ql/src/test/results/clientpositive/truncate_column_list_bucket.q.out index 090d105..29dded1 100644 --- ql/src/test/results/clientpositive/truncate_column_list_bucket.q.out +++ ql/src/test/results/clientpositive/truncate_column_list_bucket.q.out @@ -150,15 +150,18 @@ STAGE PLANS: partition values: part 1 properties: + COLUMN_STATS_ACCURATE true bucket_count -1 columns key,value + columns.comments columns.types string:string #### A masked pattern was here #### name default.test_tab numFiles 2 - numRows 500 + numRows 0 partition_columns part - rawDataSize 4812 + partition_columns.types string + rawDataSize 0 serialization.ddl struct test_tab { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe @@ -171,18 +174,15 @@ STAGE PLANS: properties: bucket_count -1 columns key,value + columns.comments columns.types string:string #### A masked pattern was here #### name default.test_tab - numFiles 2 - numPartitions 1 - numRows 500 partition_columns part - rawDataSize 4812 + partition_columns.types string serialization.ddl struct test_tab { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - totalSize 1761 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe name: default.test_tab @@ -291,15 +291,18 @@ STAGE PLANS: partition values: part 1 properties: + COLUMN_STATS_ACCURATE true bucket_count -1 columns key,value + columns.comments columns.types string:string #### A masked pattern was here #### name default.test_tab numFiles 2 - numRows 500 + numRows 0 partition_columns part - rawDataSize 4812 + partition_columns.types string + rawDataSize 0 serialization.ddl struct test_tab { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe @@ -312,18 +315,15 @@ STAGE PLANS: properties: bucket_count -1 columns key,value + columns.comments columns.types string:string #### A masked pattern was here #### name default.test_tab - numFiles 2 - numPartitions 1 - numRows 500 partition_columns part - rawDataSize 4812 + partition_columns.types string serialization.ddl struct test_tab { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - totalSize 1761 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe name: default.test_tab diff --git ql/src/test/results/clientpositive/udf_current_database.q.out ql/src/test/results/clientpositive/udf_current_database.q.out index 0eefb30..afbebe5 100644 --- ql/src/test/results/clientpositive/udf_current_database.q.out +++ ql/src/test/results/clientpositive/udf_current_database.q.out @@ -20,14 +20,14 @@ STAGE PLANS: TableScan alias: _dummy_table Row Limit Per Split: 1 - Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 13 Basic stats: PARTIAL Column stats: COMPLETE Select Operator expressions: current_database() (type: string) outputColumnNames: _col0 - Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 13 Basic stats: PARTIAL Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 13 Basic stats: 
PARTIAL Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -71,14 +71,14 @@ STAGE PLANS: TableScan alias: _dummy_table Row Limit Per Split: 1 - Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 13 Basic stats: PARTIAL Column stats: COMPLETE Select Operator expressions: current_database() (type: string) outputColumnNames: _col0 - Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 13 Basic stats: PARTIAL Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 13 Basic stats: PARTIAL Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -118,11 +118,11 @@ STAGE PLANS: TableScan alias: _dummy_table Row Limit Per Split: 1 - Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 13 Basic stats: PARTIAL Column stats: COMPLETE Select Operator expressions: current_database() (type: string) outputColumnNames: _col0 - Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 13 Basic stats: PARTIAL Column stats: COMPLETE ListSink PREHOOK: query: select current_database() @@ -155,11 +155,11 @@ STAGE PLANS: TableScan alias: _dummy_table Row Limit Per Split: 1 - Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 13 Basic stats: PARTIAL Column stats: COMPLETE Select Operator expressions: current_database() (type: string) outputColumnNames: _col0 - Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 13 Basic stats: PARTIAL Column stats: COMPLETE ListSink PREHOOK: query: select current_database() diff --git ql/src/test/results/clientpositive/union_remove_1.q.out ql/src/test/results/clientpositive/union_remove_1.q.out index 286216c..fdfa222 100644 --- ql/src/test/results/clientpositive/union_remove_1.q.out +++ ql/src/test/results/clientpositive/union_remove_1.q.out @@ -205,7 +205,7 @@ Table Parameters: numFiles 2 numRows -1 rawDataSize -1 - totalSize 272 + totalSize 40 #### A masked pattern was here #### # Storage Information diff --git ql/src/test/results/clientpositive/union_remove_10.q.out ql/src/test/results/clientpositive/union_remove_10.q.out index 0fc6c5f..addcf0a 100644 --- ql/src/test/results/clientpositive/union_remove_10.q.out +++ ql/src/test/results/clientpositive/union_remove_10.q.out @@ -268,10 +268,10 @@ Retention: 0 Table Type: MANAGED_TABLE Table Parameters: COLUMN_STATS_ACCURATE false - numFiles 2 + numFiles 3 numRows -1 rawDataSize -1 - totalSize 340 + totalSize 271 #### A masked pattern was here #### # Storage Information diff --git ql/src/test/results/clientpositive/union_remove_12.q.out ql/src/test/results/clientpositive/union_remove_12.q.out index 579f686..50d6fc5 100644 --- ql/src/test/results/clientpositive/union_remove_12.q.out +++ ql/src/test/results/clientpositive/union_remove_12.q.out @@ -228,7 +228,7 @@ Table Parameters: numFiles 2 numRows -1 rawDataSize -1 - totalSize 272 + totalSize 194 #### A masked pattern was here #### # Storage Information diff --git 
ql/src/test/results/clientpositive/union_remove_13.q.out ql/src/test/results/clientpositive/union_remove_13.q.out index 868e029..48747c2 100644 --- ql/src/test/results/clientpositive/union_remove_13.q.out +++ ql/src/test/results/clientpositive/union_remove_13.q.out @@ -251,7 +251,7 @@ Table Parameters: numFiles 2 numRows -1 rawDataSize -1 - totalSize 272 + totalSize 192 #### A masked pattern was here #### # Storage Information diff --git ql/src/test/results/clientpositive/union_remove_14.q.out ql/src/test/results/clientpositive/union_remove_14.q.out index 47a6fa8..ba1f4a9 100644 --- ql/src/test/results/clientpositive/union_remove_14.q.out +++ ql/src/test/results/clientpositive/union_remove_14.q.out @@ -230,7 +230,7 @@ Table Parameters: numFiles 2 numRows -1 rawDataSize -1 - totalSize 272 + totalSize 194 #### A masked pattern was here #### # Storage Information diff --git ql/src/test/results/clientpositive/union_remove_19.q.out ql/src/test/results/clientpositive/union_remove_19.q.out index a9f7fa8..f74d0fa 100644 --- ql/src/test/results/clientpositive/union_remove_19.q.out +++ ql/src/test/results/clientpositive/union_remove_19.q.out @@ -205,7 +205,7 @@ Table Parameters: numFiles 2 numRows -1 rawDataSize -1 - totalSize 272 + totalSize 40 #### A masked pattern was here #### # Storage Information diff --git ql/src/test/results/clientpositive/union_remove_2.q.out ql/src/test/results/clientpositive/union_remove_2.q.out index 2076caf..9754579 100644 --- ql/src/test/results/clientpositive/union_remove_2.q.out +++ ql/src/test/results/clientpositive/union_remove_2.q.out @@ -212,7 +212,7 @@ Table Parameters: numFiles 3 numRows -1 rawDataSize -1 - totalSize 408 + totalSize 68 #### A masked pattern was here #### # Storage Information diff --git ql/src/test/results/clientpositive/union_remove_20.q.out ql/src/test/results/clientpositive/union_remove_20.q.out index 016e23b..173abf8 100644 --- ql/src/test/results/clientpositive/union_remove_20.q.out +++ ql/src/test/results/clientpositive/union_remove_20.q.out @@ -207,7 +207,7 @@ Table Parameters: numFiles 2 numRows -1 rawDataSize -1 - totalSize 272 + totalSize 40 #### A masked pattern was here #### # Storage Information diff --git ql/src/test/results/clientpositive/union_remove_21.q.out ql/src/test/results/clientpositive/union_remove_21.q.out index adfca79..6fea7b1 100644 --- ql/src/test/results/clientpositive/union_remove_21.q.out +++ ql/src/test/results/clientpositive/union_remove_21.q.out @@ -204,7 +204,7 @@ Table Parameters: numFiles 2 numRows -1 rawDataSize -1 - totalSize 272 + totalSize 20 #### A masked pattern was here #### # Storage Information diff --git ql/src/test/results/clientpositive/union_remove_22.q.out ql/src/test/results/clientpositive/union_remove_22.q.out index 9afa79f..d922f15 100644 --- ql/src/test/results/clientpositive/union_remove_22.q.out +++ ql/src/test/results/clientpositive/union_remove_22.q.out @@ -208,7 +208,7 @@ Table Parameters: numFiles 2 numRows -1 rawDataSize -1 - totalSize 272 + totalSize 60 #### A masked pattern was here #### # Storage Information diff --git ql/src/test/results/clientpositive/union_remove_23.q.out ql/src/test/results/clientpositive/union_remove_23.q.out index 38879d2..2c02de8 100644 --- ql/src/test/results/clientpositive/union_remove_23.q.out +++ ql/src/test/results/clientpositive/union_remove_23.q.out @@ -246,7 +246,7 @@ Table Parameters: numFiles 2 numRows -1 rawDataSize -1 - totalSize 272 + totalSize 40 #### A masked pattern was here #### # Storage Information diff --git 
ql/src/test/results/clientpositive/union_remove_24.q.out ql/src/test/results/clientpositive/union_remove_24.q.out index 6a68c3e..d8f21c8 100644 --- ql/src/test/results/clientpositive/union_remove_24.q.out +++ ql/src/test/results/clientpositive/union_remove_24.q.out @@ -203,7 +203,7 @@ Table Parameters: numFiles 2 numRows -1 rawDataSize -1 - totalSize 272 + totalSize 60 #### A masked pattern was here #### # Storage Information diff --git ql/src/test/results/clientpositive/union_remove_4.q.out ql/src/test/results/clientpositive/union_remove_4.q.out index 2669a82..e065fd3 100644 --- ql/src/test/results/clientpositive/union_remove_4.q.out +++ ql/src/test/results/clientpositive/union_remove_4.q.out @@ -249,7 +249,7 @@ Table Parameters: numFiles 2 numRows -1 rawDataSize -1 - totalSize 272 + totalSize 40 #### A masked pattern was here #### # Storage Information diff --git ql/src/test/results/clientpositive/union_remove_5.q.out ql/src/test/results/clientpositive/union_remove_5.q.out index f83a6be..06986a2 100644 --- ql/src/test/results/clientpositive/union_remove_5.q.out +++ ql/src/test/results/clientpositive/union_remove_5.q.out @@ -258,7 +258,7 @@ Table Parameters: numFiles 3 numRows -1 rawDataSize -1 - totalSize 408 + totalSize 68 #### A masked pattern was here #### # Storage Information diff --git ql/src/test/results/clientpositive/union_remove_7.q.out ql/src/test/results/clientpositive/union_remove_7.q.out index 79ae23d..d6aa057 100644 --- ql/src/test/results/clientpositive/union_remove_7.q.out +++ ql/src/test/results/clientpositive/union_remove_7.q.out @@ -209,7 +209,7 @@ Table Parameters: numFiles 2 numRows -1 rawDataSize -1 - totalSize 272 + totalSize 178 #### A masked pattern was here #### # Storage Information diff --git ql/src/test/results/clientpositive/union_remove_8.q.out ql/src/test/results/clientpositive/union_remove_8.q.out index 8851546..88a9834 100644 --- ql/src/test/results/clientpositive/union_remove_8.q.out +++ ql/src/test/results/clientpositive/union_remove_8.q.out @@ -216,7 +216,7 @@ Table Parameters: numFiles 3 numRows -1 rawDataSize -1 - totalSize 408 + totalSize 271 #### A masked pattern was here #### # Storage Information diff --git ql/src/test/results/clientpositive/union_remove_9.q.out ql/src/test/results/clientpositive/union_remove_9.q.out index 8a43863..6adbf0e 100644 --- ql/src/test/results/clientpositive/union_remove_9.q.out +++ ql/src/test/results/clientpositive/union_remove_9.q.out @@ -255,7 +255,7 @@ Table Parameters: numFiles 2 numRows -1 rawDataSize -1 - totalSize 272 + totalSize 192 #### A masked pattern was here #### # Storage Information
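
Note (illustrative, not part of the patch): a minimal sketch of how the two core changes compose. HiveStatsUtils.getFileStatusRecurse() with level = -1 now returns every file and directory under the given path, and the new MetaStoreUtils.populateQuickStats() counts only plain files, so list-bucketing subdirectories no longer inflate numFiles/totalSize. The table location below is hypothetical.

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.HiveStatsUtils;
import org.apache.hadoop.hive.common.StatsSetupConst;
import org.apache.hadoop.hive.metastore.MetaStoreUtils;

public class QuickStatsExample {
  public static void main(String[] args) throws Exception {
    Path tableDir = new Path("/tmp/warehouse/t1"); // hypothetical table location
    FileSystem fs = tableDir.getFileSystem(new Configuration());

    // level = -1: full recursive listing (files and directories), instead of
    // the fixed /*/* pattern used for a non-negative level.
    FileStatus[] statuses = HiveStatsUtils.getFileStatusRecurse(tableDir, -1, fs);

    Map<String, String> params = new HashMap<String, String>();
    MetaStoreUtils.populateQuickStats(statuses, params);

    // Directories in 'statuses' contribute to neither quick stat.
    System.out.println("numFiles  = " + params.get(StatsSetupConst.NUM_FILES));
    System.out.println("totalSize = " + params.get(StatsSetupConst.TOTAL_SIZE));
  }
}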
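
Note (illustrative, not part of the patch): the SkewedInfo state that motivates dropping the size() > 0 check in Warehouse.calculateListBucketingDMLDepth(). Skewed column names and values are set, but the location map is empty because Hive.constructOneLBLocationMap() does not record the default list bucketing directory; the relaxed condition still yields a non-zero depth. The depth test is inlined below because the real method is private to Warehouse.

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import org.apache.hadoop.hive.metastore.api.SkewedInfo;

public class LbDepthExample {
  public static void main(String[] args) {
    SkewedInfo skewedInfo = new SkewedInfo();
    skewedInfo.setSkewedColNames(Arrays.asList("key"));
    skewedInfo.setSkewedColValues(Arrays.asList(Arrays.asList("484")));
    // Empty on purpose: only the default list bucketing directory exists.
    skewedInfo.setSkewedColValueLocationMaps(new HashMap<List<String>, String>());

    // Same test as the patched calculateListBucketingDMLDepth(), inlined.
    int depth = 0;
    if (skewedInfo.getSkewedColNames() != null
        && skewedInfo.getSkewedColNames().size() > 0
        && skewedInfo.getSkewedColValues() != null
        && skewedInfo.getSkewedColValues().size() > 0
        && skewedInfo.getSkewedColValueLocationMaps() != null) {
      depth = skewedInfo.getSkewedColNames().size();
    }
    System.out.println("list bucketing DML depth = " + depth); // prints 1
  }
}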