diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index db942b0..44e3d9e 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1391,9 +1391,6 @@ public void setSparkConfigUpdated(boolean isSparkConfigUpdated) {
         "A lower value for error indicates higher accuracy and a higher compute cost."),
     HIVE_METASTORE_STATS_NDV_DENSITY_FUNCTION("hive.metastore.stats.ndv.densityfunction", false,
         "Whether to use density function to estimate the NDV for the whole table based on the NDV of partitions"),
-    HIVE_STATS_KEY_PREFIX_MAX_LENGTH("hive.stats.key.prefix.max.length", 150,
-        "Determines if when the prefix of the key used for intermediate stats collection\n" +
-        "exceeds a certain length, a hash of the key is used instead. If the value < 0 then hashing"),
     HIVE_STATS_KEY_PREFIX("hive.stats.key.prefix", "", "", true), // internal usage only
     // if length of variable length data type cannot be determined this length will be used.
     HIVE_STATS_MAX_VARIABLE_LENGTH("hive.stats.max.variable.length", 100,
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
index 7459bba..32bfcf5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
@@ -1148,8 +1148,6 @@ private void publishStats() throws HiveException {
     String taskID = Utilities.getTaskIdFromFilename(Utilities.getTaskId(hconf));
     String spSpec = conf.getStaticSpec();
 
-    int maxKeyLength = conf.getMaxStatsKeyPrefixLength();
-
     for (Map.Entry<String, FSPaths> entry : valToPaths.entrySet()) {
       String fspKey = entry.getKey();     // DP/LB
       FSPaths fspValue = entry.getValue();
@@ -1176,7 +1174,7 @@
         // use lowercase table name as prefix here, as StatsTask get table name from metastore to fetch counter.
         String prefix = conf.getTableInfo().getTableName().toLowerCase();
         prefix = Utilities.join(prefix, spSpec, dpSpec);
-        prefix = Utilities.getHashedStatsPrefix(prefix, maxKeyLength);
+        prefix = prefix.endsWith(Path.SEPARATOR) ? prefix : prefix + Path.SEPARATOR;
 
         Map<String, String> statsToPublish = new HashMap<String, String>();
         for (String statType : fspValue.stat.getStoredStats()) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java
index c50d5b6..edf69fe 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java
@@ -28,6 +28,7 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
@@ -155,8 +156,6 @@ private int aggregateStats() {
       String tableFullName = table.getDbName() + "."
           + table.getTableName();
 
-      int maxPrefixLength = StatsFactory.getMaxPrefixLength(conf);
-
       if (partitions == null) {
         org.apache.hadoop.hive.metastore.api.Table tTable = table.getTTable();
         Map<String, String> parameters = tTable.getParameters();
@@ -173,7 +172,7 @@
 
         if (statsAggregator != null) {
           String prefix = getAggregationPrefix(table, null);
-          updateStats(statsAggregator, parameters, prefix, maxPrefixLength, atomic);
+          updateStats(statsAggregator, parameters, prefix, atomic);
         }
 
         updateQuickStats(wh, parameters, tTable.getSd());
@@ -209,7 +208,7 @@
 
           if (statsAggregator != null) {
             String prefix = getAggregationPrefix(table, partn);
-            updateStats(statsAggregator, parameters, prefix, maxPrefixLength, atomic);
+            updateStats(statsAggregator, parameters, prefix, atomic);
           }
 
           updateQuickStats(wh, parameters, tPart.getSd());
@@ -252,7 +251,7 @@ private String getAggregationPrefix(Table table, Partition partition)
       throws MetaException {
     // prefix is of the form dbName.tblName
-    String prefix = table.getDbName()+"."+table.getTableName();
+    String prefix = table.getDbName() + "." + MetaStoreUtils.encodeTableName(table.getTableName());
     if (partition != null) {
       return Utilities.join(prefix, Warehouse.makePartPath(partition.getSpec()));
     }
@@ -301,10 +300,10 @@ private boolean existStats(Map<String, String> parameters) {
   }
 
   private void updateStats(StatsAggregator statsAggregator,
-      Map<String, String> parameters, String prefix, int maxPrefixLength, boolean atomic)
+      Map<String, String> parameters, String prefix, boolean atomic)
       throws HiveException {
 
-    String aggKey = Utilities.getHashedStatsPrefix(prefix, maxPrefixLength);
+    String aggKey = prefix.endsWith(Path.SEPARATOR) ? prefix : prefix + Path.SEPARATOR;
 
     for (String statType : StatsSetupConst.statsRequireCompute) {
       String value = statsAggregator.aggregateStats(aggKey, statType);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java
index 90c83e6..32806dc 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java
@@ -312,8 +312,7 @@ private void publishStats() throws HiveException {
       statsToPublish.clear();
       String prefix = Utilities.join(conf.getStatsAggPrefix(), pspecs);
 
-      int maxKeyLength = conf.getMaxStatsKeyPrefixLength();
-      String key = Utilities.getHashedStatsPrefix(prefix, maxKeyLength);
+      String key = prefix.endsWith(Path.SEPARATOR) ? prefix : prefix + Path.SEPARATOR;
       for(String statType : stats.get(pspecs).getStoredStats()) {
         statsToPublish.put(statType, Long.toString(stats.get(pspecs).getStat(statType)));
       }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
index 4eb46ff..dacb80f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
@@ -3026,31 +3026,6 @@ public static StatsPublisher getStatsPublisher(JobConf jc) {
     return factory == null ? null : factory.getStatsPublisher();
   }
 
-  /**
-   * If statsPrefix's length is greater than maxPrefixLength and maxPrefixLength > 0,
-   * then it returns an MD5 hash of statsPrefix followed by path separator, otherwise
-   * it returns statsPrefix
-   *
-   * @param statsPrefix prefix of stats key
-   * @param maxPrefixLength max length of stats key
-   * @return if the length of prefix is longer than max, return MD5 hashed value of the prefix
-   */
-  public static String getHashedStatsPrefix(String statsPrefix, int maxPrefixLength) {
-    // todo: this might return possibly longer prefix than
-    // maxPrefixLength (if set) when maxPrefixLength - postfixLength < 17,
-    // which would make stat values invalid (especially for 'counter' type)
-    if (maxPrefixLength >= 0 && statsPrefix.length() > maxPrefixLength) {
-      try {
-        MessageDigest digester = MessageDigest.getInstance("MD5");
-        digester.update(statsPrefix.getBytes());
-        return new String(digester.digest()) + Path.SEPARATOR; // 17 byte
-      } catch (NoSuchAlgorithmException e) {
-        throw new RuntimeException(e);
-      }
-    }
-    return statsPrefix.endsWith(Path.SEPARATOR) ? statsPrefix : statsPrefix + Path.SEPARATOR;
-  }
-
   public static String join(String... elements) {
     StringBuilder builder = new StringBuilder();
     for (String element : elements) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkTask.java
index 336d490..31eee45 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkTask.java
@@ -30,6 +30,7 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.Warehouse;
@@ -272,14 +273,13 @@ private void printConfigInfo() throws IOException {
     StatsWork statsWork = statsTask.getWork();
     String tablePrefix = getTablePrefix(statsWork);
     List<Map<String, String>> partitionSpecs = getPartitionSpecs(statsWork);
-    int maxPrefixLength = StatsFactory.getMaxPrefixLength(conf);
 
     if (partitionSpecs == null) {
-      prefixs.add(Utilities.getHashedStatsPrefix(tablePrefix, maxPrefixLength));
+      prefixs.add(tablePrefix.endsWith(Path.SEPARATOR) ? tablePrefix : tablePrefix + Path.SEPARATOR);
     } else {
       for (Map<String, String> partitionSpec : partitionSpecs) {
         String prefixWithPartition = Utilities.join(tablePrefix, Warehouse.makePartPath(partitionSpec));
-        prefixs.add(Utilities.getHashedStatsPrefix(prefixWithPartition, maxPrefixLength));
+        prefixs.add(prefixWithPartition.endsWith(Path.SEPARATOR) ? prefixWithPartition : prefixWithPartition + Path.SEPARATOR);
       }
     }
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanMapper.java b/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanMapper.java
index 8a5360e..09e4a47 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanMapper.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanMapper.java
@@ -24,6 +24,7 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.ErrorMsg;
@@ -153,9 +154,9 @@ private void publishStats() throws HiveException {
       throw new HiveException(ErrorMsg.STATSPUBLISHER_CONNECTION_ERROR.getErrorCodedMsg());
     }
 
-    int maxPrefixLength = StatsFactory.getMaxPrefixLength(jc);
     // construct key used to store stats in intermediate db
-    String key = Utilities.getHashedStatsPrefix(statsAggKeyPrefix, maxPrefixLength);
+    String key = statsAggKeyPrefix.endsWith(Path.SEPARATOR) ? statsAggKeyPrefix : statsAggKeyPrefix
+        + Path.SEPARATOR;
 
     // construct statistics to be stored
     Map<String, String> statsToPublish = new HashMap<String, String>();
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
index 82514d4..ecdaa55 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
@@ -110,7 +110,6 @@
 import org.apache.hadoop.hive.ql.plan.TableDesc;
 import org.apache.hadoop.hive.ql.plan.TableScanDesc;
 import org.apache.hadoop.hive.ql.plan.TezWork;
-import org.apache.hadoop.hive.ql.stats.StatsFactory;
 import org.apache.hadoop.hive.serde2.SerDeException;
 import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
@@ -1498,7 +1497,6 @@ public static void addStatsTask(FileSinkOperator nd, MoveTask mvTask,
     // mark the MapredWork and FileSinkOperator for gathering stats
     nd.getConf().setGatherStats(true);
     nd.getConf().setStatsReliable(hconf.getBoolVar(ConfVars.HIVE_STATS_RELIABLE));
-    nd.getConf().setMaxStatsKeyPrefixLength(StatsFactory.getMaxPrefixLength(hconf));
     // mrWork.addDestinationTable(nd.getConf().getTableInfo().getTableName());
 
     // subscribe feeds from the MoveTask so that MoveTask can forward the list
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteQueryUsingAggregateIndexCtx.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteQueryUsingAggregateIndexCtx.java
index 9acc7b7..d0f28d8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteQueryUsingAggregateIndexCtx.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteQueryUsingAggregateIndexCtx.java
@@ -27,6 +27,7 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.exec.ColumnInfo;
 import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
 import org.apache.hadoop.hive.ql.exec.GroupByOperator;
@@ -180,7 +181,7 @@ private void replaceTableScanProcess(TableScanOperator scanOperator) throws Sema
     TableScanDesc indexTableScanDesc = new TableScanDesc(indexTableHandle);
     indexTableScanDesc.setGatherStats(false);
 
-    String k = indexTableName + Path.SEPARATOR;
+    String k = MetaStoreUtils.encodeTableName(indexTableName) + Path.SEPARATOR;
     indexTableScanDesc.setStatsAggPrefix(k);
     scanOperator.setConf(indexTableScanDesc);
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 1b7873d..0af7c55 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -9491,7 +9491,6 @@ private void setupStats(TableScanDesc tsDesc, QBParseInfo qbp, Table tab, String
     }
     tsDesc.setGatherStats(true);
     tsDesc.setStatsReliable(conf.getBoolVar(HiveConf.ConfVars.HIVE_STATS_RELIABLE));
-    tsDesc.setMaxStatsKeyPrefixLength(StatsFactory.getMaxPrefixLength(conf));
 
     // append additional virtual columns for storing statistics
     Iterator<VirtualColumn> vcs = VirtualColumn.getStatsRegistry(conf).iterator();
@@ -9520,7 +9519,7 @@ private void setupStats(TableScanDesc tsDesc, QBParseInfo qbp, Table tab, String
     // db_name.table_name + partitionSec
     // as the prefix for easy of read during explain and debugging.
     // Currently, partition spec can only be static partition.
-    String k = tblName + Path.SEPARATOR;
+    String k = MetaStoreUtils.encodeTableName(tblName) + Path.SEPARATOR;
     tsDesc.setStatsAggPrefix(tab.getDbName()+"."+k);
 
     // set up WriteEntity for replication
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkUtils.java
index e87701f..40c23a5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkUtils.java
@@ -332,7 +332,6 @@ public void processFileSink(GenSparkProcContext context, FileSinkOperator fileSi
       for (FileSinkOperator fsOp : fileSinkList) {
         fsOp.getConf().setGatherStats(fileSink.getConf().isGatherStats());
         fsOp.getConf().setStatsReliable(fileSink.getConf().isStatsReliable());
-        fsOp.getConf().setMaxStatsKeyPrefixLength(fileSink.getConf().getMaxStatsKeyPrefixLength());
       }
     }
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
index 9d6318a..40a8477 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
@@ -85,7 +85,6 @@
   private boolean statsReliable;
   private ListBucketingCtx lbCtx;
-  private int maxStatsKeyPrefixLength = -1;
   private String statsTmpDir;
 
   private boolean statsCollectRawDataSize;
 
@@ -152,7 +151,6 @@ public Object clone() throws CloneNotSupportedException {
     ret.setParentDir(parentDir);
     ret.setLinkedFileSinkDesc(linkedFileSinkDesc);
     ret.setStatsReliable(statsReliable);
-    ret.setMaxStatsKeyPrefixLength(maxStatsKeyPrefixLength);
     ret.setStatsCollectRawDataSize(statsCollectRawDataSize);
     ret.setDpSortState(dpSortState);
     ret.setWriteType(writeType);
@@ -400,14 +398,6 @@ public void setLinkedFileSinkDesc(List<FileSinkDesc> linkedFileSinkDesc) {
     this.linkedFileSinkDesc = linkedFileSinkDesc;
   }
 
-  public int getMaxStatsKeyPrefixLength() {
-    return maxStatsKeyPrefixLength;
-  }
-
-  public void setMaxStatsKeyPrefixLength(int maxStatsKeyPrefixLength) {
-    this.maxStatsKeyPrefixLength = maxStatsKeyPrefixLength;
-  }
-
   public boolean isStatsCollectRawDataSize() {
     return statsCollectRawDataSize;
   }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java
index b354f98..43bf7c5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java
@@ -28,7 +28,6 @@
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
 import org.apache.hadoop.hive.ql.parse.SemanticAnalyzer;
-import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.parse.TableSample;
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
 
@@ -71,7 +70,6 @@
    */
   private boolean gatherStats;
   private boolean statsReliable;
-  private int maxStatsKeyPrefixLength = -1;
   private String tmpStatsDir;
 
   private ExprNodeGenericFuncDesc filterExpr;
@@ -256,14 +254,6 @@ public void setStatsReliable(boolean statsReliable) {
     this.statsReliable = statsReliable;
   }
 
-  public int getMaxStatsKeyPrefixLength() {
-    return maxStatsKeyPrefixLength;
-  }
-
-  public void setMaxStatsKeyPrefixLength(int maxStatsKeyPrefixLength) {
-    this.maxStatsKeyPrefixLength = maxStatsKeyPrefixLength;
-  }
-
   public void setRowLimit(int rowLimit) {
     this.rowLimit = rowLimit;
   }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsFactory.java
index 9f4ed67..97bad32 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsFactory.java
@@ -29,7 +29,6 @@
 import org.apache.hadoop.util.ReflectionUtils;
 
 import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVESTATSDBCLASS;
-import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_STATS_KEY_PREFIX_MAX_LENGTH;
 
 /**
  * A factory of stats publisher and aggregator implementations of the
@@ -43,16 +42,6 @@
   private Class<? extends StatsAggregator> aggregatorImplementation;
   private final Configuration jobConf;
 
-  public static int getMaxPrefixLength(Configuration conf) {
-
-    if (HiveConf.getVar(conf, HIVESTATSDBCLASS).equalsIgnoreCase(StatDB.fs.name())) {
-      // no limit on prefix for fs.
-      return -1;
-    }
-    int maxPrefixLength = HiveConf.getIntVar(conf, HIVE_STATS_KEY_PREFIX_MAX_LENGTH);
-    return maxPrefixLength;
-  }
-
   public static StatsFactory newFactory(Configuration conf) {
     return newFactory(HiveConf.getVar(conf, HIVESTATSDBCLASS), conf);
   }
diff --git a/ql/src/test/queries/clientpositive/special_character_in_tabnames_1.q b/ql/src/test/queries/clientpositive/special_character_in_tabnames_1.q
index 7867ae1..7a72193 100644
--- a/ql/src/test/queries/clientpositive/special_character_in_tabnames_1.q
+++ b/ql/src/test/queries/clientpositive/special_character_in_tabnames_1.q
@@ -1072,4 +1072,9 @@ insert overwrite table `src/_/cbo` select * from src;
 
 select * from `src/_/cbo` limit 1;
 
-
+drop table `t//`;
+create table `t//` (col string);
+insert into `t//` values(1);
+insert into `t//` values(null);
+analyze table `t//` compute statistics;
+explain select * from `t//`;
diff --git a/ql/src/test/results/clientpositive/special_character_in_tabnames_1.q.out b/ql/src/test/results/clientpositive/special_character_in_tabnames_1.q.out
index bd0088a..cb949e4 100644
--- a/ql/src/test/results/clientpositive/special_character_in_tabnames_1.q.out
+++ b/ql/src/test/results/clientpositive/special_character_in_tabnames_1.q.out
@@ -19548,3 +19548,62 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src/_/cbo
 #### A masked pattern was here ####
 238	val_238
+PREHOOK: query: drop table `t//`
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table `t//`
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table `t//` (col string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@t//
+POSTHOOK: query: create table `t//` (col string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@t//
+PREHOOK: query: insert into `t//` values(1)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__1
+PREHOOK: Output: default@t//
+POSTHOOK: query: insert into `t//` values(1)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__1
+POSTHOOK: Output: default@t//
+POSTHOOK: Lineage: t//.col SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+PREHOOK: query: insert into `t//` values(null)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__2
+PREHOOK: Output: default@t//
+POSTHOOK: query: insert into `t//` values(null)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__2
+POSTHOOK: Output: default@t//
+POSTHOOK: Lineage: t//.col SIMPLE [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+PREHOOK: query: analyze table `t//` compute statistics
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t//
+PREHOOK: Output: default@t//
+POSTHOOK: query: analyze table `t//` compute statistics
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t//
+POSTHOOK: Output: default@t//
+PREHOOK: query: explain select * from `t//`
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from `t//`
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: t//
+          Statistics: Num rows: 2 Data size: 3 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: col (type: string)
+            outputColumnNames: _col0
+            Statistics: Num rows: 2 Data size: 3 Basic stats: COMPLETE Column stats: NONE
+            ListSink
+
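
Reviewer note (not part of the patch above): this change removes the MD5-hash fallback for long stats keys
(Utilities.getHashedStatsPrefix, StatsFactory.getMaxPrefixLength and hive.stats.key.prefix.max.length) and instead
publishes and aggregates statistics under the raw prefix, normalized to end with Path.SEPARATOR. Table names are
passed through MetaStoreUtils.encodeTableName before being embedded in the prefix, so names containing special
characters such as '/' (the `t//` test table) cannot break the key format. Every call site now repeats the same
inline ternary; the sketch below shows that normalization as a standalone helper. The class and method names
(StatsKeyUtils, ensureTrailingSeparator) are hypothetical and not part of the patch; only
org.apache.hadoop.fs.Path.SEPARATOR is an API the patch actually uses.

    // Hypothetical helper, shown only to document the key format the patch establishes.
    import org.apache.hadoop.fs.Path;

    public final class StatsKeyUtils {

      private StatsKeyUtils() {
        // utility class, no instances
      }

      // Returns the prefix unchanged if it already ends with the path separator ("/"),
      // otherwise appends one, e.g. "default.tab1" becomes "default.tab1/".
      public static String ensureTrailingSeparator(String prefix) {
        return prefix.endsWith(Path.SEPARATOR) ? prefix : prefix + Path.SEPARATOR;
      }
    }

With such a helper, FileSinkOperator, StatsTask, TableScanOperator, SparkTask and PartialScanMapper could all share
one call instead of repeating the ternary; the patch keeps the inline form at each call site.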